Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 80a40e43 authored by Lars Ellenberg, committed by Philipp Reisner
Browse files

drbd: reduce code duplication when receiving data requests



also canonicalize the return values of read_for_csum
and drbd_rs_begin_io to return -ESOMETHING, or 0 for success.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
parent 1d7734a0
Loading
Loading
Loading
Loading
+5 −7
Original line number Original line Diff line number Diff line
@@ -1119,7 +1119,7 @@ static int _is_in_al(struct drbd_conf *mdev, unsigned int enr)
 * @mdev:	DRBD device.
 * @mdev:	DRBD device.
 * @sector:	The sector number.
 * @sector:	The sector number.
 *
 *
 * This functions sleeps on al_wait. Returns 1 on success, 0 if interrupted.
 * This functions sleeps on al_wait. Returns 0 on success, -EINTR if interrupted.
 */
 */
int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
{
{
@@ -1130,10 +1130,10 @@ int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
	sig = wait_event_interruptible(mdev->al_wait,
	sig = wait_event_interruptible(mdev->al_wait,
			(bm_ext = _bme_get(mdev, enr)));
			(bm_ext = _bme_get(mdev, enr)));
	if (sig)
	if (sig)
		return 0;
		return -EINTR;


	if (test_bit(BME_LOCKED, &bm_ext->flags))
	if (test_bit(BME_LOCKED, &bm_ext->flags))
		return 1;
		return 0;


	for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
	for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
		sig = wait_event_interruptible(mdev->al_wait,
		sig = wait_event_interruptible(mdev->al_wait,
@@ -1146,13 +1146,11 @@ int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
				wake_up(&mdev->al_wait);
				wake_up(&mdev->al_wait);
			}
			}
			spin_unlock_irq(&mdev->al_lock);
			spin_unlock_irq(&mdev->al_lock);
			return 0;
			return -EINTR;
		}
		}
	}
	}

	set_bit(BME_LOCKED, &bm_ext->flags);
	set_bit(BME_LOCKED, &bm_ext->flags);

	return 0;
	return 1;
}
}


/**
/**
+12 −34
Original line number Original line Diff line number Diff line
@@ -2068,21 +2068,12 @@ static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
	case P_DATA_REQUEST:
	case P_DATA_REQUEST:
		e->w.cb = w_e_end_data_req;
		e->w.cb = w_e_end_data_req;
		fault_type = DRBD_FAULT_DT_RD;
		fault_type = DRBD_FAULT_DT_RD;
		break;
		/* application IO, don't drbd_rs_begin_io */
		goto submit;

	case P_RS_DATA_REQUEST:
	case P_RS_DATA_REQUEST:
		e->w.cb = w_e_end_rsdata_req;
		e->w.cb = w_e_end_rsdata_req;
		fault_type = DRBD_FAULT_RS_RD;
		fault_type = DRBD_FAULT_RS_RD;
		/* Eventually this should become asynchronously. Currently it
		 * blocks the whole receiver just to delay the reading of a
		 * resync data block.
		 * the drbd_work_queue mechanism is made for this...
		 */
		if (!drbd_rs_begin_io(mdev, sector)) {
			/* we have been interrupted,
			 * probably connection lost! */
			D_ASSERT(signal_pending(current));
			goto out_free_e;
		}
		break;
		break;


	case P_OV_REPLY:
	case P_OV_REPLY:
@@ -2108,13 +2099,8 @@ static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
		} else if (h->command == P_OV_REPLY) {
		} else if (h->command == P_OV_REPLY) {
			e->w.cb = w_e_end_ov_reply;
			e->w.cb = w_e_end_ov_reply;
			dec_rs_pending(mdev);
			dec_rs_pending(mdev);
			break;
			/* drbd_rs_begin_io done when we sent this request */
		}
			goto submit;

		if (!drbd_rs_begin_io(mdev, sector)) {
			/* we have been interrupted, probably connection lost! */
			D_ASSERT(signal_pending(current));
			goto out_free_e;
		}
		}
		break;
		break;


@@ -2133,31 +2119,23 @@ static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
		}
		}
		e->w.cb = w_e_end_ov_req;
		e->w.cb = w_e_end_ov_req;
		fault_type = DRBD_FAULT_RS_RD;
		fault_type = DRBD_FAULT_RS_RD;
		/* Eventually this should become asynchronous. Currently it
		 * blocks the whole receiver just to delay the reading of a
		 * resync data block.
		 * the drbd_work_queue mechanism is made for this...
		 */
		if (!drbd_rs_begin_io(mdev, sector)) {
			/* we have been interrupted,
			 * probably connection lost! */
			D_ASSERT(signal_pending(current));
			goto out_free_e;
		}
		break;
		break;



	default:
	default:
		dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
		dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
		    cmdname(h->command));
		    cmdname(h->command));
		fault_type = DRBD_FAULT_MAX;
		fault_type = DRBD_FAULT_MAX;
		goto out_free_e;
	}
	}


	spin_lock_irq(&mdev->req_lock);
	if (drbd_rs_begin_io(mdev, e->sector))
	list_add(&e->w.list, &mdev->read_ee);
		goto out_free_e;
	spin_unlock_irq(&mdev->req_lock);


submit:
	inc_unacked(mdev);
	inc_unacked(mdev);
	spin_lock_irq(&mdev->req_lock);
	list_add_tail(&e->w.list, &mdev->read_ee);
	spin_unlock_irq(&mdev->req_lock);


	if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
	if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
		return TRUE;
		return TRUE;
+13 −9
Original line number Original line Diff line number Diff line
@@ -374,26 +374,26 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
	struct drbd_epoch_entry *e;
	struct drbd_epoch_entry *e;


	if (!get_ldev(mdev))
	if (!get_ldev(mdev))
		return 0;
		return -EIO;


	/* GFP_TRY, because if there is no memory available right now, this may
	/* GFP_TRY, because if there is no memory available right now, this may
	 * be rescheduled for later. It is "only" background resync, after all. */
	 * be rescheduled for later. It is "only" background resync, after all. */
	e = drbd_alloc_ee(mdev, DRBD_MAGIC+0xbeef, sector, size, GFP_TRY);
	e = drbd_alloc_ee(mdev, DRBD_MAGIC+0xbeef, sector, size, GFP_TRY);
	if (!e)
	if (!e)
		goto fail;
		goto defer;


	e->w.cb = w_e_send_csum;
	spin_lock_irq(&mdev->req_lock);
	spin_lock_irq(&mdev->req_lock);
	list_add(&e->w.list, &mdev->read_ee);
	list_add(&e->w.list, &mdev->read_ee);
	spin_unlock_irq(&mdev->req_lock);
	spin_unlock_irq(&mdev->req_lock);


	e->w.cb = w_e_send_csum;
	if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0)
	if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0)
		return 1;
		return 0;


	drbd_free_ee(mdev, e);
	drbd_free_ee(mdev, e);
fail:
defer:
	put_ldev(mdev);
	put_ldev(mdev);
	return 2;
	return -EAGAIN;
}
}


void resync_timer_fn(unsigned long data)
void resync_timer_fn(unsigned long data)
@@ -649,15 +649,19 @@ int w_make_resync_request(struct drbd_conf *mdev,
			size = (capacity-sector)<<9;
			size = (capacity-sector)<<9;
		if (mdev->agreed_pro_version >= 89 && mdev->csums_tfm) {
		if (mdev->agreed_pro_version >= 89 && mdev->csums_tfm) {
			switch (read_for_csum(mdev, sector, size)) {
			switch (read_for_csum(mdev, sector, size)) {
			case 0: /* Disk failure*/
			case -EIO: /* Disk failure */
				put_ldev(mdev);
				put_ldev(mdev);
				return 0;
				return 0;
			case 2: /* Allocation failed */
			case -EAGAIN: /* allocation failed, or ldev busy */
				drbd_rs_complete_io(mdev, sector);
				drbd_rs_complete_io(mdev, sector);
				mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
				mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
				i = rollback_i;
				i = rollback_i;
				goto requeue;
				goto requeue;
			/* case 1: everything ok */
			case 0:
				/* everything ok */
				break;
			default:
				BUG();
			}
			}
		} else {
		} else {
			inc_rs_pending(mdev);
			inc_rs_pending(mdev);