
Commit 6780139c authored by Andreas Gruenbacher, committed by Philipp Reisner

drbd: Use the right peer device

in w_e_ (peer request) callbacks and in peer request I/O completion handlers

Signed-off-by: Andreas Gruenbacher <agruen@linbit.com>
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
parent 81f0ffd2
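
The motivation behind the change: in the restructured DRBD code a device can have more than one peer device, so first_peer_device(device) only happens to pick the right one when there is exactly one peer. A peer request already records which peer it belongs to, so peer_req->peer_device is the correct handle for reaching the connection. Below is a minimal, self-contained C sketch of that pattern; the struct and function names are simplified stand-ins for illustration, not DRBD's actual definitions.

/* Simplified illustration: picking the peer from the request itself
 * instead of taking the first peer attached to the device. */
#include <stdio.h>

struct connection   { const char *name; };
struct peer_device  { struct connection *connection; };
struct device       { struct peer_device *peer_devices[2]; };
struct peer_request { struct peer_device *peer_device; };

/* stand-in for DRBD's first_peer_device(): always returns peer 0 */
static struct peer_device *first_peer_device(struct device *dev)
{
	return dev->peer_devices[0];
}

int main(void)
{
	struct connection conn_a = { "peer A" }, conn_b = { "peer B" };
	struct peer_device pd_a = { &conn_a }, pd_b = { &conn_b };
	struct device dev = { { &pd_a, &pd_b } };

	/* a peer request that actually belongs to peer B */
	struct peer_request req = { &pd_b };

	/* old pattern: may address the wrong connection */
	printf("first_peer_device:      %s\n", first_peer_device(&dev)->connection->name);
	/* new pattern: always the connection the request came from */
	printf("peer_req->peer_device:  %s\n", req.peer_device->connection->name);
	return 0;
}

Caching peer_device once at the top of each callback also avoids repeating the first_peer_device(device)->connection chain on every use, which is the shape every hunk below follows.
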
+38 −31
@@ -100,7 +100,8 @@ void drbd_md_io_complete(struct bio *bio, int error)
 static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)
 {
 	unsigned long flags = 0;
-	struct drbd_device *device = peer_req->peer_device->device;
+	struct drbd_peer_device *peer_device = peer_req->peer_device;
+	struct drbd_device *device = peer_device->device;
 
 	spin_lock_irqsave(&device->resource->req_lock, flags);
 	device->read_cnt += peer_req->i.size >> 9;
@@ -111,8 +112,7 @@ static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __rele
 		__drbd_chk_io_error(device, DRBD_READ_ERROR);
 	spin_unlock_irqrestore(&device->resource->req_lock, flags);
 
-	drbd_queue_work(&first_peer_device(device)->connection->sender_work,
-			&peer_req->w);
+	drbd_queue_work(&peer_device->connection->sender_work, &peer_req->w);
 	put_ldev(device);
 }
 
@@ -121,7 +121,8 @@ static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __rele
 static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local)
 {
 	unsigned long flags = 0;
-	struct drbd_device *device = peer_req->peer_device->device;
+	struct drbd_peer_device *peer_device = peer_req->peer_device;
+	struct drbd_device *device = peer_device->device;
 	struct drbd_interval i;
 	int do_wake;
 	u64 block_id;
@@ -162,7 +163,7 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel
 	if (do_al_complete_io)
 		drbd_al_complete_io(device, &i);
 
-	wake_asender(first_peer_device(device)->connection);
+	wake_asender(peer_device->connection);
 	put_ldev(device);
 }
 
@@ -334,7 +335,8 @@ void drbd_csum_bio(struct crypto_hash *tfm, struct bio *bio, void *digest)
 static int w_e_send_csum(struct drbd_work *w, int cancel)
 {
 	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
-	struct drbd_device *device = peer_req->peer_device->device;
+	struct drbd_peer_device *peer_device = peer_req->peer_device;
+	struct drbd_device *device = peer_device->device;
 	int digest_size;
 	void *digest;
 	int err = 0;
@@ -345,12 +347,12 @@ static int w_e_send_csum(struct drbd_work *w, int cancel)
 	if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
 		goto out;
 
-	digest_size = crypto_hash_digestsize(first_peer_device(device)->connection->csums_tfm);
+	digest_size = crypto_hash_digestsize(peer_device->connection->csums_tfm);
 	digest = kmalloc(digest_size, GFP_NOIO);
 	if (digest) {
 		sector_t sector = peer_req->i.sector;
 		unsigned int size = peer_req->i.size;
-		drbd_csum_ee(first_peer_device(device)->connection->csums_tfm, peer_req, digest);
+		drbd_csum_ee(peer_device->connection->csums_tfm, peer_req, digest);
 		/* Free peer_req and pages before send.
 		 * In case we block on congestion, we could otherwise run into
 		 * some distributed deadlock, if the other side blocks on
@@ -359,7 +361,7 @@ static int w_e_send_csum(struct drbd_work *w, int cancel)
 		drbd_free_peer_req(device, peer_req);
 		peer_req = NULL;
 		inc_rs_pending(device);
-		err = drbd_send_drequest_csum(first_peer_device(device), sector, size,
+		err = drbd_send_drequest_csum(peer_device, sector, size,
 					      digest, digest_size,
 					      P_CSUM_RS_REQUEST);
 		kfree(digest);
@@ -995,7 +997,8 @@ static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_
 int w_e_end_data_req(struct drbd_work *w, int cancel)
 {
 	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
-	struct drbd_device *device = peer_req->peer_device->device;
+	struct drbd_peer_device *peer_device = peer_req->peer_device;
+	struct drbd_device *device = peer_device->device;
 	int err;
 
 	if (unlikely(cancel)) {
@@ -1005,13 +1008,13 @@ int w_e_end_data_req(struct drbd_work *w, int cancel)
 	}
 
 	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
-		err = drbd_send_block(first_peer_device(device), P_DATA_REPLY, peer_req);
+		err = drbd_send_block(peer_device, P_DATA_REPLY, peer_req);
 	} else {
 		if (__ratelimit(&drbd_ratelimit_state))
 			drbd_err(device, "Sending NegDReply. sector=%llus.\n",
 			    (unsigned long long)peer_req->i.sector);
 
-		err = drbd_send_ack(first_peer_device(device), P_NEG_DREPLY, peer_req);
+		err = drbd_send_ack(peer_device, P_NEG_DREPLY, peer_req);
 	}
 
 	dec_unacked(device);
@@ -1031,7 +1034,8 @@ int w_e_end_data_req(struct drbd_work *w, int cancel)
 int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
 {
 	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
-	struct drbd_device *device = peer_req->peer_device->device;
+	struct drbd_peer_device *peer_device = peer_req->peer_device;
+	struct drbd_device *device = peer_device->device;
 	int err;
 
 	if (unlikely(cancel)) {
@@ -1046,11 +1050,11 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
 	}
 
 	if (device->state.conn == C_AHEAD) {
-		err = drbd_send_ack(first_peer_device(device), P_RS_CANCEL, peer_req);
+		err = drbd_send_ack(peer_device, P_RS_CANCEL, peer_req);
 	} else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
 		if (likely(device->state.pdsk >= D_INCONSISTENT)) {
 			inc_rs_pending(device);
-			err = drbd_send_block(first_peer_device(device), P_RS_DATA_REPLY, peer_req);
+			err = drbd_send_block(peer_device, P_RS_DATA_REPLY, peer_req);
 		} else {
 			if (__ratelimit(&drbd_ratelimit_state))
 				drbd_err(device, "Not sending RSDataReply, "
@@ -1062,7 +1066,7 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
 			drbd_err(device, "Sending NegRSDReply. sector %llus.\n",
 			    (unsigned long long)peer_req->i.sector);
 
-		err = drbd_send_ack(first_peer_device(device), P_NEG_RS_DREPLY, peer_req);
+		err = drbd_send_ack(peer_device, P_NEG_RS_DREPLY, peer_req);
 
 		/* update resync data with failure */
 		drbd_rs_failed_io(device, peer_req->i.sector, peer_req->i.size);
@@ -1080,7 +1084,8 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
 int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
 {
 	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
-	struct drbd_device *device = peer_req->peer_device->device;
+	struct drbd_peer_device *peer_device = peer_req->peer_device;
+	struct drbd_device *device = peer_device->device;
 	struct digest_info *di;
 	int digest_size;
 	void *digest = NULL;
@@ -1103,13 +1108,13 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
 		/* quick hack to try to avoid a race against reconfiguration.
 		 * a real fix would be much more involved,
 		 * introducing more locking mechanisms */
-		if (first_peer_device(device)->connection->csums_tfm) {
-			digest_size = crypto_hash_digestsize(first_peer_device(device)->connection->csums_tfm);
+		if (peer_device->connection->csums_tfm) {
+			digest_size = crypto_hash_digestsize(peer_device->connection->csums_tfm);
 			D_ASSERT(device, digest_size == di->digest_size);
 			digest = kmalloc(digest_size, GFP_NOIO);
 		}
 		if (digest) {
-			drbd_csum_ee(first_peer_device(device)->connection->csums_tfm, peer_req, digest);
+			drbd_csum_ee(peer_device->connection->csums_tfm, peer_req, digest);
 			eq = !memcmp(digest, di->digest, digest_size);
 			kfree(digest);
 		}
@@ -1118,16 +1123,16 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
 			drbd_set_in_sync(device, peer_req->i.sector, peer_req->i.size);
 			/* rs_same_csums unit is BM_BLOCK_SIZE */
 			device->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT;
-			err = drbd_send_ack(first_peer_device(device), P_RS_IS_IN_SYNC, peer_req);
+			err = drbd_send_ack(peer_device, P_RS_IS_IN_SYNC, peer_req);
 		} else {
 			inc_rs_pending(device);
 			peer_req->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
 			peer_req->flags &= ~EE_HAS_DIGEST; /* This peer request no longer has a digest pointer */
 			kfree(di);
-			err = drbd_send_block(first_peer_device(device), P_RS_DATA_REPLY, peer_req);
+			err = drbd_send_block(peer_device, P_RS_DATA_REPLY, peer_req);
 		}
 	} else {
-		err = drbd_send_ack(first_peer_device(device), P_NEG_RS_DREPLY, peer_req);
+		err = drbd_send_ack(peer_device, P_NEG_RS_DREPLY, peer_req);
 		if (__ratelimit(&drbd_ratelimit_state))
 			drbd_err(device, "Sending NegDReply. I guess it gets messy.\n");
 	}
@@ -1143,7 +1148,8 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
 int w_e_end_ov_req(struct drbd_work *w, int cancel)
 {
 	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
-	struct drbd_device *device = peer_req->peer_device->device;
+	struct drbd_peer_device *peer_device = peer_req->peer_device;
+	struct drbd_device *device = peer_device->device;
 	sector_t sector = peer_req->i.sector;
 	unsigned int size = peer_req->i.size;
 	int digest_size;
@@ -1153,7 +1159,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
 	if (unlikely(cancel))
 		goto out;
 
-	digest_size = crypto_hash_digestsize(first_peer_device(device)->connection->verify_tfm);
+	digest_size = crypto_hash_digestsize(peer_device->connection->verify_tfm);
 	digest = kmalloc(digest_size, GFP_NOIO);
 	if (!digest) {
 		err = 1;	/* terminate the connection in case the allocation failed */
@@ -1161,7 +1167,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
 	}
 
 	if (likely(!(peer_req->flags & EE_WAS_ERROR)))
-		drbd_csum_ee(first_peer_device(device)->connection->verify_tfm, peer_req, digest);
+		drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest);
 	else
 		memset(digest, 0, digest_size);
 
@@ -1173,7 +1179,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
 	drbd_free_peer_req(device, peer_req);
 	peer_req = NULL;
 	inc_rs_pending(device);
-	err = drbd_send_drequest_csum(first_peer_device(device), sector, size, digest, digest_size, P_OV_REPLY);
+	err = drbd_send_drequest_csum(peer_device, sector, size, digest, digest_size, P_OV_REPLY);
 	if (err)
 		dec_rs_pending(device);
 	kfree(digest);
@@ -1199,7 +1205,8 @@ void drbd_ov_out_of_sync_found(struct drbd_device *device, sector_t sector, int
 int w_e_end_ov_reply(struct drbd_work *w, int cancel)
 {
 	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
-	struct drbd_device *device = peer_req->peer_device->device;
+	struct drbd_peer_device *peer_device = peer_req->peer_device;
+	struct drbd_device *device = peer_device->device;
 	struct digest_info *di;
 	void *digest;
 	sector_t sector = peer_req->i.sector;
@@ -1224,10 +1231,10 @@ int w_e_end_ov_reply(struct drbd_work *w, int cancel)
 	di = peer_req->digest;
 
 	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
-		digest_size = crypto_hash_digestsize(first_peer_device(device)->connection->verify_tfm);
+		digest_size = crypto_hash_digestsize(peer_device->connection->verify_tfm);
 		digest = kmalloc(digest_size, GFP_NOIO);
 		if (digest) {
-			drbd_csum_ee(first_peer_device(device)->connection->verify_tfm, peer_req, digest);
+			drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest);
 
 			D_ASSERT(device, digest_size == di->digest_size);
 			eq = !memcmp(digest, di->digest, digest_size);
@@ -1246,7 +1253,7 @@ int w_e_end_ov_reply(struct drbd_work *w, int cancel)
 	else
 		ov_out_of_sync_print(device);
 
-	err = drbd_send_ack_ex(first_peer_device(device), P_OV_RESULT, sector, size,
+	err = drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size,
 			       eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);
 
 	dec_unacked(device);