Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d5b27b01 authored by Lars Ellenberg, committed by Philipp Reisner
Browse files

drbd: move the drbd_work_queue from drbd_socket to drbd_connection



cherry-picked and adapted from drbd 9 devel branch
In 8.4, we don't distinguish between "resource work" and "connection
work" yet; we have one worker for both, as we still have only one connection.

We only ever used the "data.work",
no need to keep the "meta.work" around.

Move tconn->data.work to tconn->sender_work.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
parent 8c0785a5
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -485,7 +485,7 @@ static int al_write_transaction(struct drbd_conf *mdev)
	init_completion(&al_work.event);
	al_work.w.cb = w_al_write_transaction;
	al_work.w.mdev = mdev;
	drbd_queue_work_front(&mdev->tconn->data.work, &al_work.w);
	drbd_queue_work_front(&mdev->tconn->sender_work, &al_work.w);
	wait_for_completion(&al_work.event);

	return al_work.err;
@@ -645,7 +645,7 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
				udw->enr = ext->lce.lc_number;
				udw->w.cb = w_update_odbm;
				udw->w.mdev = mdev;
				drbd_queue_work_front(&mdev->tconn->data.work, &udw->w);
				drbd_queue_work_front(&mdev->tconn->sender_work, &udw->w);
			} else {
				dev_warn(DEV, "Could not kmalloc an udw\n");
			}
+2 −2
Original line number Diff line number Diff line
@@ -740,7 +740,6 @@ struct drbd_work_queue {
};

struct drbd_socket {
	struct drbd_work_queue work;
	struct mutex mutex;
	struct socket    *socket;
	/* this way we get our
@@ -871,6 +870,7 @@ struct drbd_tconn { /* is a resource from the config file */
	struct drbd_thread worker;
	struct drbd_thread asender;
	cpumask_var_t cpu_mask;
	struct drbd_work_queue sender_work;
};

struct drbd_conf {
@@ -2228,7 +2228,7 @@ static inline void dec_ap_bio(struct drbd_conf *mdev)
		wake_up(&mdev->misc_wait);
	if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
			drbd_queue_work(&mdev->tconn->data.work, &mdev->bm_io_work.w);
			drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w);
	}
}

+6 −10
Original line number Diff line number Diff line
@@ -379,7 +379,7 @@ void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
					set_bit(CREATE_BARRIER, &tconn->flags);
				}

				drbd_queue_work(&tconn->data.work, &b->w);
				drbd_queue_work(&tconn->sender_work, &b->w);
			}
			pn = &b->next;
		} else {
@@ -2173,8 +2173,7 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
	D_ASSERT(list_empty(&mdev->read_ee));
	D_ASSERT(list_empty(&mdev->net_ee));
	D_ASSERT(list_empty(&mdev->resync_reads));
	D_ASSERT(list_empty(&mdev->tconn->data.work.q));
	D_ASSERT(list_empty(&mdev->tconn->meta.work.q));
	D_ASSERT(list_empty(&mdev->tconn->sender_work.q));
	D_ASSERT(list_empty(&mdev->resync_work.list));
	D_ASSERT(list_empty(&mdev->unplug_work.list));
	D_ASSERT(list_empty(&mdev->go_diskless.list));
@@ -2349,7 +2348,6 @@ void drbd_minor_destroy(struct kref *kref)

	/* paranoia asserts */
	D_ASSERT(mdev->open_cnt == 0);
	D_ASSERT(list_empty(&mdev->tconn->data.work.q));
	/* end paranoia asserts */

	/* cleanup stuff that may have been allocated during
@@ -2700,10 +2698,8 @@ struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts)
	init_waitqueue_head(&tconn->ping_wait);
	idr_init(&tconn->volumes);

	drbd_init_workqueue(&tconn->data.work);
	drbd_init_workqueue(&tconn->sender_work);
	mutex_init(&tconn->data.mutex);

	drbd_init_workqueue(&tconn->meta.work);
	mutex_init(&tconn->meta.mutex);

	drbd_thread_init(tconn, &tconn->receiver, drbdd_init, "receiver");
@@ -3356,7 +3352,7 @@ void drbd_go_diskless(struct drbd_conf *mdev)
{
	D_ASSERT(mdev->state.disk == D_FAILED);
	if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
		drbd_queue_work(&mdev->tconn->data.work, &mdev->go_diskless);
		drbd_queue_work(&mdev->tconn->sender_work, &mdev->go_diskless);
}

/**
@@ -3394,7 +3390,7 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev,
	set_bit(BITMAP_IO, &mdev->flags);
	if (atomic_read(&mdev->ap_bio_cnt) == 0) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
			drbd_queue_work(&mdev->tconn->data.work, &mdev->bm_io_work.w);
			drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w);
	}
	spin_unlock_irq(&mdev->tconn->req_lock);
}
@@ -3452,7 +3448,7 @@ static void md_sync_timer_fn(unsigned long data)
{
	struct drbd_conf *mdev = (struct drbd_conf *) data;

	drbd_queue_work_front(&mdev->tconn->data.work, &mdev->md_sync_work);
	drbd_queue_work_front(&mdev->tconn->sender_work, &mdev->md_sync_work);
}

static int w_md_sync(struct drbd_work *w, int unused)
+2 −2
Original line number Diff line number Diff line
@@ -4413,7 +4413,7 @@ void conn_flush_workqueue(struct drbd_tconn *tconn)
	barr.w.cb = w_prev_work_done;
	barr.w.tconn = tconn;
	init_completion(&barr.done);
	drbd_queue_work(&tconn->data.work, &barr.w);
	drbd_queue_work(&tconn->sender_work, &barr.w);
	wait_for_completion(&barr.done);
}

@@ -5147,7 +5147,7 @@ static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi)
		if (w) {
			w->cb = w_ov_finished;
			w->mdev = mdev;
			drbd_queue_work_front(&mdev->tconn->data.work, w);
			drbd_queue_work(&mdev->tconn->sender_work, w);
		} else {
			dev_err(DEV, "kmalloc(w) failed.");
			ov_out_of_sync_print(mdev);
+6 −6
Original line number Diff line number Diff line
@@ -170,7 +170,7 @@ static void queue_barrier(struct drbd_conf *mdev)
	 * dec_ap_pending will be done in got_BarrierAck
	 * or (on connection loss) in tl_clear.  */
	inc_ap_pending(mdev);
	drbd_queue_work(&tconn->data.work, &b->w);
	drbd_queue_work(&tconn->sender_work, &b->w);
	set_bit(CREATE_BARRIER, &tconn->flags);
}

@@ -483,7 +483,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
		D_ASSERT((req->rq_state & RQ_LOCAL_MASK) == 0);
		req->rq_state |= RQ_NET_QUEUED;
		req->w.cb = w_send_read_req;
		drbd_queue_work(&mdev->tconn->data.work, &req->w);
		drbd_queue_work(&mdev->tconn->sender_work, &req->w);
		break;

	case QUEUE_FOR_NET_WRITE:
@@ -527,7 +527,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
		D_ASSERT(req->rq_state & RQ_NET_PENDING);
		req->rq_state |= RQ_NET_QUEUED;
		req->w.cb =  w_send_dblock;
		drbd_queue_work(&mdev->tconn->data.work, &req->w);
		drbd_queue_work(&mdev->tconn->sender_work, &req->w);

		/* close the epoch, in case it outgrew the limit */
		rcu_read_lock();
@@ -542,7 +542,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
	case QUEUE_FOR_SEND_OOS:
		req->rq_state |= RQ_NET_QUEUED;
		req->w.cb =  w_send_out_of_sync;
		drbd_queue_work(&mdev->tconn->data.work, &req->w);
		drbd_queue_work(&mdev->tconn->sender_work, &req->w);
		break;

	case READ_RETRY_REMOTE_CANCELED:
@@ -682,7 +682,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,

		get_ldev(mdev);
		req->w.cb = w_restart_disk_io;
		drbd_queue_work(&mdev->tconn->data.work, &req->w);
		drbd_queue_work(&mdev->tconn->sender_work, &req->w);
		break;

	case RESEND:
@@ -692,7 +692,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
		   During connection handshake, we ensure that the peer was not rebooted. */
		if (!(req->rq_state & RQ_NET_OK)) {
			if (req->w.cb) {
				drbd_queue_work(&mdev->tconn->data.work, &req->w);
				drbd_queue_work(&mdev->tconn->sender_work, &req->w);
				rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
			}
			break;
Loading