
Commit 053d8f66 authored by David S. Miller
parents c9cedbba 615cc221
drivers/vhost/vhost.c: +58 −22
@@ -60,22 +60,25 @@ static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
 	return 0;
 }
 
+static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
+{
+	INIT_LIST_HEAD(&work->node);
+	work->fn = fn;
+	init_waitqueue_head(&work->done);
+	work->flushing = 0;
+	work->queue_seq = work->done_seq = 0;
+}
+
 /* Init poll structure */
 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
 		     unsigned long mask, struct vhost_dev *dev)
 {
-	struct vhost_work *work = &poll->work;
-
 	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
 	init_poll_funcptr(&poll->table, vhost_poll_func);
 	poll->mask = mask;
 	poll->dev = dev;
 
-	INIT_LIST_HEAD(&work->node);
-	work->fn = fn;
-	init_waitqueue_head(&work->done);
-	work->flushing = 0;
-	work->queue_seq = work->done_seq = 0;
+	vhost_work_init(&poll->work, fn);
 }
 
 /* Start polling a file. We add ourselves to file's wait queue. The caller must
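The first hunk is pure factoring: initialization of a struct vhost_work moves out of vhost_poll_init() into its own helper, so a work item no longer has to be embedded in a struct vhost_poll. The cgroup-attach hunk further down is the first user of that freedom. A minimal sketch of what the helper newly permits (my_work_fn and example_standalone_work are hypothetical, not part of this commit):

static void my_work_fn(struct vhost_work *work)
{
	/* executed later, on the vhost worker thread */
}

static void example_standalone_work(void)
{
	struct vhost_work w;	/* can even live on the stack */

	vhost_work_init(&w, my_work_fn);
	/* queue/flush it with the dev-level helpers added below */
}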
@@ -95,35 +98,38 @@ void vhost_poll_stop(struct vhost_poll *poll)
 	remove_wait_queue(poll->wqh, &poll->wait);
 }
 
-/* Flush any work that has been scheduled. When calling this, don't hold any
- * locks that are also used by the callback. */
-void vhost_poll_flush(struct vhost_poll *poll)
+static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 {
-	struct vhost_work *work = &poll->work;
 	unsigned seq;
 	int left;
 	int flushing;
 
-	spin_lock_irq(&poll->dev->work_lock);
+	spin_lock_irq(&dev->work_lock);
 	seq = work->queue_seq;
 	work->flushing++;
-	spin_unlock_irq(&poll->dev->work_lock);
+	spin_unlock_irq(&dev->work_lock);
 	wait_event(work->done, ({
-		   spin_lock_irq(&poll->dev->work_lock);
+		   spin_lock_irq(&dev->work_lock);
 		   left = seq - work->done_seq <= 0;
-		   spin_unlock_irq(&poll->dev->work_lock);
+		   spin_unlock_irq(&dev->work_lock);
 		   left;
 	}));
-	spin_lock_irq(&poll->dev->work_lock);
+	spin_lock_irq(&dev->work_lock);
 	flushing = --work->flushing;
-	spin_unlock_irq(&poll->dev->work_lock);
+	spin_unlock_irq(&dev->work_lock);
 	BUG_ON(flushing < 0);
 }
 
-void vhost_poll_queue(struct vhost_poll *poll)
+/* Flush any work that has been scheduled. When calling this, don't hold any
+ * locks that are also used by the callback. */
+void vhost_poll_flush(struct vhost_poll *poll)
+{
+	vhost_work_flush(poll->dev, &poll->work);
+}
+
+static inline void vhost_work_queue(struct vhost_dev *dev,
+				    struct vhost_work *work)
 {
-	struct vhost_dev *dev = poll->dev;
-	struct vhost_work *work = &poll->work;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev->work_lock, flags);
@@ -135,6 +141,11 @@ void vhost_poll_queue(struct vhost_poll *poll)
 	spin_unlock_irqrestore(&dev->work_lock, flags);
 }
 
+void vhost_poll_queue(struct vhost_poll *poll)
+{
+	vhost_work_queue(poll->dev, &poll->work);
+}
+
 static void vhost_vq_reset(struct vhost_dev *dev,
 			   struct vhost_virtqueue *vq)
 {
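After these two hunks, vhost_poll_flush() and vhost_poll_queue() are one-line wrappers over helpers keyed on (struct vhost_dev *, struct vhost_work *). The contract of the pair, sketched on the assumption that the elided middle of vhost_work_queue() still publishes the work, bumps queue_seq and wakes the worker as before (the example_ function is illustrative, not from this commit):

/* Run `work` on dev's worker thread and wait for it to finish:
 * vhost_work_queue() publishes the work and wakes the worker;
 * vhost_work_flush() sleeps until work->done_seq has caught up with
 * the queue_seq observed at flush time. */
static void example_run_on_worker(struct vhost_dev *dev,
				  struct vhost_work *work)
{
	vhost_work_queue(dev, work);
	vhost_work_flush(dev, work);
}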
@@ -236,6 +247,29 @@ long vhost_dev_check_owner(struct vhost_dev *dev)
 	return dev->mm == current->mm ? 0 : -EPERM;
 }
 
+struct vhost_attach_cgroups_struct {
+	struct vhost_work work;
+	struct task_struct *owner;
+	int ret;
+};
+
+static void vhost_attach_cgroups_work(struct vhost_work *work)
+{
+	struct vhost_attach_cgroups_struct *s;
+	s = container_of(work, struct vhost_attach_cgroups_struct, work);
+	s->ret = cgroup_attach_task_all(s->owner, current);
+}
+
+static int vhost_attach_cgroups(struct vhost_dev *dev)
+{
+	struct vhost_attach_cgroups_struct attach;
+	attach.owner = current;
+	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
+	vhost_work_queue(dev, &attach.work);
+	vhost_work_flush(dev, &attach.work);
+	return attach.ret;
+}
+
 /* Caller should have device mutex */
 static long vhost_dev_set_owner(struct vhost_dev *dev)
 {
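This hunk is the point of the series: the worker thread has to end up in the cgroups of the device owner, and the owner cannot name the worker's `current` from outside, so the attach runs as a work item on the worker itself. Queue-then-flush turns that into a synchronous call with a stack-allocated work. An annotated timeline of the identifiers above (the owner is the userspace process, typically QEMU, calling in under the device mutex):

/* owner thread                          vhost worker thread
 * ------------                          -------------------
 * attach.owner = current;
 * vhost_work_init(&attach.work, ...);
 * vhost_work_queue(dev, &attach.work);
 *                                       vhost_attach_cgroups_work():
 *                                         here current == the worker, so
 *                                         cgroup_attach_task_all(s->owner,
 *                                                                current)
 *                                         moves the worker into every
 *                                         cgroup of the owner
 * vhost_work_flush(dev, &attach.work);  (owner sleeps until it has run)
 * return attach.ret;
 */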
@@ -255,14 +289,16 @@ static long vhost_dev_set_owner(struct vhost_dev *dev)
 	}
 
 	dev->worker = worker;
-	err = cgroup_attach_task_current_cg(worker);
+	wake_up_process(worker);	/* avoid contributing to loadavg */
+
+	err = vhost_attach_cgroups(dev);
 	if (err)
 		goto err_cgroup;
-	wake_up_process(worker);	/* avoid contributing to loadavg */
 
 	return 0;
 err_cgroup:
 	kthread_stop(worker);
+	dev->worker = NULL;
 err_worker:
 	if (dev->mm)
 		mmput(dev->mm);
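The reordering in vhost_dev_set_owner() is load-bearing, not cosmetic: vhost_attach_cgroups() flushes a work item on the worker, so the worker must be woken *before* the attach, or the flush would sleep forever on a kthread still asleep in its post-creation wait. The added dev->worker = NULL keeps the error path from leaving a dangling pointer to a stopped thread. A condensed sketch of the resulting order (the kthread_create() line is context this hunk does not show, reproduced on the assumption it is unchanged):

worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
dev->worker = worker;
wake_up_process(worker);	 /* worker can now run queued works */
err = vhost_attach_cgroups(dev); /* queue + flush: needs a live worker */
if (err)
	goto err_cgroup;	 /* kthread_stop(worker); dev->worker = NULL; */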
include/linux/cgroup.h: +10 −1
@@ -578,7 +578,11 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
 void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
 int cgroup_scan_tasks(struct cgroup_scanner *scan);
 int cgroup_attach_task(struct cgroup *, struct task_struct *);
-int cgroup_attach_task_current_cg(struct task_struct *);
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
+static inline int cgroup_attach_task_current_cg(struct task_struct *tsk)
+{
+	return cgroup_attach_task_all(current, tsk);
+}
 
 /*
  * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
@@ -636,6 +640,11 @@ static inline int cgroupstats_build(struct cgroupstats *stats,
 }
 
 /* No cgroups - nothing to do */
+static inline int cgroup_attach_task_all(struct task_struct *from,
+					 struct task_struct *t)
+{
+	return 0;
+}
 static inline int cgroup_attach_task_current_cg(struct task_struct *t)
 {
 	return 0;
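For header consumers the change is additive: one new primitive, a source-compatible inline wrapper for the old name, and return 0 stubs when CONFIG_CGROUPS is off. A hedged usage sketch in the style of the vhost caller (the function name, thread name and threadfn parameter are hypothetical):

#include <linux/cgroup.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/* Spawn a kthread and pull it into every cgroup of the calling task.
 * Builds the same with or without CONFIG_CGROUPS thanks to the stubs. */
static int example_spawn_in_callers_cgroups(int (*threadfn)(void *data),
					    void *data)
{
	struct task_struct *t = kthread_create(threadfn, data, "example");
	int err;

	if (IS_ERR(t))
		return PTR_ERR(t);
	err = cgroup_attach_task_all(current, t);	/* the new API */
	if (err) {
		kthread_stop(t);
		return err;
	}
	wake_up_process(t);
	return 0;
}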
kernel/cgroup.c: +5 −4
@@ -1791,10 +1791,11 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 }
 
 /**
- * cgroup_attach_task_current_cg - attach task 'tsk' to current task's cgroup
+ * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
+ * @from: attach to all cgroups of a given task
  * @tsk: the task to be attached
  */
-int cgroup_attach_task_current_cg(struct task_struct *tsk)
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 {
 	struct cgroupfs_root *root;
 	struct cgroup *cur_cg;
@@ -1802,7 +1803,7 @@ int cgroup_attach_task_current_cg(struct task_struct *tsk)
 
 	cgroup_lock();
 	for_each_active_root(root) {
-		cur_cg = task_cgroup_from_root(current, root);
+		cur_cg = task_cgroup_from_root(from, root);
 		retval = cgroup_attach_task(cur_cg, tsk);
 		if (retval)
 			break;
@@ -1811,7 +1812,7 @@ int cgroup_attach_task_current_cg(struct task_struct *tsk)
 
 	return retval;
 }
-EXPORT_SYMBOL_GPL(cgroup_attach_task_current_cg);
+EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
 
 /*
  * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex
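The implementation change is mechanical: read the cgroup of `from` rather than of `current` in each mounted hierarchy. One behavior worth stating explicitly, because vhost's error handling depends on it: the walk stops at the first failing hierarchy and does not roll back earlier attachments, so a nonzero return means "partially attached" and the caller's recovery is to stop the task, as vhost_dev_set_owner() does. An annotated trace for two hypothetical mounted hierarchies:

/* for_each_active_root() with, say, cpu and memory hierarchies mounted:
 *
 *   cur_cg = task_cgroup_from_root(from, cpu_root);
 *   cgroup_attach_task(cur_cg, tsk);     // tsk joins from's cpu cgroup
 *
 *   cur_cg = task_cgroup_from_root(from, memory_root);
 *   cgroup_attach_task(cur_cg, tsk);     // fails? break: the cpu
 *                                        // attachment stays in place
 */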