
Commit 8af7c124 authored by Tejun Heo

fscache: convert operation to use workqueue instead of slow-work



Make fscache operations use a workqueue only, instead of a combination
of workqueue and slow-work.  FSCACHE_OP_SLOW is dropped and
FSCACHE_OP_FAST is renamed to FSCACHE_OP_ASYNC, which uses the newly
added fscache_op_wq workqueue to execute op->processor().
fscache_operation_init_slow() is dropped and fscache_operation_init()
now takes the @processor argument directly.
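
As an illustration of the pattern being applied (a minimal sketch, not fscache code; the names example_op, example_op_wq, example_op_init etc. are hypothetical): the operation embeds a work_struct instead of a slow_work, an unbound workqueue provides the processing context, and the work function recovers the operation with container_of() and runs its ->processor():

	#include <linux/module.h>
	#include <linux/workqueue.h>

	struct example_op {
		struct work_struct work;	/* replaces struct slow_work */
		void (*processor)(struct example_op *op);
	};

	static struct workqueue_struct *example_op_wq;

	/* work function: runs ->processor() in workqueue context */
	static void example_op_work_func(struct work_struct *work)
	{
		struct example_op *op = container_of(work, struct example_op, work);

		op->processor(op);
	}

	/* the initializer takes the processor callback directly */
	static void example_op_init(struct example_op *op,
				    void (*processor)(struct example_op *))
	{
		INIT_WORK(&op->work, example_op_work_func);
		op->processor = processor;
	}

	/* queue for asynchronous execution, as FSCACHE_OP_ASYNC now does */
	static bool example_op_enqueue(struct example_op *op)
	{
		return queue_work(example_op_wq, &op->work);
	}

	static int __init example_init(void)
	{
		/* unbound workqueue; max_active of 2 is an arbitrary choice here */
		example_op_wq = alloc_workqueue("example_operation", WQ_UNBOUND, 2);
		return example_op_wq ? 0 : -ENOMEM;
	}

	static void __exit example_exit(void)
	{
		destroy_workqueue(example_op_wq);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");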

* An unbound workqueue is used.

* fscache_retrieval_work() is no longer necessary as OP_ASYNC now does
  the equivalent thing.

* A sysctl, fscache.operation_max_active, is added to control concurrency.
  The default value is nr_cpus clamped between 2 and
  WQ_UNBOUND_MAX_ACTIVE (see the sketch after this list).

* debugfs support is dropped for now.  A tracing-API-based debug
  facility is planned as a replacement.
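
A sysctl handler of this shape can propagate the tunable to the workqueue with workqueue_set_max_active(), using ->extra1 to point at the workqueue whose concurrency the knob controls (as the diff below does with .extra1 = &fscache_op_wq).  This is only a sketch in the style of fscache_max_active_sysctl(), not its verified body; the name example_max_active_sysctl is hypothetical:

	#include <linux/sysctl.h>
	#include <linux/workqueue.h>

	static int example_max_active_sysctl(struct ctl_table *table, int write,
					     void __user *buffer, size_t *lenp,
					     loff_t *ppos)
	{
		struct workqueue_struct **wqp = table->extra1;	/* e.g. &fscache_op_wq */
		unsigned int *datap = table->data;		/* the max_active knob */
		int ret;

		ret = proc_dointvec(table, write, buffer, lenp, ppos);
		if (ret == 0)
			workqueue_set_max_active(*wqp, *datap);
		return ret;
	}

With the table entry below registered under the "fscache" sysctl directory, writing the knob (e.g. via /proc/sys/fscache/operation_max_active) adjusts fscache_op_wq's concurrency at runtime.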

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: David Howells <dhowells@redhat.com>
parent 8b8edefa
fs/cachefiles/rdwr.c  +2 −2
@@ -422,7 +422,7 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
-	op->op.flags |= FSCACHE_OP_FAST;
+	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	pagevec_init(&pagevec, 0);
@@ -729,7 +729,7 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
	pagevec_init(&pagevec, 0);

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
-	op->op.flags |= FSCACHE_OP_FAST;
+	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	INIT_LIST_HEAD(&backpages);
fs/fscache/internal.h  +1 −0
@@ -83,6 +83,7 @@ extern unsigned fscache_defer_create;
extern unsigned fscache_debug;
extern struct kobject *fscache_root;
extern struct workqueue_struct *fscache_object_wq;
+extern struct workqueue_struct *fscache_op_wq;
DECLARE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait);

static inline bool fscache_object_congested(void)
fs/fscache/main.c  +23 −0
@@ -42,11 +42,13 @@ MODULE_PARM_DESC(fscache_debug,

struct kobject *fscache_root;
struct workqueue_struct *fscache_object_wq;
+struct workqueue_struct *fscache_op_wq;

DEFINE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait);

/* these values serve as lower bounds, will be adjusted in fscache_init() */
static unsigned fscache_object_max_active = 4;
+static unsigned fscache_op_max_active = 2;

#ifdef CONFIG_SYSCTL
static struct ctl_table_header *fscache_sysctl_header;
@@ -74,6 +76,14 @@ ctl_table fscache_sysctls[] = {
		.proc_handler	= fscache_max_active_sysctl,
		.extra1		= &fscache_object_wq,
	},
+	{
+		.procname	= "operation_max_active",
+		.data		= &fscache_op_max_active,
+		.maxlen		= sizeof(unsigned),
+		.mode		= 0644,
+		.proc_handler	= fscache_max_active_sysctl,
+		.extra1		= &fscache_op_wq,
+	},
	{}
};

@@ -110,6 +120,16 @@ static int __init fscache_init(void)
	if (!fscache_object_wq)
		goto error_object_wq;

+	fscache_op_max_active =
+		clamp_val(fscache_object_max_active / 2,
+			  fscache_op_max_active, WQ_UNBOUND_MAX_ACTIVE);
+
+	ret = -ENOMEM;
+	fscache_op_wq = alloc_workqueue("fscache_operation", WQ_UNBOUND,
+					fscache_op_max_active);
+	if (!fscache_op_wq)
+		goto error_op_wq;
+
	for_each_possible_cpu(cpu)
		init_waitqueue_head(&per_cpu(fscache_object_cong_wait, cpu));

@@ -152,6 +172,8 @@ error_sysctl:
#endif
	fscache_proc_cleanup();
error_proc:
+	destroy_workqueue(fscache_op_wq);
+error_op_wq:
	destroy_workqueue(fscache_object_wq);
error_object_wq:
	slow_work_unregister_user(THIS_MODULE);
@@ -172,6 +194,7 @@ static void __exit fscache_exit(void)
	kmem_cache_destroy(fscache_cookie_jar);
	unregister_sysctl_table(fscache_sysctl_header);
	fscache_proc_cleanup();
+	destroy_workqueue(fscache_op_wq);
	destroy_workqueue(fscache_object_wq);
	slow_work_unregister_user(THIS_MODULE);
	printk(KERN_NOTICE "FS-Cache: Unloaded\n");
fs/fscache/operation.c  +8 −59
@@ -42,16 +42,12 @@ void fscache_enqueue_operation(struct fscache_operation *op)

	fscache_stat(&fscache_n_op_enqueue);
	switch (op->flags & FSCACHE_OP_TYPE) {
-	case FSCACHE_OP_FAST:
-		_debug("queue fast");
+	case FSCACHE_OP_ASYNC:
+		_debug("queue async");
		atomic_inc(&op->usage);
-		if (!schedule_work(&op->fast_work))
+		if (!queue_work(fscache_op_wq, &op->work))
			fscache_put_operation(op);
		break;
-	case FSCACHE_OP_SLOW:
-		_debug("queue slow");
-		slow_work_enqueue(&op->slow_work);
-		break;
	case FSCACHE_OP_MYTHREAD:
		_debug("queue for caller's attention");
		break;
@@ -455,36 +451,13 @@ void fscache_operation_gc(struct work_struct *work)
}

/*
- * allow the slow work item processor to get a ref on an operation
+ * execute an operation using fs_op_wq to provide processing context -
+ * the caller holds a ref to this object, so we don't need to hold one
 */
-static int fscache_op_get_ref(struct slow_work *work)
+void fscache_op_work_func(struct work_struct *work)
{
	struct fscache_operation *op =
-		container_of(work, struct fscache_operation, slow_work);
-
-	atomic_inc(&op->usage);
-	return 0;
-}
-
-/*
- * allow the slow work item processor to discard a ref on an operation
- */
-static void fscache_op_put_ref(struct slow_work *work)
-{
-	struct fscache_operation *op =
-		container_of(work, struct fscache_operation, slow_work);
-
-	fscache_put_operation(op);
-}
-
-/*
- * execute an operation using the slow thread pool to provide processing context
- * - the caller holds a ref to this object, so we don't need to hold one
- */
-static void fscache_op_execute(struct slow_work *work)
-{
-	struct fscache_operation *op =
-		container_of(work, struct fscache_operation, slow_work);
+		container_of(work, struct fscache_operation, work);
	unsigned long start;

	_enter("{OBJ%x OP%x,%d}",
@@ -494,31 +467,7 @@ static void fscache_op_execute(struct slow_work *work)
	start = jiffies;
	op->processor(op);
	fscache_hist(fscache_ops_histogram, start);
+	fscache_put_operation(op);

	_leave("");
}

-/*
- * describe an operation for slow-work debugging
- */
-#ifdef CONFIG_SLOW_WORK_DEBUG
-static void fscache_op_desc(struct slow_work *work, struct seq_file *m)
-{
-	struct fscache_operation *op =
-		container_of(work, struct fscache_operation, slow_work);
-
-	seq_printf(m, "FSC: OBJ%x OP%x: %s/%s fl=%lx",
-		   op->object->debug_id, op->debug_id,
-		   op->name, op->state, op->flags);
-}
-#endif
-
-const struct slow_work_ops fscache_op_slow_work_ops = {
-	.owner		= THIS_MODULE,
-	.get_ref	= fscache_op_get_ref,
-	.put_ref	= fscache_op_put_ref,
-	.execute	= fscache_op_execute,
-#ifdef CONFIG_SLOW_WORK_DEBUG
-	.desc		= fscache_op_desc,
-#endif
-};
fs/fscache/page.c  +8 −28
@@ -105,7 +105,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,

page_busy:
	/* we might want to wait here, but that could deadlock the allocator as
-	 * the slow-work threads writing to the cache may all end up sleeping
+	 * the work threads writing to the cache may all end up sleeping
	 * on memory allocation */
	fscache_stat(&fscache_n_store_vmscan_busy);
	return false;
@@ -188,9 +188,8 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
		return -ENOMEM;
	}

-	fscache_operation_init(op, NULL);
-	fscache_operation_init_slow(op, fscache_attr_changed_op);
-	op->flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_EXCLUSIVE);
+	fscache_operation_init(op, fscache_attr_changed_op, NULL);
+	op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);
	fscache_set_op_name(op, "Attr");

	spin_lock(&cookie->lock);
@@ -217,24 +216,6 @@ nobufs:
}
EXPORT_SYMBOL(__fscache_attr_changed);

-/*
- * handle secondary execution given to a retrieval op on behalf of the
- * cache
- */
-static void fscache_retrieval_work(struct work_struct *work)
-{
-	struct fscache_retrieval *op =
-		container_of(work, struct fscache_retrieval, op.fast_work);
-	unsigned long start;
-
-	_enter("{OP%x}", op->op.debug_id);
-
-	start = jiffies;
-	op->op.processor(&op->op);
-	fscache_hist(fscache_ops_histogram, start);
-	fscache_put_operation(&op->op);
-}
-
/*
 * release a retrieval op reference
 */
@@ -269,13 +250,12 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
		return NULL;
	}

-	fscache_operation_init(&op->op, fscache_release_retrieval_op);
+	fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op);
	op->op.flags	= FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
	op->mapping	= mapping;
	op->end_io_func	= end_io_func;
	op->context	= context;
	op->start_time	= jiffies;
-	INIT_WORK(&op->op.fast_work, fscache_retrieval_work);
	INIT_LIST_HEAD(&op->to_do);
	fscache_set_op_name(&op->op, "Retr");
	return op;
@@ -795,9 +775,9 @@ int __fscache_write_page(struct fscache_cookie *cookie,
	if (!op)
		goto nomem;

-	fscache_operation_init(&op->op, fscache_release_write_op);
-	fscache_operation_init_slow(&op->op, fscache_write_op);
-	op->op.flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_WAITING);
+	fscache_operation_init(&op->op, fscache_write_op,
+			       fscache_release_write_op);
+	op->op.flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING);
	fscache_set_op_name(&op->op, "Write1");

	ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
@@ -852,7 +832,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
	fscache_stat(&fscache_n_store_ops);
	fscache_stat(&fscache_n_stores_ok);

-	/* the slow work queue now carries its own ref on the object */
+	/* the work queue now carries its own ref on the object */
	fscache_put_operation(&op->op);
	_leave(" = 0");
	return 0;