
Commit b0af205a authored by Linus Torvalds
* git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm:
  dm: detect lost queue
  dm: publish dm_vcalloc
  dm: publish dm_table_unplug_all
  dm: publish dm_get_mapinfo
  dm: export struct dm_dev
  dm crypt: avoid unnecessary wait when splitting bio
  dm crypt: tidy ctx pending
  dm crypt: fix async inc_pending
  dm crypt: move dec_pending on error into write_io_submit
  dm crypt: remove inc_pending from write_io_submit
  dm crypt: tidy write loop pending
  dm crypt: tidy crypt alloc
  dm crypt: tidy inc pending
  dm exception store: use chunk_t for_areas
  dm exception store: introduce area_location function
  dm raid1: kcopyd should stop on error if errors handled
  dm mpath: remove is_active from struct dm_path
  dm mpath: use more error codes

Fixed up trivial conflict in drivers/md/dm-mpath.c manually.
parents 73f6aa4d 0c2322e4
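
Several of the dm-crypt patches in this pull ("dm crypt: tidy inc pending", "dm crypt: fix async inc_pending", "dm crypt: tidy write loop pending") converge on one idiom: a per-request atomic "pending" count that is bumped for every outstanding piece of work and completes the request when it drops to zero. The userspace C sketch below illustrates only that idiom; struct io, inc_pending()/dec_pending() and io_complete() are stand-ins, not the dm-crypt API.

/*
 * Minimal userspace sketch of the "pending" reference-count pattern that the
 * dm-crypt series consolidates into crypt_inc_pending()/crypt_dec_pending().
 * All names are illustrative.
 */
#include <stdatomic.h>
#include <stdio.h>

struct io {
	atomic_int pending;	/* outstanding references to this request */
	int error;		/* first error seen, 0 on success */
};

static void io_complete(struct io *io)
{
	/* in dm-crypt this is where the original bio would be ended */
	printf("io done, error=%d\n", io->error);
}

static void inc_pending(struct io *io)
{
	atomic_fetch_add(&io->pending, 1);
}

/* Drop one reference; whoever drops the last one completes the request. */
static void dec_pending(struct io *io)
{
	if (atomic_fetch_sub(&io->pending, 1) == 1)
		io_complete(io);
}

int main(void)
{
	struct io io;

	atomic_init(&io.pending, 1);	/* the submitter holds one reference */
	io.error = 0;

	for (int frag = 0; frag < 3; frag++) {
		inc_pending(&io);	/* one reference per in-flight fragment */
		/* ... fragment would be encrypted and written here ... */
		dec_pending(&io);	/* fragment finished */
	}

	dec_pending(&io);	/* release the submitter's reference */
	return 0;
}

Holding a base reference for the whole submission path is what lets the rewritten kcryptd_crypt_write_convert() in the dm-crypt diff below break out of its loop on error without the request being freed underneath it.
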
drivers/md/dm-crypt.c +66 −43
@@ -333,7 +333,6 @@ static void crypt_convert_init(struct crypt_config *cc,
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
	atomic_set(&ctx->pending, 1);
}

static int crypt_convert_block(struct crypt_config *cc,
@@ -408,6 +407,8 @@ static int crypt_convert(struct crypt_config *cc,
{
	int r;

	atomic_set(&ctx->pending, 1);

	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
	      ctx->idx_out < ctx->bio_out->bi_vcnt) {

@@ -456,9 +457,11 @@ static void dm_crypt_bio_destructor(struct bio *bio)
/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages
 * May return a smaller bio when running out of pages, indicated by
 * *out_of_pages set to 1.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
				      unsigned *out_of_pages)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
@@ -472,11 +475,14 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
		return NULL;

	clone_init(io, clone);
	*out_of_pages = 0;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page)
		if (!page) {
			*out_of_pages = 1;
			break;
		}

		/*
		 * if additional pages cannot be allocated without waiting,
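
The hunk above teaches crypt_alloc_buffer() to report, via *out_of_pages, whether a short allocation was caused by memory pressure rather than by a size restriction, so the caller can decide whether throttling makes sense. A hedged userspace analogue of that allocate-what-you-can strategy, with malloc() standing in for the mempool and every name invented for the demo:

/*
 * Allocate up to nr_wanted fixed-size chunks, stop early on failure, and
 * report the shortfall through *out_of_pages so the caller can choose to
 * back off and retry.  Userspace sketch only; not the dm-crypt code.
 */
#include <stdio.h>
#include <stdlib.h>

#define CHUNK_SIZE	4096
#define MAX_CHUNKS	16

struct buffer {
	void *chunks[MAX_CHUNKS];
	unsigned nr_chunks;
};

static struct buffer *alloc_buffer(unsigned nr_wanted, unsigned *out_of_pages)
{
	struct buffer *buf = calloc(1, sizeof(*buf));

	if (!buf)
		return NULL;

	*out_of_pages = 0;

	for (unsigned i = 0; i < nr_wanted && i < MAX_CHUNKS; i++) {
		void *chunk = malloc(CHUNK_SIZE);

		if (!chunk) {
			*out_of_pages = 1;	/* memory pressure: caller may throttle */
			break;
		}
		buf->chunks[buf->nr_chunks++] = chunk;
	}

	if (!buf->nr_chunks) {			/* got nothing at all */
		free(buf);
		return NULL;
	}
	return buf;				/* possibly smaller than requested */
}

int main(void)
{
	unsigned out_of_pages;
	struct buffer *buf = alloc_buffer(8, &out_of_pages);

	if (!buf)
		return 1;
	printf("got %u of 8 chunks%s\n", buf->nr_chunks,
	       out_of_pages ? " (memory pressure)" : "");
	for (unsigned i = 0; i < buf->nr_chunks; i++)
		free(buf->chunks[i]);
	free(buf);
	return 0;
}
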
@@ -517,6 +523,27 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
	}
}

static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
					  struct bio *bio, sector_t sector)
{
	struct crypt_config *cc = ti->private;
	struct dm_crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	atomic_set(&io->pending, 0);

	return io;
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->pending);
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
@@ -591,7 +618,7 @@ static void kcryptd_io_read(struct dm_crypt_io *io)
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

	atomic_inc(&io->pending);
	crypt_inc_pending(io);

	/*
	 * The block layer might modify the bvec array, so always
@@ -653,6 +680,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		io->error = -EIO;
		crypt_dec_pending(io);
		return;
	}

@@ -664,28 +692,34 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,

	if (async)
		kcryptd_queue_io(io);
	else {
		atomic_inc(&io->pending);
	else
		generic_make_request(clone);
}
}

static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io)
static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	int crypt_finished;
	unsigned out_of_pages = 0;
	unsigned remaining = io->base_bio->bi_size;
	int r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining);
		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
		if (unlikely(!clone)) {
			io->error = -ENOMEM;
			return;
			break;
		}

		io->ctx.bio_out = clone;
@@ -693,37 +727,32 @@ static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io)

		remaining -= clone->bi_size;

		crypt_inc_pending(io);
		r = crypt_convert(cc, &io->ctx);
		crypt_finished = atomic_dec_and_test(&io->ctx.pending);

		if (atomic_dec_and_test(&io->ctx.pending)) {
			/* processed, no running async crypto  */
		/* Encryption was already finished, submit io now */
		if (crypt_finished) {
			kcryptd_crypt_write_io_submit(io, r, 0);
			if (unlikely(r < 0))
				return;
		} else
			atomic_inc(&io->pending);

		/* out of memory -> run queues */
		if (unlikely(remaining)) {
			/* wait for async crypto then reinitialize pending */
			wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
			atomic_set(&io->ctx.pending, 1);
			congestion_wait(WRITE, HZ/100);
		}
	}
			/*
			 * If there was an error, do not try next fragments.
			 * For async, error is processed in async handler.
			 */
			if (unlikely(r < 0))
				break;
		}

static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

		/*
	 * Prevent io from disappearing until this function completes.
		 * Out of memory -> run queues
		 * But don't wait if split was due to the io size restriction
		 */
	atomic_inc(&io->pending);
		if (unlikely(out_of_pages))
			congestion_wait(WRITE, HZ/100);

	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
	kcryptd_crypt_write_convert_loop(io);
		if (unlikely(remaining))
			wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
	}

	crypt_dec_pending(io);
}
@@ -741,7 +770,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
	struct crypt_config *cc = io->target->private;
	int r = 0;

	atomic_inc(&io->pending);
	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);
@@ -1108,15 +1137,9 @@ static void crypt_dtr(struct dm_target *ti)
static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct crypt_config *cc = ti->private;
	struct dm_crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
	io->sector = bio->bi_sector - ti->begin;
	io->error = 0;
	atomic_set(&io->pending, 0);
	io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_queue_io(io);
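
Distinct from io->pending, the conversion context keeps its own ctx->pending count: crypt_convert() now sets it to 1 itself, every block in flight adds one, and the caller's single atomic_dec_and_test() (the crypt_finished test in the new kcryptd_crypt_write_convert()) reveals whether any asynchronous crypto is still outstanding. A minimal userspace sketch of that sync-versus-async detection, with invented names and the asynchronous case only simulated:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct convert_ctx {
	atomic_int pending;	/* 1 caller reference + 1 per block in flight */
};

static void process_block(struct convert_ctx *ctx, bool async)
{
	atomic_fetch_add(&ctx->pending, 1);
	if (!async)
		atomic_fetch_sub(&ctx->pending, 1);	/* completed inline */
	/*
	 * In the asynchronous case the completion callback would drop this
	 * reference later and, if it is the last one, submit the result.
	 */
}

/* Returns true when every block completed synchronously. */
static bool convert(struct convert_ctx *ctx, int nr_blocks, bool async)
{
	atomic_store(&ctx->pending, 1);		/* the caller's own reference */

	for (int i = 0; i < nr_blocks; i++)
		process_block(ctx, async);

	/* in dm-crypt the caller performs this final dec_and_test itself */
	return atomic_fetch_sub(&ctx->pending, 1) == 1;
}

int main(void)
{
	struct convert_ctx ctx;

	printf("all sync:   finished inline = %d\n", convert(&ctx, 4, false));
	printf("with async: finished inline = %d\n", convert(&ctx, 4, true));
	return 0;
}
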
drivers/md/dm-exception-store.c +19 −10
@@ -108,12 +108,12 @@ struct pstore {
	 * Used to keep track of which metadata area the data in
	 * 'chunk' refers to.
	 */
	uint32_t current_area;
	chunk_t current_area;

	/*
	 * The next free chunk for an exception.
	 */
	uint32_t next_free;
	chunk_t next_free;

	/*
	 * The index of next free exception in the current
@@ -175,7 +175,7 @@ static void do_metadata(struct work_struct *work)
/*
 * Read or write a chunk aligned and sized block of data from a device.
 */
static int chunk_io(struct pstore *ps, uint32_t chunk, int rw, int metadata)
static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
{
	struct dm_io_region where = {
		.bdev = ps->snap->cow->bdev,
@@ -208,17 +208,24 @@ static int chunk_io(struct pstore *ps, uint32_t chunk, int rw, int metadata)
	return req.result;
}

/*
 * Convert a metadata area index to a chunk index.
 */
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
	return 1 + ((ps->exceptions_per_area + 1) * area);
}

/*
 * Read or write a metadata area.  Remembering to skip the first
 * chunk which holds the header.
 */
static int area_io(struct pstore *ps, uint32_t area, int rw)
static int area_io(struct pstore *ps, chunk_t area, int rw)
{
	int r;
	uint32_t chunk;
	chunk_t chunk;

	/* convert a metadata area index to a chunk index */
	chunk = 1 + ((ps->exceptions_per_area + 1) * area);
	chunk = area_location(ps, area);

	r = chunk_io(ps, chunk, rw, 0);
	if (r)
@@ -228,7 +235,7 @@ static int area_io(struct pstore *ps, uint32_t area, int rw)
	return 0;
}

static int zero_area(struct pstore *ps, uint32_t area)
static int zero_area(struct pstore *ps, chunk_t area)
{
	memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);
	return area_io(ps, area, WRITE);
@@ -404,7 +411,7 @@ static int insert_exceptions(struct pstore *ps, int *full)

static int read_exceptions(struct pstore *ps)
{
	uint32_t area;
	chunk_t area;
	int r, full = 1;

	/*
@@ -517,6 +524,7 @@ static int persistent_prepare(struct exception_store *store,
{
	struct pstore *ps = get_info(store);
	uint32_t stride;
	chunk_t next_free;
	sector_t size = get_dev_size(store->snap->cow->bdev);

	/* Is there enough room ? */
@@ -530,7 +538,8 @@ static int persistent_prepare(struct exception_store *store,
	 * into account the location of the metadata chunks.
	 */
	stride = (ps->exceptions_per_area + 1);
	if ((++ps->next_free % stride) == 1)
	next_free = ++ps->next_free;
	if (sector_div(next_free, stride) == 1)
		ps->next_free++;

	atomic_inc(&ps->pending_count);
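
The exception-store hunks widen area and chunk indices from uint32_t to chunk_t and factor the area-to-chunk arithmetic into area_location(); sector_div() replaces the '%' operator, which cannot be used directly on a 64-bit chunk_t on 32-bit kernels. A small userspace sketch of the same arithmetic; the value 255 for exceptions_per_area is made up for the demo, and plain '%' stands in for sector_div():

#include <inttypes.h>
#include <stdio.h>

typedef uint64_t chunk_t;

/*
 * Chunk 0 holds the header; after that each area is one metadata chunk
 * followed by exceptions_per_area data chunks.
 */
static chunk_t area_location(chunk_t exceptions_per_area, chunk_t area)
{
	return 1 + (exceptions_per_area + 1) * area;
}

int main(void)
{
	chunk_t exceptions_per_area = 255;	/* demo value */
	chunk_t stride = exceptions_per_area + 1;
	chunk_t next_free = 0;

	for (int area = 0; area < 3; area++)
		printf("metadata for area %d sits in chunk %" PRIu64 "\n",
		       area, area_location(exceptions_per_area, area));

	/*
	 * Allocating the next free chunk: if the new position lands on a
	 * metadata chunk (remainder 1 within the stride), skip past it,
	 * as in the persistent_prepare() change above.
	 */
	for (int i = 0; i < 3; i++) {
		next_free++;
		if (next_free % stride == 1)
			next_free++;
	}
	printf("next_free after 3 allocations: %" PRIu64 "\n", next_free);
	return 0;
}
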
drivers/md/dm-ioctl.c +2 −2
@@ -1131,7 +1131,7 @@ static void retrieve_deps(struct dm_table *table,
	unsigned int count = 0;
	struct list_head *tmp;
	size_t len, needed;
	struct dm_dev *dd;
	struct dm_dev_internal *dd;
	struct dm_target_deps *deps;

	deps = get_result_buffer(param, param_size, &len);
@@ -1157,7 +1157,7 @@ static void retrieve_deps(struct dm_table *table,
	deps->count = count;
	count = 0;
	list_for_each_entry (dd, dm_table_get_devices(table), list)
		deps->dev[count++] = huge_encode_dev(dd->bdev->bd_dev);
		deps->dev[count++] = huge_encode_dev(dd->dm_dev.bdev->bd_dev);

	param->data_size = param->data_start + needed;
}
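
This dm-ioctl hunk follows from "dm: export struct dm_dev": the device-mapper core now embeds the published struct dm_dev inside a private struct dm_dev_internal and reaches the block device through dd->dm_dev.bdev. A rough userspace sketch of that embed-public-struct-in-private-wrapper pattern; the wrapper fields below are invented for illustration and are not the real dm_dev_internal layout:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dm_dev {			/* the part targets are allowed to see */
	int mode;
	char name[16];
};

struct dm_dev_internal {	/* core-private bookkeeping (illustrative) */
	struct dm_dev dm_dev;	/* embedded public part */
	int count;		/* reference count, hidden from targets */
};

int main(void)
{
	struct dm_dev_internal dd = {
		.dm_dev = { .mode = 1, .name = "sda" },
		.count  = 2,
	};
	struct dm_dev *dev = &dd.dm_dev;	/* pointer handed to targets */

	/* Core code recovers its private state from the public pointer. */
	struct dm_dev_internal *priv =
		container_of(dev, struct dm_dev_internal, dm_dev);

	printf("%s refcount=%d\n", priv->dm_dev.name, priv->count);
	return 0;
}
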
drivers/md/dm-mpath.c +19 −16
@@ -30,6 +30,7 @@ struct pgpath {
	struct list_head list;

	struct priority_group *pg;	/* Owning PG */
	unsigned is_active;		/* Path status */
	unsigned fail_count;		/* Cumulative failure count */

	struct dm_path path;
@@ -125,7 +126,7 @@ static struct pgpath *alloc_pgpath(void)
	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

	if (pgpath) {
		pgpath->path.is_active = 1;
		pgpath->is_active = 1;
		INIT_WORK(&pgpath->deactivate_path, deactivate_path);
	}

@@ -575,12 +576,12 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
	/* we need at least a path arg */
	if (as->argc < 1) {
		ti->error = "no device given";
		return NULL;
		return ERR_PTR(-EINVAL);
	}

	p = alloc_pgpath();
	if (!p)
		return NULL;
		return ERR_PTR(-ENOMEM);

	r = dm_get_device(ti, shift(as), ti->begin, ti->len,
			  dm_table_get_mode(ti->table), &p->path.dev);
@@ -608,7 +609,7 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,

 bad:
	free_pgpath(p);
	return NULL;
	return ERR_PTR(r);
}

static struct priority_group *parse_priority_group(struct arg_set *as,
@@ -626,14 +627,14 @@ static struct priority_group *parse_priority_group(struct arg_set *as,

	if (as->argc < 2) {
		as->argc = 0;
		ti->error = "not enough priority group aruments";
		return NULL;
		ti->error = "not enough priority group arguments";
		return ERR_PTR(-EINVAL);
	}

	pg = alloc_priority_group();
	if (!pg) {
		ti->error = "couldn't allocate priority group";
		return NULL;
		return ERR_PTR(-ENOMEM);
	}
	pg->m = m;

@@ -666,8 +667,10 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
		path_args.argv = as->argv;

		pgpath = parse_path(&path_args, &pg->ps, ti);
		if (!pgpath)
		if (IS_ERR(pgpath)) {
			r = PTR_ERR(pgpath);
			goto bad;
		}

		pgpath->pg = pg;
		list_add_tail(&pgpath->list, &pg->pgpaths);
@@ -678,7 +681,7 @@ static struct priority_group *parse_priority_group(struct arg_set *as,

 bad:
	free_priority_group(pg, ti);
	return NULL;
	return ERR_PTR(r);
}

static int parse_hw_handler(struct arg_set *as, struct multipath *m)
@@ -797,8 +800,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
		struct priority_group *pg;

		pg = parse_priority_group(&as, m);
		if (!pg) {
			r = -EINVAL;
		if (IS_ERR(pg)) {
			r = PTR_ERR(pg);
			goto bad;
		}

@@ -864,13 +867,13 @@ static int fail_path(struct pgpath *pgpath)

	spin_lock_irqsave(&m->lock, flags);

	if (!pgpath->path.is_active)
	if (!pgpath->is_active)
		goto out;

	DMWARN("Failing path %s.", pgpath->path.dev->name);

	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
	pgpath->path.is_active = 0;
	pgpath->is_active = 0;
	pgpath->fail_count++;

	m->nr_valid_paths--;
@@ -901,7 +904,7 @@ static int reinstate_path(struct pgpath *pgpath)

	spin_lock_irqsave(&m->lock, flags);

	if (pgpath->path.is_active)
	if (pgpath->is_active)
		goto out;

	if (!pgpath->pg->ps.type->reinstate_path) {
@@ -915,7 +918,7 @@ static int reinstate_path(struct pgpath *pgpath)
	if (r)
		goto out;

	pgpath->path.is_active = 1;
	pgpath->is_active = 1;

	m->current_pgpath = NULL;
	if (!m->nr_valid_paths++ && m->queue_size)
@@ -1303,7 +1306,7 @@ static int multipath_status(struct dm_target *ti, status_type_t type,

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s %s %u ", p->path.dev->name,
				       p->path.is_active ? "A" : "F",
				       p->is_active ? "A" : "F",
				       p->fail_count);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
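
The multipath parsing changes above implement "dm mpath: use more error codes": helpers that used to return NULL now return ERR_PTR(-EINVAL) or ERR_PTR(-ENOMEM), and callers propagate the code with IS_ERR()/PTR_ERR() instead of flattening every failure to -EINVAL. The three macros below mirror include/linux/err.h so the pattern can be tried in userspace; parse_path() here is purely illustrative:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct path {
	const char *name;
};

static struct path *parse_path(int argc, const char **argv)
{
	struct path *p;

	if (argc < 1)
		return ERR_PTR(-EINVAL);	/* bad arguments, not a memory failure */

	p = malloc(sizeof(*p));
	if (!p)
		return ERR_PTR(-ENOMEM);

	p->name = argv[0];
	return p;
}

int main(void)
{
	const char *argv[] = { "8:16" };
	struct path *p = parse_path(0, argv);	/* force the -EINVAL case */

	if (IS_ERR(p)) {
		printf("parse_path failed: %ld\n", PTR_ERR(p));
		p = parse_path(1, argv);	/* retry with an argument */
	}
	if (!IS_ERR(p)) {
		printf("parsed path %s\n", p->name);
		free(p);
	}
	return 0;
}

With this style, multipath_ctr() in the diff above can report an allocation failure as -ENOMEM while still returning -EINVAL for genuine table syntax errors.
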
drivers/md/dm-mpath.h +0 −2
@@ -13,8 +13,6 @@ struct dm_dev;

struct dm_path {
	struct dm_dev *dev;	/* Read-only */
	unsigned is_active;	/* Read-only */

	void *pscontext;	/* For path-selector use */
};
