Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a5bc92cd authored by Linus Torvalds
Browse files

Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  io context: fix ref counting
  block: make the end_io functions be non-GPL exports
  block: fix improper kobject release in blk_integrity_unregister
  block: always assign default lock to queues
  mg_disk: Add missing ready status check on mg_write()
  mg_disk: fix issue with data integrity on error in mg_write()
  mg_disk: fix reading invalid status when use polling driver
  mg_disk: remove prohibited sleep operation
parents 6eb80e00 cbb4f264
Loading
Loading
Loading
Loading
+6 −13
Original line number Diff line number Diff line
@@ -575,13 +575,6 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
		return NULL;
	}

	/*
	 * if caller didn't supply a lock, they get per-queue locking with
	 * our embedded lock
	 */
	if (!lock)
		lock = &q->__queue_lock;

	q->request_fn		= rfn;
	q->prep_rq_fn		= NULL;
	q->unplug_fn		= generic_unplug_device;
@@ -2143,7 +2136,7 @@ bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
{
	return blk_end_bidi_request(rq, error, nr_bytes, 0);
}
EXPORT_SYMBOL_GPL(blk_end_request);
EXPORT_SYMBOL(blk_end_request);

/**
 * blk_end_request_all - Helper function for drivers to finish the request.
@@ -2164,7 +2157,7 @@ void blk_end_request_all(struct request *rq, int error)
	pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
	BUG_ON(pending);
}
EXPORT_SYMBOL_GPL(blk_end_request_all);
EXPORT_SYMBOL(blk_end_request_all);

/**
 * blk_end_request_cur - Helper function to finish the current request chunk.
@@ -2182,7 +2175,7 @@ bool blk_end_request_cur(struct request *rq, int error)
{
	return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
}
EXPORT_SYMBOL_GPL(blk_end_request_cur);
EXPORT_SYMBOL(blk_end_request_cur);

/**
 * __blk_end_request - Helper function for drivers to complete the request.
@@ -2201,7 +2194,7 @@ bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
{
	return __blk_end_bidi_request(rq, error, nr_bytes, 0);
}
EXPORT_SYMBOL_GPL(__blk_end_request);
EXPORT_SYMBOL(__blk_end_request);

/**
 * __blk_end_request_all - Helper function for drivers to finish the request.
@@ -2222,7 +2215,7 @@ void __blk_end_request_all(struct request *rq, int error)
	pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
	BUG_ON(pending);
}
EXPORT_SYMBOL_GPL(__blk_end_request_all);
EXPORT_SYMBOL(__blk_end_request_all);

/**
 * __blk_end_request_cur - Helper function to finish the current request chunk.
@@ -2241,7 +2234,7 @@ bool __blk_end_request_cur(struct request *rq, int error)
{
	return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
}
EXPORT_SYMBOL_GPL(__blk_end_request_cur);
EXPORT_SYMBOL(__blk_end_request_cur);

void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
		     struct bio *bio)
+1 −0
Original line number Diff line number Diff line
@@ -379,6 +379,7 @@ void blk_integrity_unregister(struct gendisk *disk)

	kobject_uevent(&bi->kobj, KOBJ_REMOVE);
	kobject_del(&bi->kobj);
	kobject_put(&bi->kobj);
	kmem_cache_free(integrity_cachep, bi);
	disk->integrity = NULL;
}
+7 −0
Original line number Diff line number Diff line
@@ -164,6 +164,13 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)

	blk_set_default_limits(&q->limits);

	/*
	 * If the caller didn't supply a lock, fall back to our embedded
	 * per-queue locks
	 */
	if (!q->queue_lock)
		q->queue_lock = &q->__queue_lock;

	/*
	 * by default assume old behaviour and bounce for any highmem page
	 */
+56 −45
Original line number Diff line number Diff line
@@ -36,7 +36,6 @@

/* Register offsets */
#define MG_BUFF_OFFSET			0x8000
#define MG_STORAGE_BUFFER_SIZE		0x200
#define MG_REG_OFFSET			0xC000
#define MG_REG_FEATURE			(MG_REG_OFFSET + 2)	/* write case */
#define MG_REG_ERROR			(MG_REG_OFFSET + 2)	/* read case */
@@ -219,6 +218,16 @@ static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
	host->error = MG_ERR_NONE;
	expire = jiffies + msecs_to_jiffies(msec);

	/* These 2 times dummy status read prevents reading invalid
	 * status. A very little time (3 times of mflash operating clk)
	 * is required for busy bit is set. Use dummy read instead of
	 * busy wait, because mflash's PLL is machine dependent.
	 */
	if (prv_data->use_polling) {
		status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
		status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
	}

	status = inb((unsigned long)host->dev_base + MG_REG_STATUS);

	do {
@@ -245,8 +254,6 @@ static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
			mg_dump_status("not ready", status, host);
			return MG_ERR_INV_STAT;
		}
		if (prv_data->use_polling)
			msleep(1);

		status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
	} while (time_before(cur_jiffies, expire));
@@ -469,9 +476,18 @@ static unsigned int mg_out(struct mg_host *host,
	return MG_ERR_NONE;
}

/*
 * Transfer one sector from the mflash data buffer into the request's
 * buffer using 16-bit port reads (MG_SECTOR_SIZE >> 1 word transfers;
 * presumably a 512-byte sector -- constant defined elsewhere in this file).
 */
static void mg_read_one(struct mg_host *host, struct request *req)
{
	unsigned long port = (unsigned long)host->dev_base + MG_BUFF_OFFSET;
	u16 *dst = (u16 *)req->buffer;
	u32 word;

	for (word = 0; word < (MG_SECTOR_SIZE >> 1); word++)
		dst[word] = inw(port + (word << 1));
}

static void mg_read(struct request *req)
{
	u32 j;
	struct mg_host *host = req->rq_disk->private_data;

	if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
@@ -482,49 +498,65 @@ static void mg_read(struct request *req)
	       blk_rq_sectors(req), blk_rq_pos(req), req->buffer);

	do {
		u16 *buff = (u16 *)req->buffer;

		if (mg_wait(host, ATA_DRQ,
			    MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return;
		}
		for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
			*buff++ = inw((unsigned long)host->dev_base +
				      MG_BUFF_OFFSET + (j << 1));

		mg_read_one(host, req);

		outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base +
				MG_REG_COMMAND);
	} while (mg_end_request(host, 0, MG_SECTOR_SIZE));
}

/*
 * Transfer one sector from the request's buffer to the mflash data
 * buffer using 16-bit port writes (MG_SECTOR_SIZE >> 1 word transfers;
 * presumably a 512-byte sector -- constant defined elsewhere in this file).
 */
static void mg_write_one(struct mg_host *host, struct request *req)
{
	unsigned long port = (unsigned long)host->dev_base + MG_BUFF_OFFSET;
	u16 *src = (u16 *)req->buffer;
	u32 word;

	for (word = 0; word < (MG_SECTOR_SIZE >> 1); word++)
		outw(src[word], port + (word << 1));
}

static void mg_write(struct request *req)
{
	u32 j;
	struct mg_host *host = req->rq_disk->private_data;
	unsigned int rem = blk_rq_sectors(req);

	if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
	if (mg_out(host, blk_rq_pos(req), rem,
		   MG_CMD_WR, NULL) != MG_ERR_NONE) {
		mg_bad_rw_intr(host);
		return;
	}

	MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
	       blk_rq_sectors(req), blk_rq_pos(req), req->buffer);

	do {
		u16 *buff = (u16 *)req->buffer;
	       rem, blk_rq_pos(req), req->buffer);

	if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
	if (mg_wait(host, ATA_DRQ,
		    MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
		mg_bad_rw_intr(host);
		return;
	}
		for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
			outw(*buff++, (unsigned long)host->dev_base +
				      MG_BUFF_OFFSET + (j << 1));

	do {
		mg_write_one(host, req);

		outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
				MG_REG_COMMAND);

		rem--;
		if (rem > 1 && mg_wait(host, ATA_DRQ,
					MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return;
		} else if (mg_wait(host, MG_STAT_READY,
					MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return;
		}
	} while (mg_end_request(host, 0, MG_SECTOR_SIZE));
}

@@ -532,7 +564,6 @@ static void mg_read_intr(struct mg_host *host)
{
	struct request *req = host->req;
	u32 i;
	u16 *buff;

	/* check status */
	do {
@@ -550,13 +581,7 @@ static void mg_read_intr(struct mg_host *host)
	return;

ok_to_read:
	/* get current segment of request */
	buff = (u16 *)req->buffer;

	/* read 1 sector */
	for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
		*buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
			      (i << 1));
	mg_read_one(host, req);

	MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
	       blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer);
@@ -575,8 +600,7 @@ ok_to_read:
static void mg_write_intr(struct mg_host *host)
{
	struct request *req = host->req;
	u32 i, j;
	u16 *buff;
	u32 i;
	bool rem;

	/* check status */
@@ -597,12 +621,7 @@ static void mg_write_intr(struct mg_host *host)
ok_to_write:
	if ((rem = mg_end_request(host, 0, MG_SECTOR_SIZE))) {
		/* write 1 sector and set handler if remains */
		buff = (u16 *)req->buffer;
		for (j = 0; j < MG_STORAGE_BUFFER_SIZE >> 1; j++) {
			outw(*buff, (unsigned long)host->dev_base +
					MG_BUFF_OFFSET + (j << 1));
			buff++;
		}
		mg_write_one(host, req);
		MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
		       blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
		host->mg_do_intr = mg_write_intr;
@@ -667,9 +686,6 @@ static unsigned int mg_issue_req(struct request *req,
		unsigned int sect_num,
		unsigned int sect_cnt)
{
	u16 *buff;
	u32 i;

	switch (rq_data_dir(req)) {
	case READ:
		if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr)
@@ -693,12 +709,7 @@ static unsigned int mg_issue_req(struct request *req,
			mg_bad_rw_intr(host);
			return host->error;
		}
		buff = (u16 *)req->buffer;
		for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) {
			outw(*buff, (unsigned long)host->dev_base +
					MG_BUFF_OFFSET + (i << 1));
			buff++;
		}
		mg_write_one(host, req);
		mod_timer(&host->timer, jiffies + 3 * HZ);
		outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
				MG_REG_COMMAND);
+1 −1
Original line number Diff line number Diff line
@@ -92,7 +92,7 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
	 * a race).
	 */
	if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
		atomic_long_inc(&ioc->refcount);
		atomic_inc(&ioc->nr_tasks);
		return ioc;
	}