
Commit 3e1e21c7 authored by Linus Torvalds

Merge branch 'for-4.5/nvme' of git://git.kernel.dk/linux-block

Pull NVMe updates from Jens Axboe:
 "Last branch for this series is the nvme changes.  It's in a separate
  branch to avoid splitting too much between core and NVMe changes,
  since NVMe is still helping drive some blk-mq changes.  That said, not
  a huge amount of core changes in here.  The grunt of the work is the
  continued split of the code"

* 'for-4.5/nvme' of git://git.kernel.dk/linux-block: (67 commits)
  uapi: update install list after nvme.h rename
  NVMe: Export NVMe attributes to sysfs group
  NVMe: Shutdown controller only for power-off
  NVMe: IO queue deletion re-write
  NVMe: Remove queue freezing on resets
  NVMe: Use a retryable error code on reset
  NVMe: Fix admin queue ring wrap
  nvme: make SG_IO support optional
  nvme: fixes for NVME_IOCTL_IO_CMD on the char device
  nvme: synchronize access to ctrl->namespaces
  nvme: Move nvme_freeze/unfreeze_queues to nvme core
  PCI/AER: include header file
  NVMe: Export namespace attributes to sysfs
  NVMe: Add pci error handlers
  block: remove REQ_NO_TIMEOUT flag
  nvme: merge iod and cmd_info
  nvme: meta_sg doesn't have to be an array
  nvme: properly free resources for cancelled command
  nvme: simplify completion handling
  nvme: special case AEN requests
  ...
parents 0a13daed a9cf8284
block/bio-integrity.c  +6 −7
@@ -66,7 +66,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
 	}
 
 	if (unlikely(!bip))
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	memset(bip, 0, sizeof(*bip));
 
@@ -89,7 +89,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
 	return bip;
 err:
 	mempool_free(bip, bs->bio_integrity_pool);
-	return NULL;
+	return ERR_PTR(-ENOMEM);
 }
 EXPORT_SYMBOL(bio_integrity_alloc);
 
@@ -298,10 +298,10 @@ int bio_integrity_prep(struct bio *bio)
 
 	/* Allocate bio integrity payload and integrity vectors */
 	bip = bio_integrity_alloc(bio, GFP_NOIO, nr_pages);
-	if (unlikely(bip == NULL)) {
+	if (IS_ERR(bip)) {
 		printk(KERN_ERR "could not allocate data integrity bioset\n");
 		kfree(buf);
-		return -EIO;
+		return PTR_ERR(bip);
 	}
 
 	bip->bip_flags |= BIP_BLOCK_INTEGRITY;
@@ -465,9 +465,8 @@ int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
 	BUG_ON(bip_src == NULL);
 
 	bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);
-
-	if (bip == NULL)
-		return -EIO;
+	if (IS_ERR(bip))
+		return PTR_ERR(bip);
 
 	memcpy(bip->bip_vec, bip_src->bip_vec,
 	       bip_src->bip_vcnt * sizeof(struct bio_vec));
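
The hunks above switch bio_integrity_alloc() from returning bare NULL to the kernel's error-pointer convention, so callers can propagate the real failure reason instead of collapsing everything into -EIO. Below is a minimal userspace sketch of that idiom, assuming nothing beyond libc: the ERR_PTR/IS_ERR/PTR_ERR macros are re-declared here (mirroring include/linux/err.h) only so the example compiles outside the kernel, and payload_alloc() is a hypothetical stand-in for bio_integrity_alloc().

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-ins for the kernel's error-pointer macros: an
 * error code is folded into an otherwise-invalid pointer value so a
 * caller receives either a valid object or the reason for failure,
 * never an ambiguous NULL. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct payload { int nvecs; };

/* Like the patched bio_integrity_alloc(): never returns NULL,
 * always a valid pointer or an ERR_PTR value. */
static struct payload *payload_alloc(int nvecs)
{
	struct payload *p = malloc(sizeof(*p));

	if (!p)
		return ERR_PTR(-ENOMEM);
	p->nvecs = nvecs;
	return p;
}

int main(void)
{
	struct payload *p = payload_alloc(4);

	if (IS_ERR(p)) {
		/* Propagate the actual cause, not a blanket -EIO. */
		fprintf(stderr, "alloc failed: %ld\n", PTR_ERR(p));
		return 1;
	}
	printf("allocated payload with %d vecs\n", p->nvecs);
	free(p);
	return 0;
}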
block/blk-core.c  +8 −0
@@ -680,6 +680,13 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref)
 	wake_up_all(&q->mq_freeze_wq);
 }
 
+static void blk_rq_timed_out_timer(unsigned long data)
+{
+	struct request_queue *q = (struct request_queue *)data;
+
+	kblockd_schedule_work(&q->timeout_work);
+}
+
 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
 	struct request_queue *q;
@@ -841,6 +848,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
 	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
 		goto fail;
 
+	INIT_WORK(&q->timeout_work, blk_timeout_work);
 	q->request_fn		= rfn;
 	q->prep_rq_fn		= NULL;
 	q->unprep_rq_fn		= NULL;
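
The new timer callback does nothing but kick a work item: timers fire in softirq context where sleeping is forbidden, while the workqueue handler runs in process context and may block (which the reworked timeout code needs for blk_queue_enter()). The following stand-alone module is a minimal sketch of that deferral pattern using the 4.5-era timer API; the demo_* names are illustrative, not part of the patch.

/* Sketch: defer timeout handling from timer (softirq) context to a
 * work item (process context). */
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct demo {
	struct timer_list timer;
	struct work_struct timeout_work;
};

static struct demo demo;

static void demo_timeout_work(struct work_struct *work)
{
	struct demo *d = container_of(work, struct demo, timeout_work);

	/* Process context: sleeping locks, blk_queue_enter(), etc.
	 * are fair game here, unlike in the timer callback. */
	pr_info("timeout work ran for %p\n", d);
}

static void demo_timer_fn(unsigned long data)
{
	struct demo *d = (struct demo *)data;

	schedule_work(&d->timeout_work);	/* defer, don't sleep */
}

static int __init demo_init(void)
{
	INIT_WORK(&demo.timeout_work, demo_timeout_work);
	setup_timer(&demo.timer, demo_timer_fn, (unsigned long)&demo);
	mod_timer(&demo.timer, jiffies + HZ);
	return 0;
}

static void __exit demo_exit(void)
{
	del_timer_sync(&demo.timer);
	flush_work(&demo.timeout_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");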
block/blk-mq.c  +8 −5
@@ -603,8 +603,6 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 			blk_mq_complete_request(rq, -EIO);
 		return;
 	}
-	if (rq->cmd_flags & REQ_NO_TIMEOUT)
-		return;
 
 	if (time_after_eq(jiffies, rq->deadline)) {
 		if (!blk_mark_rq_complete(rq))
@@ -615,15 +613,19 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 	}
 }
 
-static void blk_mq_rq_timer(unsigned long priv)
+static void blk_mq_timeout_work(struct work_struct *work)
 {
-	struct request_queue *q = (struct request_queue *)priv;
+	struct request_queue *q =
+		container_of(work, struct request_queue, timeout_work);
 	struct blk_mq_timeout_data data = {
 		.next		= 0,
 		.next_set	= 0,
 	};
 	int i;
 
+	if (blk_queue_enter(q, true))
+		return;
+
 	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
 
 	if (data.next_set) {
@@ -638,6 +640,7 @@ static void blk_mq_rq_timer(unsigned long priv)
 				blk_mq_tag_idle(hctx);
 		}
 	}
+	blk_queue_exit(q);
 }
 
 /*
@@ -2008,7 +2011,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 		hctxs[i]->queue_num = i;
 	}
 
-	setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
+	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
 	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
 
 	q->nr_queues = nr_cpu_ids;
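
The reworked blk_mq_timeout_work() brackets its scan with blk_queue_enter(q, true)/blk_queue_exit(q): it pins the queue's percpu usage counter and bails out early if the queue is frozen or being torn down, rather than racing with teardown. The module below is a hedged sketch of the same guard pattern built directly on percpu_ref; it is an analogy to, not the implementation of, blk_queue_enter(), and all names are illustrative.

/* Sketch: an async worker takes a reference before touching shared
 * state and bails out if the object is going away. */
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/percpu-refcount.h>
#include <linux/workqueue.h>

static struct percpu_ref usage;
static struct work_struct scan_work;
static DECLARE_COMPLETION(release_done);

static void usage_release(struct percpu_ref *ref)
{
	complete(&release_done);	/* last reference gone */
}

static void scan_work_fn(struct work_struct *work)
{
	/* Rough equivalent of blk_queue_enter(q, true): fail fast if
	 * the object is frozen/dying instead of blocking. */
	if (!percpu_ref_tryget_live(&usage))
		return;

	pr_info("safe to scan: object pinned for the duration\n");

	percpu_ref_put(&usage);		/* cf. blk_queue_exit(q) */
}

static int __init guard_init(void)
{
	int ret = percpu_ref_init(&usage, usage_release, 0, GFP_KERNEL);

	if (ret)
		return ret;
	INIT_WORK(&scan_work, scan_work_fn);
	schedule_work(&scan_work);
	return 0;
}

static void __exit guard_exit(void)
{
	flush_work(&scan_work);
	percpu_ref_kill(&usage);	/* reject further tryget_live() */
	wait_for_completion(&release_done);
	percpu_ref_exit(&usage);
}

module_init(guard_init);
module_exit(guard_exit);
MODULE_LICENSE("GPL");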
block/blk-timeout.c  +6 −5
@@ -127,13 +127,16 @@ static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout
 	}
 }
 
-void blk_rq_timed_out_timer(unsigned long data)
+void blk_timeout_work(struct work_struct *work)
 {
-	struct request_queue *q = (struct request_queue *) data;
+	struct request_queue *q =
+		container_of(work, struct request_queue, timeout_work);
 	unsigned long flags, next = 0;
 	struct request *rq, *tmp;
 	int next_set = 0;
 
+	if (blk_queue_enter(q, true))
+		return;
 	spin_lock_irqsave(q->queue_lock, flags);
 
 	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
@@ -143,6 +146,7 @@ void blk_rq_timed_out_timer(unsigned long data)
 		mod_timer(&q->timeout, round_jiffies_up(next));
 
 	spin_unlock_irqrestore(q->queue_lock, flags);
+	blk_queue_exit(q);
 }
 
 /**
@@ -193,9 +197,6 @@ void blk_add_timer(struct request *req)
 	struct request_queue *q = req->q;
 	unsigned long expiry;
 
-	if (req->cmd_flags & REQ_NO_TIMEOUT)
-		return;
-
 	/* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
 	if (!q->mq_ops && !q->rq_timed_out_fn)
 		return;
block/blk.h  +1 −1
@@ -93,7 +93,7 @@ static inline void blk_flush_integrity(void)
 }
 #endif
 
-void blk_rq_timed_out_timer(unsigned long data);
+void blk_timeout_work(struct work_struct *work);
 unsigned long blk_rq_timeout(unsigned long timeout);
 void blk_add_timer(struct request *req);
 void blk_delete_timer(struct request *);