
Commit 94d26bfc authored by Sebastian Ott, committed by Martin Schwidefsky

s390/scm: remove cluster option



Remove CONFIG_SCM_BLOCK_CLUSTER_WRITE and related code. This quirk is
no longer needed on current hardware.

Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent d12a3d60
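For readers tracking the configuration impact: with SCM_BLOCK enabled, the removed symbol previously defaulted to y (see the "def_bool y" in the Kconfig hunk below), and scm_blk_cluster.o was only linked into scm_block when it was set. The fragment below is a hypothetical .config sketch, purely illustrative and not part of this commit:

# Before this commit (scm_block built as a module; cluster writes enabled by default):
CONFIG_SCM_BLOCK=m
CONFIG_SCM_BLOCK_CLUSTER_WRITE=y

# After this commit the symbol no longer exists; regenerating the configuration
# (e.g. with "make olddefconfig") simply drops it, leaving only:
CONFIG_SCM_BLOCK=m

The write_cluster_size module parameter of scm_block also disappears together with scm_blk_cluster.c.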
drivers/s390/block/Kconfig +0 −7
@@ -82,10 +82,3 @@ config SCM_BLOCK

	  To compile this driver as a module, choose M here: the
	  module will be called scm_block.

config SCM_BLOCK_CLUSTER_WRITE
	def_bool y
	prompt "SCM force cluster writes"
	depends on SCM_BLOCK
	help
	  Force writes to Storage Class Memory (SCM) to be in done in clusters.
drivers/s390/block/Makefile +0 −3
@@ -19,7 +19,4 @@ obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o
obj-$(CONFIG_DCSSBLK) += dcssblk.o

scm_block-objs := scm_drv.o scm_blk.o
ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
scm_block-objs += scm_blk_cluster.o
endif
obj-$(CONFIG_SCM_BLOCK) += scm_block.o
drivers/s390/block/scm_blk.c +3 −42
@@ -42,7 +42,6 @@ static void __scm_free_rq(struct scm_request *scmrq)
	struct aob_rq_header *aobrq = to_aobrq(scmrq);

	free_page((unsigned long) scmrq->aob);
	__scm_free_rq_cluster(scmrq);
	kfree(scmrq->request);
	kfree(aobrq);
}
@@ -82,9 +81,6 @@ static int __scm_alloc_rq(void)
	if (!scmrq->request)
		goto free;

	if (__scm_alloc_rq_cluster(scmrq))
		goto free;

	INIT_LIST_HEAD(&scmrq->list);
	spin_lock_irq(&list_lock);
	list_add(&scmrq->list, &inactive_requests);
@@ -234,7 +230,6 @@ static inline void scm_request_init(struct scm_blk_dev *bdev,
	scmrq->error = 0;
	/* We don't use all msbs - place aidaws at the end of the aob page. */
	scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];
	scm_request_cluster_init(scmrq);
}

static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
@@ -246,12 +241,11 @@ static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
	blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY);
}

void scm_request_requeue(struct scm_request *scmrq)
static void scm_request_requeue(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	int i;

	scm_release_cluster(scmrq);
	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
		blk_requeue_request(bdev->rq, scmrq->request[i]);

@@ -260,12 +254,11 @@ void scm_request_requeue(struct scm_request *scmrq)
	scm_ensure_queue_restart(bdev);
}

void scm_request_finish(struct scm_request *scmrq)
static void scm_request_finish(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	int i;

	scm_release_cluster(scmrq);
	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
		blk_end_request_all(scmrq->request[i], scmrq->error);

@@ -313,31 +306,6 @@ static void scm_blk_request(struct request_queue *rq)
		}
		scm_request_set(scmrq, req);

		if (!scm_reserve_cluster(scmrq)) {
			SCM_LOG(5, "cluster busy");
			scm_request_set(scmrq, NULL);
			if (scmrq->aob->request.msb_count)
				goto out;

			scm_request_done(scmrq);
			return;
		}

		if (scm_need_cluster_request(scmrq)) {
			if (scmrq->aob->request.msb_count) {
				/* Start cluster requests separately. */
				scm_request_set(scmrq, NULL);
				if (scm_request_start(scmrq))
					return;
			} else {
				atomic_inc(&bdev->queued_reqs);
				blk_start_request(req);
				scm_initiate_cluster_request(scmrq);
			}
			scmrq = NULL;
			continue;
		}

		if (scm_request_prepare(scmrq)) {
			SCM_LOG(5, "aidaw alloc failed");
			scm_request_set(scmrq, NULL);
@@ -444,12 +412,6 @@ static void scm_blk_tasklet(struct scm_blk_dev *bdev)
			continue;
		}

		if (scm_test_cluster_request(scmrq)) {
			scm_cluster_request_irq(scmrq);
			spin_lock_irqsave(&bdev->lock, flags);
			continue;
		}

		scm_request_finish(scmrq);
		spin_lock_irqsave(&bdev->lock, flags);
	}
@@ -498,7 +460,6 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
	blk_queue_max_segments(rq, nr_max_blk);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, rq);
	scm_blk_dev_cluster_setup(bdev);

	bdev->gendisk = alloc_disk(SCM_NR_PARTS);
	if (!bdev->gendisk)
@@ -558,7 +519,7 @@ static bool __init scm_blk_params_valid(void)
	if (!nr_requests_per_io || nr_requests_per_io > 64)
		return false;

	return scm_cluster_size_valid();
	return true;
}

static int __init scm_blk_init(void)
drivers/s390/block/scm_blk.h +0 −54
@@ -23,9 +23,6 @@ struct scm_blk_dev {
	atomic_t queued_reqs;
	enum {SCM_OPER, SCM_WR_PROHIBIT} state;
	struct list_head finished_requests;
#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
	struct list_head cluster_list;
#endif
};

struct scm_request {
@@ -36,13 +33,6 @@ struct scm_request {
	struct list_head list;
	u8 retries;
	int error;
#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
	struct {
		enum {CLUSTER_NONE, CLUSTER_READ, CLUSTER_WRITE} state;
		struct list_head list;
		void **buf;
	} cluster;
#endif
};

#define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data)
@@ -52,55 +42,11 @@ void scm_blk_dev_cleanup(struct scm_blk_dev *);
void scm_blk_set_available(struct scm_blk_dev *);
void scm_blk_irq(struct scm_device *, void *, int);

void scm_request_finish(struct scm_request *);
void scm_request_requeue(struct scm_request *);

struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes);

int scm_drv_init(void);
void scm_drv_cleanup(void);

#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
void __scm_free_rq_cluster(struct scm_request *);
int __scm_alloc_rq_cluster(struct scm_request *);
void scm_request_cluster_init(struct scm_request *);
bool scm_reserve_cluster(struct scm_request *);
void scm_release_cluster(struct scm_request *);
void scm_blk_dev_cluster_setup(struct scm_blk_dev *);
bool scm_need_cluster_request(struct scm_request *);
void scm_initiate_cluster_request(struct scm_request *);
void scm_cluster_request_irq(struct scm_request *);
bool scm_test_cluster_request(struct scm_request *);
bool scm_cluster_size_valid(void);
#else /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */
static inline void __scm_free_rq_cluster(struct scm_request *scmrq) {}
static inline int __scm_alloc_rq_cluster(struct scm_request *scmrq)
{
	return 0;
}
static inline void scm_request_cluster_init(struct scm_request *scmrq) {}
static inline bool scm_reserve_cluster(struct scm_request *scmrq)
{
	return true;
}
static inline void scm_release_cluster(struct scm_request *scmrq) {}
static inline void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev) {}
static inline bool scm_need_cluster_request(struct scm_request *scmrq)
{
	return false;
}
static inline void scm_initiate_cluster_request(struct scm_request *scmrq) {}
static inline void scm_cluster_request_irq(struct scm_request *scmrq) {}
static inline bool scm_test_cluster_request(struct scm_request *scmrq)
{
	return false;
}
static inline bool scm_cluster_size_valid(void)
{
	return true;
}
#endif /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */

extern debug_info_t *scm_debug;

#define SCM_LOG(imp, txt) do {					\
drivers/s390/block/scm_blk_cluster.c +0 −255 (file deleted)
/*
 * Block driver for s390 storage class memory.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
#include "scm_blk.h"

static unsigned int write_cluster_size = 64;
module_param(write_cluster_size, uint, S_IRUGO);
MODULE_PARM_DESC(write_cluster_size,
		 "Number of pages used for contiguous writes.");

#define CLUSTER_SIZE (write_cluster_size * PAGE_SIZE)

void __scm_free_rq_cluster(struct scm_request *scmrq)
{
	int i;

	if (!scmrq->cluster.buf)
		return;

	for (i = 0; i < 2 * write_cluster_size; i++)
		free_page((unsigned long) scmrq->cluster.buf[i]);

	kfree(scmrq->cluster.buf);
}

int __scm_alloc_rq_cluster(struct scm_request *scmrq)
{
	int i;

	scmrq->cluster.buf = kzalloc(sizeof(void *) * 2 * write_cluster_size,
				 GFP_KERNEL);
	if (!scmrq->cluster.buf)
		return -ENOMEM;

	for (i = 0; i < 2 * write_cluster_size; i++) {
		scmrq->cluster.buf[i] = (void *) get_zeroed_page(GFP_DMA);
		if (!scmrq->cluster.buf[i])
			return -ENOMEM;
	}
	INIT_LIST_HEAD(&scmrq->cluster.list);
	return 0;
}

void scm_request_cluster_init(struct scm_request *scmrq)
{
	scmrq->cluster.state = CLUSTER_NONE;
}

static bool clusters_intersect(struct request *A, struct request *B)
{
	unsigned long firstA, lastA, firstB, lastB;

	firstA = ((u64) blk_rq_pos(A) << 9) / CLUSTER_SIZE;
	lastA = (((u64) blk_rq_pos(A) << 9) +
		    blk_rq_bytes(A) - 1) / CLUSTER_SIZE;

	firstB = ((u64) blk_rq_pos(B) << 9) / CLUSTER_SIZE;
	lastB = (((u64) blk_rq_pos(B) << 9) +
		    blk_rq_bytes(B) - 1) / CLUSTER_SIZE;

	return (firstB <= lastA && firstA <= lastB);
}

bool scm_reserve_cluster(struct scm_request *scmrq)
{
	struct request *req = scmrq->request[scmrq->aob->request.msb_count];
	struct scm_blk_dev *bdev = scmrq->bdev;
	struct scm_request *iter;
	int pos, add = 1;

	if (write_cluster_size == 0)
		return true;

	spin_lock(&bdev->lock);
	list_for_each_entry(iter, &bdev->cluster_list, cluster.list) {
		if (iter == scmrq) {
			/*
			 * We don't have to use clusters_intersect here, since
			 * cluster requests are always started separately.
			 */
			add = 0;
			continue;
		}
		for (pos = 0; pos < iter->aob->request.msb_count; pos++) {
			if (clusters_intersect(req, iter->request[pos]) &&
			    (rq_data_dir(req) == WRITE ||
			     rq_data_dir(iter->request[pos]) == WRITE)) {
				spin_unlock(&bdev->lock);
				return false;
			}
		}
	}
	if (add)
		list_add(&scmrq->cluster.list, &bdev->cluster_list);
	spin_unlock(&bdev->lock);

	return true;
}

void scm_release_cluster(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	unsigned long flags;

	if (write_cluster_size == 0)
		return;

	spin_lock_irqsave(&bdev->lock, flags);
	list_del(&scmrq->cluster.list);
	spin_unlock_irqrestore(&bdev->lock, flags);
}

void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev)
{
	INIT_LIST_HEAD(&bdev->cluster_list);
	blk_queue_io_opt(bdev->rq, CLUSTER_SIZE);
}

static int scm_prepare_cluster_request(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	struct scm_device *scmdev = bdev->gendisk->private_data;
	struct request *req = scmrq->request[0];
	struct msb *msb = &scmrq->aob->msb[0];
	struct req_iterator iter;
	struct aidaw *aidaw;
	struct bio_vec bv;
	int i = 0;
	u64 addr;

	switch (scmrq->cluster.state) {
	case CLUSTER_NONE:
		scmrq->cluster.state = CLUSTER_READ;
		/* fall through */
	case CLUSTER_READ:
		msb->bs = MSB_BS_4K;
		msb->oc = MSB_OC_READ;
		msb->flags = MSB_FLAG_IDA;
		msb->blk_count = write_cluster_size;

		addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
		msb->scm_addr = round_down(addr, CLUSTER_SIZE);

		if (msb->scm_addr !=
		    round_down(addr + (u64) blk_rq_bytes(req) - 1,
			       CLUSTER_SIZE))
			msb->blk_count = 2 * write_cluster_size;

		aidaw = scm_aidaw_fetch(scmrq, msb->blk_count * PAGE_SIZE);
		if (!aidaw)
			return -ENOMEM;

		scmrq->aob->request.msb_count = 1;
		msb->data_addr = (u64) aidaw;
		for (i = 0; i < msb->blk_count; i++) {
			aidaw->data_addr = (u64) scmrq->cluster.buf[i];
			aidaw++;
		}

		break;
	case CLUSTER_WRITE:
		aidaw = (void *) msb->data_addr;
		msb->oc = MSB_OC_WRITE;

		for (addr = msb->scm_addr;
		     addr < scmdev->address + ((u64) blk_rq_pos(req) << 9);
		     addr += PAGE_SIZE) {
			aidaw->data_addr = (u64) scmrq->cluster.buf[i];
			aidaw++;
			i++;
		}
		rq_for_each_segment(bv, req, iter) {
			aidaw->data_addr = (u64) page_address(bv.bv_page);
			aidaw++;
			i++;
		}
		for (; i < msb->blk_count; i++) {
			aidaw->data_addr = (u64) scmrq->cluster.buf[i];
			aidaw++;
		}
		break;
	}
	return 0;
}

bool scm_need_cluster_request(struct scm_request *scmrq)
{
	int pos = scmrq->aob->request.msb_count;

	if (rq_data_dir(scmrq->request[pos]) == READ)
		return false;

	return blk_rq_bytes(scmrq->request[pos]) < CLUSTER_SIZE;
}

/* Called with queue lock held. */
void scm_initiate_cluster_request(struct scm_request *scmrq)
{
	if (scm_prepare_cluster_request(scmrq))
		goto requeue;
	if (eadm_start_aob(scmrq->aob))
		goto requeue;
	return;
requeue:
	scm_request_requeue(scmrq);
}

bool scm_test_cluster_request(struct scm_request *scmrq)
{
	return scmrq->cluster.state != CLUSTER_NONE;
}

void scm_cluster_request_irq(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	unsigned long flags;

	switch (scmrq->cluster.state) {
	case CLUSTER_NONE:
		BUG();
		break;
	case CLUSTER_READ:
		if (scmrq->error) {
			scm_request_finish(scmrq);
			break;
		}
		scmrq->cluster.state = CLUSTER_WRITE;
		spin_lock_irqsave(&bdev->rq_lock, flags);
		scm_initiate_cluster_request(scmrq);
		spin_unlock_irqrestore(&bdev->rq_lock, flags);
		break;
	case CLUSTER_WRITE:
		scm_request_finish(scmrq);
		break;
	}
}

bool scm_cluster_size_valid(void)
{
	if (write_cluster_size == 1 || write_cluster_size > 128)
		return false;

	return !(write_cluster_size & (write_cluster_size - 1));
}