arch/arm64/configs/markw_defconfig (+2 −0)

@@ -304,6 +304,8 @@ CONFIG_IOSCHED_CFQ=y
 # CONFIG_DEFAULT_DEADLINE is not set
 CONFIG_DEFAULT_CFQ=y
 # CONFIG_DEFAULT_NOOP is not set
+CONFIG_IOSCHED_SIOPLUS=y
+# CONFIG_DEFAULT_SIOPLUS is not set
 CONFIG_DEFAULT_IOSCHED="cfq"
 CONFIG_ASN1=y
 CONFIG_UNINLINE_SPIN_UNLOCK=y

block/Kconfig.iosched (+14 −0)

@@ -50,6 +50,16 @@ config CFQ_GROUP_IOSCHED
 	---help---
 	  Enable group IO scheduling in CFQ.
 
+config IOSCHED_SIOPLUS
+	tristate "Simple I/O scheduler plus"
+	default y
+	---help---
+	  The Simple I/O scheduler is an extremely simple scheduler,
+	  based on noop and deadline, that relies on deadlines to
+	  ensure fairness. The algorithm does not do any sorting but
+	  basic merging, trying to keep a minimum overhead. It is aimed
+	  mainly for aleatory access devices (eg: flash devices).
+
 choice
 	prompt "Default I/O scheduler"
 	default DEFAULT_CFQ

@@ -66,6 +76,9 @@ choice
 	config DEFAULT_NOOP
 		bool "No-op"
 
+	config DEFAULT_SIOPLUS
+		bool "SIOPLUS" if IOSCHED_SIOPLUS=y
+
 endchoice
 
 config DEFAULT_IOSCHED

@@ -73,6 +86,7 @@ config DEFAULT_IOSCHED
 	default "deadline" if DEFAULT_DEADLINE
 	default "cfq" if DEFAULT_CFQ
 	default "noop" if DEFAULT_NOOP
+	default "sioplus" if DEFAULT_SIOPLUS
 
 endmenu

block/Makefile (+1 −0)

@@ -19,6 +19,7 @@ obj-$(CONFIG_IOSCHED_NOOP)	+= noop-iosched.o
 obj-$(CONFIG_IOSCHED_DEADLINE)	+= deadline-iosched.o
 obj-$(CONFIG_IOSCHED_CFQ)	+= cfq-iosched.o
 obj-$(CONFIG_IOSCHED_TEST)	+= test-iosched.o
+obj-$(CONFIG_IOSCHED_SIOPLUS)	+= sioplus-iosched.o
 
 obj-$(CONFIG_BLOCK_COMPAT)	+= compat_ioctl.o
 obj-$(CONFIG_BLK_CMDLINE_PARSER)	+= cmdline-parser.o

block/sioplus-iosched.c (new file, mode 100644, +419 −0)

/*
 * Simple IO scheduler plus
 * Based on Noop, Deadline and V(R) IO schedulers.
 *
 * Copyright (C) 2012 Miguel Boton <mboton@gmail.com>
 *           (C) 2013, 2014 Boy Petersen <boypetersen@gmail.com>
 *
 * This algorithm does not do any kind of sorting, as it is aimed for
 * aleatory access devices, but it does some basic merging. We try to
 * keep minimum overhead to achieve low latency.
 *
 * Asynchronous and synchronous requests are not treated separately, but
 * we rely on deadlines to ensure fairness.
 *
 * The plus version incorporates several fixes and logic improvements.
 *
 */
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/version.h>
#include <linux/slab.h>

enum { ASYNC, SYNC };

/* Tunables */
static const int sync_read_expire   = (HZ / 4);	/* max time before a sync read is submitted. */
static const int sync_write_expire  = (HZ / 4) * 5;	/* max time before a sync write is submitted. */
static const int async_read_expire  = (HZ / 2);	/* ditto for async, these limits are SOFT! */
static const int async_write_expire = (HZ * 2);	/* ditto for async, these limits are SOFT! */

static const int writes_starved = 1;	/* max times reads can starve a write */
static const int fifo_batch     = 3;	/* # of sequential requests treated as one
					   by the above parameters. For throughput. */
/* Elevator data */
struct sio_data {
	/* Request queues */
	struct list_head fifo_list[2][2];

	/* Attributes */
	unsigned int batched;
	unsigned int starved;

	/* Settings */
	int fifo_expire[2][2];
	int fifo_batch;
	int writes_starved;
};

static void
sio_merged_requests(struct request_queue *q, struct request *rq,
		    struct request *next)
{
	/*
	 * If next expires before rq, assign its expire time to rq
	 * and move into next position (next will be deleted) in fifo.
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before(next->fifo_time, rq->fifo_time)) {
			list_move(&rq->queuelist, &next->queuelist);
			rq->fifo_time = next->fifo_time;
		}
	}

	/* Delete next request */
	rq_fifo_clear(next);
}

static void
sio_add_request(struct request_queue *q, struct request *rq)
{
	struct sio_data *sd = q->elevator->elevator_data;
	const int sync = rq_is_sync(rq);
	const int data_dir = rq_data_dir(rq);

	/*
	 * Add request to the proper fifo list and set its
	 * expire time.
	 */
	rq->fifo_time = jiffies + sd->fifo_expire[sync][data_dir];
	list_add_tail(&rq->queuelist, &sd->fifo_list[sync][data_dir]);
}

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,38)
static int
sio_queue_empty(struct request_queue *q)
{
	struct sio_data *sd = q->elevator->elevator_data;

	/* Check if fifo lists are empty */
	return list_empty(&sd->fifo_list[SYNC][READ]) &&
	       list_empty(&sd->fifo_list[SYNC][WRITE]) &&
	       list_empty(&sd->fifo_list[ASYNC][READ]) &&
	       list_empty(&sd->fifo_list[ASYNC][WRITE]);
}
#endif

static struct request *
sio_expired_request(struct sio_data *sd, int sync, int data_dir)
{
	struct list_head *list = &sd->fifo_list[sync][data_dir];
	struct request *rq;

	if (list_empty(list))
		return NULL;

	/* Retrieve request */
	rq = rq_entry_fifo(list->next);

	/* Request has expired */
	if (time_after_eq(jiffies, rq->fifo_time))
		return rq;

	return NULL;
}

static struct request *
sio_choose_expired_request(struct sio_data *sd)
{
	struct request *rq;

	/* Reset (non-expired-)batch-counter */
	sd->batched = 0;

	/*
	 * Check expired requests.
	 * Asynchronous requests have priority over synchronous.
	 * Write requests have priority over read.
	 */
	rq = sio_expired_request(sd, ASYNC, WRITE);
	if (rq)
		return rq;
	rq = sio_expired_request(sd, ASYNC, READ);
	if (rq)
		return rq;
	rq = sio_expired_request(sd, SYNC, WRITE);
	if (rq)
		return rq;
	rq = sio_expired_request(sd, SYNC, READ);
	if (rq)
		return rq;

	return NULL;
}

static struct request *
sio_choose_request(struct sio_data *sd, int data_dir)
{
	struct list_head *sync = sd->fifo_list[SYNC];
	struct list_head *async = sd->fifo_list[ASYNC];

	/* Increase (non-expired-)batch-counter */
	sd->batched++;

	/*
	 * Retrieve request from available fifo list.
	 * Synchronous requests have priority over asynchronous.
	 * Read requests have priority over write.
	 */
	if (!list_empty(&sync[data_dir]))
		return rq_entry_fifo(sync[data_dir].next);
	if (!list_empty(&async[data_dir]))
		return rq_entry_fifo(async[data_dir].next);

	if (!list_empty(&sync[!data_dir]))
		return rq_entry_fifo(sync[!data_dir].next);
	if (!list_empty(&async[!data_dir]))
		return rq_entry_fifo(async[!data_dir].next);

	return NULL;
}

static inline void
sio_dispatch_request(struct sio_data *sd, struct request *rq)
{
	/*
	 * Remove the request from the fifo list
	 * and dispatch it.
	 */
	rq_fifo_clear(rq);
	elv_dispatch_add_tail(rq->q, rq);

	if (rq_data_dir(rq)) {
		sd->starved = 0;
	} else {
		if (!list_empty(&sd->fifo_list[SYNC][WRITE]) ||
		    !list_empty(&sd->fifo_list[ASYNC][WRITE]))
			sd->starved++;
	}
}

static int
sio_dispatch_requests(struct request_queue *q, int force)
{
	struct sio_data *sd = q->elevator->elevator_data;
	struct request *rq = NULL;
	int data_dir = READ;

	/*
	 * Retrieve any expired request after a batch of
	 * sequential requests.
	 */
	if (sd->batched >= sd->fifo_batch)
		rq = sio_choose_expired_request(sd);

	/* Retrieve request */
	if (!rq) {
		if (sd->starved >= sd->writes_starved)
			data_dir = WRITE;

		rq = sio_choose_request(sd, data_dir);
		if (!rq)
			return 0;
	}

	/* Dispatch request */
	sio_dispatch_request(sd, rq);

	return 1;
}

static struct request *
sio_former_request(struct request_queue *q, struct request *rq)
{
	struct sio_data *sd = q->elevator->elevator_data;
	const int sync = rq_is_sync(rq);
	const int data_dir = rq_data_dir(rq);

	if (rq->queuelist.prev == &sd->fifo_list[sync][data_dir])
		return NULL;

	/* Return former request */
	return list_entry(rq->queuelist.prev, struct request, queuelist);
}

static struct request *
sio_latter_request(struct request_queue *q, struct request *rq)
{
	struct sio_data *sd = q->elevator->elevator_data;
	const int sync = rq_is_sync(rq);
	const int data_dir = rq_data_dir(rq);

	if (rq->queuelist.next == &sd->fifo_list[sync][data_dir])
		return NULL;

	/* Return latter request */
	return list_entry(rq->queuelist.next, struct request, queuelist);
}

static int sio_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct sio_data *sd;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (eq == NULL)
		return -ENOMEM;

	/* Allocate structure */
	sd = kmalloc_node(sizeof(*sd), GFP_KERNEL, q->node);
	if (sd == NULL) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = sd;

	spin_lock_irq(q->queue_lock);
	q->elevator = eq;
	spin_unlock_irq(q->queue_lock);

	/* Initialize fifo lists */
	INIT_LIST_HEAD(&sd->fifo_list[SYNC][READ]);
	INIT_LIST_HEAD(&sd->fifo_list[SYNC][WRITE]);
	INIT_LIST_HEAD(&sd->fifo_list[ASYNC][READ]);
	INIT_LIST_HEAD(&sd->fifo_list[ASYNC][WRITE]);

	/* Initialize data (starvation counters must not be left uninitialized
	   after kmalloc) */
	sd->batched = 0;
	sd->starved = 0;
	sd->fifo_expire[SYNC][READ] = sync_read_expire;
	sd->fifo_expire[SYNC][WRITE] = sync_write_expire;
	sd->fifo_expire[ASYNC][READ] = async_read_expire;
	sd->fifo_expire[ASYNC][WRITE] = async_write_expire;
	sd->fifo_batch = fifo_batch;
	sd->writes_starved = writes_starved;

	return 0;
}

static void
sio_exit_queue(struct elevator_queue *e)
{
	struct sio_data *sd = e->elevator_data;

	BUG_ON(!list_empty(&sd->fifo_list[SYNC][READ]));
	BUG_ON(!list_empty(&sd->fifo_list[SYNC][WRITE]));
	BUG_ON(!list_empty(&sd->fifo_list[ASYNC][READ]));
	BUG_ON(!list_empty(&sd->fifo_list[ASYNC][WRITE]));

	/* Free structure */
	kfree(sd);
}

/*
 * sysfs code
 */

static ssize_t
sio_var_show(int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
sio_var_store(int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtol(p, &p, 10);

	return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct sio_data *sd = e->elevator_data;				\
	int __data = __VAR;						\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return sio_var_show(__data, (page));				\
}
SHOW_FUNCTION(sio_sync_read_expire_show, sd->fifo_expire[SYNC][READ], 1);
SHOW_FUNCTION(sio_sync_write_expire_show, sd->fifo_expire[SYNC][WRITE], 1);
SHOW_FUNCTION(sio_async_read_expire_show, sd->fifo_expire[ASYNC][READ], 1);
SHOW_FUNCTION(sio_async_write_expire_show, sd->fifo_expire[ASYNC][WRITE], 1);
SHOW_FUNCTION(sio_fifo_batch_show, sd->fifo_batch, 0);
SHOW_FUNCTION(sio_writes_starved_show, sd->writes_starved, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
{									\
	struct sio_data *sd = e->elevator_data;				\
	int __data;							\
	int ret = sio_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(sio_sync_read_expire_store, &sd->fifo_expire[SYNC][READ], 0, INT_MAX, 1);
STORE_FUNCTION(sio_sync_write_expire_store, &sd->fifo_expire[SYNC][WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(sio_async_read_expire_store, &sd->fifo_expire[ASYNC][READ], 0, INT_MAX, 1);
STORE_FUNCTION(sio_async_write_expire_store, &sd->fifo_expire[ASYNC][WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(sio_fifo_batch_store, &sd->fifo_batch, 1, INT_MAX, 0);
STORE_FUNCTION(sio_writes_starved_store, &sd->writes_starved, 1, INT_MAX, 0);
#undef STORE_FUNCTION

#define DD_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, sio_##name##_show, \
				      sio_##name##_store)

static struct elv_fs_entry sio_attrs[] = {
	DD_ATTR(sync_read_expire),
	DD_ATTR(sync_write_expire),
	DD_ATTR(async_read_expire),
	DD_ATTR(async_write_expire),
	DD_ATTR(fifo_batch),
	DD_ATTR(writes_starved),
	__ATTR_NULL
};

static struct elevator_type iosched_sioplus = {
	.ops = {
		.elevator_merge_req_fn		= sio_merged_requests,
		.elevator_dispatch_fn		= sio_dispatch_requests,
		.elevator_add_req_fn		= sio_add_request,
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,38)
		.elevator_queue_empty_fn	= sio_queue_empty,
#endif
		.elevator_former_req_fn		= sio_former_request,
		.elevator_latter_req_fn		= sio_latter_request,
		.elevator_init_fn		= sio_init_queue,
		.elevator_exit_fn		= sio_exit_queue,
	},

	.elevator_attrs = sio_attrs,
	.elevator_name = "sioplus",
	.elevator_owner = THIS_MODULE,
};

static int __init sioplus_init(void)
{
	/* Register elevator */
	elv_register(&iosched_sioplus);

	return 0;
}

static void __exit sioplus_exit(void)
{
	/* Unregister elevator */
	elv_unregister(&iosched_sioplus);
}

module_init(sioplus_init);
module_exit(sioplus_exit);

MODULE_AUTHOR("Miguel Boton");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Simple IO scheduler plus");
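Once this patch is applied and CONFIG_IOSCHED_SIOPLUS is built in, the elevator registers under the name "sioplus" and can be selected per block device at runtime through /sys/block/<dev>/queue/scheduler; the tunables declared in sio_attrs are then exposed under /sys/block/<dev>/queue/iosched/. The userspace sketch below only illustrates that interaction and is not part of the patch; the device name mmcblk0 and the 200 ms value are placeholder assumptions.

/* Minimal sketch: select sioplus and adjust one tunable via sysfs.
 * Assumes a block device named mmcblk0; adjust paths and values as needed. */
#include <stdio.h>

static int write_sysfs(const char *path, const char *value)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fprintf(f, "%s\n", value);
	fclose(f);
	return 0;
}

int main(void)
{
	/* Make sioplus the active elevator for mmcblk0. */
	write_sysfs("/sys/block/mmcblk0/queue/scheduler", "sioplus");

	/* sync_read_expire is one of the attributes from sio_attrs; its
	 * store function converts the value from milliseconds to jiffies. */
	write_sysfs("/sys/block/mmcblk0/queue/iosched/sync_read_expire", "200");

	return 0;
}

The same can be done from a shell with "echo sioplus > /sys/block/mmcblk0/queue/scheduler". Note that the defconfig above keeps CONFIG_DEFAULT_IOSCHED="cfq", so sioplus is built in but is not the boot-time default unless CONFIG_DEFAULT_SIOPLUS is selected instead.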