Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit da2c0b02 authored by Tejun Heo's avatar Tejun Heo
Browse files

Merge branch 'master' into tj-percpu

parents 795f99b6 33bfad54
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -954,14 +954,14 @@ elevator_allow_merge_fn called whenever the block layer determines
				results in some sort of conflict internally,
				this hook allows it to do that.

elevator_dispatch_fn		fills the dispatch queue with ready requests.
elevator_dispatch_fn*		fills the dispatch queue with ready requests.
				I/O schedulers are free to postpone requests by
				not filling the dispatch queue unless @force
				is non-zero.  Once dispatched, I/O schedulers
				are not allowed to manipulate the requests -
				they belong to generic dispatch queue.

elevator_add_req_fn		called to add a new request into the scheduler
elevator_add_req_fn*		called to add a new request into the scheduler

elevator_queue_empty_fn		returns true if the merge queue is empty.
				Drivers shouldn't use this, but rather check
@@ -991,7 +991,7 @@ elevator_activate_req_fn Called when device driver first sees a request.
elevator_deactivate_req_fn	Called when device driver decides to delay
				a request by requeueing it.

elevator_init_fn
elevator_init_fn*
elevator_exit_fn		Allocate and free any elevator specific storage
				for a queue.

+6 −19
Original line number Diff line number Diff line
@@ -2,13 +2,13 @@
IP-Aliasing:
============

IP-aliases are additional IP-addresses/masks hooked up to a base 
interface by adding a colon and a string when running ifconfig. 
This string is usually numeric, but this is not a must.

IP-Aliases are avail if CONFIG_INET (`standard' IPv4 networking) 
is configured in the kernel.
IP-aliases are an obsolete way to manage multiple IP-addresses/masks
per interface. Newer tools such as iproute2 support multiple
address/prefixes per interface, but aliases are still supported
for backwards compatibility.

An alias is formed by adding a colon and a string when running ifconfig.
This string is usually numeric, but this is not a must.

o Alias creation.
  Alias creation is done by 'magic' interface naming: eg. to create a
@@ -38,16 +38,3 @@ o Relationship with main device

  If the base device is shut down the added aliases will be deleted 
  too.


Contact
-------
Please finger or e-mail me:
   Juan Jose Ciarlante <jjciarla@raiz.uncu.edu.ar>

Updated by Erik Schoenfelder <schoenfr@gaertner.DE>

; local variables:
; mode: indented-text
; mode: auto-fill
; end:
+0 −2
Original line number Diff line number Diff line
@@ -2836,8 +2836,6 @@ S: Maintained
MAC80211
P:	Johannes Berg
M:	johannes@sipsolutions.net
P:	Michael Wu
M:	flamingice@sourmilk.net
L:	linux-wireless@vger.kernel.org
W:	http://linuxwireless.org/
T:	git kernel.org:/pub/scm/linux/kernel/git/linville/wireless-2.6.git
+1 −1
Original line number Diff line number Diff line
@@ -302,7 +302,7 @@ static void bio_end_empty_barrier(struct bio *bio, int err)
 * Description:
 *    Issue a flush for the block device in question. Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.  Caller must run wait_for_completion() on its own.
 *    wish to.
 */
int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
{
+63 −37
Original line number Diff line number Diff line
@@ -64,11 +64,12 @@ static struct workqueue_struct *kblockd_workqueue;

static void drive_stat_acct(struct request *rq, int new_io)
{
	struct gendisk *disk = rq->rq_disk;
	struct hd_struct *part;
	int rw = rq_data_dir(rq);
	int cpu;

	if (!blk_fs_request(rq) || !rq->rq_disk)
	if (!blk_fs_request(rq) || !disk || !blk_queue_io_stat(disk->queue))
		return;

	cpu = part_stat_lock();
@@ -599,8 +600,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
	q->request_fn		= rfn;
	q->prep_rq_fn		= NULL;
	q->unplug_fn		= generic_unplug_device;
	q->queue_flags		= (1 << QUEUE_FLAG_CLUSTER |
				   1 << QUEUE_FLAG_STACKABLE);
	q->queue_flags		= QUEUE_FLAG_DEFAULT;
	q->queue_lock		= lock;

	blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
@@ -1125,6 +1125,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)

	if (bio_sync(bio))
		req->cmd_flags |= REQ_RW_SYNC;
	if (bio_unplug(bio))
		req->cmd_flags |= REQ_UNPLUG;
	if (bio_rw_meta(bio))
		req->cmd_flags |= REQ_RW_META;

@@ -1141,6 +1143,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
	int el_ret, nr_sectors;
	const unsigned short prio = bio_prio(bio);
	const int sync = bio_sync(bio);
	const int unplug = bio_unplug(bio);
	int rw_flags;

	nr_sectors = bio_sectors(bio);
@@ -1244,7 +1247,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
		blk_plug_device(q);
	add_request(q, req);
out:
	if (sync || blk_queue_nonrot(q))
	if (unplug || blk_queue_nonrot(q))
		__generic_unplug_device(q);
	spin_unlock_irq(q->queue_lock);
	return 0;
@@ -1448,6 +1451,11 @@ static inline void __generic_make_request(struct bio *bio)
			err = -EOPNOTSUPP;
			goto end_io;
		}
		if (bio_barrier(bio) && bio_has_data(bio) &&
		    (q->next_ordered == QUEUE_ORDERED_NONE)) {
			err = -EOPNOTSUPP;
			goto end_io;
		}

		ret = q->make_request_fn(q, bio);
	} while (ret);
@@ -1655,6 +1663,55 @@ void blkdev_dequeue_request(struct request *req)
}
EXPORT_SYMBOL(blkdev_dequeue_request);

/*
 * Account @bytes of completed transfer against the partition @req maps to.
 *
 * Only regular filesystem requests are accounted, and only when the
 * backing queue has I/O statistics enabled (blk_queue_io_stat()).
 * Per-partition sector counts are kept separately per direction.
 */
static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	struct gendisk *disk = req->rq_disk;

	if (!disk || !blk_queue_io_stat(disk->queue))
		return;

	if (blk_fs_request(req)) {
		const int rw = rq_data_dir(req);
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		/*
		 * Use the cached @disk (already NULL-checked above) rather
		 * than re-reading req->rq_disk, matching blk_account_io_done().
		 */
		part = disk_map_sector_rcu(disk, req->sector);
		/* bytes >> 9: convert byte count to 512-byte sectors */
		part_stat_add(cpu, part, sectors[rw], bytes >> 9);
		part_stat_unlock();
	}
}

/*
 * Account final completion of @req: bump the per-partition I/O count and
 * service-time ticks for the request's direction, round the in-flight
 * statistics and drop the partition's in-flight counter.
 *
 * Does nothing when the request has no disk attached or the queue has
 * I/O statistics disabled (blk_queue_io_stat()).
 */
static void blk_account_io_done(struct request *req)
{
	struct gendisk *disk = req->rq_disk;

	if (!disk || !blk_queue_io_stat(disk->queue))
		return;

	/*
	 * Account IO completion.  bar_rq isn't accounted as a normal
	 * IO on queueing nor completion.  Accounting the containing
	 * request is enough.
	 */
	if (blk_fs_request(req) && req != &req->q->bar_rq) {
		/* total service time, in jiffies, since the request was started */
		unsigned long duration = jiffies - req->start_time;
		const int rw = rq_data_dir(req);
		struct hd_struct *part;
		int cpu;

		/* per-cpu stat section; lookup below is RCU-protected */
		cpu = part_stat_lock();
		part = disk_map_sector_rcu(disk, req->sector);

		part_stat_inc(cpu, part, ios[rw]);
		part_stat_add(cpu, part, ticks[rw], duration);
		part_round_stats(cpu, part);
		part_dec_in_flight(part);

		part_stat_unlock();
	}
}

/**
 * __end_that_request_first - end I/O on a request
 * @req:      the request being processed
@@ -1690,16 +1747,7 @@ static int __end_that_request_first(struct request *req, int error,
				(unsigned long long)req->sector);
	}

	if (blk_fs_request(req) && req->rq_disk) {
		const int rw = rq_data_dir(req);
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = disk_map_sector_rcu(req->rq_disk, req->sector);
		part_stat_add(cpu, part, sectors[rw], nr_bytes >> 9);
		part_stat_unlock();
	}
	blk_account_io_completion(req, nr_bytes);

	total_bytes = bio_nbytes = 0;
	while ((bio = req->bio) != NULL) {
@@ -1779,8 +1827,6 @@ static int __end_that_request_first(struct request *req, int error,
 */
static void end_that_request_last(struct request *req, int error)
{
	struct gendisk *disk = req->rq_disk;

	if (blk_rq_tagged(req))
		blk_queue_end_tag(req->q, req);

@@ -1792,27 +1838,7 @@ static void end_that_request_last(struct request *req, int error)

	blk_delete_timer(req);

	/*
	 * Account IO completion.  bar_rq isn't accounted as a normal
	 * IO on queueing nor completion.  Accounting the containing
	 * request is enough.
	 */
	if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
		unsigned long duration = jiffies - req->start_time;
		const int rw = rq_data_dir(req);
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = disk_map_sector_rcu(disk, req->sector);

		part_stat_inc(cpu, part, ios[rw]);
		part_stat_add(cpu, part, ticks[rw], duration);
		part_round_stats(cpu, part);
		part_dec_in_flight(part);

		part_stat_unlock();
	}
	blk_account_io_done(req);

	if (req->end_io)
		req->end_io(req, error);
Loading