Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 80bfa2f6 authored by Roger Pau Monne, committed by Konrad Rzeszutek Wilk
Browse files

xen-blkif: drop struct blkif_request_segment_aligned



This was wrongly introduced in commit 402b27f9, the only difference
between blkif_request_segment_aligned and blkif_request_segment is
that the former has a named padding, while both share the same
memory layout.

Also correct a few minor glitches in the description, including for it
to no longer assume PAGE_SIZE == 4096.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
[Description fix by Jan Beulich]
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reported-by: Jan Beulich <jbeulich@suse.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Tested-by: Matt Rushton <mrushton@amazon.com>
Cc: Matt Wilson <msw@amazon.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent c05f3e3c
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -847,7 +847,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
	struct grant_page **pages = pending_req->indirect_pages;
	struct xen_blkif *blkif = pending_req->blkif;
	int indirect_grefs, rc, n, nseg, i;
	struct blkif_request_segment_aligned *segments = NULL;
	struct blkif_request_segment *segments = NULL;

	nseg = pending_req->nr_pages;
	indirect_grefs = INDIRECT_PAGES(nseg);
+1 −1
Original line number Diff line number Diff line
@@ -57,7 +57,7 @@
#define MAX_INDIRECT_SEGMENTS 256

#define SEGS_PER_INDIRECT_FRAME \
	(PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
	(PAGE_SIZE/sizeof(struct blkif_request_segment))
#define MAX_INDIRECT_PAGES \
	((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
#define INDIRECT_PAGES(_segs) \
+3 −3
Original line number Diff line number Diff line
@@ -162,7 +162,7 @@ static DEFINE_SPINLOCK(minor_lock);
#define DEV_NAME	"xvd"	/* name in /dev */

#define SEGS_PER_INDIRECT_FRAME \
	(PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
	(PAGE_SIZE/sizeof(struct blkif_request_segment))
#define INDIRECT_GREFS(_segs) \
	((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)

@@ -393,7 +393,7 @@ static int blkif_queue_request(struct request *req)
	unsigned long id;
	unsigned int fsect, lsect;
	int i, ref, n;
	struct blkif_request_segment_aligned *segments = NULL;
	struct blkif_request_segment *segments = NULL;

	/*
	 * Used to store if we are able to queue the request by just using
@@ -550,7 +550,7 @@ static int blkif_queue_request(struct request *req)
			} else {
				n = i % SEGS_PER_INDIRECT_FRAME;
				segments[n] =
					(struct blkif_request_segment_aligned) {
					(struct blkif_request_segment) {
							.gref       = ref,
							.first_sect = fsect,
							.last_sect  = lsect };
+14 −20
Original line number Diff line number Diff line
@@ -113,13 +113,13 @@ typedef uint64_t blkif_sector_t;
 * it's less than the number provided by the backend. The indirect_grefs field
 * in blkif_request_indirect should be filled by the frontend with the
 * grant references of the pages that are holding the indirect segments.
 * This pages are filled with an array of blkif_request_segment_aligned
 * that hold the information about the segments. The number of indirect
 * pages to use is determined by the maximum number of segments
 * a indirect request contains. Every indirect page can contain a maximum
 * of 512 segments (PAGE_SIZE/sizeof(blkif_request_segment_aligned)),
 * so to calculate the number of indirect pages to use we have to do
 * ceil(indirect_segments/512).
 * These pages are filled with an array of blkif_request_segment that hold the
 * information about the segments. The number of indirect pages to use is
 * determined by the number of segments an indirect request contains. Every
 * indirect page can contain a maximum of
 * (PAGE_SIZE / sizeof(struct blkif_request_segment)) segments, so to
 * calculate the number of indirect pages to use we have to do
 * ceil(indirect_segments / (PAGE_SIZE / sizeof(struct blkif_request_segment))).
 *
 * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
 * create the "feature-max-indirect-segments" node!
@@ -135,13 +135,12 @@ typedef uint64_t blkif_sector_t;

#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8

struct blkif_request_segment_aligned {
struct blkif_request_segment {
		grant_ref_t gref;        /* reference to I/O buffer frame        */
		/* @first_sect: first sector in frame to transfer (inclusive).   */
		/* @last_sect: last sector in frame to transfer (inclusive).     */
		uint8_t     first_sect, last_sect;
	uint16_t    _pad; /* padding to make it 8 bytes, so it's cache-aligned */
} __attribute__((__packed__));
};

struct blkif_request_rw {
	uint8_t        nr_segments;  /* number of segments                   */
@@ -151,12 +150,7 @@ struct blkif_request_rw {
#endif
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
	struct blkif_request_segment {
		grant_ref_t gref;        /* reference to I/O buffer frame        */
		/* @first_sect: first sector in frame to transfer (inclusive).   */
		/* @last_sect: last sector in frame to transfer (inclusive).     */
		uint8_t     first_sect, last_sect;
	} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_request_discard {