Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8dad9a67 authored by Roger Pau Monne, committed by Greg Kroah-Hartman
Browse files

xen/blkfront: force data bouncing when backend is untrusted



commit 2400617da7eebf9167d71a46122828bc479d64c9 upstream.

Split the current bounce buffering logic used with persistent grants
into its own option, and allow enabling it independently of
persistent grants.  This allows reusing the same code paths to
perform the bounce buffering required to avoid leaking contiguous data
in shared pages not part of the request fragments.

Reporting whether the backend is to be trusted can be done using a
module parameter, or from the xenstore frontend path as set by the
toolstack when adding the device.

This is CVE-2022-33742, part of XSA-403.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent c6e94136
Loading
Loading
Loading
Loading
+30 −15
Original line number Diff line number Diff line
@@ -144,6 +144,10 @@ static unsigned int xen_blkif_max_ring_order;
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");

static bool __read_mostly xen_blkif_trusted = true;
module_param_named(trusted, xen_blkif_trusted, bool, 0644);
MODULE_PARM_DESC(trusted, "Is the backend trusted");

#define BLK_RING_SIZE(info)	\
	__CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)

@@ -206,6 +210,7 @@ struct blkfront_info
	unsigned int discard_granularity;
	unsigned int discard_alignment;
	unsigned int feature_persistent:1;
	unsigned int bounce:1;
	/* Number of 4KB segments handled */
	unsigned int max_indirect_segments;
	int is_ready;
@@ -296,7 +301,7 @@ static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
		if (!gnt_list_entry)
			goto out_of_memory;

		if (info->feature_persistent) {
		if (info->bounce) {
			granted_page = alloc_page(GFP_NOIO | __GFP_ZERO);
			if (!granted_page) {
				kfree(gnt_list_entry);
@@ -316,7 +321,7 @@ static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
	list_for_each_entry_safe(gnt_list_entry, n,
	                         &rinfo->grants, node) {
		list_del(&gnt_list_entry->node);
		if (info->feature_persistent)
		if (info->bounce)
			__free_page(gnt_list_entry->page);
		kfree(gnt_list_entry);
		i--;
@@ -362,7 +367,7 @@ static struct grant *get_grant(grant_ref_t *gref_head,
	/* Assign a gref to this page */
	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
	BUG_ON(gnt_list_entry->gref == -ENOSPC);
	if (info->feature_persistent)
	if (info->bounce)
		grant_foreign_access(gnt_list_entry, info);
	else {
		/* Grant access to the GFN passed by the caller */
@@ -386,7 +391,7 @@ static struct grant *get_indirect_grant(grant_ref_t *gref_head,
	/* Assign a gref to this page */
	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
	BUG_ON(gnt_list_entry->gref == -ENOSPC);
	if (!info->feature_persistent) {
	if (!info->bounce) {
		struct page *indirect_page;

		/* Fetch a pre-allocated page to use for indirect grefs */
@@ -701,7 +706,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
		.grant_idx = 0,
		.segments = NULL,
		.rinfo = rinfo,
		.need_copy = rq_data_dir(req) && info->feature_persistent,
		.need_copy = rq_data_dir(req) && info->bounce,
	};

	/*
@@ -1015,11 +1020,12 @@ static void xlvbd_flush(struct blkfront_info *info)
{
	blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
			      info->feature_fua ? true : false);
	pr_info("blkfront: %s: %s %s %s %s %s\n",
	pr_info("blkfront: %s: %s %s %s %s %s %s %s\n",
		info->gd->disk_name, flush_info(info),
		"persistent grants:", info->feature_persistent ?
		"enabled;" : "disabled;", "indirect descriptors:",
		info->max_indirect_segments ? "enabled;" : "disabled;");
		info->max_indirect_segments ? "enabled;" : "disabled;",
		"bounce buffer:", info->bounce ? "enabled" : "disabled;");
}

static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
@@ -1254,7 +1260,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
	if (!list_empty(&rinfo->indirect_pages)) {
		struct page *indirect_page, *n;

		BUG_ON(info->feature_persistent);
		BUG_ON(info->bounce);
		list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
			list_del(&indirect_page->lru);
			__free_page(indirect_page);
@@ -1271,7 +1277,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
				continue;

			rinfo->persistent_gnts_c--;
			if (info->feature_persistent)
			if (info->bounce)
				__free_page(persistent_gnt->page);
			kfree(persistent_gnt);
		}
@@ -1291,7 +1297,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
		for (j = 0; j < segs; j++) {
			persistent_gnt = rinfo->shadow[i].grants_used[j];
			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
			if (info->feature_persistent)
			if (info->bounce)
				__free_page(persistent_gnt->page);
			kfree(persistent_gnt);
		}
@@ -1481,7 +1487,7 @@ static int blkif_completion(unsigned long *id,
	data.s = s;
	num_sg = s->num_sg;

	if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
	if (bret->operation == BLKIF_OP_READ && info->bounce) {
		for_each_sg(s->sg, sg, num_sg, i) {
			BUG_ON(sg->offset + sg->length > PAGE_SIZE);

@@ -1540,7 +1546,7 @@ static int blkif_completion(unsigned long *id,
				 * Add the used indirect page back to the list of
				 * available pages for indirect grefs.
				 */
				if (!info->feature_persistent) {
				if (!info->bounce) {
					indirect_page = s->indirect_grants[i]->page;
					list_add(&indirect_page->lru, &rinfo->indirect_pages);
				}
@@ -1822,6 +1828,13 @@ static int talk_to_blkback(struct xenbus_device *dev,
	int err;
	unsigned int i, max_page_order = 0;
	unsigned int ring_page_order = 0;
	unsigned int trusted;

	/* Check if backend is trusted. */
	err = xenbus_scanf(XBT_NIL, dev->nodename, "trusted", "%u", &trusted);
	if (err < 0)
		trusted = 1;
	info->bounce = !xen_blkif_trusted || !trusted;

	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
			   "max-ring-page-order", "%u", &max_page_order);
@@ -2301,10 +2314,10 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
	if (err)
		goto out_of_memory;

	if (!info->feature_persistent && info->max_indirect_segments) {
	if (!info->bounce && info->max_indirect_segments) {
		/*
		 * We are using indirect descriptors but not persistent
		 * grants, we need to allocate a set of pages that can be
		 * We are using indirect descriptors but don't have a bounce
		 * buffer, we need to allocate a set of pages that can be
		 * used for mapping indirect grefs
		 */
		int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info);
@@ -2410,6 +2423,8 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
		info->feature_persistent = 0;
	else
		info->feature_persistent = persistent;
	if (info->feature_persistent)
		info->bounce = true;

	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
			   "feature-max-indirect-segments", "%u",