
Commit 3273cba1 authored by Linus Torvalds

Merge tag 'for-linus-4.4-rc5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen bug fixes from David Vrabel:
 - XSA-155 security fixes to backend drivers.
 - XSA-157 security fixes to pciback.

* tag 'for-linus-4.4-rc5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen-pciback: fix up cleanup path when alloc fails
  xen/pciback: Don't allow MSI-X ops if PCI_COMMAND_MEMORY is not set.
  xen/pciback: For XEN_PCI_OP_disable_msi[|x] only disable if device has MSI(X) enabled.
  xen/pciback: Do not install an IRQ handler for MSI interrupts.
  xen/pciback: Return error on XEN_PCI_OP_enable_msix when device has MSI or MSI-X enabled
  xen/pciback: Return error on XEN_PCI_OP_enable_msi when device has MSI or MSI-X enabled
  xen/pciback: Save xen_pci_op commands before processing it
  xen-scsiback: safely copy requests
  xen-blkback: read from indirect descriptors only once
  xen-blkback: only read request operation from shared ring once
  xen-netback: use RING_COPY_REQUEST() throughout
  xen-netback: don't use last request to determine minimum Tx credit
  xen: Add RING_COPY_REQUEST()
  xen/x86/pvh: Use HVM's flush_tlb_others op
  xen: Resume PMU from non-atomic context
  xen/events/fifo: Consume unprocessed events when a CPU dies
parents 83ad283f 584a561a
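
Most of the XSA-155 commits above fix the same class of bug: a backend validates a request that still lives in a shared ring page, then reads it from that page again when acting on it. Because the frontend can rewrite the shared page between the two reads, the value that was checked and the value that is used can differ. The sketch below illustrates the pattern and the fix in plain C; the struct, function names and buffer handling are made up for illustration and are not the actual driver code.

#include <stddef.h>
#include <string.h>

/* Illustration only: 'shared' models a request slot in a ring page that
 * the frontend may rewrite at any time (hence volatile). */
struct demo_req {
	unsigned int len;
};

/* Racy variant: 'shared->len' is fetched twice, so the copy may use a
 * larger value than the one that passed the bounds check. */
static int demo_handle_racy(volatile struct demo_req *shared,
			    const char *src, char *dst, size_t dstsz)
{
	if (shared->len > dstsz)		/* fetch #1: validated */
		return -1;
	memcpy(dst, src, shared->len);		/* fetch #2: used */
	return 0;
}

/* Fixed variant, mirroring what RING_COPY_REQUEST()/READ_ONCE() do in
 * the patches: copy the request into backend-private memory once, then
 * validate and use only that local copy. */
static int demo_handle_fixed(volatile struct demo_req *shared,
			     const char *src, char *dst, size_t dstsz)
{
	struct demo_req local = *shared;	/* single fetch */

	if (local.len > dstsz)
		return -1;
	memcpy(dst, src, local.len);
	return 0;
}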
+2 −7
@@ -2495,14 +2495,9 @@ void __init xen_init_mmu_ops(void)
{
	x86_init.paging.pagetable_init = xen_pagetable_init;

-	/* Optimization - we can use the HVM one but it has no idea which
-	 * VCPUs are descheduled - which means that it will needlessly IPI
-	 * them. Xen knows so let it do the job.
-	 */
-	if (xen_feature(XENFEAT_auto_translated_physmap)) {
-		pv_mmu_ops.flush_tlb_others = xen_flush_tlb_others;
-	}
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return;

	pv_mmu_ops = xen_mmu_ops;

	memset(dummy_mapping, 0xff, PAGE_SIZE);
+10 −10
@@ -68,26 +68,16 @@ static void xen_pv_post_suspend(int suspend_cancelled)

void xen_arch_pre_suspend(void)
{
-	int cpu;
-
-	for_each_online_cpu(cpu)
-		xen_pmu_finish(cpu);
-
	if (xen_pv_domain())
		xen_pv_pre_suspend();
}

void xen_arch_post_suspend(int cancelled)
{
-	int cpu;
-
	if (xen_pv_domain())
		xen_pv_post_suspend(cancelled);
	else
		xen_hvm_post_suspend(cancelled);
-
-	for_each_online_cpu(cpu)
-		xen_pmu_init(cpu);
}

static void xen_vcpu_notify_restore(void *data)
@@ -106,10 +96,20 @@ static void xen_vcpu_notify_suspend(void *data)

void xen_arch_resume(void)
{
+	int cpu;
+
	on_each_cpu(xen_vcpu_notify_restore, NULL, 1);
+
+	for_each_online_cpu(cpu)
+		xen_pmu_init(cpu);
}

void xen_arch_suspend(void)
{
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		xen_pmu_finish(cpu);
+
	on_each_cpu(xen_vcpu_notify_suspend, NULL, 1);
}
+10 −5
@@ -950,6 +950,8 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
		goto unmap;

	for (n = 0, i = 0; n < nseg; n++) {
+		uint8_t first_sect, last_sect;
+
		if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
			/* Map indirect segments */
			if (segments)
@@ -957,15 +959,18 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
			segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
		}
		i = n % SEGS_PER_INDIRECT_FRAME;

		pending_req->segments[n]->gref = segments[i].gref;
-		seg[n].nsec = segments[i].last_sect -
-			segments[i].first_sect + 1;
-		seg[n].offset = (segments[i].first_sect << 9);
-		if ((segments[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
-		    (segments[i].last_sect < segments[i].first_sect)) {
+
+		first_sect = READ_ONCE(segments[i].first_sect);
+		last_sect = READ_ONCE(segments[i].last_sect);
+		if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) {
			rc = -EINVAL;
			goto unmap;
		}

+		seg[n].nsec = last_sect - first_sect + 1;
+		seg[n].offset = first_sect << 9;
		preq->nr_sects += seg[n].nsec;
	}
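The indirect-descriptor fix above leans on READ_ONCE() to pin down each field read from the shared segment page. Even a version that copies last_sect into a local variable would not be enough on its own, because the compiler is otherwise free to re-load the value from the shared page after the bounds check. READ_ONCE() comes from <linux/compiler.h>; as a rough mental model (simplified, not the real kernel macro) it behaves like a single volatile load:

/* Rough model only -- the real READ_ONCE() also copes with sizes the
 * compiler cannot load in one access, but the core idea is a volatile
 * read that the optimizer may neither repeat nor merge. */
#define READ_ONCE_MODEL(x)	(*(const volatile typeof(x) *)&(x))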

+4 −4
@@ -408,8 +408,8 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
					struct blkif_x86_32_request *src)
{
	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
-	dst->operation = src->operation;
-	switch (src->operation) {
+	dst->operation = READ_ONCE(src->operation);
+	switch (dst->operation) {
	case BLKIF_OP_READ:
	case BLKIF_OP_WRITE:
	case BLKIF_OP_WRITE_BARRIER:
@@ -456,8 +456,8 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
					struct blkif_x86_64_request *src)
{
	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
-	dst->operation = src->operation;
-	switch (src->operation) {
+	dst->operation = READ_ONCE(src->operation);
+	switch (dst->operation) {
	case BLKIF_OP_READ:
	case BLKIF_OP_WRITE:
	case BLKIF_OP_WRITE_BARRIER:
+15 −19
@@ -258,18 +258,18 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
						 struct netrx_pending_operations *npo)
{
	struct xenvif_rx_meta *meta;
-	struct xen_netif_rx_request *req;
+	struct xen_netif_rx_request req;

-	req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
+	RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);

	meta = npo->meta + npo->meta_prod++;
	meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
	meta->gso_size = 0;
	meta->size = 0;
-	meta->id = req->id;
+	meta->id = req.id;

	npo->copy_off = 0;
-	npo->copy_gref = req->gref;
+	npo->copy_gref = req.gref;

	return meta;
}
@@ -424,7 +424,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
	struct xenvif *vif = netdev_priv(skb->dev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;
-	struct xen_netif_rx_request *req;
+	struct xen_netif_rx_request req;
	struct xenvif_rx_meta *meta;
	unsigned char *data;
	int head = 1;
@@ -443,15 +443,15 @@ static int xenvif_gop_skb(struct sk_buff *skb,

	/* Set up a GSO prefix descriptor, if necessary */
	if ((1 << gso_type) & vif->gso_prefix_mask) {
-		req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
+		RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
		meta = npo->meta + npo->meta_prod++;
		meta->gso_type = gso_type;
		meta->gso_size = skb_shinfo(skb)->gso_size;
		meta->size = 0;
-		meta->id = req->id;
+		meta->id = req.id;
	}

-	req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
+	RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
	meta = npo->meta + npo->meta_prod++;

	if ((1 << gso_type) & vif->gso_mask) {
@@ -463,9 +463,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
	}

	meta->size = 0;
-	meta->id = req->id;
+	meta->id = req.id;
	npo->copy_off = 0;
-	npo->copy_gref = req->gref;
+	npo->copy_gref = req.gref;

	data = skb->data;
	while (data < skb_tail_pointer(skb)) {
@@ -679,9 +679,7 @@ static void tx_add_credit(struct xenvif_queue *queue)
	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
	 * Otherwise the interface can seize up due to insufficient credit.
	 */
-	max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size;
-	max_burst = min(max_burst, 131072UL);
-	max_burst = max(max_burst, queue->credit_bytes);
+	max_burst = max(131072UL, queue->credit_bytes);

	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
	max_credit = queue->remaining_credit + queue->credit_bytes;
@@ -711,7 +709,7 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
		spin_unlock_irqrestore(&queue->response_lock, flags);
		if (cons == end)
			break;
-		txp = RING_GET_REQUEST(&queue->tx, cons++);
+		RING_COPY_REQUEST(&queue->tx, cons++, txp);
	} while (1);
	queue->tx.req_cons = cons;
}
@@ -778,8 +776,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
		if (drop_err)
			txp = &dropped_tx;

-		memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots),
-		       sizeof(*txp));
+		RING_COPY_REQUEST(&queue->tx, cons + slots, txp);

		/* If the guest submitted a frame >= 64 KiB then
		 * first->size overflowed and following slots will
@@ -1112,8 +1109,7 @@ static int xenvif_get_extras(struct xenvif_queue *queue,
			return -EBADR;
		}

-		memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons),
-		       sizeof(extra));
+		RING_COPY_REQUEST(&queue->tx, cons, &extra);
		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			queue->tx.req_cons = ++cons;
@@ -1322,7 +1318,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,

		idx = queue->tx.req_cons;
		rmb(); /* Ensure that we see the request before we copy it. */
-		memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq));
+		RING_COPY_REQUEST(&queue->tx, idx, &txreq);

		/* Credit-based scheduling. */
		if (txreq.size > queue->remaining_credit &&
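The netback changes replace every RING_GET_REQUEST() pointer dereference into the shared ring with RING_COPY_REQUEST(), the helper added by the "xen: Add RING_COPY_REQUEST()" commit in this series. Its shape is roughly the following (paraphrased, so treat the exact wording as approximate): it snapshots the whole slot into caller-owned storage before any field is inspected.

/* Approximate shape of the new helper: force a one-shot copy of the
 * request out of the shared page into *_req, so later reads cannot see
 * frontend modifications. */
#define RING_COPY_REQUEST(_r, _idx, _req) do {				\
	/* Volatile forces the copy instead of keeping a ring pointer. */ \
	*(_req) = *(volatile typeof(*(_req)) *)RING_GET_REQUEST(_r, _idx); \
} while (0)

Callers then work on the local structure (for example &txreq above) and never retain a pointer into the shared ring.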