
Commit 235a71c5 authored by Stefano Stabellini, committed by Boris Ostrovsky

xen/pvcalls: implement release command

Send PVCALLS_RELEASE to the backend and wait for a reply. Take both
in_mutex and out_mutex to avoid concurrent accesses. Then, free the
socket.

For passive sockets, check whether we have already pre-allocated an
active socket for the purpose of being accepted. If so, free that as
well.
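
As a minimal, hypothetical sketch of a caller (not part of this commit), a socket-layer release hook could simply forward to the new entry point: pvcalls_front_release() returns 0 for sockets that never acquired a pvcalls mapping, and it sleeps waiting for the backend reply, so it must run in process context. The pvcalls_stream_release name below is an assumption for illustration only and assumes the pvcalls-front header is included.

/*
 * Illustrative caller only (not part of this commit): forward a socket
 * release to the pvcalls frontend.  Returns 0 for sockets with no backing
 * mapping, -EIO if the frontend device is gone, or the error from
 * get_request() if no request slot is available.
 */
static int pvcalls_stream_release(struct socket *sock)
{
	return pvcalls_front_release(sock);
}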

Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
CC: boris.ostrovsky@oracle.com
CC: jgross@suse.com
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
parent 5842c835
drivers/xen/pvcalls-front.c +98 −0
@@ -199,6 +199,21 @@ static irqreturn_t pvcalls_front_event_handler(int irq, void *dev_id)
static void pvcalls_front_free_map(struct pvcalls_bedata *bedata,
				   struct sock_mapping *map)
{
	int i;

	unbind_from_irqhandler(map->active.irq, map);

	spin_lock(&bedata->socket_lock);
	if (!list_empty(&map->list))
		list_del_init(&map->list);
	spin_unlock(&bedata->socket_lock);

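	/*
	 * End foreign access on each grant backing the data ring, then on
	 * the page that holds the ring indexes and grant refs, and free
	 * that page.
	 */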
	for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
		gnttab_end_foreign_access(map->active.ring->ref[i], 0, 0);
	gnttab_end_foreign_access(map->active.ref, 0, 0);
	free_page((unsigned long)map->active.ring);

	kfree(map);
}

static irqreturn_t pvcalls_front_conn_handler(int irq, void *sock_map)
@@ -972,6 +987,89 @@ unsigned int pvcalls_front_poll(struct file *file, struct socket *sock,
	return ret;
}

int pvcalls_front_release(struct socket *sock)
{
	struct pvcalls_bedata *bedata;
	struct sock_mapping *map;
	int req_id, notify, ret;
	struct xen_pvcalls_request *req;

	if (sock->sk == NULL)
		return 0;

	pvcalls_enter();
	if (!pvcalls_front_dev) {
		pvcalls_exit();
		return -EIO;
	}

	bedata = dev_get_drvdata(&pvcalls_front_dev->dev);

	map = (struct sock_mapping *) sock->sk->sk_send_head;
	if (map == NULL) {
		pvcalls_exit();
		return 0;
	}

	spin_lock(&bedata->socket_lock);
	ret = get_request(bedata, &req_id);
	if (ret < 0) {
		spin_unlock(&bedata->socket_lock);
		pvcalls_exit();
		return ret;
	}
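	/*
	 * Detach the mapping from the socket: once sk_send_head is cleared,
	 * no new operation can look this map up (see the comment below).
	 */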
	sock->sk->sk_send_head = NULL;

	req = RING_GET_REQUEST(&bedata->ring, req_id);
	req->req_id = req_id;
	req->cmd = PVCALLS_RELEASE;
	req->u.release.id = (uintptr_t)map;

	bedata->ring.req_prod_pvt++;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
	spin_unlock(&bedata->socket_lock);
	if (notify)
		notify_remote_via_irq(bedata->irq);

	wait_event(bedata->inflight_req,
		   READ_ONCE(bedata->rsp[req_id].req_id) == req_id);

	if (map->active_socket) {
		/*
		 * Set in_error and wake up inflight_conn_req to force
		 * recvmsg waiters to exit.
		 */
		map->active.ring->in_error = -EBADF;
		wake_up_interruptible(&map->active.inflight_conn_req);

		/*
		 * Wait until there are no more waiters on the mutexes.
		 * We know that no new waiters can be added because sk_send_head
		 * is set to NULL -- we only need to wait for the existing
		 * waiters to return.
		 */
		while (!mutex_trylock(&map->active.in_mutex) ||
			   !mutex_trylock(&map->active.out_mutex))
			cpu_relax();

		pvcalls_front_free_map(bedata, map);
	} else {
		spin_lock(&bedata->socket_lock);
		list_del(&map->list);
		spin_unlock(&bedata->socket_lock);
		if (READ_ONCE(map->passive.inflight_req_id) !=
		    PVCALLS_INVALID_ID) {
			pvcalls_front_free_map(bedata,
					       map->passive.accept_map);
		}
		kfree(map);
	}
	WRITE_ONCE(bedata->rsp[req_id].req_id, PVCALLS_INVALID_ID);

	pvcalls_exit();
	return 0;
}

static const struct xenbus_device_id pvcalls_front_ids[] = {
	{ "pvcalls" },
	{ "" }
drivers/xen/pvcalls-front.h +1 −0
@@ -23,5 +23,6 @@ int pvcalls_front_recvmsg(struct socket *sock,
unsigned int pvcalls_front_poll(struct file *file,
				struct socket *sock,
				poll_table *wait);
int pvcalls_front_release(struct socket *sock);

#endif