Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2a04ae8a authored by Vitaly Kuznetsov, committed by David S. Miller
Browse files

hv_netvsc: remove locking in netvsc_send()



The packet scheduler guarantees there won't be multiple senders for the same
queue, and since we use q_idx to index multi_send_data, the spinlock is redundant.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8b9fbe1a
Loading
Loading
Loading
Loading
+0 −1
Original line number Original line Diff line number Diff line
@@ -632,7 +632,6 @@ struct nvsp_message {
#define RNDIS_PKT_ALIGN_DEFAULT 8
#define RNDIS_PKT_ALIGN_DEFAULT 8


struct multi_send_data {
struct multi_send_data {
	spinlock_t lock; /* protect struct multi_send_data */
	struct hv_netvsc_packet *pkt; /* netvsc pkt pending */
	struct hv_netvsc_packet *pkt; /* netvsc pkt pending */
	u32 count; /* counter of batched packets */
	u32 count; /* counter of batched packets */
};
};
+0 −8
Original line number Original line Diff line number Diff line
@@ -38,7 +38,6 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device)
{
{
	struct netvsc_device *net_device;
	struct netvsc_device *net_device;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device *ndev = hv_get_drvdata(device);
	int i;


	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
	if (!net_device)
@@ -58,9 +57,6 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device)
	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;


	for (i = 0; i < num_online_cpus(); i++)
		spin_lock_init(&net_device->msd[i].lock);

	hv_set_drvdata(device, net_device);
	hv_set_drvdata(device, net_device);
	return net_device;
	return net_device;
}
}
@@ -850,7 +846,6 @@ int netvsc_send(struct hv_device *device,
	u16 q_idx = packet->q_idx;
	u16 q_idx = packet->q_idx;
	u32 pktlen = packet->total_data_buflen, msd_len = 0;
	u32 pktlen = packet->total_data_buflen, msd_len = 0;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	unsigned long flag;
	struct multi_send_data *msdp;
	struct multi_send_data *msdp;
	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
	bool try_batch;
	bool try_batch;
@@ -867,7 +862,6 @@ int netvsc_send(struct hv_device *device,
	msdp = &net_device->msd[q_idx];
	msdp = &net_device->msd[q_idx];


	/* batch packets in send buffer if possible */
	/* batch packets in send buffer if possible */
	spin_lock_irqsave(&msdp->lock, flag);
	if (msdp->pkt)
	if (msdp->pkt)
		msd_len = msdp->pkt->total_data_buflen;
		msd_len = msdp->pkt->total_data_buflen;


@@ -927,8 +921,6 @@ int netvsc_send(struct hv_device *device,
		cur_send = packet;
		cur_send = packet;
	}
	}


	spin_unlock_irqrestore(&msdp->lock, flag);

	if (msd_send) {
	if (msd_send) {
		m_ret = netvsc_send_pkt(msd_send, net_device);
		m_ret = netvsc_send_pkt(msd_send, net_device);