Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3e4aa12f authored by Chris Lew, committed by Gerrit - the friendly Code Review server
Browse files

net: qrtr: Add backup skb pool



Add a pool of SKBs that can be used when the system is in low memory
conditions. This pool will be shared between all nodes and replenished
by a worker function.

Change-Id: I639a9ac76db726dc8ad46b12d3b3d560c674939c
Signed-off-by: Chris Lew <clew@codeaurora.org>
parent 1ca2f081
Loading
Loading
Loading
Loading
+68 −2
Original line number Original line Diff line number Diff line
@@ -130,6 +130,15 @@ static DECLARE_RWSEM(qrtr_epts_lock);
static DEFINE_IDR(qrtr_ports);
static DEFINE_IDR(qrtr_ports);
static DEFINE_SPINLOCK(qrtr_port_lock);
static DEFINE_SPINLOCK(qrtr_port_lock);


/* backup buffers */
#define QRTR_BACKUP_HI_NUM	5
#define QRTR_BACKUP_HI_SIZE	SZ_16K
#define QRTR_BACKUP_LO_NUM	20
#define QRTR_BACKUP_LO_SIZE	SZ_1K
static struct sk_buff_head qrtr_backup_lo;
static struct sk_buff_head qrtr_backup_hi;
static struct work_struct qrtr_backup_work;

/**
/**
 * struct qrtr_node - endpoint node
 * struct qrtr_node - endpoint node
 * @ep_lock: lock for endpoint management and callbacks
 * @ep_lock: lock for endpoint management and callbacks
@@ -716,6 +725,54 @@ int qrtr_peek_pkt_size(const void *data)
}
}
EXPORT_SYMBOL(qrtr_peek_pkt_size);
EXPORT_SYMBOL(qrtr_peek_pkt_size);


static void qrtr_alloc_backup(struct work_struct *work)
{
	struct sk_buff *skb;

	while (skb_queue_len(&qrtr_backup_lo) < QRTR_BACKUP_LO_NUM) {
		skb = alloc_skb(QRTR_BACKUP_LO_SIZE, GFP_KERNEL);
		if (!skb)
			break;
		skb_queue_tail(&qrtr_backup_lo, skb);
	}
	while (skb_queue_len(&qrtr_backup_hi) < QRTR_BACKUP_HI_NUM) {
		skb = alloc_skb(QRTR_BACKUP_HI_SIZE, GFP_KERNEL);
		if (!skb)
			break;
		skb_queue_tail(&qrtr_backup_hi, skb);
	}
}

static struct sk_buff *qrtr_get_backup(size_t len)
{
	struct sk_buff *skb = NULL;

	if (len < QRTR_BACKUP_LO_SIZE)
		skb = skb_dequeue(&qrtr_backup_lo);
	else if (len < QRTR_BACKUP_HI_SIZE)
		skb = skb_dequeue(&qrtr_backup_hi);

	if (skb)
		queue_work(system_unbound_wq, &qrtr_backup_work);

	return skb;
}

static void qrtr_backup_init(void)
{
	skb_queue_head_init(&qrtr_backup_lo);
	skb_queue_head_init(&qrtr_backup_hi);
	INIT_WORK(&qrtr_backup_work, qrtr_alloc_backup);
	queue_work(system_unbound_wq, &qrtr_backup_work);
}

static void qrtr_backup_deinit(void)
{
	cancel_work_sync(&qrtr_backup_work);
	skb_queue_purge(&qrtr_backup_lo);
	skb_queue_purge(&qrtr_backup_hi);
}

/**
/**
 * qrtr_endpoint_post() - post incoming data
 * qrtr_endpoint_post() - post incoming data
 * @ep: endpoint handle
 * @ep: endpoint handle
@@ -742,8 +799,13 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
		return -EINVAL;
		return -EINVAL;


	skb = alloc_skb_with_frags(sizeof(*v1), len, 0, &errcode, GFP_ATOMIC);
	skb = alloc_skb_with_frags(sizeof(*v1), len, 0, &errcode, GFP_ATOMIC);
	if (!skb)
	if (!skb) {
		skb = qrtr_get_backup(len);
		if (!skb) {
			pr_err("qrtr: Unable to get skb with len:%lu\n", len);
			return -ENOMEM;
			return -ENOMEM;
		}
	}


	skb_reserve(skb, sizeof(*v1));
	skb_reserve(skb, sizeof(*v1));
	cb = (struct qrtr_cb *)skb->cb;
	cb = (struct qrtr_cb *)skb->cb;
@@ -1937,6 +1999,8 @@ static int __init qrtr_proto_init(void)


	qrtr_ns_init();
	qrtr_ns_init();


	qrtr_backup_init();

	return rc;
	return rc;
}
}
postcore_initcall(qrtr_proto_init);
postcore_initcall(qrtr_proto_init);
@@ -1946,6 +2010,8 @@ static void __exit qrtr_proto_fini(void)
	qrtr_ns_remove();
	qrtr_ns_remove();
	sock_unregister(qrtr_family.family);
	sock_unregister(qrtr_family.family);
	proto_unregister(&qrtr_proto);
	proto_unregister(&qrtr_proto);

	qrtr_backup_deinit();
}
}
module_exit(qrtr_proto_fini);
module_exit(qrtr_proto_fini);