
Commit 9810c3af authored by qctecmdr, committed by Gerrit - the friendly Code Review server

Merge "dfc: Support QoS rebind"

parents c708efc5 4bc15d0a
+29 −52
@@ -911,74 +911,52 @@ int dfc_bearer_flow_ctl(struct net_device *dev,
 			struct rmnet_bearer_map *bearer,
 			struct qos_info *qos)
 {
-	int rc = 0, qlen;
-	int enable;
-	int i;
+	bool enable;
 
-	enable = bearer->grant_size ? 1 : 0;
+	enable = bearer->grant_size ? true : false;
 
-	for (i = 0; i < MAX_MQ_NUM; i++) {
-		if (qos->mq[i].bearer == bearer) {
-			/* Do not flow disable ancillary q in tcp bidir */
-			if (qos->mq[i].ancillary &&
-			    bearer->tcp_bidir && !enable)
-				continue;
+	qmi_rmnet_flow_control(dev, bearer->mq_idx, enable);
+	trace_dfc_qmi_tc(dev->name, bearer->bearer_id,
+			 bearer->grant_size,
+			 0, bearer->mq_idx, enable);
 
-			qlen = qmi_rmnet_flow_control(dev, i, enable);
-			trace_dfc_qmi_tc(dev->name, bearer->bearer_id,
-					 bearer->grant_size,
-					 qlen, i, enable);
-			rc++;
-		}
+	/* Do not flow disable tcp ack q in tcp bidir */
+	if (bearer->ack_mq_idx != INVALID_MQ &&
+	    (enable || !bearer->tcp_bidir)) {
+		qmi_rmnet_flow_control(dev, bearer->ack_mq_idx, enable);
+		trace_dfc_qmi_tc(dev->name, bearer->bearer_id,
+				 bearer->grant_size,
+				 0, bearer->ack_mq_idx, enable);
 	}
 
-	if (enable == 0 && bearer->ack_req)
+	if (!enable && bearer->ack_req)
 		dfc_send_ack(dev, bearer->bearer_id,
 			     bearer->seq, qos->mux_id,
 			     DFC_ACK_TYPE_DISABLE);
 
-	return rc;
+	return 0;
 }
 
 static int dfc_all_bearer_flow_ctl(struct net_device *dev,
 				struct qos_info *qos, u8 ack_req, u32 ancillary,
 				struct dfc_flow_status_info_type_v01 *fc_info)
 {
-	struct rmnet_bearer_map *bearer_itm;
-	int rc = 0, qlen;
-	bool enable;
-	int i;
+	struct rmnet_bearer_map *bearer;
 
-	enable = fc_info->num_bytes > 0 ? 1 : 0;
-
-	list_for_each_entry(bearer_itm, &qos->bearer_head, list) {
-		bearer_itm->grant_size = fc_info->num_bytes;
-		bearer_itm->grant_thresh =
-			qmi_rmnet_grant_per(bearer_itm->grant_size);
-		bearer_itm->seq = fc_info->seq_num;
-		bearer_itm->ack_req = ack_req;
-		bearer_itm->tcp_bidir = DFC_IS_TCP_BIDIR(ancillary);
-		bearer_itm->last_grant = fc_info->num_bytes;
-		bearer_itm->last_seq = fc_info->seq_num;
-	}
+	list_for_each_entry(bearer, &qos->bearer_head, list) {
+		bearer->grant_size = fc_info->num_bytes;
+		bearer->grant_thresh =
+			qmi_rmnet_grant_per(bearer->grant_size);
+		bearer->seq = fc_info->seq_num;
+		bearer->ack_req = ack_req;
+		bearer->tcp_bidir = DFC_IS_TCP_BIDIR(ancillary);
+		bearer->last_grant = fc_info->num_bytes;
+		bearer->last_seq = fc_info->seq_num;
 
-	for (i = 0; i < MAX_MQ_NUM; i++) {
-		bearer_itm = qos->mq[i].bearer;
-		if (!bearer_itm)
-			continue;
-		qlen = qmi_rmnet_flow_control(dev, i, enable);
-		trace_dfc_qmi_tc(dev->name, bearer_itm->bearer_id,
-				 fc_info->num_bytes,
-				 qlen, i, enable);
-		rc++;
+		dfc_bearer_flow_ctl(dev, bearer, qos);
 	}
 
-	if (enable == 0 && ack_req)
-		dfc_send_ack(dev, fc_info->bearer_id,
-			     fc_info->seq_num, fc_info->mux_id,
-			     DFC_ACK_TYPE_DISABLE);
-
-	return rc;
+	return 0;
 }
 
 static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
@@ -1023,9 +1001,8 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
 
 		if (action)
 			rc = dfc_bearer_flow_ctl(dev, itm, qos);
-	} else {
-		qos->default_grant = fc_info->num_bytes;
 	}
 
 	return rc;
 }
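Note on the hunks above: dfc_bearer_flow_ctl no longer scans all MAX_MQ_NUM queues looking for the bearer; it drives the bearer's recorded data queue (mq_idx) directly and, when one is bound, its TCP-ACK queue (ack_mq_idx). The one asymmetry is that a zero grant with tcp_bidir set skips the disable on the ACK queue, so uplink ACKs can keep a downlink TCP transfer alive. A minimal user-space sketch of that gating decision (illustration only, not driver code):

/*
 * Resulting run/stop state of a queue after dfc_bearer_flow_ctl:
 * "grant" is the bearer's remaining byte grant from the modem.
 * On a zero grant with tcp_bidir set, the disable of the ACK queue
 * is skipped, so it is left running.
 */
#include <stdbool.h>
#include <stdio.h>

static bool queue_enabled(unsigned int grant, bool tcp_bidir, bool is_ack_q)
{
	bool enable = grant > 0;

	/* Do not flow disable tcp ack q in tcp bidir */
	if (is_ack_q && !enable && tcp_bidir)
		return true;

	return enable;
}

int main(void)
{
	/* Zero grant: data queue stops, ACK queue survives in bidir mode */
	printf("data q: %d\n", queue_enabled(0, true, false));	/* 0 */
	printf("ack  q: %d\n", queue_enabled(0, true, true));	/* 1 */
	return 0;
}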


+221 −92
@@ -26,8 +26,11 @@
 #define FLAG_QMAP_MASK 0x0020
 
 #define FLAG_TO_MODE(f) ((f) & FLAG_DFC_MASK)
+
 #define DFC_SUPPORTED_MODE(m) \
-	((m) == DFC_MODE_FLOW_ID || (m) == DFC_MODE_MQ_NUM)
+	((m) == DFC_MODE_FLOW_ID || (m) == DFC_MODE_MQ_NUM || \
+	 (m) == DFC_MODE_SA)
+
 #define FLAG_TO_QMAP(f) ((f) & FLAG_QMAP_MASK)
 
 int dfc_mode;
@@ -218,6 +221,131 @@ static void qmi_rmnet_reset_txq(struct net_device *dev, unsigned int txq)
 	}
 }
 
+static struct rmnet_bearer_map *__qmi_rmnet_bearer_get(
+				struct qos_info *qos_info, u8 bearer_id)
+{
+	struct rmnet_bearer_map *bearer;
+
+	bearer = qmi_rmnet_get_bearer_map(qos_info, bearer_id);
+	if (bearer) {
+		bearer->flow_ref++;
+	} else {
+		bearer = kzalloc(sizeof(*bearer), GFP_ATOMIC);
+		if (!bearer)
+			return NULL;
+
+		bearer->bearer_id = bearer_id;
+		bearer->flow_ref = 1;
+		bearer->grant_size = DEFAULT_GRANT;
+		bearer->grant_thresh = qmi_rmnet_grant_per(bearer->grant_size);
+		bearer->mq_idx = INVALID_MQ;
+		bearer->ack_mq_idx = INVALID_MQ;
+		list_add(&bearer->list, &qos_info->bearer_head);
+	}
+
+	return bearer;
+}
+
+static void __qmi_rmnet_bearer_put(struct net_device *dev,
+				   struct qos_info *qos_info,
+				   struct rmnet_bearer_map *bearer,
+				   bool reset)
+{
+	struct mq_map *mq;
+	int i, j;
+
+	if (bearer && --bearer->flow_ref == 0) {
+		for (i = 0; i < MAX_MQ_NUM; i++) {
+			mq = &qos_info->mq[i];
+			if (mq->bearer != bearer)
+				continue;
+
+			mq->bearer = NULL;
+			if (reset) {
+				qmi_rmnet_reset_txq(dev, i);
+				qmi_rmnet_flow_control(dev, i, 1);
+				trace_dfc_qmi_tc(dev->name,
+					bearer->bearer_id, 0, 0, i, 1);
+
+				if (dfc_mode == DFC_MODE_SA) {
+					j = i + ACK_MQ_OFFSET;
+					qmi_rmnet_reset_txq(dev, j);
+					qmi_rmnet_flow_control(dev, j, 1);
+					trace_dfc_qmi_tc(dev->name,
+						bearer->bearer_id, 0, 0, j, 1);
+				}
+			}
+		}
+
+		/* Remove from bearer map */
+		list_del(&bearer->list);
+		kfree(bearer);
+	}
+}
+
+static void __qmi_rmnet_update_mq(struct net_device *dev,
+				  struct qos_info *qos_info,
+				  struct rmnet_bearer_map *bearer,
+				  struct rmnet_flow_map *itm)
+{
+	struct mq_map *mq;
+
+	/* In SA mode default mq is not associated with any bearer */
+	if (dfc_mode == DFC_MODE_SA && itm->mq_idx == DEFAULT_MQ_NUM)
+		return;
+
+	mq = &qos_info->mq[itm->mq_idx];
+	if (!mq->bearer) {
+		mq->bearer = bearer;
+
+		if (dfc_mode == DFC_MODE_SA) {
+			bearer->mq_idx = itm->mq_idx;
+			bearer->ack_mq_idx = itm->mq_idx + ACK_MQ_OFFSET;
+		} else {
+			if (IS_ANCILLARY(itm->ip_type))
+				bearer->ack_mq_idx = itm->mq_idx;
+			else
+				bearer->mq_idx = itm->mq_idx;
+		}
+
+		qmi_rmnet_flow_control(dev, itm->mq_idx,
+				       bearer->grant_size > 0 ? 1 : 0);
+		trace_dfc_qmi_tc(dev->name, itm->bearer_id,
+				 bearer->grant_size, 0, itm->mq_idx,
+				 bearer->grant_size > 0 ? 1 : 0);
+
+		if (dfc_mode == DFC_MODE_SA) {
+			qmi_rmnet_flow_control(dev, bearer->ack_mq_idx,
+					bearer->grant_size > 0 ? 1 : 0);
+			trace_dfc_qmi_tc(dev->name, itm->bearer_id,
+					bearer->grant_size, 0,
+					bearer->ack_mq_idx,
+					bearer->grant_size > 0 ? 1 : 0);
+		}
+	}
+}
+
+static int __qmi_rmnet_rebind_flow(struct net_device *dev,
+				   struct qos_info *qos_info,
+				   struct rmnet_flow_map *itm,
+				   struct rmnet_flow_map *new_map)
+{
+	struct rmnet_bearer_map *bearer;
+
+	__qmi_rmnet_bearer_put(dev, qos_info, itm->bearer, false);
+
+	bearer = __qmi_rmnet_bearer_get(qos_info, new_map->bearer_id);
+	if (!bearer)
+		return -ENOMEM;
+
+	qmi_rmnet_update_flow_map(itm, new_map);
+	itm->bearer = bearer;
+
+	__qmi_rmnet_update_mq(dev, qos_info, bearer, itm);
+
+	return 0;
+}
+
 static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
 			      struct qmi_info *qmi)
 {
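The four helpers added above carry the commit's core idea: bearers are reference-counted by the flows mapped onto them. __qmi_rmnet_bearer_get creates a bearer on first use or takes another reference, __qmi_rmnet_bearer_put drops one and tears down the queue bindings on the last put, and __qmi_rmnet_rebind_flow - the "QoS rebind" of the subject line - moves an existing flow by putting its old bearer and getting the new one. A standalone sketch of that get/put lifecycle (simplified stand-ins, not the driver's types or API):

/*
 * Each flow holds one reference on its bearer; the bearer is freed
 * when the last flow drops it, which is what lets a rebind simply
 * put the old bearer and get the new one.
 */
#include <stdlib.h>
#include <stdio.h>

struct bearer {
	unsigned char id;
	int flow_ref;
	struct bearer *next;	/* stand-in for the kernel list_head */
};

static struct bearer *bearer_get(struct bearer **head, unsigned char id)
{
	struct bearer *b;

	for (b = *head; b; b = b->next)
		if (b->id == id) {
			b->flow_ref++;	/* existing bearer: take a ref */
			return b;
		}

	b = calloc(1, sizeof(*b));	/* first flow: create with ref 1 */
	if (!b)
		return NULL;
	b->id = id;
	b->flow_ref = 1;
	b->next = *head;
	*head = b;
	return b;
}

static void bearer_put(struct bearer **head, struct bearer *b)
{
	struct bearer **pp;

	if (!b || --b->flow_ref)
		return;

	for (pp = head; *pp; pp = &(*pp)->next)	/* last ref: unlink + free */
		if (*pp == b) {
			*pp = b->next;
			break;
		}
	free(b);
}

int main(void)
{
	struct bearer *head = NULL;
	struct bearer *a = bearer_get(&head, 5);	/* flow 1 on bearer 5 */
	struct bearer *b = bearer_get(&head, 5);	/* flow 2, same bearer */

	printf("refs: %d\n", a->flow_ref);	/* 2 */
	bearer_put(&head, a);	/* a rebind would now get() the new bearer */
	bearer_put(&head, b);	/* last put frees the bearer */
	printf("empty: %d\n", head == NULL);	/* 1 */
	return 0;
}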
@@ -225,8 +353,7 @@ static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
 	struct rmnet_flow_map new_map, *itm;
 	struct rmnet_bearer_map *bearer;
 	struct tcmsg tmp_tcm;
-	struct mq_map *mq;
-	u32 mq_idx;
+	int rc = 0;
 
 	if (!qos_info || !tcm || tcm->tcm_handle >= MAX_MQ_NUM)
 		return -EINVAL;
@@ -251,7 +378,11 @@ static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
 	itm = qmi_rmnet_get_flow_map(qos_info, new_map.flow_id,
 				     new_map.ip_type);
 	if (itm) {
-		pr_debug("%s: stale flow found\n", __func__);
+		if (itm->bearer_id != new_map.bearer_id) {
+			rc = __qmi_rmnet_rebind_flow(
+				dev, qos_info, itm, &new_map);
+			goto done;
+		} else if (itm->mq_idx != new_map.mq_idx) {
 			tmp_tcm.tcm__pad1 = itm->bearer_id;
 			tmp_tcm.tcm_parent = itm->flow_id;
 			tmp_tcm.tcm_ifindex = itm->ip_type;
@@ -259,6 +390,9 @@ static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
 			spin_unlock_bh(&qos_info->qos_lock);
 			qmi_rmnet_del_flow(dev, &tmp_tcm, qmi);
 			goto again;
+		} else {
+			goto done;
+		}
 	}
 
 	/* Create flow map */
@@ -272,45 +406,19 @@ static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
 	list_add(&itm->list, &qos_info->flow_head);
 
 	/* Create or update bearer map */
-	bearer = qmi_rmnet_get_bearer_map(qos_info, new_map.bearer_id);
-	if (bearer) {
-		bearer->flow_ref++;
-	} else {
-		bearer = kzalloc(sizeof(*bearer), GFP_ATOMIC);
-		if (!bearer) {
-			spin_unlock_bh(&qos_info->qos_lock);
-			return -ENOMEM;
-		}
-
-		bearer->bearer_id = new_map.bearer_id;
-		bearer->flow_ref = 1;
-		bearer->grant_size = qos_info->default_grant;
-		bearer->grant_thresh = qmi_rmnet_grant_per(bearer->grant_size);
-		qos_info->default_grant = DEFAULT_GRANT;
-		list_add(&bearer->list, &qos_info->bearer_head);
-	}
+	bearer = __qmi_rmnet_bearer_get(qos_info, new_map.bearer_id);
+	if (!bearer) {
+		rc = -ENOMEM;
+		goto done;
+	}
+
 	itm->bearer = bearer;
 
-	/* Update mq map */
-	mq_idx = tcm->tcm_handle;
-	mq = &qos_info->mq[mq_idx];
-	if (!mq->bearer) {
-		mq->bearer = bearer;
-		mq->ancillary = IS_ANCILLARY(new_map.ip_type);
-
-		qmi_rmnet_flow_control(dev, mq_idx,
-				       bearer->grant_size > 0 ? 1 : 0);
-		trace_dfc_qmi_tc(dev->name, itm->bearer_id,
-				 bearer->grant_size, 0, mq_idx,
-				 bearer->grant_size > 0 ? 1 : 0);
-
-	} else if (mq->bearer->bearer_id != new_map.bearer_id) {
-		pr_debug("%s: un-managered bearer %u\n",
-				__func__, new_map.bearer_id);
-	}
+	__qmi_rmnet_update_mq(dev, qos_info, bearer, itm);
 
+done:
 	spin_unlock_bh(&qos_info->qos_lock);
-	return 0;
+	return rc;
 }
 
 static int
@@ -319,9 +427,6 @@ qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
 {
 	struct qos_info *qos_info = (struct qos_info *)rmnet_get_qos_pt(dev);
 	struct rmnet_flow_map new_map, *itm;
-	struct rmnet_bearer_map *bearer;
-	struct mq_map *mq;
-	u32 mq_idx;
 
 	if (!qos_info)
 		return -EINVAL;
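With the rebind helper in place, qmi_rmnet_add_flow now distinguishes three cases when it finds an existing flow map, as the hunks above show: rebind when the bearer id changed, delete-and-retry (goto again) when only the mq changed, and a plain goto done when nothing changed. A compact illustration (hypothetical helper, not driver code):

#include <stdio.h>

enum flow_action { FLOW_REBIND, FLOW_READD, FLOW_NOOP };

static enum flow_action classify(unsigned int old_bearer,
				 unsigned int new_bearer,
				 unsigned int old_mq, unsigned int new_mq)
{
	if (old_bearer != new_bearer)
		return FLOW_REBIND;	/* __qmi_rmnet_rebind_flow() */
	if (old_mq != new_mq)
		return FLOW_READD;	/* qmi_rmnet_del_flow() + goto again */
	return FLOW_NOOP;		/* goto done */
}

int main(void)
{
	printf("%d %d %d\n",
	       classify(1, 2, 5, 5),	/* 0: rebind */
	       classify(1, 1, 5, 6),	/* 1: re-add */
	       classify(1, 1, 5, 5));	/* 2: no-op */
	return 0;
}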
@@ -345,26 +450,7 @@
 				    new_map.flow_id, new_map.ip_type,
 				    itm->mq_idx, 0);
 
-		bearer = itm->bearer;
-		if (bearer && --bearer->flow_ref == 0) {
-			/* Remove the bearer from mq map */
-			for (mq_idx = 0; mq_idx < MAX_MQ_NUM; mq_idx++) {
-				mq = &qos_info->mq[mq_idx];
-				if (mq->bearer != bearer)
-					continue;
-
-				mq->bearer = NULL;
-				mq->ancillary = false;
-				qmi_rmnet_reset_txq(dev, mq_idx);
-				qmi_rmnet_flow_control(dev, mq_idx, 1);
-				trace_dfc_qmi_tc(dev->name,
-					new_map.bearer_id, 0, 0, mq_idx, 1);
-			}
-
-			/* Remove from bearer map */
-			list_del(&bearer->list);
-			kfree(bearer);
-		}
+		__qmi_rmnet_bearer_put(dev, qos_info, itm->bearer, true);
 
 		/* Remove from flow map */
 		list_del(&itm->list);
@@ -682,47 +768,91 @@ void qmi_rmnet_burst_fc_check(struct net_device *dev,
 }
 EXPORT_SYMBOL(qmi_rmnet_burst_fc_check);
 
-int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb)
+static bool qmi_rmnet_is_tcp_ack(struct sk_buff *skb)
 {
-	struct qos_info *qos = rmnet_get_qos_pt(dev);
-	int txq = 0, ip_type = AF_INET;
 	unsigned int len = skb->len;
-	struct rmnet_flow_map *itm;
-	u32 mark = skb->mark;
 
-	if (!qos)
-		return 0;
-
-	/* If mark is mq num return it */
-	if (dfc_mode == DFC_MODE_MQ_NUM)
-		return mark;
-
 	switch (skb->protocol) {
 	/* TCPv4 ACKs */
 	case htons(ETH_P_IP):
-		ip_type = AF_INET;
-		if ((!mark) &&
-		    (ip_hdr(skb)->protocol == IPPROTO_TCP) &&
-		    (len == 40 || len == 52) &&
+		if ((ip_hdr(skb)->protocol == IPPROTO_TCP) &&
 		    (ip_hdr(skb)->ihl == 5) &&
+		    (len == 40 || len == 52) &&
 		    ((tcp_flag_word(tcp_hdr(skb)) & 0xFF00) == TCP_FLAG_ACK))
-			return 1;
+			return true;
 		break;
 
 	/* TCPv6 ACKs */
 	case htons(ETH_P_IPV6):
-		ip_type = AF_INET6;
-		if ((!mark) &&
-		    (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) &&
+		if ((ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) &&
 		    (len == 60 || len == 72) &&
 		    ((tcp_flag_word(tcp_hdr(skb)) & 0xFF00) == TCP_FLAG_ACK))
-			return 1;
-		/* Fall through */
+			return true;
+		break;
 	}
 
+	return false;
+}
+
+static int qmi_rmnet_get_queue_sa(struct qos_info *qos, struct sk_buff *skb)
+{
+	struct rmnet_flow_map *itm;
+	int ip_type;
+	int txq = DEFAULT_MQ_NUM;
+
+	/* Put RS/NS in default mq */
+	if (skb->protocol == htons(ETH_P_IPV6) &&
+	    ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6 &&
+	    (icmp6_hdr(skb)->icmp6_type == 133 ||
+	     icmp6_hdr(skb)->icmp6_type == 135)) {
+		return DEFAULT_MQ_NUM;
+	}
+
+	ip_type = (skb->protocol == htons(ETH_P_IPV6)) ? AF_INET6 : AF_INET;
+
+	spin_lock_bh(&qos->qos_lock);
+
+	itm = qmi_rmnet_get_flow_map(qos, skb->mark, ip_type);
+	if (unlikely(!itm))
+		goto done;
+
+	/* Put the packet in the assigned mq except TCP ack */
+	if (likely(itm->bearer) && qmi_rmnet_is_tcp_ack(skb))
+		txq = itm->bearer->ack_mq_idx;
+	else
+		txq = itm->mq_idx;
+
+done:
+	spin_unlock_bh(&qos->qos_lock);
+	return txq;
+}
+
+int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb)
+{
+	struct qos_info *qos = rmnet_get_qos_pt(dev);
+	int txq = 0, ip_type = AF_INET;
+	struct rmnet_flow_map *itm;
+	u32 mark = skb->mark;
+
+	if (!qos)
+		return 0;
+
+	/* If mark is mq num return it */
+	if (dfc_mode == DFC_MODE_MQ_NUM)
+		return mark;
+
+	if (dfc_mode == DFC_MODE_SA)
+		return qmi_rmnet_get_queue_sa(qos, skb);
+
 	/* Default flows */
-	if (!mark)
-		return 0;
+	if (!mark) {
+		if (qmi_rmnet_is_tcp_ack(skb))
+			return 1;
+		else
+			return 0;
+	}
+
+	ip_type = (skb->protocol == htons(ETH_P_IPV6)) ? AF_INET6 : AF_INET;
 
 	/* Dedicated flows */
 	spin_lock_bh(&qos->qos_lock);
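The extracted qmi_rmnet_is_tcp_ack above matches bare TCP ACKs by exact skb length: an IPv4 packet with ihl == 5 is a 20-byte IP header plus a 20-byte TCP header (40), or 52 with the 12-byte padded timestamp option; IPv6's fixed 40-byte header gives 60 and 72. In SA mode those ACKs are steered to the bearer's ack_mq_idx, while ICMPv6 types 133 and 135 (Router and Neighbor Solicitation) stay on the default mq. A user-space restatement of the length test (illustration only, plain types rather than skb helpers):

/* Why 40/52 and 60/72: a pure ACK carries no payload, so the
 * packet length is exactly IP header + TCP header. */
#include <stdbool.h>
#include <stdio.h>

#define IPV4_HDR_LEN	20	/* ihl == 5, no IP options */
#define IPV6_HDR_LEN	40	/* fixed v6 header */
#define TCP_HDR_LEN	20	/* no TCP options */
#define TCP_TS_OPT_LEN	12	/* timestamp option, NOP-padded */

static bool is_bare_ack_len(bool ipv6, unsigned int len)
{
	unsigned int ip = ipv6 ? IPV6_HDR_LEN : IPV4_HDR_LEN;

	return len == ip + TCP_HDR_LEN ||
	       len == ip + TCP_HDR_LEN + TCP_TS_OPT_LEN;
}

int main(void)
{
	printf("%d %d\n", is_bare_ack_len(false, 40),
	       is_bare_ack_len(false, 52));	/* 1 1 */
	printf("%d %d\n", is_bare_ack_len(true, 60),
	       is_bare_ack_len(true, 72));	/* 1 1 */
	return 0;
}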
@@ -755,7 +885,6 @@ void *qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id)
 
 	qos->mux_id = mux_id;
 	qos->real_dev = real_dev;
-	qos->default_grant = DEFAULT_GRANT;
 	qos->tran_num = 0;
 	INIT_LIST_HEAD(&qos->flow_head);
 	INIT_LIST_HEAD(&qos->bearer_head);
+8 −3
@@ -9,14 +9,19 @@
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 
-#define MAX_MQ_NUM 10
+#define MAX_MQ_NUM 16
 #define MAX_CLIENT_NUM 2
 #define MAX_FLOW_NUM 32
 #define DEFAULT_GRANT 1
 #define DFC_MAX_BEARERS_V01 16
+#define DEFAULT_MQ_NUM 0
+#define ACK_MQ_OFFSET (MAX_MQ_NUM - 1)
+#define INVALID_MQ 0xFF
 
 #define DFC_MODE_FLOW_ID 2
 #define DFC_MODE_MQ_NUM 3
+#define DFC_MODE_SA 4
 
 extern int dfc_mode;
 extern int dfc_qmap;
@@ -34,6 +39,8 @@ struct rmnet_bearer_map {
 	bool rat_switch;
 	bool tx_off;
 	u32 ack_txid;
+	u32 mq_idx;
+	u32 ack_mq_idx;
 };
 
 struct rmnet_flow_map {
@@ -53,7 +60,6 @@ struct svc_info {
 
 struct mq_map {
 	struct rmnet_bearer_map *bearer;
-	bool ancillary;
 };
 
 struct qos_info {
@@ -62,7 +68,6 @@ struct qos_info {
 	struct list_head flow_head;
 	struct list_head bearer_head;
 	struct mq_map mq[MAX_MQ_NUM];
-	u32 default_grant;
 	u32 tran_num;
 	spinlock_t qos_lock;
 };
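Reading the new defines together: MAX_MQ_NUM grows from 10 to 16, queue 0 becomes the bearer-less default mq in SA mode, and each bound data mq at index i gets a companion TCP-ACK txq at i + ACK_MQ_OFFSET, with INVALID_MQ marking an unassigned slot. A small sketch of that numbering (hypothetical helper; the actual txq provisioning is up to the rmnet device setup):

#include <stdio.h>

#define MAX_MQ_NUM	16
#define DEFAULT_MQ_NUM	0
#define ACK_MQ_OFFSET	(MAX_MQ_NUM - 1)
#define INVALID_MQ	0xFF

static unsigned int ack_mq_idx(unsigned int mq_idx)
{
	/* default mq has no bearer, unbound bearers have no queue yet */
	if (mq_idx == DEFAULT_MQ_NUM || mq_idx == INVALID_MQ)
		return INVALID_MQ;
	return mq_idx + ACK_MQ_OFFSET;	/* as in __qmi_rmnet_update_mq() */
}

int main(void)
{
	unsigned int i;

	for (i = 1; i < 4; i++)
		printf("data mq %u -> ack mq %u\n", i, ack_mq_idx(i));
	return 0;
}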