drivers/platform/msm/veth_ipa/veth_emac_mgt.c (+164 −64)

@@ -123,28 +123,18 @@ int veth_alloc_emac_export_mem(
 	VETH_IPA_DEBUG("%s: physical addr: tx buf mem 0x%x\n",
 		__func__, veth_emac_mem->tx_buf_mem_paddr);
 
-	veth_emac_mem->tx_buff_pool_base =
-		(uint32_t *)dma_zalloc_coherent(&pdata->pdev->dev,
-			sizeof(uint32_t) * VETH_TX_DESC_CNT,
-			&tx_buf_pool_paddr,
-			GFP_KERNEL);
-	if (!veth_emac_mem->tx_buff_pool_base) {
+	/*transport minimum 4k*/
+	veth_emac_mem->tx_buff_pool_base_va =
+		(uint32_t *)dma_alloc_coherent(&pdata->pdev->dev,
+			sizeof(uint32_t) * (VETH_TX_DESC_CNT * 4),
+			&tx_buf_pool_paddr,
+			GFP_KERNEL | GFP_DMA);
+	if (!veth_emac_mem->tx_buff_pool_base_va) {
 		VETH_IPA_DEBUG("%s: No memory for rx_buf_mem_va\n", __func__);
 		goto free_tx_buff_pool_base;
 	}
-
-	veth_emac_mem->tx_buff_pool_base[0] = veth_emac_mem->tx_buf_mem_paddr;
-	for (i = 0; i < VETH_TX_DESC_CNT; i++) {
-		veth_emac_mem->tx_buff_pool_base[i] =
-			veth_emac_mem->tx_buff_pool_base[0] +
-			i*VETH_ETH_FRAME_LEN_IPA;
-		VETH_IPA_DEBUG(
-			"%s: veth_emac_mem->tx_buff_pool_base[%d] 0x%x\n",
-			__func__, i, veth_emac_mem->tx_buff_pool_base[i]);
-	}
 	veth_emac_mem->tx_buff_pool_base_pa = tx_buf_pool_paddr;
 
 	//Allocate RX buffers

@@ -166,28 +156,16 @@ int veth_alloc_emac_export_mem(
 	VETH_IPA_DEBUG("%s: physical addr: rx_buf_mem_addr 0x%x\n",
 		__func__, veth_emac_mem->rx_buf_mem_paddr);
 
-	veth_emac_mem->rx_buff_pool_base =
+	veth_emac_mem->rx_buff_pool_base_va =
 		(uint32_t *)dma_zalloc_coherent(&pdata->pdev->dev,
-			sizeof(uint32_t) * VETH_RX_DESC_CNT,
-			&rx_buf_pool_paddr,
-			GFP_KERNEL);
-	if (!veth_emac_mem->rx_buff_pool_base) {
+			sizeof(uint32_t) * VETH_RX_DESC_CNT*4,
+			&veth_emac_mem->rx_buff_pool_base_pa,
+			GFP_KERNEL | GFP_DMA);
+	if (!veth_emac_mem->rx_buff_pool_base_va) {
 		VETH_IPA_DEBUG("%s: No memory for rx_buf_mem_va\n", __func__);
 		goto free_rx_buff_pool_base;
 	}
-
-	veth_emac_mem->rx_buff_pool_base[0] = veth_emac_mem->rx_buf_mem_paddr;
-	for (i = 0; i < VETH_RX_DESC_CNT; i++) {
-		veth_emac_mem->rx_buff_pool_base[i] =
-			veth_emac_mem->rx_buff_pool_base[0] +
-			i*VETH_ETH_FRAME_LEN_IPA;
-		VETH_IPA_DEBUG(
-			"%s: veth_emac_mem->rx_buff_pool_base[%d] 0x%x\n",
-			__func__, i, veth_emac_mem->rx_buff_pool_base[i]);
-	}
 	veth_emac_mem->rx_buff_pool_base_pa = rx_buf_pool_paddr;
 
 	return 0;

@@ -196,7 +174,7 @@ int veth_alloc_emac_export_mem(
 free_rx_buff_pool_base:
 	dma_free_coherent(&pdata->pdev->dev,
 		VETH_ETH_FRAME_LEN_IPA * VETH_RX_DESC_CNT,
-		veth_emac_mem->rx_buff_pool_base,
+		veth_emac_mem->rx_buff_pool_base_va,
 		rx_buf_pool_paddr);
 free_rx_buf_mem_va:
 	dma_free_coherent(&pdata->pdev->dev,

@@ -206,7 +184,7 @@ int veth_alloc_emac_export_mem(
 free_tx_buff_pool_base:
 	dma_free_coherent(&pdata->pdev->dev,
 		sizeof(uint32_t) * VETH_TX_DESC_CNT,
-		veth_emac_mem->tx_buff_pool_base,
+		veth_emac_mem->tx_buff_pool_base_va,
 		tx_buf_pool_paddr);
 free_tx_buf_mem_va:
 	dma_free_coherent(&pdata->pdev->dev,

@@ -239,6 +217,12 @@ int veth_alloc_emac_export_mem(
 int veth_alloc_emac_dealloc_mem(
 	struct veth_emac_export_mem *veth_emac_mem, struct veth_ipa_dev *pdata)
 {
+	/*1. Send stop offload to the BE
+	 *2. Receive from BE
+	 *4. Free the memory
+	 *5. Close the HAB socket ?
+	 */
 	if (veth_emac_mem->rx_buf_mem_va) {
 		VETH_IPA_DEBUG("%s: Freeing RX buf mem", __func__);
 		dma_free_coherent(&pdata->pdev->dev,

@@ -279,21 +263,21 @@ int veth_alloc_emac_dealloc_mem(
 		VETH_IPA_ERROR("%s: TX desc mem not available", __func__);
 	}
 
-	if (veth_emac_mem->rx_buff_pool_base) {
+	if (veth_emac_mem->rx_buff_pool_base_va) {
 		VETH_IPA_DEBUG("%s: Freeing RX buff pool mem", __func__);
 		dma_free_coherent(&pdata->pdev->dev,
 			sizeof(uint32_t) * VETH_RX_DESC_CNT,
-			veth_emac_mem->rx_buff_pool_base,
+			veth_emac_mem->rx_buff_pool_base_va,
 			veth_emac_mem->rx_buff_pool_base_pa);
 	} else {
 		VETH_IPA_ERROR("%s: RX buff pool base not available", __func__);
 	}
 
-	if (veth_emac_mem->tx_buff_pool_base) {
+	if (veth_emac_mem->tx_buff_pool_base_va) {
 		VETH_IPA_DEBUG("%s: Freeing TX buff pool mem", __func__);
 		dma_free_coherent(&pdata->pdev->dev,
 			sizeof(uint32_t) * VETH_TX_DESC_CNT,
-			veth_emac_mem->tx_buff_pool_base,
+			veth_emac_mem->tx_buff_pool_base_va,
 			veth_emac_mem->tx_buff_pool_base_pa);
 	} else {
 		VETH_IPA_ERROR("%s: TX buff pool base not available", __func__);

@@ -316,7 +300,7 @@ int veth_emac_ipa_hab_init(int mmid)
 	int ret = 0;
 	int vc_id = 0;
 	char *pdata_send;
-	char *pdata_recv;
+	uint32_t *pdata_recv;
 	uint32_t veth_hab_pdata_size = 32;
 
 	VETH_IPA_INFO("%s: Enter HAB init\n", __func__);

@@ -358,7 +342,7 @@ int veth_emac_ipa_hab_init(int mmid)
 	/*Receive ACK*/
 	memset(pdata_recv, 1, veth_hab_pdata_size);
 	VETH_IPA_INFO("%s: Receiving ACK\n", __func__);
-	ret = habmm_socket_recv(vc_id, pdata_recv, &veth_hab_pdata_size, 0, 0);
+	ret = habmm_socket_recv(vc_id, &pdata_recv, &veth_hab_pdata_size, 0, 0);
 	if (ret) {
 		VETH_IPA_ERROR("%s: receive failed! ret %d, recv size %d\n",
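Editor's note on the reworked pool allocations above: the patch stops pre-populating the pools at allocation time and over-sizes them instead, because (per its own comments) the HAB transport exports memory in whole 4 KB pages. A sizing sketch, not part of the driver; the helper name is illustrative and the 4 KB granularity is taken from the patch's "transport minimum 4k" comment:

/* The pool itself needs only sizeof(uint32_t) * VETH_TX_DESC_CNT
 * = 256 * 4 = 1024 bytes, but habmm_export() shares whole 4 KB
 * pages. Scaling the count by 4 makes the allocation exactly one
 * page, so no unrelated kernel memory is exported to the BE.
 */
static inline size_t veth_pool_export_size(size_t desc_cnt)
{
	return sizeof(uint32_t) * (desc_cnt * 4);	/* 256 -> 4096 */
}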
@@ -543,6 +527,99 @@ static int veth_emac_ipa_hab_export_rx_buf(
 	return ret;
 }
 
+/** emac_ipa_hab_export_tx_buf_pool() - This API is called
+ * for exporting the TX buf pool memory to the BE driver in the QNX host
+ * @vc_id: The virtual channel ID between BE and FE driver
+ * @veth_emac_mem: Contains the virtual and physical addresses
+ * of the exported memory
+ */
+int emac_ipa_hab_export_tx_buf_pool(
+	int vc_id,
+	struct veth_emac_export_mem *veth_emac_mem,
+	struct veth_ipa_dev *pdata)
+{
+	int ret = 0;
+
+	VETH_IPA_DEBUG("%s: Export TX buf pool memory TO VC_ID %d\n",
+		__func__, vc_id);
+
+	ret = habmm_export(
+		vc_id,
+		veth_emac_mem->tx_buff_pool_base_va,
+		sizeof(uint32_t) * VETH_TX_DESC_CNT * 4,
+		&veth_emac_mem->exp_id.tx_buf_pool_exp_id,
+		0);
+	if (ret) {
+		VETH_IPA_ERROR("%s: Export failed %d returned, export id %d\n",
+			__func__, ret,
+			veth_emac_mem->exp_id.tx_buf_pool_exp_id);
+		ret = -1;
+		goto err;
+	}
+
+	pr_info("%s: Export TX buf pool memory location %p %d\n",
+		__func__,
+		veth_emac_mem->tx_buff_pool_base_va,
+		veth_emac_mem->exp_id.tx_buf_pool_exp_id);
+	return ret;
+
+err:
+	veth_alloc_emac_dealloc_mem(veth_emac_mem, pdata);
+	return ret;
+}
+
+/** emac_ipa_hab_export_rx_buf_pool() - This API is called
+ * for exporting the RX buf pool memory to the BE driver in the QNX host
+ * @vc_id: The virtual channel ID between BE and FE driver
+ * @veth_emac_mem: Contains the virtual and physical addresses
+ * of the exported memory
+ */
+int emac_ipa_hab_export_rx_buf_pool(
+	int vc_id,
+	struct veth_emac_export_mem *veth_emac_mem,
+	struct veth_ipa_dev *pdata)
+{
+	int ret = 0;
+
+	VETH_IPA_DEBUG("%s: Export RX buf pool memory TO VC_ID %d\n",
+		__func__, vc_id);
+
+	ret = habmm_export(
+		vc_id,
+		veth_emac_mem->rx_buff_pool_base_va,
+		sizeof(uint32_t) * (VETH_RX_DESC_CNT * 4),
+		&veth_emac_mem->exp_id.rx_buf_pool_exp_id,
+		0);
+	if (ret) {
+		VETH_IPA_ERROR("%s: Export failed %d returned, export id %d\n",
+			__func__, ret,
+			veth_emac_mem->exp_id.rx_buf_pool_exp_id);
+		ret = -1;
+		goto err;
+	}
+
+	pr_info("%s: Export RX buf pool memory location %p %d\n",
+		__func__,
+		veth_emac_mem->rx_buff_pool_base_va,
+		veth_emac_mem->exp_id.rx_buf_pool_exp_id);
+	return ret;
+
+err:
+	veth_alloc_emac_dealloc_mem(veth_emac_mem, pdata);
+	return ret;
+}
+
 /** veth_emac_ipa_send_exp_id() - This API is used to send the
 * export IDs of all the exported memory to the BE driver in
 * QNX host

@@ -551,7 +628,7 @@ static int veth_emac_ipa_hab_export_rx_buf(
 * @veth_emac_mem - Contains the virtual and physical addresses
 * of the exported memory
 */
-static int veth_emac_ipa_send_exp_id(
+int veth_emac_ipa_send_exp_id(
 	int vc_id, struct veth_emac_export_mem *veth_emac_mem)
 {
 	int ret = 0;

@@ -560,10 +637,13 @@ static int veth_emac_ipa_send_exp_id(
 		&veth_emac_mem->exp_id,
 		sizeof(veth_emac_mem->exp_id),
 		NO_FLAGS);
+	VETH_IPA_INFO("Sent export ids to the backend driver");
+	VETH_IPA_INFO("TX Descriptor export id sent %x",
+		veth_emac_mem->exp_id.tx_desc_exp_id);
 
 	if (ret) {
 		VETH_IPA_ERROR("%s: Send failed failed %d returned\n",
			__func__, ret);
 		ret = -1;
 		return ret;
 	}
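Editor's note: the two new helpers above follow one HAB pattern, so it is worth isolating. habmm_export() hands a physically contiguous, page-aligned buffer to the hypervisor and returns an export ID; the QNX BE later imports the region by that ID, and veth_emac_ipa_send_exp_id() ships all collected IDs in a single message. A minimal sketch of the pattern; the wrapper name is hypothetical and error unwinding is trimmed:

/* Share one coherent buffer over a HAB virtual channel.
 * buf_va must come from dma_alloc_coherent() and buf_size should
 * be a multiple of the 4 KB HAB page (see the over-allocation
 * above). *exp_id is only meaningful when 0 is returned.
 */
static int veth_export_one_region(int vc_id, void *buf_va,
				  uint32_t buf_size, uint32_t *exp_id)
{
	int ret = habmm_export(vc_id, buf_va, buf_size, exp_id, 0);

	if (ret)
		pr_err("habmm_export failed %d\n", ret);
	return ret;
}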
@@ -572,7 +652,7 @@ int veth_emac_ipa_send_exp_id(
 	return ret;
 }
 
 int veth_emac_init(struct veth_emac_export_mem *veth_emac_mem,
-	struct veth_ipa_dev *pdata)
+	struct veth_ipa_dev *pdata, bool smmu_s2_enb)
 {
 	int ret = 0;

@@ -616,6 +696,26 @@ int veth_emac_init(struct veth_emac_export_mem *veth_emac_mem,
 		return -ENOMEM;
 	}
 
+	ret = emac_ipa_hab_export_tx_buf_pool(veth_emac_mem->vc_id,
+		veth_emac_mem, pdata);
+	if (ret < 0) {
+		VETH_IPA_ERROR(
+			"HAB export of TX buff pool mem failed, returning error");
+		return -ENOMEM;
+	}
+
+	ret = emac_ipa_hab_export_rx_buf_pool(veth_emac_mem->vc_id,
+		veth_emac_mem, pdata);
+	if (ret < 0) {
+		VETH_IPA_ERROR(
+			"HAB export of RX buff pool mem failed, returning error");
+		return -ENOMEM;
+	}
+
 	ret = veth_emac_ipa_send_exp_id(veth_emac_mem->vc_id,
 		veth_emac_mem);

drivers/platform/msm/veth_ipa/veth_emac_mgt.h (+12 −1)

@@ -20,7 +20,8 @@
 int veth_emac_init(struct veth_emac_export_mem *veth_emac_mem,
-	struct veth_ipa_dev *pdata);
+	struct veth_ipa_dev *pdata, bool smmu_s2_enb);
 
 int veth_alloc_emac_export_mem(struct veth_emac_export_mem *veth_emac_mem,
 	struct veth_ipa_dev *pdata);

@@ -40,6 +41,16 @@
 int veth_emac_open_notify(struct veth_emac_export_mem *veth_emac_mem,
 	struct veth_ipa_dev *pdata);
 int veth_emac_ipa_setup_complete(struct veth_emac_export_mem *veth_emac_mem,
 	struct veth_ipa_dev *pdata);
+int emac_ipa_hab_export_tx_buf_pool(int vc_id,
+	struct veth_emac_export_mem *veth_emac_mem,
+	struct veth_ipa_dev *pdata);
+int emac_ipa_hab_export_rx_buf_pool(int vc_id,
+	struct veth_emac_export_mem *veth_emac_mem,
+	struct veth_ipa_dev *pdata);
+int veth_emac_ipa_send_exp_id(int vc_id,
+	struct veth_emac_export_mem *veth_emac_mem);
 
 #endif /* _VETH_EMAC_MGT_H_ */
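Editor's note: with the header exposing the two pool-export helpers and send_exp_id, the FE init flow after this patch is easy to state in one place. A condensed, hypothetical wrapper (the real veth_emac_init() also exports the descriptor and data-buffer regions and logs each step):

/* Sketch of the ordering veth_emac_init() now follows:
 * export both buff pools, then hand every collected export ID
 * to the BE in one habmm_socket_send().
 */
static int veth_emac_export_pools(struct veth_emac_export_mem *mem,
				  struct veth_ipa_dev *pdata)
{
	if (emac_ipa_hab_export_tx_buf_pool(mem->vc_id, mem, pdata) < 0)
		return -ENOMEM;
	if (emac_ipa_hab_export_rx_buf_pool(mem->vc_id, mem, pdata) < 0)
		return -ENOMEM;
	/* all exp IDs are now in mem->exp_id; ship them together */
	return veth_emac_ipa_send_exp_id(mem->vc_id, mem);
}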
drivers/platform/msm/veth_ipa/veth_ipa.c (+223 −113)

@@ -330,13 +330,15 @@ int veth_set_ul_dl_smmu_ipa_params(struct veth_ipa_dev *pdata,
 		return -EINVAL;
 	}
 
+	/*Configure SGT for UL ring base*/
 	ul->ring_base_sgt = kzalloc(sizeof(ul->ring_base_sgt), GFP_KERNEL);
 	if (!ul->ring_base_sgt)
 		return -ENOMEM;
+
 	ret = dma_get_sgtable(&pdata->pdev->dev, ul->ring_base_sgt,
 			      veth_emac_mem->rx_desc_mem_va,
-			      ul->ring_base_iova,
+			      veth_emac_mem->rx_desc_mem_paddr,
 			      (sizeof(struct s_RX_NORMAL_DESC) *
 			      VETH_RX_DESC_CNT));
 	if (ret) {

@@ -346,8 +348,16 @@ int veth_set_ul_dl_smmu_ipa_params(struct veth_ipa_dev *pdata,
 		return -EAGAIN;
 	}
 
+	/*get pa*/
 	ul->ring_base_pa = sg_phys(ul->ring_base_sgt->sgl);
+	VETH_IPA_INFO(
+		"%s:\n ul->ring_base_sgt = 0x%px , ul->ring_base_pa =0x%lx\n",
+		__func__, ul->ring_base_sgt, ul->ring_base_pa);
 
+	/*configure SGT for UL buff pool base*/
 	ul->buff_pool_base_sgt = kzalloc(
 		sizeof(ul->buff_pool_base_sgt), GFP_KERNEL);

@@ -356,11 +366,14 @@ int veth_set_ul_dl_smmu_ipa_params(struct veth_ipa_dev *pdata,
 		return -ENOMEM;
 	}
 
-	ret = dma_get_sgtable(&pdata->pdev->dev, ul->buff_pool_base_sgt,
-			      veth_emac_mem->rx_buf_mem_va,
-			      ul->buff_pool_base_iova,
-			      (sizeof(struct s_RX_NORMAL_DESC) *
-			      VETH_RX_DESC_CNT));
+	ret = dma_get_sgtable(ipa_get_dma_dev(),
+			      ul->buff_pool_base_sgt,
+			      veth_emac_mem->rx_buff_pool_base_va,
+			      veth_emac_mem->rx_buff_pool_base_iova,
+			      (sizeof(uint32_t) * VETH_RX_DESC_CNT * 4));
+	/*using ipa dev node for buff pool*/
+	/*overallocating to satisfy hab page alignment*/
 	if (ret) {
 		VETH_IPA_ERROR("Failed to get IPA UL buff pool sgtable.\n");
 		kfree(ul->ring_base_sgt);

@@ -370,12 +383,21 @@ int veth_set_ul_dl_smmu_ipa_params(struct veth_ipa_dev *pdata,
 	}
 
 	ul->buff_pool_base_pa = sg_phys(ul->buff_pool_base_sgt->sgl);
+	veth_emac_mem->rx_buff_pool_base_pa = ul->buff_pool_base_pa;
+
+	VETH_IPA_INFO(
+		"%s:\n ul->buff_pool_base_sgt = 0x%px,ul->buff_pool_base_pa =0x%lx\n",
+		__func__, ul->buff_pool_base_sgt, ul->buff_pool_base_pa);
 
+	/*Configure SGT for DL ring base*/
 	dl->ring_base_sgt = kzalloc(sizeof(dl->ring_base_sgt), GFP_KERNEL);
 	if (!dl->ring_base_sgt)
 		return -ENOMEM;
+
 	ret = dma_get_sgtable(&pdata->pdev->dev, dl->ring_base_sgt,
 			      veth_emac_mem->tx_desc_mem_va,
 			      veth_emac_mem->tx_desc_mem_paddr,
 			      (sizeof(struct s_TX_NORMAL_DESC) *

@@ -390,17 +412,24 @@ int veth_set_ul_dl_smmu_ipa_params(struct veth_ipa_dev *pdata,
 	}
 
 	dl->ring_base_pa = sg_phys(dl->ring_base_sgt->sgl);
+	VETH_IPA_INFO(
+		"%s:\n dl->ring_base_sgt = 0x%px , dl->ring_base_pa =0x%lx\n",
+		__func__, dl->ring_base_sgt, dl->ring_base_pa);
 
+	/*configure SGT for DL buff pool base*/
 	dl->buff_pool_base_sgt = kzalloc(
 		sizeof(dl->buff_pool_base_sgt), GFP_KERNEL);
 	if (!dl->buff_pool_base_sgt)
 		return -ENOMEM;
 
-	ret = dma_get_sgtable(&pdata->pdev->dev, dl->buff_pool_base_sgt,
-			      veth_emac_mem->tx_buf_mem_va,
-			      veth_emac_mem->tx_buf_mem_paddr,
-			      (sizeof(struct s_TX_NORMAL_DESC) *
-			      VETH_TX_DESC_CNT));
+	ret = dma_get_sgtable(ipa_get_dma_dev(),
+			      dl->buff_pool_base_sgt,
+			      veth_emac_mem->tx_buff_pool_base_va,
+			      veth_emac_mem->tx_buff_pool_base_iova,
+			      (sizeof(uint32_t) * VETH_TX_DESC_CNT * 4));
 	if (ret) {
 		VETH_IPA_ERROR("Failed to get IPA DL buff pool sgtable.\n");
 		kfree(ul->ring_base_sgt);

@@ -412,6 +441,13 @@ int veth_set_ul_dl_smmu_ipa_params(struct veth_ipa_dev *pdata,
 	}
 
 	dl->buff_pool_base_pa = sg_phys(dl->buff_pool_base_sgt->sgl);
+	veth_emac_mem->tx_buff_pool_base_pa = dl->buff_pool_base_pa;
+
+	VETH_IPA_INFO(
+		"%s:dl->buff_pool_base_sgt = 0x%px , dl->buff_pool_base_pa =0x%lx",
+		__func__, dl->buff_pool_base_sgt, dl->buff_pool_base_pa);
 
 	return ret;
 }
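Editor's note: veth_set_ul_dl_smmu_ipa_params() repeats one kernel pattern four times (UL/DL ring and buff pool): rebuild a scatter-gather table for a coherent allocation with dma_get_sgtable(), then read the physical address of its first segment with sg_phys(). A compact sketch of that pattern, assuming a single-segment coherent region; unlike the driver, which keeps each kzalloc'd table around, this hypothetical helper frees it after extracting the address:

/* Recover the physical address backing a dma_alloc_coherent()
 * region. dev must be the device the memory was allocated against
 * (the IPA DMA device, via ipa_get_dma_dev(), for the buff pools).
 */
static phys_addr_t veth_coherent_pa(struct device *dev, void *cpu_addr,
				    dma_addr_t handle, size_t size)
{
	struct sg_table sgt;
	phys_addr_t pa;

	if (dma_get_sgtable(dev, &sgt, cpu_addr, handle, size))
		return 0;		/* caller treats 0 as failure */
	pa = sg_phys(sgt.sgl);		/* coherent mem => one segment */
	sg_free_table(&sgt);
	return pa;
}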
@@ -433,24 +469,45 @@ static int veth_map_rx_tx_setup_info_params(
 	else
 		rx_setup_info->smmu_enabled = false;
-	rx_setup_info->smmu_enabled = false;
 
 	/* RX Descriptor Base Physical Address*/
 	if (!rx_setup_info->smmu_enabled)
 		rx_setup_info->ring_base_pa = veth_emac_mem->rx_desc_mem_paddr;
-	rx_setup_info->ring_base_pa = veth_emac_mem->rx_desc_mem_paddr;
 
 	/* RX Descriptor Base Virtual Address*/
-	rx_setup_info->ring_base_iova = veth_emac_mem->rx_desc_mem_paddr;
+	if (rx_setup_info->smmu_enabled)
+		rx_setup_info->ring_base_iova = veth_emac_mem->rx_desc_mem_iova;
 
 	/* RX Descriptor Count*/
 	rx_setup_info->ntn_ring_size = VETH_RX_DESC_CNT;
 
+	/* RX Buf pool base*/
+	if (!rx_setup_info->smmu_enabled) {
 	rx_setup_info->buff_pool_base_pa =
 		veth_emac_mem->rx_buff_pool_base_pa;
+	}
-	/*this may cause issues after smmu?*/
 	rx_setup_info->buff_pool_base_iova =
-		veth_emac_mem->rx_buff_pool_base_pa;
+		veth_emac_mem->rx_buff_pool_base_iova;
 
-	/* Assign IPA to pa*/
+	/*Map RX Buff Pool*/
+	if (emac_emb_smmu_ctx.valid) {
+		/*store rx buf mem iova into buff pool addresses*/
+		veth_emac_mem->rx_buff_pool_base_va[0] =
+			veth_emac_mem->rx_buf_mem_iova;
+	} else {
+		/*store rx buf mem p addr into buff pool addresses*/
+		veth_emac_mem->rx_buff_pool_base_va[0] =
+			veth_emac_mem->rx_buf_mem_paddr;
+	}
+
+	for (i = 0; i < VETH_RX_DESC_CNT; i++) {
+		veth_emac_mem->rx_buff_pool_base_va[i] =
+			veth_emac_mem->rx_buff_pool_base_va[0] +
+			i*VETH_ETH_FRAME_LEN_IPA;
+		VETH_IPA_DEBUG(
+			"%s: veth_emac_mem->rx_buff_pool_base[%d] 0x%x\n",
+			__func__, i,
+			veth_emac_mem->rx_buff_pool_base_va[i]);
+	}
 
 	/*RX buffer Count*/
 	rx_setup_info->num_buffers = VETH_RX_DESC_CNT - 1;

@@ -464,20 +521,26 @@ static int veth_map_rx_tx_setup_info_params(
 	else
 		tx_setup_info->smmu_enabled = false;
-	tx_setup_info->smmu_enabled = false;
 
+	if (!tx_setup_info->smmu_enabled)
 	tx_setup_info->ring_base_pa =
 		veth_emac_mem->tx_desc_mem_paddr;
 
 	/* TX Descriptor Base Virtual Address*/
-	tx_setup_info->ring_base_iova = veth_emac_mem->tx_desc_mem_paddr;
+	if (tx_setup_info->smmu_enabled)
+		tx_setup_info->ring_base_iova = veth_emac_mem->tx_desc_mem_iova;
 
 	/* TX Descriptor Count*/
 	tx_setup_info->ntn_ring_size = VETH_TX_DESC_CNT;
 
-	tx_setup_info->buff_pool_base_pa = veth_emac_mem->tx_buff_pool_base_pa;
+	/* Tx Buf pool base*/
+	if (!tx_setup_info->smmu_enabled) {
+		tx_setup_info->buff_pool_base_pa =
+			veth_emac_mem->tx_buff_pool_base_pa;
+	}
 	tx_setup_info->buff_pool_base_iova =
-		veth_emac_mem->tx_buff_pool_base_pa;
+		veth_emac_mem->tx_buff_pool_base_iova;
 
 	/* TX buffer Count*/
 	tx_setup_info->num_buffers = VETH_TX_DESC_CNT-1;

@@ -485,76 +548,93 @@ static int veth_map_rx_tx_setup_info_params(
 	/* TX Frame length */
 	tx_setup_info->data_buff_size = VETH_ETH_FRAME_LEN_IPA;
 
-	/* Allocate RX Buff List*/
-	//Todo: Free this correctly
-	rx_setup_info->data_buff_list = kcalloc(rx_setup_info->num_buffers,
-		sizeof(struct ntn_buff_smmu_map), GFP_KERNEL);
-	if (rx_setup_info->data_buff_list == NULL) {
-		ret = -ENOMEM;
-		return ret;
-	}
+	/*Map TX Buff Pool*/
+	if (emac_emb_smmu_ctx.valid) {
+		/*store tx buf iova addr in buff pool addresses*/
+		veth_emac_mem->tx_buff_pool_base_va[0] =
+			veth_emac_mem->tx_buf_mem_iova;
+	} else {
+		/*store tx buf p addr in buff pool addresses*/
+		veth_emac_mem->tx_buff_pool_base_va[0] =
+			veth_emac_mem->tx_buf_mem_paddr;
+	}
+	for (i = 0; i < VETH_TX_DESC_CNT; i++) {
+		veth_emac_mem->tx_buff_pool_base_va[i] =
+			veth_emac_mem->tx_buff_pool_base_va[0] +
+			i*VETH_ETH_FRAME_LEN_IPA;
+		VETH_IPA_DEBUG(
+			"%s: veth_emac_mem->tx_buff_pool_base[%d] 0x%x\n",
+			__func__, i,
+			veth_emac_mem->tx_buff_pool_base_va[i]);
+	}
 
-	/* Allocate TX Buff List*/
-	//Todo: Free this correctly
-	tx_setup_info->data_buff_list = kcalloc(tx_setup_info->num_buffers,
-		sizeof(struct ntn_buff_smmu_map), GFP_KERNEL);
-	if (tx_setup_info->data_buff_list == NULL) {
-		ret = -ENOMEM;
-		return ret;
-	}
+	/* Allocate and Populate RX Buff List*/
+	rx_setup_info->data_buff_list = kcalloc(rx_setup_info->num_buffers,
+		sizeof(struct ntn_buff_smmu_map), GFP_KERNEL);
+	if (rx_setup_info->data_buff_list == NULL) {
+		ret = -ENOMEM;
+		return ret;
+	}
 
-	/*Populate RX Buff list. */
-	rx_setup_info->data_buff_list[0].iova =
-		veth_emac_mem->rx_buff_pool_base[0];
 	if (!rx_setup_info->smmu_enabled) {
+		/* this case we use p addr in rx_buff_pool_base[0]*/
 		rx_setup_info->data_buff_list[0].pa =
-			rx_setup_info->data_buff_list[0].iova;
-		//VETH_IPA_DEBUG
-		//("rx_setup_info->data_buff_list[0].pa = 0x%lx",
-		//	rx_setup_info->data_buff_list[0].pa);
+			veth_emac_mem->rx_buf_mem_paddr;
 	} else {
 		rx_setup_info->data_buff_list[0].pa =
 			veth_emac_mem->rx_buf_mem_paddr;
-		//VETH_IPA_DEBUG
-		//("rx_setup_info->data_buff_list[0].pa = 0x%lx",
-		//	rx_setup_info->data_buff_list[0].pa);
+		rx_setup_info->data_buff_list[0].iova =
+			veth_emac_mem->rx_buf_mem_iova;
 	}
 
 	for (i = 0; i <= rx_setup_info->num_buffers; i++) {
 		rx_setup_info->data_buff_list[i].iova =
-			veth_emac_mem->rx_buff_pool_base[i];
+			rx_setup_info->data_buff_list[0].iova +
+			i*VETH_ETH_FRAME_LEN_IPA;
 		rx_setup_info->data_buff_list[i].pa =
-			veth_emac_mem->rx_buff_pool_base[i];
-		//VETH_IPA_DEBUG
-		//("rx_setup_info->data_buff_list[%d].pa = 0x%lx",
-		//	i, rx_setup_info->data_buff_list[i].pa);
+			rx_setup_info->data_buff_list[0].pa +
+			i*VETH_ETH_FRAME_LEN_IPA;
+		VETH_IPA_INFO("rx_setup_info->data_buff_list[%d].iova = 0x%lx",
+			i, rx_setup_info->data_buff_list[i].iova);
+		VETH_IPA_INFO("rx_setup_info->data_buff_list[%d].pa = 0x%lx",
+			i, rx_setup_info->data_buff_list[i].pa);
 	}
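Editor's note: all four population loops above share the same arithmetic: slot i holds base + i * VETH_ETH_FRAME_LEN_IPA, so one flat allocation backs every descriptor's data buffer, in both the IOVA and the physical views. A worked example with an illustrative base address and the frame length this patch sets (1 << 12):

/* Slot math used by the pool and buff-list loops above.
 * e.g. base 0x80000000: slot 0 -> 0x80000000,
 * slot 1 -> 0x80001000, slot 3 -> 0x80003000.
 */
static inline uint32_t veth_slot_addr(uint32_t base, int i)
{
	return base + i * (1 << 12);	/* VETH_ETH_FRAME_LEN_IPA */
}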
-	/*Populate TX Buff list. */
-	tx_setup_info->data_buff_list[0].iova =
-		veth_emac_mem->tx_buff_pool_base[0];
+	/* Allocate and Populate TX Buff List*/
+	tx_setup_info->data_buff_list = kcalloc(tx_setup_info->num_buffers,
+		sizeof(struct ntn_buff_smmu_map), GFP_KERNEL);
+	if (tx_setup_info->data_buff_list == NULL) {
+		ret = -ENOMEM;
+		return ret;
+	}
+
 	if (!tx_setup_info->smmu_enabled) {
+		/* this case we use p addr in tx_buff_pool_base[0]*/
 		tx_setup_info->data_buff_list[0].pa =
-			tx_setup_info->data_buff_list[0].iova;
-		//VETH_IPA_DEBUG
-		//("tx_setup_info->data_buff_list[0].pa = 0x%lx",
-		//	tx_setup_info->data_buff_list[0].pa);
+			veth_emac_mem->tx_buf_mem_paddr;
 	} else {
 		tx_setup_info->data_buff_list[0].pa =
 			veth_emac_mem->tx_buf_mem_paddr;
-		//VETH_IPA_INFO
-		//("tx_setup_info->data_buff_list[0].pa = 0x%lx",
-		//tx_setup_info->data_buff_list[0].pa);
+		tx_setup_info->data_buff_list[0].iova =
+			veth_emac_mem->tx_buf_mem_iova;
 	}
 
 	for (i = 0; i <= tx_setup_info->num_buffers; i++) {
 		tx_setup_info->data_buff_list[i].iova =
-			veth_emac_mem->tx_buff_pool_base[i];
+			tx_setup_info->data_buff_list[0].iova +
+			i*VETH_ETH_FRAME_LEN_IPA;
 		tx_setup_info->data_buff_list[i].pa =
-			veth_emac_mem->tx_buff_pool_base[i];
-		//VETH_IPA_DEBUG(
-		//"tx_setup_info->data_buff_list[%d].pa = 0x%lx",
-		//	i,tx_setup_info->data_buff_list[i].pa);
+			tx_setup_info->data_buff_list[0].pa +
+			i*VETH_ETH_FRAME_LEN_IPA;
+		VETH_IPA_INFO("tx_setup_info->data_buff_list[%d].iova = 0x%lx",
+			i, tx_setup_info->data_buff_list[i].iova);
+		VETH_IPA_INFO("tx_setup_info->data_buff_list[%d].pa = 0x%lx",
+			i, tx_setup_info->data_buff_list[i].pa);
 	}
 
 	return ret;

@@ -572,10 +652,10 @@ int veth_ipa_offload_connect(struct veth_ipa_dev *pdata)
 	int ret = 0;
 
-	/* Hard code SMMU Disable for PHASE 1*/
-	emac_emb_smmu_ctx.valid = false;
+	/* Hard code SMMU Enable for PHASE 1*/
+	emac_emb_smmu_ctx.valid = true;
 
-	VETH_IPA_DEBUG("%s - begin\n", __func__);
+	VETH_IPA_DEBUG("%s - begin smmu_s2_enb=%d\n", __func__,
+		emac_emb_smmu_ctx.valid);
 
 	if (!pdata) {
 		VETH_IPA_ERROR("Null Param %s\n", __func__);

@@ -947,7 +1027,9 @@ static void veth_ipa_offload_event_handler(
 			VETH_IPA_DEBUG("%s - veth_emac_init\n",
 				__func__);
 
-			ret = veth_emac_init(&(pdata->veth_emac_mem), pdata);
+			ret = veth_emac_init(&(pdata->veth_emac_mem), pdata,
+				emac_emb_smmu_ctx.valid);
 			if (ret) {
 				pr_err("%s: veth_alloc_emac_export_mem failed error %d",
					__func__,

@@ -1361,32 +1443,59 @@ static int veth_ipa_uc_ready(struct veth_ipa_dev *pdata)
 */
static int veth_ipa_emac_evt_mgmt(void *arg)
{
-	/*Wait on HAV receive here*/
 	int ret = 0;
 	int timeout_ms = 100;
-	int pdata_recv = 0;
-	int pdate_size = sizeof(pdata_recv);
+	struct emac_hab_mm_message pdata_recv;
+	//veth_emac_import_iova msg;
+	int pdata_size = sizeof(pdata_recv);
 	struct veth_ipa_dev *pdata = (struct veth_ipa_dev *)arg;
 
+	//memset(&msg, 0, sizeof(struct veth_emac_import_iova) );
 	VETH_IPA_INFO("%s: vc_id %d\n", __func__, pdata->veth_emac_mem.vc_id);
 
 	while (1) {
 		ret = habmm_socket_recv(pdata->veth_emac_mem.vc_id,
 			&pdata_recv,
-			&pdate_size,
+			&pdata_size,
 			timeout_ms,
-			HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING);
+			0x0);
+		VETH_IPA_INFO("EVENT ID Received: %x", pdata_recv.event_id);
 		if (!ret) {
-			VETH_IPA_INFO("%s: pdata_recv %d\n", __func__,
+			VETH_IPA_INFO("%s: msg->event_id %d\n", __func__,
				pdata_recv);
-			switch (pdata_recv) {
+			switch (pdata_recv.event_id) {
 			case EV_IPA_EMAC_INIT:
+				/* To avoid spurious events, possibly not
+				 * required once state machine is available
+				 */
 				if (!pdata->prv_ipa.emac_init) {
-					VETH_IPA_INFO("EMAC_INIT\n");
+					VETH_IPA_INFO("EMAC_INIT event received\n");
+					pr_info("%s: emac_init set to true ",
+						__func__);
 					veth_ipa_emac_init_done_cb(pdata);
 					pdata->prv_ipa.emac_init = true;
 				}
 				break;
 			case EV_IPA_EMAC_SETUP:
+				/*use memcpy_s later instead*/
+				pdata->veth_emac_mem.tx_desc_mem_iova = (dma_addr_t)
+					pdata_recv.msg_type.iova.tx_desc_mem_iova;
+				pdata->veth_emac_mem.rx_desc_mem_iova = (dma_addr_t)
+					pdata_recv.msg_type.iova.rx_desc_mem_iova;
+				pdata->veth_emac_mem.tx_buf_mem_iova = (dma_addr_t)
+					pdata_recv.msg_type.iova.tx_buf_mem_iova;
+				pdata->veth_emac_mem.rx_buf_mem_iova = (dma_addr_t)
+					pdata_recv.msg_type.iova.rx_buf_mem_iova;
+				pdata->veth_emac_mem.tx_buff_pool_base_iova = (dma_addr_t)
+					pdata_recv.msg_type.iova.tx_buf_pool_base_iova;
+				pdata->veth_emac_mem.rx_buff_pool_base_iova = (dma_addr_t)
+					pdata_recv.msg_type.iova.rx_buf_pool_base_iova;
 				VETH_IPA_INFO("EMAC_SETUP event received\n");
+				VETH_IPA_INFO("union received: %x",
+					pdata->veth_emac_mem.tx_buff_pool_base_iova);
 				veth_ipa_emac_setup_done_cb(pdata);
 				break;
 			case EV_PHY_LINK_UP:

@@ -1408,6 +1517,7 @@ static int veth_ipa_emac_evt_mgmt(void *arg)
 			}
 		}
 	}
 
+	//kfree(msg);
 	return 0;
 }
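Editor's note: the event thread above now pulls a structured emac_hab_mm_message off the HAB channel instead of a bare int; event_id selects how the union payload is interpreted. A stripped-down sketch of that receive-and-dispatch shape, assuming the struct definitions from veth_ipa.h; the helper name is hypothetical:

/* Poll the BE once and dispatch on the framed event ID. */
static void veth_evt_poll_once(struct veth_ipa_dev *pdata)
{
	struct emac_hab_mm_message msg;
	uint32_t sz = sizeof(msg);

	if (habmm_socket_recv(pdata->veth_emac_mem.vc_id, &msg, &sz,
			      100 /* ms timeout */, 0))
		return;		/* timeout or error: nothing to do */

	switch (msg.event_id) {
	case EV_IPA_EMAC_SETUP:
		/* msg.msg_type.iova carries the six BE-side IOVAs */
		break;
	default:
		break;
	}
}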
@@ -1817,7 +1927,7 @@ static int veth_ipa_stop(struct net_device *net)
 	VETH_IPA_DEBUG("network device stopped\n");
 
 	if (pdata->prv_ipa.ipa_uc_ready) {
-		pr_info("%s: veth_ipa_stop veth_disable_ipa_offload",
+		pr_info("%s: veth_disable_ipa_offload",
			__func__);
 		veth_disable_ipa_offload(pdata);
 		ipa_uc_offload_dereg_rdyCB(IPA_UC_NTN);

@@ -1835,7 +1945,7 @@ static int veth_ipa_stop(struct net_device *net)
 	//HAB call for BE driver in the mutex lock causes a deadlock
 	ret = veth_emac_stop_offload(&(pdata->veth_emac_mem), pdata);
 	if (ret < 0) {
-		pr_err("%s: veth_emac_stop_offload failed", __func__);
+		pr_err("%s: failed", __func__);
 		return ret;
 	}

drivers/platform/msm/veth_ipa/veth_ipa.h (+32 −10)

@@ -119,7 +119,7 @@ static void *ipa_veth_logbuf;
 #define VETH_TX_DESC_CNT 256 /*la uses 128*/
 
 /*IPA can support 2KB max pkt length*/
-#define VETH_ETH_FRAME_LEN_IPA (1<<11)
+#define VETH_ETH_FRAME_LEN_IPA (1<<12)
 
 #define
 #define VETH_IPA_LOCK() mutex_lock(&pdata->prv_ipa.ipa_lock)
 #define VETH_IPA_UNLOCK() mutex_unlock(&pdata->prv_ipa.ipa_lock)

@@ -158,6 +158,8 @@ struct veth_emac_exp {
 	uint32_t rx_desc_exp_id;
 	uint32_t tx_buff_exp_id;
 	uint32_t rx_buff_exp_id;
+	uint32_t rx_buf_pool_exp_id;
+	uint32_t tx_buf_pool_exp_id;
 	int event_id;
 };

@@ -172,7 +174,7 @@ struct veth_emac_export_mem {
 	dma_addr_t tx_buf_mem_paddr;
 	dma_addr_t tx_buf_mem_iova;
 
-	uint32_t *tx_buff_pool_base;
+	uint32_t *tx_buff_pool_base_va;
 	dma_addr_t tx_buff_pool_base_iova;
 	dma_addr_t tx_buff_pool_base_pa;

@@ -186,7 +188,7 @@ struct veth_emac_export_mem {
 	dma_addr_t rx_buf_mem_paddr;
 	dma_addr_t rx_buf_mem_iova;
 
-	uint32_t *rx_buff_pool_base;
+	uint32_t *rx_buff_pool_base_va;
 	dma_addr_t rx_buff_pool_base_iova;
 	dma_addr_t rx_buff_pool_base_pa;

@@ -405,6 +407,26 @@ struct emac_emb_smmu_cb_ctx {
 	int ret;
 };

+/* Maintain Order same on FE*/
+struct emac_ipa_iovas {
+	/*iova addresses*/
+	void *tx_desc_mem_iova;
+	void *tx_buf_mem_iova;
+	void *tx_buf_pool_base_iova;
+	void *rx_desc_mem_iova;
+	void *rx_buf_mem_iova;
+	void *rx_buf_pool_base_iova;
+};
+
+struct emac_hab_mm_message {
+	int event_id;
+	union msg_type {
+		struct emac_ipa_iovas iova;
+	} msg_type;
+};
+
 #define GET_MEM_PDEV_DEV (emac_emb_smmu_ctx.valid ? \
	&emac_emb_smmu_ctx.smmu_pdev->dev : &params->pdev->dev)
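Editor's note: emac_hab_mm_message crosses the VM boundary as raw bytes, so the FE and QNX BE definitions must stay byte-identical (hence the "Maintain Order same on FE" comment). An illustrative compile-time guard, not part of the patch, that could live in any function on either side to catch layout drift:

/* Hypothetical guard: breaks the build if the wire format of the
 * IOVA payload ever changes size, forcing a matching update on
 * the other VM. emac_ipa_iovas is six pointers with no padding.
 */
BUILD_BUG_ON(sizeof(struct emac_ipa_iovas) != 6 * sizeof(void *));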
drivers/platform/msm/veth_ipa/veth_emac_mgt.c +164 −64 Original line number Original line Diff line number Diff line Loading @@ -123,28 +123,18 @@ int veth_alloc_emac_export_mem( VETH_IPA_DEBUG("%s: physical addr: tx buf mem 0x%x\n", VETH_IPA_DEBUG("%s: physical addr: tx buf mem 0x%x\n", __func__, veth_emac_mem->tx_buf_mem_paddr); __func__, veth_emac_mem->tx_buf_mem_paddr); veth_emac_mem->tx_buff_pool_base = /*transport minimum 4k*/ (uint32_t *)dma_zalloc_coherent(&pdata->pdev->dev, veth_emac_mem->tx_buff_pool_base_va = sizeof(uint32_t) * VETH_TX_DESC_CNT, (uint32_t *)dma_alloc_coherent(&pdata->pdev->dev, sizeof(uint32_t) * (VETH_TX_DESC_CNT * 4), &tx_buf_pool_paddr, &tx_buf_pool_paddr, GFP_KERNEL); GFP_KERNEL | GFP_DMA); if (!veth_emac_mem->tx_buff_pool_base) { if (!veth_emac_mem->tx_buff_pool_base_va) { VETH_IPA_DEBUG("%s: No memory for rx_buf_mem_va\n", __func__); VETH_IPA_DEBUG("%s: No memory for rx_buf_mem_va\n", __func__); goto free_tx_buff_pool_base; goto free_tx_buff_pool_base; } } veth_emac_mem->tx_buff_pool_base[0] = veth_emac_mem->tx_buf_mem_paddr; for (i = 0; i < VETH_TX_DESC_CNT; i++) { veth_emac_mem->tx_buff_pool_base[i] = veth_emac_mem->tx_buff_pool_base[0] + i*VETH_ETH_FRAME_LEN_IPA; VETH_IPA_DEBUG( "%s: veth_emac_mem->tx_buff_pool_base[%d] 0x%x\n", __func__, i, veth_emac_mem->tx_buff_pool_base[i]); } veth_emac_mem->tx_buff_pool_base_pa = tx_buf_pool_paddr; veth_emac_mem->tx_buff_pool_base_pa = tx_buf_pool_paddr; //Allocate RX buffers //Allocate RX buffers Loading @@ -166,28 +156,16 @@ int veth_alloc_emac_export_mem( VETH_IPA_DEBUG("%s: physical addr: rx_buf_mem_addr 0x%x\n", VETH_IPA_DEBUG("%s: physical addr: rx_buf_mem_addr 0x%x\n", __func__, veth_emac_mem->rx_buf_mem_paddr); __func__, veth_emac_mem->rx_buf_mem_paddr); veth_emac_mem->rx_buff_pool_base = veth_emac_mem->rx_buff_pool_base_va = (uint32_t *)dma_zalloc_coherent(&pdata->pdev->dev, (uint32_t *)dma_zalloc_coherent(&pdata->pdev->dev, sizeof(uint32_t) * VETH_RX_DESC_CNT, sizeof(uint32_t) * VETH_RX_DESC_CNT*4, &rx_buf_pool_paddr, &veth_emac_mem->rx_buff_pool_base_pa, GFP_KERNEL); GFP_KERNEL | GFP_DMA); if (!veth_emac_mem->rx_buff_pool_base) { if (!veth_emac_mem->rx_buff_pool_base_va) { VETH_IPA_DEBUG("%s: No memory for rx_buf_mem_va\n", __func__); VETH_IPA_DEBUG("%s: No memory for rx_buf_mem_va\n", __func__); goto free_rx_buff_pool_base; goto free_rx_buff_pool_base; } } veth_emac_mem->rx_buff_pool_base[0] = veth_emac_mem->rx_buf_mem_paddr; for (i = 0; i < VETH_RX_DESC_CNT; i++) { veth_emac_mem->rx_buff_pool_base[i] = veth_emac_mem->rx_buff_pool_base[0] + i*VETH_ETH_FRAME_LEN_IPA; VETH_IPA_DEBUG( "%s: veth_emac_mem->rx_buff_pool_base[%d] 0x%x\n", __func__, i, veth_emac_mem->rx_buff_pool_base[i]); } veth_emac_mem->rx_buff_pool_base_pa = rx_buf_pool_paddr; veth_emac_mem->rx_buff_pool_base_pa = rx_buf_pool_paddr; return 0; return 0; Loading @@ -196,7 +174,7 @@ int veth_alloc_emac_export_mem( free_rx_buff_pool_base: free_rx_buff_pool_base: dma_free_coherent(&pdata->pdev->dev, dma_free_coherent(&pdata->pdev->dev, VETH_ETH_FRAME_LEN_IPA * VETH_RX_DESC_CNT, VETH_ETH_FRAME_LEN_IPA * VETH_RX_DESC_CNT, veth_emac_mem->rx_buff_pool_base, veth_emac_mem->rx_buff_pool_base_va, rx_buf_pool_paddr); rx_buf_pool_paddr); free_rx_buf_mem_va: free_rx_buf_mem_va: dma_free_coherent(&pdata->pdev->dev, dma_free_coherent(&pdata->pdev->dev, Loading @@ -206,7 +184,7 @@ int veth_alloc_emac_export_mem( free_tx_buff_pool_base: free_tx_buff_pool_base: dma_free_coherent(&pdata->pdev->dev, dma_free_coherent(&pdata->pdev->dev, sizeof(uint32_t) * 
VETH_TX_DESC_CNT, sizeof(uint32_t) * VETH_TX_DESC_CNT, veth_emac_mem->tx_buff_pool_base, veth_emac_mem->tx_buff_pool_base_va, tx_buf_pool_paddr); tx_buf_pool_paddr); free_tx_buf_mem_va: free_tx_buf_mem_va: dma_free_coherent(&pdata->pdev->dev, dma_free_coherent(&pdata->pdev->dev, Loading Loading @@ -239,6 +217,12 @@ int veth_alloc_emac_export_mem( int veth_alloc_emac_dealloc_mem( int veth_alloc_emac_dealloc_mem( struct veth_emac_export_mem *veth_emac_mem, struct veth_ipa_dev *pdata) struct veth_emac_export_mem *veth_emac_mem, struct veth_ipa_dev *pdata) { { /*1. Send stop offload to the BE *2. Receive from BE *4. Free the memory *5. Close the HAB socket ? */ if (veth_emac_mem->rx_buf_mem_va) { if (veth_emac_mem->rx_buf_mem_va) { VETH_IPA_DEBUG("%s: Freeing RX buf mem", __func__); VETH_IPA_DEBUG("%s: Freeing RX buf mem", __func__); dma_free_coherent(&pdata->pdev->dev, dma_free_coherent(&pdata->pdev->dev, Loading Loading @@ -279,21 +263,21 @@ int veth_alloc_emac_dealloc_mem( VETH_IPA_ERROR("%s: TX desc mem not available", __func__); VETH_IPA_ERROR("%s: TX desc mem not available", __func__); } } if (veth_emac_mem->rx_buff_pool_base) { if (veth_emac_mem->rx_buff_pool_base_va) { VETH_IPA_DEBUG("%s: Freeing RX buff pool mem", __func__); VETH_IPA_DEBUG("%s: Freeing RX buff pool mem", __func__); dma_free_coherent(&pdata->pdev->dev, dma_free_coherent(&pdata->pdev->dev, sizeof(uint32_t) * VETH_RX_DESC_CNT, sizeof(uint32_t) * VETH_RX_DESC_CNT, veth_emac_mem->rx_buff_pool_base, veth_emac_mem->rx_buff_pool_base_va, veth_emac_mem->rx_buff_pool_base_pa); veth_emac_mem->rx_buff_pool_base_pa); } else { } else { VETH_IPA_ERROR("%s: RX buff pool base not available", __func__); VETH_IPA_ERROR("%s: RX buff pool base not available", __func__); } } if (veth_emac_mem->tx_buff_pool_base) { if (veth_emac_mem->tx_buff_pool_base_va) { VETH_IPA_DEBUG("%s: Freeing TX buff pool mem", __func__); VETH_IPA_DEBUG("%s: Freeing TX buff pool mem", __func__); dma_free_coherent(&pdata->pdev->dev, dma_free_coherent(&pdata->pdev->dev, sizeof(uint32_t) * VETH_TX_DESC_CNT, sizeof(uint32_t) * VETH_TX_DESC_CNT, veth_emac_mem->tx_buff_pool_base, veth_emac_mem->tx_buff_pool_base_va, veth_emac_mem->tx_buff_pool_base_pa); veth_emac_mem->tx_buff_pool_base_pa); } else { } else { VETH_IPA_ERROR("%s: TX buff pool base not available", __func__); VETH_IPA_ERROR("%s: TX buff pool base not available", __func__); Loading @@ -316,7 +300,7 @@ int veth_emac_ipa_hab_init(int mmid) int ret = 0; int ret = 0; int vc_id = 0; int vc_id = 0; char *pdata_send; char *pdata_send; char *pdata_recv; uint32_t *pdata_recv; uint32_t veth_hab_pdata_size = 32; uint32_t veth_hab_pdata_size = 32; VETH_IPA_INFO("%s: Enter HAB init\n", __func__); VETH_IPA_INFO("%s: Enter HAB init\n", __func__); Loading Loading @@ -358,7 +342,7 @@ int veth_emac_ipa_hab_init(int mmid) /*Receive ACK*/ /*Receive ACK*/ memset(pdata_recv, 1, veth_hab_pdata_size); memset(pdata_recv, 1, veth_hab_pdata_size); VETH_IPA_INFO("%s: Receiving ACK\n", __func__); VETH_IPA_INFO("%s: Receiving ACK\n", __func__); ret = habmm_socket_recv(vc_id, pdata_recv, &veth_hab_pdata_size, 0, 0); ret = habmm_socket_recv(vc_id, &pdata_recv, &veth_hab_pdata_size, 0, 0); if (ret) { if (ret) { VETH_IPA_ERROR("%s: receive failed! ret %d, recv size %d\n", VETH_IPA_ERROR("%s: receive failed! 
ret %d, recv size %d\n", Loading Loading @@ -543,6 +527,99 @@ static int veth_emac_ipa_hab_export_rx_buf( return ret; return ret; } } /** emac_ipa_hab_export_tx_buf_pool() - This API is called * for exporting the TX buf pool memory to BE driver in QNX host * @vcid: The virtual channel ID between BE and FE driver * * @veth_emac_mem - Contains the virtual and physical addresses * of the exported memory */ int emac_ipa_hab_export_tx_buf_pool( int vc_id, struct veth_emac_export_mem *veth_emac_mem, struct veth_ipa_dev *pdata) { int ret = 0; VETH_IPA_DEBUG("%s: Export TX buf pool memory TO VC_ID %d\n", __func__, vc_id); ret = habmm_export( vc_id, veth_emac_mem->tx_buff_pool_base_va, sizeof(uint32_t) * VETH_TX_DESC_CNT * 4, &veth_emac_mem->exp_id.tx_buf_pool_exp_id, 0); if (ret) { VETH_IPA_ERROR("%s: Export failed %d returned, export id %d\n", __func__, ret, veth_emac_mem->exp_id.tx_buf_pool_exp_id); ret = -1; goto err; } pr_info("%s: Export TX buf pool memory location %p %d\n", __func__, veth_emac_mem->tx_buff_pool_base_va, veth_emac_mem->exp_id.tx_buf_pool_exp_id); return ret; err: veth_alloc_emac_dealloc_mem(veth_emac_mem, pdata); return ret; } /** emac_ipa_hab_export_tx_buf_pool() - This API is called * for exporting the TX buf pool memory to BE driver in QNX host * @vcid: The virtual channel ID between BE and FE driver * * @veth_emac_mem - Contains the virtual and physical addresses * of the exported memory */ int emac_ipa_hab_export_rx_buf_pool( int vc_id, struct veth_emac_export_mem *veth_emac_mem, struct veth_ipa_dev *pdata) { int ret = 0; VETH_IPA_DEBUG("%s: Export RX buf pool memory TO VC_ID %d\n", __func__, vc_id); ret = habmm_export( vc_id, veth_emac_mem->rx_buff_pool_base_va, sizeof(uint32_t) * (VETH_RX_DESC_CNT * 4), &veth_emac_mem->exp_id.rx_buf_pool_exp_id, 0); if (ret) { VETH_IPA_ERROR("%s: Export failed %d returned, export id %d\n", __func__, ret, veth_emac_mem->exp_id.rx_buf_pool_exp_id); ret = -1 ; goto err; } pr_info("%s: Export RX buf pool memory location %p , %d\n", __func__, veth_emac_mem->rx_buff_pool_base_va, veth_emac_mem->exp_id.rx_buf_pool_exp_id); return ret; err: veth_alloc_emac_dealloc_mem(veth_emac_mem, pdata); return ret; } /** veth_emac_ipa_send_exp_id() - This API is used to send the /** veth_emac_ipa_send_exp_id() - This API is used to send the * export IDs of all the exported memory to the BE driver in * export IDs of all the exported memory to the BE driver in * QNX host * QNX host Loading @@ -551,7 +628,7 @@ static int veth_emac_ipa_hab_export_rx_buf( * @veth_emac_mem - Contains the virtual and physical addresses * @veth_emac_mem - Contains the virtual and physical addresses * of the exported memory * of the exported memory */ */ static int veth_emac_ipa_send_exp_id( int veth_emac_ipa_send_exp_id( int vc_id, struct veth_emac_export_mem *veth_emac_mem) int vc_id, struct veth_emac_export_mem *veth_emac_mem) { { int ret = 0; int ret = 0; Loading @@ -560,10 +637,13 @@ static int veth_emac_ipa_send_exp_id( &veth_emac_mem->exp_id, &veth_emac_mem->exp_id, sizeof(veth_emac_mem->exp_id), sizeof(veth_emac_mem->exp_id), NO_FLAGS); NO_FLAGS); VETH_IPA_INFO("Sent export ids to the backend driver"); VETH_IPA_INFO("TX Descriptor export id sent %x", veth_emac_mem->exp_id.tx_desc_exp_id); if (ret) { if (ret) { VETH_IPA_ERROR("%s: Send failed failed %d returned\n", VETH_IPA_ERROR("%s: Send failed failed %d returned\n", __func__, ret); __func__, ret); ret = -1; ret = -1; return ret; return ret; } } Loading @@ -572,7 +652,7 @@ static int veth_emac_ipa_send_exp_id( } } int 
veth_emac_init(struct veth_emac_export_mem *veth_emac_mem, int veth_emac_init(struct veth_emac_export_mem *veth_emac_mem, struct veth_ipa_dev *pdata) struct veth_ipa_dev *pdata, bool smmu_s2_enb) { { int ret = 0; int ret = 0; Loading Loading @@ -616,6 +696,26 @@ int veth_emac_init(struct veth_emac_export_mem *veth_emac_mem, return -ENOMEM; return -ENOMEM; } } ret = emac_ipa_hab_export_tx_buf_pool(veth_emac_mem->vc_id, veth_emac_mem, pdata); if (ret < 0) { VETH_IPA_ERROR( "HAB export of TX buff pool mem failed, returning error"); return -ENOMEM; } ret = emac_ipa_hab_export_rx_buf_pool(veth_emac_mem->vc_id, veth_emac_mem, pdata); if (ret < 0) { VETH_IPA_ERROR( "HAB export of RX buff pool mem failed, returning error"); return -ENOMEM; } ret = veth_emac_ipa_send_exp_id(veth_emac_mem->vc_id, ret = veth_emac_ipa_send_exp_id(veth_emac_mem->vc_id, veth_emac_mem); veth_emac_mem); Loading
drivers/platform/msm/veth_ipa/veth_emac_mgt.h +12 −1 Original line number Original line Diff line number Diff line Loading @@ -20,7 +20,8 @@ int veth_emac_init(struct veth_emac_export_mem *veth_emac_mem, int veth_emac_init(struct veth_emac_export_mem *veth_emac_mem, struct veth_ipa_dev *pdata); struct veth_ipa_dev *pdata, bool smmu_s2_enb); int veth_alloc_emac_export_mem(struct veth_emac_export_mem *veth_emac_mem, int veth_alloc_emac_export_mem(struct veth_emac_export_mem *veth_emac_mem, struct veth_ipa_dev *pdata); struct veth_ipa_dev *pdata); Loading @@ -40,6 +41,16 @@ int veth_emac_open_notify(struct veth_emac_export_mem *veth_emac_mem, struct veth_ipa_dev *pdata); struct veth_ipa_dev *pdata); int veth_emac_ipa_setup_complete(struct veth_emac_export_mem *veth_emac_mem, int veth_emac_ipa_setup_complete(struct veth_emac_export_mem *veth_emac_mem, struct veth_ipa_dev *pdata); struct veth_ipa_dev *pdata); int emac_ipa_hab_export_tx_buf_pool(int vc_id, struct veth_emac_export_mem *veth_emac_mem, struct veth_ipa_dev *pdata); int emac_ipa_hab_export_rx_buf_pool(int vc_id, struct veth_emac_export_mem *veth_emac_mem, struct veth_ipa_dev *pdata); int veth_emac_ipa_send_exp_id(int vc_id, struct veth_emac_export_mem *veth_emac_mem); #endif /* _VETH_EMAC_MGT_H_ */ #endif /* _VETH_EMAC_MGT_H_ */
drivers/platform/msm/veth_ipa/veth_ipa.c +223 −113 Original line number Original line Diff line number Diff line Loading @@ -330,13 +330,15 @@ int veth_set_ul_dl_smmu_ipa_params(struct veth_ipa_dev *pdata, return -EINVAL; return -EINVAL; } } /*Configure SGT for UL ring base*/ ul->ring_base_sgt = kzalloc(sizeof(ul->ring_base_sgt), GFP_KERNEL); ul->ring_base_sgt = kzalloc(sizeof(ul->ring_base_sgt), GFP_KERNEL); if (!ul->ring_base_sgt) if (!ul->ring_base_sgt) return -ENOMEM; return -ENOMEM; ret = dma_get_sgtable(&pdata->pdev->dev, ul->ring_base_sgt, ret = dma_get_sgtable(&pdata->pdev->dev, ul->ring_base_sgt, veth_emac_mem->rx_desc_mem_va, veth_emac_mem->rx_desc_mem_va, ul->ring_base_iova, veth_emac_mem->rx_desc_mem_paddr, (sizeof(struct s_RX_NORMAL_DESC) * (sizeof(struct s_RX_NORMAL_DESC) * VETH_RX_DESC_CNT)); VETH_RX_DESC_CNT)); if (ret) { if (ret) { Loading @@ -346,8 +348,16 @@ int veth_set_ul_dl_smmu_ipa_params(struct veth_ipa_dev *pdata, return -EAGAIN; return -EAGAIN; } } /*get pa*/ ul->ring_base_pa = sg_phys(ul->ring_base_sgt->sgl); ul->ring_base_pa = sg_phys(ul->ring_base_sgt->sgl); VETH_IPA_INFO( "%s:\n ul->ring_base_sgt = 0x%px , ul->ring_base_pa =0x%lx\n", __func__, ul->ring_base_sgt, ul->ring_base_pa); /*configure SGT for UL buff pool base*/ ul->buff_pool_base_sgt = kzalloc( ul->buff_pool_base_sgt = kzalloc( sizeof(ul->buff_pool_base_sgt), GFP_KERNEL); sizeof(ul->buff_pool_base_sgt), GFP_KERNEL); Loading @@ -356,11 +366,14 @@ int veth_set_ul_dl_smmu_ipa_params(struct veth_ipa_dev *pdata, return -ENOMEM; return -ENOMEM; } } ret = dma_get_sgtable(&pdata->pdev->dev, ul->buff_pool_base_sgt, ret = dma_get_sgtable(ipa_get_dma_dev(), veth_emac_mem->rx_buf_mem_va, ul->buff_pool_base_sgt, ul->buff_pool_base_iova, veth_emac_mem->rx_buff_pool_base_va, (sizeof(struct s_RX_NORMAL_DESC) * veth_emac_mem->rx_buff_pool_base_iova, VETH_RX_DESC_CNT)); (sizeof(uint32_t) * VETH_RX_DESC_CNT * 4) ); /*using ipa dev node for buff pool*/ /*overallocating to satisfy hab page alignment*/ if (ret) { if (ret) { VETH_IPA_ERROR("Failed to get IPA UL buff pool sgtable.\n"); VETH_IPA_ERROR("Failed to get IPA UL buff pool sgtable.\n"); kfree(ul->ring_base_sgt); kfree(ul->ring_base_sgt); Loading @@ -370,12 +383,21 @@ int veth_set_ul_dl_smmu_ipa_params(struct veth_ipa_dev *pdata, } } ul->buff_pool_base_pa = sg_phys(ul->buff_pool_base_sgt->sgl); ul->buff_pool_base_pa = sg_phys(ul->buff_pool_base_sgt->sgl); veth_emac_mem->rx_buff_pool_base_pa = ul->buff_pool_base_pa; VETH_IPA_INFO( "%s:\n ul->buff_pool_base_sgt = 0x%px,ul->buff_pool_base_pa =0x%lx\n", __func__, ul->buff_pool_base_sgt, ul->buff_pool_base_pa); /*Configure SGT for DL ring base*/ dl->ring_base_sgt = kzalloc(sizeof(dl->ring_base_sgt), GFP_KERNEL); dl->ring_base_sgt = kzalloc(sizeof(dl->ring_base_sgt), GFP_KERNEL); if (!dl->ring_base_sgt) if (!dl->ring_base_sgt) return -ENOMEM; return -ENOMEM; ret = dma_get_sgtable(&pdata->pdev->dev, dl->ring_base_sgt, ret = dma_get_sgtable(&pdata->pdev->dev, dl->ring_base_sgt, veth_emac_mem->tx_desc_mem_va, veth_emac_mem->tx_desc_mem_va, veth_emac_mem->tx_desc_mem_paddr, veth_emac_mem->tx_desc_mem_paddr, (sizeof(struct s_TX_NORMAL_DESC) * (sizeof(struct s_TX_NORMAL_DESC) * Loading @@ -390,17 +412,24 @@ int veth_set_ul_dl_smmu_ipa_params(struct veth_ipa_dev *pdata, } } dl->ring_base_pa = sg_phys(dl->ring_base_sgt->sgl); dl->ring_base_pa = sg_phys(dl->ring_base_sgt->sgl); VETH_IPA_INFO( "%s:\n dl->ring_base_sgt = 0x%px , dl->ring_base_pa =0x%lx\n", __func__, dl->ring_base_sgt, dl->ring_base_pa); /*configure SGT for DL buff 
pool base*/ dl->buff_pool_base_sgt = kzalloc( dl->buff_pool_base_sgt = kzalloc( sizeof(dl->buff_pool_base_sgt), GFP_KERNEL); sizeof(dl->buff_pool_base_sgt), GFP_KERNEL); if (!dl->buff_pool_base_sgt) if (!dl->buff_pool_base_sgt) return -ENOMEM; return -ENOMEM; ret = dma_get_sgtable(&pdata->pdev->dev, dl->buff_pool_base_sgt, ret = dma_get_sgtable(ipa_get_dma_dev(), veth_emac_mem->tx_buf_mem_va, dl->buff_pool_base_sgt, veth_emac_mem->tx_buf_mem_paddr, veth_emac_mem->tx_buff_pool_base_va, (sizeof(struct s_TX_NORMAL_DESC) * veth_emac_mem->tx_buff_pool_base_iova, VETH_TX_DESC_CNT)); (sizeof(uint32_t) * VETH_TX_DESC_CNT * 4) ); if (ret) { if (ret) { VETH_IPA_ERROR("Failed to get IPA DL buff pool sgtable.\n"); VETH_IPA_ERROR("Failed to get IPA DL buff pool sgtable.\n"); kfree(ul->ring_base_sgt); kfree(ul->ring_base_sgt); Loading @@ -412,6 +441,13 @@ int veth_set_ul_dl_smmu_ipa_params(struct veth_ipa_dev *pdata, } } dl->buff_pool_base_pa = sg_phys(dl->buff_pool_base_sgt->sgl); dl->buff_pool_base_pa = sg_phys(dl->buff_pool_base_sgt->sgl); veth_emac_mem->tx_buff_pool_base_pa = dl->buff_pool_base_pa; VETH_IPA_INFO( "%s:dl->buff_pool_base_sgt = 0x%px , dl->buff_pool_base_pa =0x%lx", __func__, dl->buff_pool_base_sgt, dl->buff_pool_base_pa); return ret; return ret; } } Loading @@ -433,24 +469,45 @@ static int veth_map_rx_tx_setup_info_params( else else rx_setup_info->smmu_enabled = false; rx_setup_info->smmu_enabled = false; rx_setup_info->smmu_enabled = false; /* RX Descriptor Base Physical Address*/ /* RX Descriptor Base Physical Address*/ if (!rx_setup_info->smmu_enabled) if (!rx_setup_info->smmu_enabled) rx_setup_info->ring_base_pa = veth_emac_mem->rx_desc_mem_paddr; rx_setup_info->ring_base_pa = veth_emac_mem->rx_desc_mem_paddr; rx_setup_info->ring_base_pa = veth_emac_mem->rx_desc_mem_paddr; /* RX Descriptor Base Virtual Address*/ /* RX Descriptor Base Virtual Address*/ rx_setup_info->ring_base_iova = veth_emac_mem->rx_desc_mem_paddr; if (rx_setup_info->smmu_enabled) rx_setup_info->ring_base_iova = veth_emac_mem->rx_desc_mem_iova; /* RX Descriptor Count*/ /* RX Descriptor Count*/ rx_setup_info->ntn_ring_size = VETH_RX_DESC_CNT; rx_setup_info->ntn_ring_size = VETH_RX_DESC_CNT; /* RX Buf pool base*/ if (!rx_setup_info->smmu_enabled) { rx_setup_info->buff_pool_base_pa = rx_setup_info->buff_pool_base_pa = veth_emac_mem->rx_buff_pool_base_pa; veth_emac_mem->rx_buff_pool_base_pa; } /*this may cause issues after smmu?*/ rx_setup_info->buff_pool_base_iova = rx_setup_info->buff_pool_base_iova = veth_emac_mem->rx_buff_pool_base_pa; veth_emac_mem->rx_buff_pool_base_iova; /* Assign IPA to pa*/ /*Map TX Buff Pool*/ if (emac_emb_smmu_ctx.valid) { /*store rx buf mem iova into buff pool addresses*/ veth_emac_mem->rx_buff_pool_base_va[0] = veth_emac_mem->rx_buf_mem_iova; } else { /*store rx buf mem p addr into buff pool addresse*/ veth_emac_mem->rx_buff_pool_base_va[0] = veth_emac_mem->rx_buf_mem_paddr; } for (i = 0; i < VETH_RX_DESC_CNT; i++) { veth_emac_mem->rx_buff_pool_base_va[i] = veth_emac_mem->rx_buff_pool_base_va[0] + i*VETH_ETH_FRAME_LEN_IPA; VETH_IPA_DEBUG( "%s: veth_emac_mem->rx_buff_pool_base[%d] 0x%x\n", __func__, i, veth_emac_mem->rx_buff_pool_base_va[i]); } /*RX buffer Count*/ /*RX buffer Count*/ rx_setup_info->num_buffers = VETH_RX_DESC_CNT - 1; rx_setup_info->num_buffers = VETH_RX_DESC_CNT - 1; Loading @@ -464,20 +521,26 @@ static int veth_map_rx_tx_setup_info_params( else else tx_setup_info->smmu_enabled = false; tx_setup_info->smmu_enabled = false; tx_setup_info->smmu_enabled = false; if 
(!tx_setup_info->smmu_enabled) tx_setup_info->ring_base_pa = tx_setup_info->ring_base_pa = veth_emac_mem->tx_desc_mem_paddr; veth_emac_mem->tx_desc_mem_paddr; /* TX Descriptor Base Virtual Address*/ /* TX Descriptor Base Virtual Address*/ tx_setup_info->ring_base_iova = veth_emac_mem->tx_desc_mem_paddr; if (tx_setup_info->smmu_enabled) tx_setup_info->ring_base_iova = veth_emac_mem->tx_desc_mem_iova; /* TX Descriptor Count*/ /* TX Descriptor Count*/ tx_setup_info->ntn_ring_size = VETH_TX_DESC_CNT; tx_setup_info->ntn_ring_size = VETH_TX_DESC_CNT; tx_setup_info->buff_pool_base_pa = veth_emac_mem->tx_buff_pool_base_pa; /* Tx Buf pool base*/ if (!tx_setup_info->smmu_enabled) { tx_setup_info->buff_pool_base_pa = veth_emac_mem->tx_buff_pool_base_pa; } tx_setup_info->buff_pool_base_iova = tx_setup_info->buff_pool_base_iova = veth_emac_mem->tx_buff_pool_base_pa; veth_emac_mem->tx_buff_pool_base_iova; /* TX buffer Count*/ /* TX buffer Count*/ tx_setup_info->num_buffers = VETH_TX_DESC_CNT-1; tx_setup_info->num_buffers = VETH_TX_DESC_CNT-1; Loading @@ -485,76 +548,93 @@ static int veth_map_rx_tx_setup_info_params( /* TX Frame length */ /* TX Frame length */ tx_setup_info->data_buff_size = VETH_ETH_FRAME_LEN_IPA; tx_setup_info->data_buff_size = VETH_ETH_FRAME_LEN_IPA; /* Allocate RX Buff List*/ /*Map TX Buff Pool*/ //Todo: Free this correctly if (emac_emb_smmu_ctx.valid) { rx_setup_info->data_buff_list = kcalloc(rx_setup_info->num_buffers, /*store tx buf iova addr in buff pool addresses*/ sizeof(struct ntn_buff_smmu_map), GFP_KERNEL); /*store tx buf p addr in buff pool addresses*/ if (rx_setup_info->data_buff_list == NULL) { veth_emac_mem->tx_buff_pool_base_va[0] = ret = -ENOMEM; veth_emac_mem->tx_buf_mem_iova; return ret; } else { veth_emac_mem->tx_buff_pool_base_va[0] = veth_emac_mem->tx_buf_mem_paddr; } for (i = 0; i < VETH_TX_DESC_CNT; i++) { veth_emac_mem->tx_buff_pool_base_va[i] = veth_emac_mem->tx_buff_pool_base_va[0] + i*VETH_ETH_FRAME_LEN_IPA; VETH_IPA_DEBUG( "%s: veth_emac_mem->tx_buff_pool_base[%d] 0x%x\n", __func__, i, veth_emac_mem->tx_buff_pool_base_va[i]); } } /* Allocate TX Buff List*/ //Todo: Free this correctly /* Allocate and Populate RX Buff List*/ tx_setup_info->data_buff_list = kcalloc(tx_setup_info->num_buffers, rx_setup_info->data_buff_list = kcalloc(rx_setup_info->num_buffers, sizeof(struct ntn_buff_smmu_map), GFP_KERNEL); sizeof(struct ntn_buff_smmu_map), GFP_KERNEL); if (tx_setup_info->data_buff_list == NULL) { if (rx_setup_info->data_buff_list == NULL) { ret = -ENOMEM; ret = -ENOMEM; return ret; return ret; } } /*Populate RX Buff list. 
*/ rx_setup_info->data_buff_list[0].iova = veth_emac_mem->rx_buff_pool_base[0]; if (!rx_setup_info->smmu_enabled) { if (!rx_setup_info->smmu_enabled) { /* this case we use p addr in rx_buff_pool_base[0]*/ rx_setup_info->data_buff_list[0].pa = rx_setup_info->data_buff_list[0].pa = rx_setup_info->data_buff_list[0].iova; veth_emac_mem->rx_buf_mem_paddr; //VETH_IPA_DEBUG //("rx_setup_info->data_buff_list[0].pa = 0x%lx", // rx_setup_info->data_buff_list[0].pa); } else { } else { rx_setup_info->data_buff_list[0].pa = rx_setup_info->data_buff_list[0].pa = veth_emac_mem->rx_buf_mem_paddr; veth_emac_mem->rx_buf_mem_paddr; //VETH_IPA_DEBUG //("rx_setup_info->data_buff_list[0].pa = 0x%lx", rx_setup_info->data_buff_list[0].iova = // rx_setup_info->data_buff_list[0].pa); veth_emac_mem->rx_buf_mem_iova; } } for (i = 0; i <= rx_setup_info->num_buffers; i++) { for (i = 0; i <= rx_setup_info->num_buffers; i++) { rx_setup_info->data_buff_list[i].iova = rx_setup_info->data_buff_list[i].iova = veth_emac_mem->rx_buff_pool_base[i]; rx_setup_info->data_buff_list[0].iova + i*VETH_ETH_FRAME_LEN_IPA; rx_setup_info->data_buff_list[i].pa = rx_setup_info->data_buff_list[i].pa = veth_emac_mem->rx_buff_pool_base[i]; rx_setup_info->data_buff_list[0].pa + //VETH_IPA_DEBUG i*VETH_ETH_FRAME_LEN_IPA; //("rx_setup_info->data_buff_list[%d].pa = 0x%lx", // veth_emac_mem->rx_buf_mem_paddr[i]; // i, rx_setup_info->data_buff_list[i].pa); VETH_IPA_INFO("rx_setup_info->data_buff_list[%d].iova = 0x%lx", i, rx_setup_info->data_buff_list[i].iova); VETH_IPA_INFO("rx_setup_info->data_buff_list[%d].pa = 0x%lx", i, rx_setup_info->data_buff_list[i].pa); } } /*Populate TX Buff list. */ tx_setup_info->data_buff_list[0].iova = /* Allocate and Populate TX Buff List*/ veth_emac_mem->tx_buff_pool_base[0]; tx_setup_info->data_buff_list = kcalloc(tx_setup_info->num_buffers, sizeof(struct ntn_buff_smmu_map), GFP_KERNEL); if (tx_setup_info->data_buff_list == NULL) { ret = -ENOMEM; return ret; } if (!tx_setup_info->smmu_enabled) { if (!tx_setup_info->smmu_enabled) { /* this case we use p addr in rx_buff_pool_base[0]*/ tx_setup_info->data_buff_list[0].pa = tx_setup_info->data_buff_list[0].pa = tx_setup_info->data_buff_list[0].iova; veth_emac_mem->tx_buf_mem_paddr; //VETH_IPA_DEBUG //("tx_setup_info->data_buff_list[0].pa = 0x%lx", // tx_setup_info->data_buff_list[0].pa); } else { } else { tx_setup_info->data_buff_list[0].pa = tx_setup_info->data_buff_list[0].pa = veth_emac_mem->tx_buf_mem_paddr; veth_emac_mem->tx_buf_mem_paddr; //VETH_IPA_INFO //("tx_setup_info->data_buff_list[0].pa = 0x%lx", //tx_setup_info->data_buff_list[0].pa); } tx_setup_info->data_buff_list[0].iova = veth_emac_mem->tx_buf_mem_iova; } for (i = 0; i <= tx_setup_info->num_buffers; i++) { for (i = 0; i <= tx_setup_info->num_buffers; i++) { tx_setup_info->data_buff_list[i].iova = tx_setup_info->data_buff_list[i].iova = veth_emac_mem->tx_buff_pool_base[i]; tx_setup_info->data_buff_list[0].iova + i*VETH_ETH_FRAME_LEN_IPA; tx_setup_info->data_buff_list[i].pa = tx_setup_info->data_buff_list[i].pa = veth_emac_mem->tx_buff_pool_base[i]; tx_setup_info->data_buff_list[0].pa + //VETH_IPA_DEBUG( i*VETH_ETH_FRAME_LEN_IPA; //"tx_setup_info->data_buff_list[%d].pa = 0x%lx", VETH_IPA_INFO("tx_setup_info->data_buff_list[%d].iova = 0x%lx", // i,tx_setup_info->data_buff_list[i].pa); i, tx_setup_info->data_buff_list[i].iova); VETH_IPA_INFO("tx_setup_info->data_buff_list[%d].pa = 0x%lx", i, tx_setup_info->data_buff_list[i].pa); } } return ret; return ret; Loading @@ -572,10 +652,10 @@ int 
@@ -572,10 +652,10 @@ int veth_ipa_offload_connect(struct veth_ipa_dev *pdata)

 	int ret = 0;

-	/* Hard code SMMU Disable for PHASE 1*/
-	emac_emb_smmu_ctx.valid = false;
+	/* Hard code SMMU Enable for PHASE 1*/
+	emac_emb_smmu_ctx.valid = true;

-	VETH_IPA_DEBUG("%s - begin\n", __func__);
+	VETH_IPA_DEBUG("%s - begin smmu_s2_enb=%d\n", __func__,
+		emac_emb_smmu_ctx.valid);

 	if (!pdata) {
 		VETH_IPA_ERROR("Null Param %s\n", __func__);

@@ -947,7 +1027,9 @@ static void veth_ipa_offload_event_handler(

 			VETH_IPA_DEBUG("%s - veth_emac_init\n",
 				__func__);
-			ret = veth_emac_init(&(pdata->veth_emac_mem), pdata);
+			ret = veth_emac_init(&(pdata->veth_emac_mem),
+				pdata,
+				emac_emb_smmu_ctx.valid);
 			if (ret) {
 				pr_err("%s: veth_alloc_emac_export_mem failed error %d",
 					__func__,

@@ -1361,32 +1443,59 @@ static int veth_ipa_uc_ready(struct veth_ipa_dev *pdata)
  */
 static int veth_ipa_emac_evt_mgmt(void *arg)
 {
-	/*Wait on HAB receive here*/
 	int ret = 0;
 	int timeout_ms = 100;
-	int pdata_recv = 0;
-	int pdate_size = sizeof(pdata_recv);
+	struct emac_hab_mm_message pdata_recv;
+	//veth_emac_import_iova msg;
+	int pdata_size = sizeof(pdata_recv);
 	struct veth_ipa_dev *pdata = (struct veth_ipa_dev *)arg;

+	//memset(&msg, 0, sizeof(struct veth_emac_import_iova) );
 	VETH_IPA_INFO("%s: vc_id %d\n", __func__, pdata->veth_emac_mem.vc_id);

 	while (1) {
 		ret = habmm_socket_recv(pdata->veth_emac_mem.vc_id,
 			&pdata_recv,
-			&pdate_size,
+			&pdata_size,
 			timeout_ms,
-			HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING);
+			0x0);
+		VETH_IPA_INFO("EVENT ID Received: %x", pdata_recv.event_id);
 		if (!ret) {
-			VETH_IPA_INFO("%s: pdata_recv %d\n", __func__,
-				pdata_recv);
-			switch (pdata_recv) {
+			VETH_IPA_INFO("%s: msg->event_id %d\n", __func__,
+				pdata_recv.event_id);
+			switch (pdata_recv.event_id) {
 			case EV_IPA_EMAC_INIT:
+				/* To avoid spurious events, possibly not
+				 * required once state machine is available
+				 */
 				if (!pdata->prv_ipa.emac_init) {
-					VETH_IPA_INFO("EMAC_INIT\n");
+					VETH_IPA_INFO("EMAC_INIT event received\n");
+					pr_info("%s: emac_init set to true ",
+						__func__);
 					veth_ipa_emac_init_done_cb(pdata);
 					pdata->prv_ipa.emac_init = true;
 				}
 				break;
 			case EV_IPA_EMAC_SETUP:
+				/*use memcpy_s later instead*/
+				pdata->veth_emac_mem.tx_desc_mem_iova =
+					(dma_addr_t)
+					pdata_recv.msg_type.iova.tx_desc_mem_iova;
+				pdata->veth_emac_mem.rx_desc_mem_iova =
+					(dma_addr_t)
+					pdata_recv.msg_type.iova.rx_desc_mem_iova;
+				pdata->veth_emac_mem.tx_buf_mem_iova =
+					(dma_addr_t)
+					pdata_recv.msg_type.iova.tx_buf_mem_iova;
+				pdata->veth_emac_mem.rx_buf_mem_iova =
+					(dma_addr_t)
+					pdata_recv.msg_type.iova.rx_buf_mem_iova;
+				pdata->veth_emac_mem.tx_buff_pool_base_iova =
+					(dma_addr_t)
+					pdata_recv.msg_type.iova.tx_buf_pool_base_iova;
+				pdata->veth_emac_mem.rx_buff_pool_base_iova =
+					(dma_addr_t)
+					pdata_recv.msg_type.iova.rx_buf_pool_base_iova;
 				VETH_IPA_INFO("EMAC_SETUP event received\n");
+				VETH_IPA_INFO("union received: %x",
+					pdata->veth_emac_mem.tx_buff_pool_base_iova);
 				veth_ipa_emac_setup_done_cb(pdata);
 				break;
 			case EV_PHY_LINK_UP:

@@ -1408,6 +1517,7 @@ static int veth_ipa_emac_evt_mgmt(void *arg)
 			}
 		}
 	}
+	//kfree(msg);
 	return 0;
 }

 /**

@@ -1817,7 +1927,7 @@ static int veth_ipa_stop(struct net_device *net)

 	VETH_IPA_DEBUG("network device stopped\n");

 	if (pdata->prv_ipa.ipa_uc_ready) {
-		pr_info("%s: veth_ipa_stop veth_disable_ipa_offload",
+		pr_info("%s: veth_disable_ipa_offload",
 			__func__);
 		veth_disable_ipa_offload(pdata);
 		ipa_uc_offload_dereg_rdyCB(IPA_UC_NTN);

@@ -1835,7 +1945,7 @@ static int veth_ipa_stop(struct net_device *net)

 	//HAB call for BE driver in the mutex lock causes a deadlock
 	ret = veth_emac_stop_offload(&(pdata->veth_emac_mem), pdata);
 	if (ret < 0) {
-		pr_err("%s: veth_emac_stop_offload failed", __func__);
+		pr_err("%s: failed", __func__);
 		return ret;
 	}
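Review note: replacing HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING with flags 0x0 presumably makes habmm_socket_recv block for up to timeout_ms per call (worth confirming against the HAB API docs), so the while (1) loop polls at most once per timeout instead of spinning. The resulting receive-and-dispatch skeleton, reduced to its shape (handlers elided; vc_id and timeout_ms assumed from context, emac_hab_mm_message as declared in veth_ipa.h below):

	struct emac_hab_mm_message msg;
	int size = sizeof(msg);
	int ret;

	while (1) {
		/* bounded blocking receive; nonzero return = timeout/error */
		ret = habmm_socket_recv(vc_id, &msg, &size, timeout_ms, 0x0);
		if (ret)
			continue;	/* nothing received yet, poll again */

		switch (msg.event_id) {
		case EV_IPA_EMAC_INIT:	/* run the init-done callback once */
		case EV_IPA_EMAC_SETUP:	/* import IOVAs, run setup callback */
		case EV_PHY_LINK_UP:	/* bring up the offload data path */
		default:
			break;
		}
	}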
drivers/platform/msm/veth_ipa/veth_ipa.h +32 −10

@@ -119,7 +119,7 @@ static void *ipa_veth_logbuf;

 #define VETH_TX_DESC_CNT 256 /*la uses 128*/

 /*IPA can support 2KB max pkt length*/
-#define VETH_ETH_FRAME_LEN_IPA (1<<11)
+#define VETH_ETH_FRAME_LEN_IPA (1<<12)

 #define VETH_IPA_LOCK() mutex_lock(&pdata->prv_ipa.ipa_lock)
 #define VETH_IPA_UNLOCK() mutex_unlock(&pdata->prv_ipa.ipa_lock)

@@ -158,6 +158,8 @@ struct veth_emac_exp {
 	uint32_t rx_desc_exp_id;
 	uint32_t tx_buff_exp_id;
 	uint32_t rx_buff_exp_id;
+	uint32_t rx_buf_pool_exp_id;
+	uint32_t tx_buf_pool_exp_id;
 	int event_id;
 };

@@ -172,7 +174,7 @@ struct veth_emac_export_mem {
 	dma_addr_t tx_buf_mem_paddr;
 	dma_addr_t tx_buf_mem_iova;

-	uint32_t *tx_buff_pool_base;
+	uint32_t *tx_buff_pool_base_va;
 	dma_addr_t tx_buff_pool_base_iova;
 	dma_addr_t tx_buff_pool_base_pa;

@@ -186,7 +188,7 @@ struct veth_emac_export_mem {
 	dma_addr_t rx_buf_mem_paddr;
 	dma_addr_t rx_buf_mem_iova;

-	uint32_t *rx_buff_pool_base;
+	uint32_t *rx_buff_pool_base_va;
 	dma_addr_t rx_buff_pool_base_iova;
 	dma_addr_t rx_buff_pool_base_pa;

@@ -405,6 +407,26 @@ struct emac_emb_smmu_cb_ctx {
 	int ret;
 };

+/* Maintain Order same on FE*/
+struct emac_ipa_iovas {
+	/*iova addresses*/
+	void *tx_desc_mem_iova;
+	void *tx_buf_mem_iova;
+	void *tx_buf_pool_base_iova;
+	void *rx_desc_mem_iova;
+	void *rx_buf_mem_iova;
+	void *rx_buf_pool_base_iova;
+};
+
+struct emac_hab_mm_message {
+	int event_id;
+	union msg_type {
+		struct emac_ipa_iovas iova;
+	} msg_type;
+};
+
 #define GET_MEM_PDEV_DEV (emac_emb_smmu_ctx.valid ? \
 	&emac_emb_smmu_ctx.smmu_pdev->dev : &params->pdev->dev)
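Review note: with VETH_ETH_FRAME_LEN_IPA raised to (1<<12) = 4096, the "/*IPA can support 2KB max pkt length*/" comment above it is now stale. Separately, emac_hab_mm_message travels over HAB between the FE and BE, which is why the field order of emac_ipa_iovas must match the FE copy; since its members are void *, the wire layout also shifts if FE and BE differ in pointer width. A fixed-width variant would pin the layout on both sides (a suggestion, not what the patch does):

	#include <linux/types.h>

	/* Suggested fixed-width layout for the HAB message payload;
	 * field order must still match the FE copy. 64-bit fields keep
	 * the same size regardless of each side's pointer width.
	 */
	struct emac_ipa_iovas_fixed {
		__u64 tx_desc_mem_iova;
		__u64 tx_buf_mem_iova;
		__u64 tx_buf_pool_base_iova;
		__u64 rx_desc_mem_iova;
		__u64 rx_buf_mem_iova;
		__u64 rx_buf_pool_base_iova;
	};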