drivers/dma/Kconfig +1 −1

@@ -115,7 +115,7 @@ config BCM_SBA_RAID
 	select DMA_ENGINE_RAID
 	select ASYNC_TX_DISABLE_XOR_VAL_DMA
 	select ASYNC_TX_DISABLE_PQ_VAL_DMA
-	default ARCH_BCM_IPROC
+	default m if ARCH_BCM_IPROC
 	help
 	  Enable support for Broadcom SBA RAID Engine. The SBA RAID
 	  engine is available on most of the Broadcom iProc SoCs. It

drivers/dma/bcm-sba-raid.c +37 −80

@@ -1,9 +1,14 @@
 /*
  * Copyright (C) 2017 Broadcom
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
  */

 /*
@@ -25,11 +30,8 @@
  *
  * The Broadcom SBA RAID driver does not require any register programming
  * except submitting request to SBA hardware device via mailbox channels.
- * This driver implements a DMA device with one DMA channel using a set
- * of mailbox channels provided by Broadcom SoC specific ring manager
- * driver. To exploit parallelism (as described above), all DMA request
- * coming to SBA RAID DMA channel are broken down to smaller requests
- * and submitted to multiple mailbox channels in round-robin fashion.
+ * This driver implements a DMA device with one DMA channel using a single
+ * mailbox channel provided by Broadcom SoC specific ring manager driver.
  * For having more SBA DMA channels, we can create more SBA device nodes
  * in Broadcom SoC specific DTS based on number of hardware rings supported
  * by Broadcom SoC ring manager.
@@ -85,6 +87,7 @@
 #define SBA_CMD_GALOIS			0xe

 #define SBA_MAX_REQ_PER_MBOX_CHANNEL	8192
+#define SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL	8

 /* Driver helper macros */
 #define to_sba_request(tx)		\
@@ -142,9 +145,7 @@ struct sba_device {
 	u32 max_cmds_pool_size;
 	/* Maibox client and Mailbox channels */
 	struct mbox_client client;
-	int mchans_count;
-	atomic_t mchans_current;
-	struct mbox_chan **mchans;
+	struct mbox_chan *mchan;
 	struct device *mbox_dev;
 	/* DMA device and DMA channel */
 	struct dma_device dma_dev;
@@ -200,14 +201,6 @@ static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)

 /* ====== General helper routines ===== */

-static void sba_peek_mchans(struct sba_device *sba)
-{
-	int mchan_idx;
-
-	for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++)
-		mbox_client_peek_data(sba->mchans[mchan_idx]);
-}
-
 static struct sba_request *sba_alloc_request(struct sba_device *sba)
 {
 	bool found = false;
@@ -231,7 +224,7 @@ static struct sba_request *sba_alloc_request(struct sba_device *sba)
 		 * would have completed which will create more
 		 * room for new requests.
 		 */
-		sba_peek_mchans(sba);
+		mbox_client_peek_data(sba->mchan);
 		return NULL;
 	}

@@ -369,15 +362,11 @@ static void sba_cleanup_pending_requests(struct sba_device *sba)
 static int sba_send_mbox_request(struct sba_device *sba,
 				 struct sba_request *req)
 {
-	int mchans_idx, ret = 0;
-
-	/* Select mailbox channel in round-robin fashion */
-	mchans_idx = atomic_inc_return(&sba->mchans_current);
-	mchans_idx = mchans_idx % sba->mchans_count;
+	int ret = 0;

 	/* Send message for the request */
 	req->msg.error = 0;
-	ret = mbox_send_message(sba->mchans[mchans_idx], &req->msg);
+	ret = mbox_send_message(sba->mchan, &req->msg);
 	if (ret < 0) {
 		dev_err(sba->dev, "send message failed with error %d", ret);
 		return ret;
@@ -390,7 +379,7 @@
 	}

 	/* Signal txdone for mailbox channel */
-	mbox_client_txdone(sba->mchans[mchans_idx], ret);
+	mbox_client_txdone(sba->mchan, ret);

 	return ret;
 }
@@ -402,13 +391,8 @@ static void _sba_process_pending_requests(struct sba_device *sba)
 	u32 count;
 	struct sba_request *req;

-	/*
-	 * Process few pending requests
-	 *
-	 * For now, we process (<number_of_mailbox_channels> * 8)
-	 * number of requests at a time.
-	 */
-	count = sba->mchans_count * 8;
+	/* Process few pending requests */
+	count = SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL;
 	while (!list_empty(&sba->reqs_pending_list) && count) {
 		/* Get the first pending request */
 		req = list_first_entry(&sba->reqs_pending_list,
@@ -442,7 +426,9 @@ static void sba_process_received_request(struct sba_device *sba,

 		WARN_ON(tx->cookie < 0);
 		if (tx->cookie > 0) {
+			spin_lock_irqsave(&sba->reqs_lock, flags);
 			dma_cookie_complete(tx);
+			spin_unlock_irqrestore(&sba->reqs_lock, flags);
 			dmaengine_desc_get_callback_invoke(tx, NULL);
 			dma_descriptor_unmap(tx);
 			tx->callback = NULL;
@@ -570,7 +556,7 @@ static enum dma_status sba_tx_status(struct dma_chan *dchan,
 	if (ret == DMA_COMPLETE)
 		return ret;

-	sba_peek_mchans(sba);
+	mbox_client_peek_data(sba->mchan);

 	return dma_cookie_status(dchan, cookie, txstate);
 }
@@ -1637,7 +1623,7 @@ static int sba_async_register(struct sba_device *sba)

 static int sba_probe(struct platform_device *pdev)
 {
-	int i, ret = 0, mchans_count;
+	int ret = 0;
 	struct sba_device *sba;
 	struct platform_device *mbox_pdev;
 	struct of_phandle_args args;
@@ -1650,12 +1636,11 @@ static int sba_probe(struct platform_device *pdev)
 	sba->dev = &pdev->dev;
 	platform_set_drvdata(pdev, sba);

-	/* Number of channels equals number of mailbox channels */
+	/* Number of mailbox channels should be atleast 1 */
 	ret = of_count_phandle_with_args(pdev->dev.of_node,
 					 "mboxes", "#mbox-cells");
 	if (ret <= 0)
 		return -ENODEV;
-	mchans_count = ret;

 	/* Determine SBA version from DT compatible string */
 	if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
@@ -1688,7 +1673,7 @@ static int sba_probe(struct platform_device *pdev)
 	default:
 		return -EINVAL;
 	}
-	sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL * mchans_count;
+	sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL;
 	sba->max_cmd_per_req = sba->max_pq_srcs + 3;
 	sba->max_xor_srcs = sba->max_cmd_per_req - 1;
 	sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
@@ -1702,55 +1687,30 @@ static int sba_probe(struct platform_device *pdev)
 	sba->client.knows_txdone	= true;
 	sba->client.tx_tout		= 0;

-	/* Allocate mailbox channel array */
-	sba->mchans = devm_kcalloc(&pdev->dev, mchans_count,
-				   sizeof(*sba->mchans), GFP_KERNEL);
-	if (!sba->mchans)
-		return -ENOMEM;
-
-	/* Request mailbox channels */
-	sba->mchans_count = 0;
-	for (i = 0; i < mchans_count; i++) {
-		sba->mchans[i] = mbox_request_channel(&sba->client, i);
-		if (IS_ERR(sba->mchans[i])) {
-			ret = PTR_ERR(sba->mchans[i]);
-			goto fail_free_mchans;
-		}
-		sba->mchans_count++;
+	/* Request mailbox channel */
+	sba->mchan = mbox_request_channel(&sba->client, 0);
+	if (IS_ERR(sba->mchan)) {
+		ret = PTR_ERR(sba->mchan);
+		goto fail_free_mchan;
 	}
-	atomic_set(&sba->mchans_current, 0);

 	/* Find-out underlying mailbox device */
 	ret = of_parse_phandle_with_args(pdev->dev.of_node,
 					 "mboxes", "#mbox-cells", 0, &args);
 	if (ret)
-		goto fail_free_mchans;
+		goto fail_free_mchan;
 	mbox_pdev = of_find_device_by_node(args.np);
 	of_node_put(args.np);
 	if (!mbox_pdev) {
 		ret = -ENODEV;
-		goto fail_free_mchans;
+		goto fail_free_mchan;
 	}
 	sba->mbox_dev = &mbox_pdev->dev;

-	/* All mailbox channels should be of same ring manager device */
-	for (i = 1; i < mchans_count; i++) {
-		ret = of_parse_phandle_with_args(pdev->dev.of_node,
-						 "mboxes", "#mbox-cells", i, &args);
-		if (ret)
-			goto fail_free_mchans;
-		mbox_pdev = of_find_device_by_node(args.np);
-		of_node_put(args.np);
-		if (sba->mbox_dev != &mbox_pdev->dev) {
-			ret = -EINVAL;
-			goto fail_free_mchans;
-		}
-	}
-
 	/* Prealloc channel resource */
 	ret = sba_prealloc_channel_resources(sba);
 	if (ret)
-		goto fail_free_mchans;
+		goto fail_free_mchan;

 	/* Check availability of debugfs */
 	if (!debugfs_initialized())
@@ -1777,24 +1737,22 @@ static int sba_probe(struct platform_device *pdev)
 		goto fail_free_resources;

 	/* Print device info */
-	dev_info(sba->dev, "%s using SBAv%d and %d mailbox channels",
+	dev_info(sba->dev, "%s using SBAv%d mailbox channel from %s",
 		 dma_chan_name(&sba->dma_chan), sba->ver+1,
-		 sba->mchans_count);
+		 dev_name(sba->mbox_dev));

 	return 0;

 fail_free_resources:
 	debugfs_remove_recursive(sba->root);
 	sba_freeup_channel_resources(sba);
-fail_free_mchans:
-	for (i = 0; i < sba->mchans_count; i++)
-		mbox_free_channel(sba->mchans[i]);
+fail_free_mchan:
+	mbox_free_channel(sba->mchan);
 	return ret;
 }

 static int sba_remove(struct platform_device *pdev)
 {
-	int i;
 	struct sba_device *sba = platform_get_drvdata(pdev);

 	dma_async_device_unregister(&sba->dma_dev);
@@ -1803,8 +1761,7 @@ static int sba_remove(struct platform_device *pdev)

 	sba_freeup_channel_resources(sba);

-	for (i = 0; i < sba->mchans_count; i++)
-		mbox_free_channel(sba->mchans[i]);
+	mbox_free_channel(sba->mchan);

 	return 0;
 }
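
For readers less familiar with the mailbox framework, the pattern this patch settles on is the standard single-channel mailbox client flow from <linux/mailbox_client.h>. The sketch below is not part of the patch; it is a minimal, hypothetical client (all my_* names are invented for illustration) showing the same sequence sba_probe(), sba_send_mbox_request() and sba_remove() now follow: embed a struct mbox_client with knows_txdone set, request only index 0 of the DT "mboxes" property, send with mbox_send_message(), and signal completion with mbox_client_txdone().

/*
 * Minimal sketch (not from the patch) of the single mailbox channel
 * client pattern the driver now uses. The my_* names are hypothetical;
 * the mailbox API calls are the real <linux/mailbox_client.h> ones.
 */
#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct my_client {
	struct mbox_client client;	/* embedded mailbox client */
	struct mbox_chan *mchan;	/* the single requested channel */
};

static int my_client_send(struct my_client *mc, void *msg)
{
	int ret;

	/* Queue the message on the one and only channel */
	ret = mbox_send_message(mc->mchan, msg);
	if (ret < 0)
		return ret;

	/* Client tracks completion itself (knows_txdone == true) */
	mbox_client_txdone(mc->mchan, ret);
	return 0;
}

static int my_client_probe(struct platform_device *pdev)
{
	struct my_client *mc;

	mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	mc->client.dev		= &pdev->dev;
	mc->client.tx_block	= false;
	mc->client.knows_txdone	= true;	/* we call mbox_client_txdone() */

	/* Request only index 0 of this node's "mboxes" DT property */
	mc->mchan = mbox_request_channel(&mc->client, 0);
	if (IS_ERR(mc->mchan))
		return PTR_ERR(mc->mchan);

	platform_set_drvdata(pdev, mc);
	return 0;
}

static int my_client_remove(struct platform_device *pdev)
{
	struct my_client *mc = platform_get_drvdata(pdev);

	mbox_free_channel(mc->mchan);
	return 0;
}

With this model, scaling to more SBA DMA channels moves into the device tree: each additional SBA node binds one hardware ring through its own single mailbox channel, rather than one node round-robining requests over an array of channels.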