drivers/mmc/core/mmc.c  +15 −9

@@ -30,6 +30,7 @@
 #include "pwrseq.h"
 
 #define DEFAULT_CMD6_TIMEOUT_MS	500
+#define MIN_CACHE_EN_TIMEOUT_MS 1600
 
 static const unsigned int tran_exp[] = {
 	10000,		100000,		1000000,	10000000,
@@ -526,8 +527,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
 			card->cid.year += 16;
 
 	/* check whether the eMMC card supports BKOPS */
-	if (!mmc_card_broken_hpi(card) &&
-	    ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
+	if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
 		card->ext_csd.bkops = 1;
 		card->ext_csd.man_bkops_en =
 				(ext_csd[EXT_CSD_BKOPS_EN] &
@@ -1785,20 +1785,26 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
 		if (err) {
 			pr_warn("%s: Enabling HPI failed\n",
 				mmc_hostname(card->host));
+			card->ext_csd.hpi_en = 0;
 			err = 0;
-		} else
+		} else {
 			card->ext_csd.hpi_en = 1;
+		}
 	}
 
 	/*
-	 * If cache size is higher than 0, this indicates
-	 * the existence of cache and it can be turned on.
+	 * If cache size is higher than 0, this indicates the existence of cache
+	 * and it can be turned on. Note that some eMMCs from Micron has been
+	 * reported to need ~800 ms timeout, while enabling the cache after
+	 * sudden power failure tests. Let's extend the timeout to a minimum of
+	 * DEFAULT_CACHE_EN_TIMEOUT_MS and do it for all cards.
	 */
-	if (!mmc_card_broken_hpi(card) &&
-	    card->ext_csd.cache_size > 0) {
+	if (card->ext_csd.cache_size > 0) {
+		unsigned int timeout_ms = MIN_CACHE_EN_TIMEOUT_MS;
+
+		timeout_ms = max(card->ext_csd.generic_cmd6_time, timeout_ms);
 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-				EXT_CSD_CACHE_CTRL, 1,
-				card->ext_csd.generic_cmd6_time);
+				EXT_CSD_CACHE_CTRL, 1, timeout_ms);
 		if (err && err != -EBADMSG)
 			goto free_card;
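The key behavioural change above is that the cache-enable CMD6 no longer uses the card's generic_cmd6_time directly; the timeout is clamped to a floor of MIN_CACHE_EN_TIMEOUT_MS (1600 ms). A minimal, standalone C sketch of that clamping follows; the cache_en_timeout_ms() helper, the sample values and the main() harness are illustrative only and are not part of the patch.

#include <stdio.h>

#define MIN_CACHE_EN_TIMEOUT_MS 1600	/* floor used by the patch */

/* Illustrative only: pick the larger of the card's advertised CMD6
 * timeout and the 1600 ms floor, mirroring the max() in mmc_init_card(). */
static unsigned int cache_en_timeout_ms(unsigned int card_cmd6_time_ms)
{
	return card_cmd6_time_ms > MIN_CACHE_EN_TIMEOUT_MS ?
		card_cmd6_time_ms : MIN_CACHE_EN_TIMEOUT_MS;
}

int main(void)
{
	/* A card advertising a short 250 ms CMD6 time still gets 1600 ms. */
	printf("%u\n", cache_en_timeout_ms(250));	/* -> 1600 */
	/* A card advertising 2000 ms keeps its longer timeout. */
	printf("%u\n", cache_en_timeout_ms(2000));	/* -> 2000 */
	return 0;
}

The design choice is that the floor applies to all cards, not just the Micron parts that reportedly need ~800 ms, since a longer busy timeout on CMD6 is harmless for cards that finish early.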
drivers/mmc/host/omap_hsmmc.c  +11 −1

@@ -1899,7 +1899,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
 	mmc->max_blk_size = 512;       /* Block Length at max can be 1024 */
 	mmc->max_blk_count = 0xFFFF;    /* No. of Blocks is 16 bits */
 	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
-	mmc->max_seg_size = mmc->max_req_size;
 
 	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
 		     MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | MMC_CAP_CMD23;
@@ -1929,6 +1928,17 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
 		goto err_irq;
 	}
 
+	/*
+	 * Limit the maximum segment size to the lower of the request size
+	 * and the DMA engine device segment size limits. In reality, with
+	 * 32-bit transfers, the DMA engine can do longer segments than this
+	 * but there is no way to represent that in the DMA model - if we
+	 * increase this figure here, we get warnings from the DMA API debug.
+	 */
+	mmc->max_seg_size = min3(mmc->max_req_size,
+			dma_get_max_seg_size(host->rx_chan->device->dev),
+			dma_get_max_seg_size(host->tx_chan->device->dev));
+
 	/* Request IRQ for MMC operations */
 	ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0,
 			mmc_hostname(mmc), host);
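The probe path now derives max_seg_size only after the DMA channels exist, taking the smallest of the request size and both channels' segment-size limits. Below is a rough userspace sketch of that min-of-three calculation; min3u() and the hard-coded limits are stand-ins for the kernel's min3() macro and for whatever dma_get_max_seg_size() would return, so treat them as assumptions.

#include <stdio.h>

/* Hypothetical stand-in for the kernel's min3() macro. */
static unsigned int min3u(unsigned int a, unsigned int b, unsigned int c)
{
	unsigned int m = a < b ? a : b;
	return m < c ? m : c;
}

int main(void)
{
	unsigned int max_req_size = 512u * 0xFFFF;	/* 512-byte blocks, 16-bit count */
	unsigned int rx_seg_limit = 0x7FFFFFFF;		/* assumed rx DMA device limit */
	unsigned int tx_seg_limit = 65536;		/* assumed tx DMA device limit */

	/* A segment must satisfy the request size and both DMA engines. */
	unsigned int max_seg_size = min3u(max_req_size, rx_seg_limit, tx_seg_limit);

	printf("max_seg_size = %u\n", max_seg_size);	/* -> 65536 here */
	return 0;
}

Honouring the DMA device's advertised limit, even when the engine could in fact do longer 32-bit transfers, is what silences the DMA API debug warnings mentioned in the comment.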
drivers/mmc/host/sdhci-tegra.c  +4 −4

@@ -510,25 +510,25 @@ static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
 
 	err = device_property_read_u32(host->mmc->parent,
 			"nvidia,pad-autocal-pull-up-offset-3v3-timeout",
-			&autocal->pull_up_3v3);
+			&autocal->pull_up_3v3_timeout);
 	if (err)
 		autocal->pull_up_3v3_timeout = 0;
 
 	err = device_property_read_u32(host->mmc->parent,
 			"nvidia,pad-autocal-pull-down-offset-3v3-timeout",
-			&autocal->pull_down_3v3);
+			&autocal->pull_down_3v3_timeout);
 	if (err)
 		autocal->pull_down_3v3_timeout = 0;
 
 	err = device_property_read_u32(host->mmc->parent,
 			"nvidia,pad-autocal-pull-up-offset-1v8-timeout",
-			&autocal->pull_up_1v8);
+			&autocal->pull_up_1v8_timeout);
 	if (err)
 		autocal->pull_up_1v8_timeout = 0;
 
 	err = device_property_read_u32(host->mmc->parent,
 			"nvidia,pad-autocal-pull-down-offset-1v8-timeout",
-			&autocal->pull_down_1v8);
+			&autocal->pull_down_1v8_timeout);
 	if (err)
 		autocal->pull_down_1v8_timeout = 0;
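The Tegra fix is purely a store-to-the-wrong-field bug: each *-timeout device property was parsed correctly but written into the non-timeout offset member, leaving the timeout field to be set only by the error fallback. A compact sketch of the read-with-fallback pattern is shown below; the autocal struct is trimmed and read_u32_prop() is a made-up stand-in for device_property_read_u32().

#include <stdio.h>
#include <string.h>

struct autocal {	/* trimmed stand-in for the Tegra autocal offsets struct */
	unsigned int pull_up_3v3;
	unsigned int pull_up_3v3_timeout;
};

/* Hypothetical stand-in for device_property_read_u32(): returns 0 on
 * success and fills *val, or a negative value when the property is absent. */
static int read_u32_prop(const char *name, unsigned int *val)
{
	if (strcmp(name, "nvidia,pad-autocal-pull-up-offset-3v3-timeout") == 0) {
		*val = 7;	/* pretend the DT carries this value */
		return 0;
	}
	return -1;
}

int main(void)
{
	struct autocal ac = { 0 };

	/* The bug: the timeout property landed in pull_up_3v3; the fix stores
	 * it in pull_up_3v3_timeout, with 0 as the fallback when unset. */
	if (read_u32_prop("nvidia,pad-autocal-pull-up-offset-3v3-timeout",
			  &ac.pull_up_3v3_timeout))
		ac.pull_up_3v3_timeout = 0;

	printf("pull_up_3v3_timeout = %u\n", ac.pull_up_3v3_timeout);
	return 0;
}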
drivers/mmc/host/sdhci.c  +2 −2

@@ -127,12 +127,12 @@ static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
 {
 	u16 ctrl2;
 
-	ctrl2 = sdhci_readb(host, SDHCI_HOST_CONTROL2);
+	ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
 	if (ctrl2 & SDHCI_CTRL_V4_MODE)
 		return;
 
 	ctrl2 |= SDHCI_CTRL_V4_MODE;
-	sdhci_writeb(host, ctrl2, SDHCI_HOST_CONTROL);
+	sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
 }
 
 /*
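SDHCI_HOST_CONTROL2 is a 16-bit register, so the byte-wide sdhci_readb() could never observe SDHCI_CTRL_V4_MODE (a bit in the upper byte), and the updated value was also being written back to SDHCI_HOST_CONTROL instead of SDHCI_HOST_CONTROL2. The toy C program below illustrates why the access width matters; the register file, offsets and helpers are invented for the demonstration and only loosely mirror the SDHCI register layout.

#include <stdio.h>
#include <stdint.h>

/* Invented 8-bit register file; offsets loosely follow SDHCI, where
 * HOST_CONTROL sits at 0x28 and HOST_CONTROL2 at 0x3E. */
static uint8_t regs[0x40];

static uint8_t  readb_reg(int off) { return regs[off]; }
static uint16_t readw_reg(int off) { return regs[off] | (regs[off + 1] << 8); }
static void     writew_reg(int off, uint16_t v)
{
	regs[off] = v & 0xFF;
	regs[off + 1] = v >> 8;
}

#define HOST_CONTROL	0x28
#define HOST_CONTROL2	0x3E
#define CTRL_V4_MODE	0x1000	/* a flag in the upper byte of HOST_CONTROL2 */

int main(void)
{
	/* A byte-wide read of a 16-bit register drops the upper byte, so a
	 * flag like V4_MODE can never be seen that way. */
	writew_reg(HOST_CONTROL2, CTRL_V4_MODE);
	printf("readb: 0x%02x  readw: 0x%04x\n",
	       readb_reg(HOST_CONTROL2), readw_reg(HOST_CONTROL2));

	/* The second half of the fix: writing the merged value to
	 * HOST_CONTROL instead of HOST_CONTROL2 would clobber an unrelated
	 * register while leaving V4 mode disabled. */
	return 0;
}

With the byte accessors, the function both failed to detect an already-enabled V4 mode and corrupted HOST_CONTROL; the word-wide read/write against HOST_CONTROL2 fixes both problems at once.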