Message ID | 20240814140700.116379-3-massimiliano.pellizzer@canonical.com
---|---
State | New
Series | CVE-2024-26787
On Wed, Aug 14, 2024 at 04:06:47PM +0200, Massimiliano Pellizzer wrote:
> From: Yann Gautier <yann.gautier@foss.st.com>
>
> [ Upstream commit 970dc9c11a17994ab878016b536612ab00d1441d ]
>
> In SDIO mode, the sg list for requests can be unaligned with what the
> STM32 SDMMC internal DMA can support. In that case, instead of failing,
> use a temporary bounce buffer to copy from/to the sg list.
> This buffer is limited to 1MB. But for that we need to also limit
> max_req_size to 1MB. It has not shown any throughput penalties for
> SD-cards or eMMC.
>
> Signed-off-by: Yann Gautier <yann.gautier@foss.st.com>
> Link: https://lore.kernel.org/r/20220328145114.334577-1-yann.gautier@foss.st.com
> Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
> Stable-dep-of: 6b1ba3f9040b ("mmc: mmci: stm32: fix DMA API overlapping mappings warning")
> Signed-off-by: Sasha Levin <sashal@kernel.org>
> (cherry picked from commit 287093040fc5cda96d25f70a5aa83c975a149c04
> linux-5.15.y)

No newline. Please keep this in one line:

(cherry picked from commit 287093040fc5cda96d25f70a5aa83c975a149c04 linux-5.15.y)

The next patch needs to be fixed as well.

> CVE-2024-26787
> Signed-off-by: Massimiliano Pellizzer <massimiliano.pellizzer@canonical.com>
> ---
>  drivers/mmc/host/mmci_stm32_sdmmc.c | 88 +++++++++++++++++++++++------
>  1 file changed, 71 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
> index aa8c0ab9efd1..cab3a52879ba 100644
> --- a/drivers/mmc/host/mmci_stm32_sdmmc.c
> +++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
> @@ -23,11 +23,16 @@ struct sdmmc_lli_desc {
>  struct sdmmc_idma {
>          dma_addr_t sg_dma;
>          void *sg_cpu;
> +        dma_addr_t bounce_dma_addr;
> +        void *bounce_buf;
> +        bool use_bounce_buffer;
>  };
>
>  int sdmmc_idma_validate_data(struct mmci_host *host,
>                               struct mmc_data *data)
>  {
> +        struct sdmmc_idma *idma = host->dma_priv;
> +        struct device *dev = mmc_dev(host->mmc);
>          struct scatterlist *sg;
>          int i;
>
> @@ -35,41 +40,69 @@ int sdmmc_idma_validate_data(struct mmci_host *host,
>           * idma has constraints on idmabase & idmasize for each element
>           * excepted the last element which has no constraint on idmasize
>           */
> +        idma->use_bounce_buffer = false;
>          for_each_sg(data->sg, sg, data->sg_len - 1, i) {
>                  if (!IS_ALIGNED(sg->offset, sizeof(u32)) ||
>                      !IS_ALIGNED(sg->length, SDMMC_IDMA_BURST)) {
> -                        dev_err(mmc_dev(host->mmc),
> +                        dev_dbg(mmc_dev(host->mmc),
>                                  "unaligned scatterlist: ofst:%x length:%d\n",
>                                  data->sg->offset, data->sg->length);
> -                        return -EINVAL;
> +                        goto use_bounce_buffer;
>                  }
>          }
>
>          if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
> -                dev_err(mmc_dev(host->mmc),
> +                dev_dbg(mmc_dev(host->mmc),
>                          "unaligned last scatterlist: ofst:%x length:%d\n",
>                          data->sg->offset, data->sg->length);
> -                return -EINVAL;
> +                goto use_bounce_buffer;
> +        }
> +
> +        return 0;
> +
> +use_bounce_buffer:
> +        if (!idma->bounce_buf) {
> +                idma->bounce_buf = dmam_alloc_coherent(dev,
> +                                                       host->mmc->max_req_size,
> +                                                       &idma->bounce_dma_addr,
> +                                                       GFP_KERNEL);
> +                if (!idma->bounce_buf) {
> +                        dev_err(dev, "Unable to map allocate DMA bounce buffer.\n");
> +                        return -ENOMEM;
> +                }
>          }
>
> +        idma->use_bounce_buffer = true;
> +
>          return 0;
>  }
>
>  static int _sdmmc_idma_prep_data(struct mmci_host *host,
>                                   struct mmc_data *data)
>  {
> -        int n_elem;
> +        struct sdmmc_idma *idma = host->dma_priv;
>
> -        n_elem = dma_map_sg(mmc_dev(host->mmc),
> -                            data->sg,
> -                            data->sg_len,
> -                            mmc_get_dma_dir(data));
> +        if (idma->use_bounce_buffer) {
> +                if (data->flags & MMC_DATA_WRITE) {
> +                        unsigned int xfer_bytes = data->blksz * data->blocks;
>
> -        if (!n_elem) {
> -                dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n");
> -                return -EINVAL;
> -        }
> +                        sg_copy_to_buffer(data->sg, data->sg_len,
> +                                          idma->bounce_buf, xfer_bytes);
> +                        dma_wmb();
> +                }
> +        } else {
> +                int n_elem;
>
> +                n_elem = dma_map_sg(mmc_dev(host->mmc),
> +                                    data->sg,
> +                                    data->sg_len,
> +                                    mmc_get_dma_dir(data));
> +
> +                if (!n_elem) {
> +                        dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n");
> +                        return -EINVAL;
> +                }
> +        }
>          return 0;
>  }
>
> @@ -86,8 +119,19 @@ static int sdmmc_idma_prep_data(struct mmci_host *host,
>  static void sdmmc_idma_unprep_data(struct mmci_host *host,
>                                     struct mmc_data *data, int err)
>  {
> -        dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
> -                     mmc_get_dma_dir(data));
> +        struct sdmmc_idma *idma = host->dma_priv;
> +
> +        if (idma->use_bounce_buffer) {
> +                if (data->flags & MMC_DATA_READ) {
> +                        unsigned int xfer_bytes = data->blksz * data->blocks;
> +
> +                        sg_copy_from_buffer(data->sg, data->sg_len,
> +                                            idma->bounce_buf, xfer_bytes);
> +                }
> +        } else {
> +                dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
> +                             mmc_get_dma_dir(data));
> +        }
>  }
>
>  static int sdmmc_idma_setup(struct mmci_host *host)
> @@ -112,6 +156,8 @@ static int sdmmc_idma_setup(struct mmci_host *host)
>                  host->mmc->max_segs = SDMMC_LLI_BUF_LEN /
>                          sizeof(struct sdmmc_lli_desc);
>                  host->mmc->max_seg_size = host->variant->stm32_idmabsize_mask;
> +
> +                host->mmc->max_req_size = SZ_1M;
>          } else {
>                  host->mmc->max_segs = 1;
>                  host->mmc->max_seg_size = host->mmc->max_req_size;
> @@ -129,8 +175,16 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
>          struct scatterlist *sg;
>          int i;
>
> -        if (!host->variant->dma_lli || data->sg_len == 1) {
> -                writel_relaxed(sg_dma_address(data->sg),
> +        if (!host->variant->dma_lli || data->sg_len == 1 ||
> +            idma->use_bounce_buffer) {
> +                u32 dma_addr;
> +
> +                if (idma->use_bounce_buffer)
> +                        dma_addr = idma->bounce_dma_addr;
> +                else
> +                        dma_addr = sg_dma_address(data->sg);
> +
> +                writel_relaxed(dma_addr,
>                                 host->base + MMCI_STM32_IDMABASE0R);
>                  writel_relaxed(MMCI_STM32_IDMAEN,
>                                 host->base + MMCI_STM32_IDMACTRLR);
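For context on the patch above: the driver decides per request whether the scatterlist can be handed to the internal DMA directly, or whether it must fall back to the contiguous bounce buffer. The sketch below mirrors that decision logic as a standalone userspace C program. It is an illustration only, not driver code: the `segment` struct and `BURST` constant are hypothetical stand-ins for the kernel's `struct scatterlist` and `SDMMC_IDMA_BURST`.

```c
/* Minimal sketch of the alignment check that gates the bounce-buffer
 * fallback. `segment` and BURST are stand-ins, not kernel types. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define BURST 8 /* assumed IDMA burst alignment, in bytes */

struct segment {
	size_t offset; /* byte offset of the segment */
	size_t length; /* byte length of the segment */
};

/* Every element needs a 4-byte aligned offset; every element except
 * the last also needs a BURST-aligned length (the last element's
 * length is unconstrained, as the driver's comment notes). */
static bool segments_dma_friendly(const struct segment *sg, int n)
{
	for (int i = 0; i < n; i++) {
		if (sg[i].offset % sizeof(uint32_t))
			return false;
		if (i < n - 1 && sg[i].length % BURST)
			return false;
	}
	return true;
}

int main(void)
{
	struct segment good[] = { { 0, 512 }, { 0, 100 } };
	struct segment bad[]  = { { 2, 512 }, { 0, 100 } }; /* odd offset */

	printf("good: %s\n", segments_dma_friendly(good, 2) ?
	       "direct DMA" : "bounce buffer");
	printf("bad:  %s\n", segments_dma_friendly(bad, 2) ?
	       "direct DMA" : "bounce buffer");
	return 0;
}
```

Note the design choice the patch makes on top of this check: rather than failing the request with -EINVAL as before, an unaligned list merely demotes the transfer to the (one-time-allocated, max_req_size-sized) bounce buffer, which is why the log level drops from dev_err to dev_dbg.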
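The other half of the change is the data movement around the transfer: on a write, the scattered payload is flattened into the contiguous bounce buffer before DMA starts; on a read, it is scattered back out afterwards. A second sketch of that copy discipline follows, again as hedged userspace C, with plain memcpy() standing in for the kernel's sg_copy_to_buffer()/sg_copy_from_buffer(); the kernel version additionally issues dma_wmb() after the copy-in to order the CPU stores against the device, which this sketch omits.

```c
/* Illustration of copy-in/copy-out around a bounced transfer.
 * memcpy() stands in for sg_copy_to_buffer()/sg_copy_from_buffer();
 * `seg` is a hypothetical stand-in for a scatterlist entry. */
#include <stdio.h>
#include <string.h>

struct seg { char *buf; size_t len; };

/* Flatten scattered segments into one contiguous buffer (write path). */
static size_t gather(char *bounce, const struct seg *sg, int n)
{
	size_t off = 0;
	for (int i = 0; i < n; i++) {
		memcpy(bounce + off, sg[i].buf, sg[i].len);
		off += sg[i].len;
	}
	return off;
}

/* Scatter the contiguous buffer back out to the segments (read path). */
static void scatter(const char *bounce, struct seg *sg, int n)
{
	size_t off = 0;
	for (int i = 0; i < n; i++) {
		memcpy(sg[i].buf, bounce + off, sg[i].len);
		off += sg[i].len;
	}
}

int main(void)
{
	char a[4] = "abc", b[4] = "def", bounce[8] = { 0 };
	struct seg sg[] = { { a, 3 }, { b, 3 } };

	size_t n = gather(bounce, sg, 2); /* before a WRITE transfer */
	printf("bounce: %.*s (%zu bytes)\n", (int)n, bounce, n);

	memcpy(bounce, "XYZUVW", 6);      /* device filled the buffer */
	scatter(bounce, sg, 2);           /* after a READ transfer */
	printf("segments: %.3s %.3s\n", a, b);
	return 0;
}
```

This also explains the max_req_size = SZ_1M cap in the patch: xfer_bytes is blksz * blocks, and capping the request size guarantees the whole transfer always fits in the 1MB bounce allocation.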