Message ID | 20230731224304.111081-6-sean.anderson@seco.com |
---|---|
State | Changes Requested |
Delegated to: | Tom Rini |
Headers | show |
Series | spl: Use common function for loading/parsing images | expand |
El Mon, Jul 31, 2023 at 06:42:57PM -0400, Sean Anderson deia: > This converts the mmc loader to spl_load. Legacy images are handled by > spl_load (via spl_parse_image_header), so mmc_load_legacy can be > omitted. > Yes. I haven't used the legacy case, but by looking at the code, it seems to me that mmc_load_legacy left the image at some mapped memory at [ spl_image->load_addr, spl_image->load_addr + size ) and the new code does too, but when the image is not aligned, the memory that gets written to was at [ spl_image->load_addr, spl_image->load_addr + size + spl_image->offset % mmc->read_bl_len ) and it will be at [ spl_image->load_addr - spl_image->offset % mmc->read_bl_len, spl_image->load_addr + size ) after this patch. Meaning both with or without this patch some memory outside the image gets written when the image is not aligned on media, but before it was some part of a block at the end and now is that part before the beginning. Whether that's better or worse I don't know. It depends on whether it's a problem to write in non mapped memory, whether there's something worth preserving there, and whether some SOC has some sort of special behaving memory just there, like it happened with the issue Jerome Forissier found (note in this case it wasn't legacy, it was simple fit) https://patchwork.ozlabs.org/project/uboot/patch/c941d835a85255ff81a21c72069c3a9fc9a8a255.1656489154.git.jerome.forissier@linaro.org/ > Signed-off-by: Sean Anderson <sean.anderson@seco.com> > --- > > Changes in v5: > - Rework to load header in spl_load > > common/spl/spl_mmc.c | 91 ++++---------------------------------------- > 1 file changed, 8 insertions(+), 83 deletions(-) > > diff --git a/common/spl/spl_mmc.c b/common/spl/spl_mmc.c > index a665091b00..c926a42c0f 100644 > --- a/common/spl/spl_mmc.c > +++ b/common/spl/spl_mmc.c > @@ -17,48 +17,6 @@ > #include <mmc.h> > #include <image.h> > > -static int mmc_load_legacy(struct spl_image_info *spl_image, > - struct spl_boot_device *bootdev, > 
- struct mmc *mmc, > - ulong sector, struct legacy_img_hdr *header) > -{ > - u32 image_offset_sectors; > - u32 image_size_sectors; > - unsigned long count; > - u32 image_offset; > - int ret; > - > - ret = spl_parse_image_header(spl_image, bootdev, header); > - if (ret) > - return ret; > - > - /* convert offset to sectors - round down */ > - image_offset_sectors = spl_image->offset / mmc->read_bl_len; > - /* calculate remaining offset */ > - image_offset = spl_image->offset % mmc->read_bl_len; > - > - /* convert size to sectors - round up */ > - image_size_sectors = (spl_image->size + mmc->read_bl_len - 1) / > - mmc->read_bl_len; > - > - /* Read the header too to avoid extra memcpy */ > - count = blk_dread(mmc_get_blk_desc(mmc), > - sector + image_offset_sectors, > - image_size_sectors, > - (void *)(ulong)spl_image->load_addr); > - debug("read %x sectors to %lx\n", image_size_sectors, > - spl_image->load_addr); > - if (count != image_size_sectors) > - return -EIO; > - > - if (image_offset) > - memmove((void *)(ulong)spl_image->load_addr, > - (void *)(ulong)spl_image->load_addr + image_offset, > - spl_image->size); > - > - return 0; > -} > - > static ulong h_spl_load_read(struct spl_load_info *load, ulong sector, > ulong count, void *buf) > { > @@ -82,47 +40,14 @@ int mmc_load_image_raw_sector(struct spl_image_info *spl_image, > struct spl_boot_device *bootdev, > struct mmc *mmc, unsigned long sector) > { > - unsigned long count; > - struct legacy_img_hdr *header; > - struct blk_desc *bd = mmc_get_blk_desc(mmc); > - int ret = 0; > - > - header = spl_get_load_buffer(-sizeof(*header), bd->blksz); > - > - /* read image header to find the image size & load address */ > - count = blk_dread(bd, sector, 1, header); > - debug("hdr read sector %lx, count=%lu\n", sector, count); > - if (count == 0) { > - ret = -EIO; > - goto end; > - } > - > - if (IS_ENABLED(CONFIG_SPL_LOAD_FIT) && > - image_get_magic(header) == FDT_MAGIC) { > - struct spl_load_info load; > - > - debug("Found 
FIT\n"); > - load.dev = mmc; > - load.priv = NULL; > - load.filename = NULL; > - load.bl_len = mmc->read_bl_len; > - load.read = h_spl_load_read; > - ret = spl_load_simple_fit(spl_image, &load, sector, header); > - } else if (IS_ENABLED(CONFIG_SPL_LOAD_IMX_CONTAINER)) { > - struct spl_load_info load; > - > - load.dev = mmc; > - load.priv = NULL; > - load.filename = NULL; > - load.bl_len = mmc->read_bl_len; > - load.read = h_spl_load_read; > - > - ret = spl_load_imx_container(spl_image, &load, sector); > - } else { > - ret = mmc_load_legacy(spl_image, bootdev, mmc, sector, header); > - } > - > -end: > + int ret; > + struct spl_load_info load = { > + .dev = mmc, > + .bl_len = mmc->read_bl_len, > + .read = h_spl_load_read, > + }; > + > + ret = spl_load(spl_image, bootdev, &load, 0, sector); > if (ret) { > #ifdef CONFIG_SPL_LIBCOMMON_SUPPORT > puts("mmc_load_image_raw_sector: mmc block read error\n"); > -- > 2.40.1 >
On 8/3/23 04:31, Xavier Drudis Ferran wrote: > El Mon, Jul 31, 2023 at 06:42:57PM -0400, Sean Anderson deia: >> This converts the mmc loader to spl_load. Legacy images are handled by >> spl_load (via spl_parse_image_header), so mmc_load_legacy can be >> omitted. >> > > Yes. I haven't used the legacy case, but by looking at the code, it > seems to me that mmc_load_legacy left the image at some mapped memory > at [ spl_image->load_addr, spl_image->load_addr + size ) > and the new code does too, but when the image is not aligned, the > memory that gets written to > was at [ spl_image->load_addr, spl_image->load_addr + size + spl_image->offset % mmc->read_bl_len ) > and it will > be at [ spl_image->load_addr - spl_image->offset % mmc->read_bl_len, spl_image->load_addr + size ) > after this patch. > > Meaning both with or without this patch some memory outside the image > gets written when the image is not aligned on media, but before it was > some part of a block at the end and now is that part before the > beginning. > > Whether that's better or worse I don't know. It depends on whether > it's a problem to write in non mapped memory, whether there's > something worth preserving there, and whether some SOC has some sort > of special behaving memory just there, like it happened with the issue > Jerome Forissier found (note in this case it wasn't legacy, it was > simple fit) Fundamentally, we can't really deal with unaligned images without a bounce-buffer. The method used by SPL_LOAD_FIT_IMAGE_BUFFER_SIZE will continue working, since we call into the FIT routines to load the image. I would like to defer bounce buffering for other images until someone actually needs it. Note that in the FIT case, you can also do `mkimage -EB`, at least if you aren't using FIT_LOAD_FULL. 
--Sean > https://cas5-0-urlprotect.trendmicro.com:443/wis/clicktime/v1/query?url=https%3a%2f%2fpatchwork.ozlabs.org%2fproject%2fuboot%2fpatch%2fc941d835a85255ff81a21c72069c3a9fc9a8a255.1656489154.git.jerome.forissier%40linaro.org%2f&umid=1ca400c8-7d50-4ae9-9abc-31dac6468719&auth=d807158c60b7d2502abde8a2fc01f40662980862-09f1f8fbc507564d04c74bc07523f5da694b0761 > >> Signed-off-by: Sean Anderson <sean.anderson@seco.com> >> --- >> >> Changes in v5: >> - Rework to load header in spl_load >> >> common/spl/spl_mmc.c | 91 ++++---------------------------------------- >> 1 file changed, 8 insertions(+), 83 deletions(-) >> >> diff --git a/common/spl/spl_mmc.c b/common/spl/spl_mmc.c >> index a665091b00..c926a42c0f 100644 >> --- a/common/spl/spl_mmc.c >> +++ b/common/spl/spl_mmc.c >> @@ -17,48 +17,6 @@ >> #include <mmc.h> >> #include <image.h> >> >> -static int mmc_load_legacy(struct spl_image_info *spl_image, >> - struct spl_boot_device *bootdev, >> - struct mmc *mmc, >> - ulong sector, struct legacy_img_hdr *header) >> -{ >> - u32 image_offset_sectors; >> - u32 image_size_sectors; >> - unsigned long count; >> - u32 image_offset; >> - int ret; >> - >> - ret = spl_parse_image_header(spl_image, bootdev, header); >> - if (ret) >> - return ret; >> - >> - /* convert offset to sectors - round down */ >> - image_offset_sectors = spl_image->offset / mmc->read_bl_len; >> - /* calculate remaining offset */ >> - image_offset = spl_image->offset % mmc->read_bl_len; >> - >> - /* convert size to sectors - round up */ >> - image_size_sectors = (spl_image->size + mmc->read_bl_len - 1) / >> - mmc->read_bl_len; >> - >> - /* Read the header too to avoid extra memcpy */ >> - count = blk_dread(mmc_get_blk_desc(mmc), >> - sector + image_offset_sectors, >> - image_size_sectors, >> - (void *)(ulong)spl_image->load_addr); >> - debug("read %x sectors to %lx\n", image_size_sectors, >> - spl_image->load_addr); >> - if (count != image_size_sectors) >> - return -EIO; >> - >> - if (image_offset) >> - 
memmove((void *)(ulong)spl_image->load_addr, >> - (void *)(ulong)spl_image->load_addr + image_offset, >> - spl_image->size); >> - >> - return 0; >> -} >> - >> static ulong h_spl_load_read(struct spl_load_info *load, ulong sector, >> ulong count, void *buf) >> { >> @@ -82,47 +40,14 @@ int mmc_load_image_raw_sector(struct spl_image_info *spl_image, >> struct spl_boot_device *bootdev, >> struct mmc *mmc, unsigned long sector) >> { >> - unsigned long count; >> - struct legacy_img_hdr *header; >> - struct blk_desc *bd = mmc_get_blk_desc(mmc); >> - int ret = 0; >> - >> - header = spl_get_load_buffer(-sizeof(*header), bd->blksz); >> - >> - /* read image header to find the image size & load address */ >> - count = blk_dread(bd, sector, 1, header); >> - debug("hdr read sector %lx, count=%lu\n", sector, count); >> - if (count == 0) { >> - ret = -EIO; >> - goto end; >> - } >> - >> - if (IS_ENABLED(CONFIG_SPL_LOAD_FIT) && >> - image_get_magic(header) == FDT_MAGIC) { >> - struct spl_load_info load; >> - >> - debug("Found FIT\n"); >> - load.dev = mmc; >> - load.priv = NULL; >> - load.filename = NULL; >> - load.bl_len = mmc->read_bl_len; >> - load.read = h_spl_load_read; >> - ret = spl_load_simple_fit(spl_image, &load, sector, header); >> - } else if (IS_ENABLED(CONFIG_SPL_LOAD_IMX_CONTAINER)) { >> - struct spl_load_info load; >> - >> - load.dev = mmc; >> - load.priv = NULL; >> - load.filename = NULL; >> - load.bl_len = mmc->read_bl_len; >> - load.read = h_spl_load_read; >> - >> - ret = spl_load_imx_container(spl_image, &load, sector); >> - } else { >> - ret = mmc_load_legacy(spl_image, bootdev, mmc, sector, header); >> - } >> - >> -end: >> + int ret; >> + struct spl_load_info load = { >> + .dev = mmc, >> + .bl_len = mmc->read_bl_len, >> + .read = h_spl_load_read, >> + }; >> + >> + ret = spl_load(spl_image, bootdev, &load, 0, sector); >> if (ret) { >> #ifdef CONFIG_SPL_LIBCOMMON_SUPPORT >> puts("mmc_load_image_raw_sector: mmc block read error\n"); >> -- >> 2.40.1 >>
On 2023-08-03 18:11, Sean Anderson wrote: > On 8/3/23 04:31, Xavier Drudis Ferran wrote: >> El Mon, Jul 31, 2023 at 06:42:57PM -0400, Sean Anderson deia: >>> This converts the mmc loader to spl_load. Legacy images are handled by >>> spl_load (via spl_parse_image_header), so mmc_load_legacy can be >>> omitted. >>> >> >> Yes. I haven't used the legacy case, but by looking at the code, it >> seems to me that mmc_load_legacy left the image at some mapped memory >> at [ spl_image->load_addr, spl_image->load_addr + size ) >> and the new code does too, but when the image is not aligned, the >> memory that gets written to >> was at [ spl_image->load_addr, spl_image->load_addr + size + spl_image->offset % mmc->read_bl_len ) >> and it will >> be at [ spl_image->load_addr - spl_image->offset % mmc->read_bl_len, spl_image->load_addr + size ) >> after this patch. >> >> Meaning both with or without this patch some memory outside the image >> gets written when the image is not aligned on media, but before it was >> some part of a block at the end and now is that part before the >> beginning. >> >> Whether that's better or worse I don't know. It depends on whether >> it's a problem to write in non mapped memory, whether there's >> something worth preserving there, and whether some SOC has some sort >> of special behaving memory just there, like it happened with the issue >> Jerome Forissier found (note in this case it wasn't legacy, it was >> simple fit) > > Fundamentally, we can't really deal with unaligned images without a > bounce-buffer. The method used by SPL_LOAD_FIT_IMAGE_BUFFER_SIZE will > continue working, since we call into the FIT routines to load the image. > I would like to defer bounce buffering for other images until someone > actually needs it. > > Note that in the FIT case, you can also do `mkimage -EB`, at least if > you aren't using FIT_LOAD_FULL. 
With the following two commits introduced in v2023.04-rc1, the alignment issue for Rockchip has been fixed and all external data is now accessed block aligned. 9b2fd2d22852 ("binman: Add support for align argument to mkimage tool") 5ad03fc77dfa ("rockchip: Align FIT image data to SD/MMC block length") Regards, Jonas > > --Sean > >> https://patchwork.ozlabs.org/project/uboot/patch/c941d835a85255ff81a21c72069c3a9fc9a8a255.1656489154.git.jerome.forissier@linaro.org/ >> >>> Signed-off-by: Sean Anderson <sean.anderson@seco.com> >>> --- >>> >>> Changes in v5: >>> - Rework to load header in spl_load >>> >>> common/spl/spl_mmc.c | 91 ++++---------------------------------------- >>> 1 file changed, 8 insertions(+), 83 deletions(-) >>> [...]
El Sun, Sep 03, 2023 at 08:17:26AM +0000, Jonas Karlman deia: > > Fundamentally, we can't really deal with unaligned images without a > > bounce-buffer. The method used by SPL_LOAD_FIT_IMAGE_BUFFER_SIZE will > > continue working, since we call into the FIT routines to load the image. Yes > > I would like to defer bounce buffering for other images until someone > > actually needs it. > > Fine. > > Note that in the FIT case, you can also do `mkimage -EB`, at least if > > you aren't using FIT_LOAD_FULL. > > With the following two commits introduced in v2023.04-rc1, the alignment > issue for Rockchip has been fixed and all external data is now accessed > block aligned. > > 9b2fd2d22852 ("binman: Add support for align argument to mkimage tool") > 5ad03fc77dfa ("rockchip: Align FIT image data to SD/MMC block length") > > Regards, > Jonas > Well, yes, thanks. I was carrying Jerome's patch still thinking it was needed for me, but I just tried without and it works too, in mmc. In spi I didn't try but it should be even easier (bl_len=1). For me it's still odd to write outside intended memory. Would a warning in case legacy image loading writes before load_addr be acceptable ? Just in case someone was using the memory there for something else.
On 9/4/23 08:59, Xavier Drudis Ferran wrote: > El Sun, Sep 03, 2023 at 08:17:26AM +0000, Jonas Karlman deia: > >> > Fundamentally, we can't really deal with unaligned images without a >> > bounce-buffer. The method used by SPL_LOAD_FIT_IMAGE_BUFFER_SIZE will >> > continue working, since we call into the FIT routines to load the image. > > Yes > >> > I would like to defer bounce buffering for other images until someone >> > actually needs it. >> > > > Fine. > >> > Note that in the FIT case, you can also do `mkimage -EB`, at least if >> > you aren't using FIT_LOAD_FULL. >> >> With the following two commits introduced in v2023.04-rc1, the alignment >> issue for Rockchip has been fixed and all external data is now accessed >> block aligned. >> >> 9b2fd2d22852 ("binman: Add support for align argument to mkimage tool") >> 5ad03fc77dfa ("rockchip: Align FIT image data to SD/MMC block length") >> >> Regards, >> Jonas >> > > Well, yes, thanks. > > I was carrying Jerome's patch still thinking it was needed for me, but > I just tried without and it works too, in mmc. In spi I didn't try but > it should be even easier (bl_len=1). > > For me it's still odd to write outside intended memory. Would a warning > in case legacy image loading writes before load_addr be acceptable ? > Just in case someone was using the memory there for something else. We could add something, but it would have to be behind SHOW_ERRORS. This series is already having a very tough time reducing bloat without adding any (other) new features. --Sean
El Tue, Sep 05, 2023 at 11:02:44AM -0400, Sean Anderson deia: > On 9/4/23 08:59, Xavier Drudis Ferran wrote: > > El Sun, Sep 03, 2023 at 08:17:26AM +0000, Jonas Karlman deia: > > > >> > Fundamentally, we can't really deal with unaligned images without a > >> > bounce-buffer. The method used by SPL_LOAD_FIT_IMAGE_BUFFER_SIZE will > >> > continue working, since we call into the FIT routines to load the image. > > > > Yes > > > >> > I would like to defer bounce buffering for other images until someone > >> > actually needs it. > >> > > > > > Fine. > > > >> > Note that in the FIT case, you can also do `mkimage -EB`, at least if > >> > you aren't using FIT_LOAD_FULL. > >> > >> With the following two commits introduced in v2023.04-rc1, the alignment > >> issue for Rockchip has been fixed and all external data is now accessed > >> block aligned. > >> > >> 9b2fd2d22852 ("binman: Add support for align argument to mkimage tool") > >> 5ad03fc77dfa ("rockchip: Align FIT image data to SD/MMC block length") > >> > >> Regards, > >> Jonas > >> > > > > Well, yes, thanks. > > > > I was carrying Jerome's patch still thinking it was needed for me, but > > I just tried without and it works too, in mmc. In spi I didn't try but > > it should be even easier (bl_len=1). > > > > For me it's still odd to write outside intended memory. Would a warning > > in case legacy image loading writes before load_addr be acceptable ? > > Just in case someone was using the memory there for something else. > > We could add something, but it would have to be behind SHOW_ERRORS. This > series is already having a very tough time reducing bloat without adding > any (other) new features. > When I looked at it I thought the check and warning was only needed at the end of spl_load, for legacy images. For fit it was already aligned by calculating offset as a integer multiple of sectors. I might misremember now. 
In cases where the buffer is reserved it could just include the extra bytes at the start before the load address, as I tried to suggest (v5 02/11), and then no warning is needed. Which is silly because the spl_get_load_buffer doesn't do much at all. But someday it might do more... Your change moves the extra bytes from the end to the start, and I find it more unexpected than just having extra bytes at the end for an image that might not have such a fixed length. I'd expect people to leave room at the end when planning memory, thinking a future image might be bigger, but not thinking to leave room at the start. But I'm not all that sure... If not a check and warning, at least note something in documentation or comments somewhere... And I think you are really adding new features, in that even if the formats and methods stay the same, there'll be more combinations available after your change. So I'd be tolerant of a small code size increase in exchange for the new flexibility. But I can understand some cases can't afford the luxury, maybe. Whatever you decide, looking forward to testing your v6 a little if I can.
diff --git a/common/spl/spl_mmc.c b/common/spl/spl_mmc.c index a665091b00..c926a42c0f 100644 --- a/common/spl/spl_mmc.c +++ b/common/spl/spl_mmc.c @@ -17,48 +17,6 @@ #include <mmc.h> #include <image.h> -static int mmc_load_legacy(struct spl_image_info *spl_image, - struct spl_boot_device *bootdev, - struct mmc *mmc, - ulong sector, struct legacy_img_hdr *header) -{ - u32 image_offset_sectors; - u32 image_size_sectors; - unsigned long count; - u32 image_offset; - int ret; - - ret = spl_parse_image_header(spl_image, bootdev, header); - if (ret) - return ret; - - /* convert offset to sectors - round down */ - image_offset_sectors = spl_image->offset / mmc->read_bl_len; - /* calculate remaining offset */ - image_offset = spl_image->offset % mmc->read_bl_len; - - /* convert size to sectors - round up */ - image_size_sectors = (spl_image->size + mmc->read_bl_len - 1) / - mmc->read_bl_len; - - /* Read the header too to avoid extra memcpy */ - count = blk_dread(mmc_get_blk_desc(mmc), - sector + image_offset_sectors, - image_size_sectors, - (void *)(ulong)spl_image->load_addr); - debug("read %x sectors to %lx\n", image_size_sectors, - spl_image->load_addr); - if (count != image_size_sectors) - return -EIO; - - if (image_offset) - memmove((void *)(ulong)spl_image->load_addr, - (void *)(ulong)spl_image->load_addr + image_offset, - spl_image->size); - - return 0; -} - static ulong h_spl_load_read(struct spl_load_info *load, ulong sector, ulong count, void *buf) { @@ -82,47 +40,14 @@ int mmc_load_image_raw_sector(struct spl_image_info *spl_image, struct spl_boot_device *bootdev, struct mmc *mmc, unsigned long sector) { - unsigned long count; - struct legacy_img_hdr *header; - struct blk_desc *bd = mmc_get_blk_desc(mmc); - int ret = 0; - - header = spl_get_load_buffer(-sizeof(*header), bd->blksz); - - /* read image header to find the image size & load address */ - count = blk_dread(bd, sector, 1, header); - debug("hdr read sector %lx, count=%lu\n", sector, count); - if (count == 
0) { - ret = -EIO; - goto end; - } - - if (IS_ENABLED(CONFIG_SPL_LOAD_FIT) && - image_get_magic(header) == FDT_MAGIC) { - struct spl_load_info load; - - debug("Found FIT\n"); - load.dev = mmc; - load.priv = NULL; - load.filename = NULL; - load.bl_len = mmc->read_bl_len; - load.read = h_spl_load_read; - ret = spl_load_simple_fit(spl_image, &load, sector, header); - } else if (IS_ENABLED(CONFIG_SPL_LOAD_IMX_CONTAINER)) { - struct spl_load_info load; - - load.dev = mmc; - load.priv = NULL; - load.filename = NULL; - load.bl_len = mmc->read_bl_len; - load.read = h_spl_load_read; - - ret = spl_load_imx_container(spl_image, &load, sector); - } else { - ret = mmc_load_legacy(spl_image, bootdev, mmc, sector, header); - } - -end: + int ret; + struct spl_load_info load = { + .dev = mmc, + .bl_len = mmc->read_bl_len, + .read = h_spl_load_read, + }; + + ret = spl_load(spl_image, bootdev, &load, 0, sector); if (ret) { #ifdef CONFIG_SPL_LIBCOMMON_SUPPORT puts("mmc_load_image_raw_sector: mmc block read error\n");
This converts the mmc loader to spl_load. Legacy images are handled by spl_load (via spl_parse_image_header), so mmc_load_legacy can be omitted. Signed-off-by: Sean Anderson <sean.anderson@seco.com> --- Changes in v5: - Rework to load header in spl_load common/spl/spl_mmc.c | 91 ++++---------------------------------------- 1 file changed, 8 insertions(+), 83 deletions(-)