From a38c3af868ad2a7a7c04667e559570d5f81b1d51 Mon Sep 17 00:00:00 2001 From: Stefan Agner Date: Thu, 6 Dec 2018 14:57:09 +0100 Subject: [PATCH 01/12] mtd: nand: raw: allow to disable unneeded ECC layouts Each ECC layout consumes about 2984 bytes in the .data section. Allow disabling the default ECC layouts if a driver is known to provide its own ECC layout. Signed-off-by: Stefan Agner Reviewed-by: Lukasz Majewski Reviewed-by: Miquel Raynal --- drivers/mtd/nand/raw/Kconfig | 7 +++++++ drivers/mtd/nand/raw/nand_base.c | 4 ++++ 2 files changed, 11 insertions(+) diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig index fd1723feda..ffc6cc98aa 100644 --- a/drivers/mtd/nand/raw/Kconfig +++ b/drivers/mtd/nand/raw/Kconfig @@ -9,6 +9,12 @@ config SYS_NAND_SELF_INIT This option, if enabled, provides more flexible and linux-like NAND initialization process. +config SYS_NAND_DRIVER_ECC_LAYOUT + bool + help + Omit standard ECC layouts to save space. Select this if your driver + is known to provide its own ECC layout. + config NAND_ATMEL bool "Support Atmel NAND controller" imply SYS_NAND_USE_FLASH_BBT @@ -81,6 +87,7 @@ config NAND_OMAP_ELM config NAND_VF610_NFC bool "Support for Freescale NFC for VF610" select SYS_NAND_SELF_INIT + select SYS_NAND_DRIVER_ECC_LAYOUT imply CMD_NAND help Enables support for NAND Flash Controller on some Freescale diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 92daebe120..6d2ff58d86 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -47,6 +47,7 @@ #include /* Define default oob placement schemes for large and small page devices */ +#ifndef CONFIG_SYS_NAND_DRIVER_ECC_LAYOUT static struct nand_ecclayout nand_oob_8 = { .eccbytes = 3, .eccpos = {0, 1, 2}, @@ -89,6 +90,7 @@ static struct nand_ecclayout nand_oob_128 = { {.offset = 2, .length = 78} } }; +#endif static int nand_get_device(struct mtd_info *mtd, int new_state); @@ -4339,6 +4341,7 @@ int nand_scan_tail(struct mtd_info *mtd) */ if (!ecc->layout && (ecc->mode != NAND_ECC_SOFT_BCH)) { switch (mtd->oobsize) { +#ifndef CONFIG_SYS_NAND_DRIVER_ECC_LAYOUT case 8: ecc->layout = &nand_oob_8; break; @@ -4351,6 +4354,7 @@ case 128: ecc->layout = &nand_oob_128; break; +#endif default: pr_warn("No oob scheme defined for oobsize %d\n", mtd->oobsize); From 9a9d66f5eff0f443de4c2c6ca3e27771ed14b1b4 Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Tue, 11 Dec 2018 10:13:56 +0100 Subject: [PATCH 02/12] env: add spi_flash_read_env function The spi_flash_read_env function is a wrapper over spi_flash_read. Instead of reading the entire environment in one go, it reads one flash page at a time until '\0\0' is read or the end of the env partition is reached. When it reads '\0\0', it stops reading and assumes that the rest of the environment is '\0'. This is an optimization for large environments that hold only a few bytes of environment variables: only a few pages have to be read instead of the whole environment.
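As a rough illustration (assuming a 256-byte flash page and a 64 KiB CONFIG_ENV_SIZE, both board-dependent values not taken from this patch): an environment whose variables and terminating '\0\0' fit into the first page is now fetched with a single 256-byte page read instead of 256 of them, and the rest of the buffer is simply left zero-filled by the initial memset.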
Signed-off-by: Horatiu Vultur --- env/sf.c | 56 +++++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 45 insertions(+), 11 deletions(-) diff --git a/env/sf.c b/env/sf.c index 23cbad5d88..b3dec82c35 100644 --- a/env/sf.c +++ b/env/sf.c @@ -81,6 +81,40 @@ static int setup_flash_device(void) return 0; } +static int is_end(const char *addr, size_t size) +{ + /* The end of env variables is marked by '\0\0' */ + int i = 0; + + for (i = 0; i < size - 1; ++i) + if (addr[i] == 0x0 && addr[i + 1] == 0x0) + return 1; + return 0; +} + +static int spi_flash_read_env(struct spi_flash *flash, u32 offset, size_t len, + void *buf) +{ + u32 addr = 0; + u32 page_size = flash->page_size; + + memset(buf, 0x0, len); + for (int i = 0; i < len / page_size; ++i) { + int ret = spi_flash_read(flash, offset, page_size, + &((char *)buf)[addr]); + + if (ret < 0) + return ret; + + if (is_end(&((char *)buf)[addr], page_size)) + return 0; + + addr += page_size; + offset += page_size; + } + return 0; +} + #if defined(CONFIG_ENV_OFFSET_REDUND) #ifdef CMD_SAVEENV static int env_sf_save(void) @@ -116,8 +150,8 @@ static int env_sf_save(void) ret = -ENOMEM; goto done; } - ret = spi_flash_read(env_flash, saved_offset, - saved_size, saved_buffer); + ret = spi_flash_read_env(env_flash, saved_offset, + saved_size, saved_buffer); if (ret) goto done; } @@ -183,10 +217,10 @@ static int env_sf_load(void) if (ret) goto out; - read1_fail = spi_flash_read(env_flash, CONFIG_ENV_OFFSET, - CONFIG_ENV_SIZE, tmp_env1); - read2_fail = spi_flash_read(env_flash, CONFIG_ENV_OFFSET_REDUND, - CONFIG_ENV_SIZE, tmp_env2); + read1_fail = spi_flash_read_env(env_flash, CONFIG_ENV_OFFSET, + CONFIG_ENV_SIZE, tmp_env1); + read2_fail = spi_flash_read_env(env_flash, CONFIG_ENV_OFFSET_REDUND, + CONFIG_ENV_SIZE, tmp_env2); ret = env_import_redund((char *)tmp_env1, read1_fail, (char *)tmp_env2, read2_fail); @@ -220,8 +254,8 @@ static int env_sf_save(void) if (!saved_buffer) goto done; - ret = spi_flash_read(env_flash, saved_offset, - saved_size, saved_buffer); + ret = spi_flash_read_env(env_flash, saved_offset, + saved_size, saved_buffer); if (ret) goto done; } @@ -277,10 +311,10 @@ static int env_sf_load(void) if (ret) goto out; - ret = spi_flash_read(env_flash, - CONFIG_ENV_OFFSET, CONFIG_ENV_SIZE, buf); + ret = spi_flash_read_env(env_flash, CONFIG_ENV_OFFSET, CONFIG_ENV_SIZE, + buf); if (ret) { - set_default_env("spi_flash_read() failed", 0); + set_default_env("spi_flash_read_env() failed", 0); goto err_read; } From a01ae0c23f3a72b05dab623806adee4634def172 Mon Sep 17 00:00:00 2001 From: Simon Goldschmidt Date: Mon, 14 Jan 2019 22:38:14 +0100 Subject: [PATCH 03/12] test: add test for lib/lmb.c Add basic tests for the lmb memory allocation code used to reserve and allocate memory during boot. 
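For orientation, the allocator interface exercised by these tests follows this pattern (a minimal sketch; the RAM base, sizes and alignment are made-up example values):

	struct lmb lmb;
	phys_addr_t addr;

	lmb_init(&lmb);
	lmb_add(&lmb, 0x40000000, 0x20000000);	/* announce 512 MiB of RAM */
	lmb_reserve(&lmb, 0x48000000, 0x10000);	/* mark 64 KiB as already in use */
	addr = lmb_alloc(&lmb, 0x1000, 4096);	/* 4 KiB, 4 KiB aligned, taken from the top of RAM */
	if (addr)				/* lmb_alloc() returns 0 on failure */
		lmb_free(&lmb, addr, 0x1000);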
Signed-off-by: Simon Goldschmidt Reviewed-by: Simon Glass --- test/lib/Makefile | 1 + test/lib/lmb.c | 297 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 298 insertions(+) create mode 100644 test/lib/lmb.c diff --git a/test/lib/Makefile b/test/lib/Makefile index ea68fae566..5a636aac74 100644 --- a/test/lib/Makefile +++ b/test/lib/Makefile @@ -3,3 +3,4 @@ # (C) Copyright 2018 # Mario Six, Guntermann & Drunck GmbH, mario.six@gdsys.cc obj-y += hexdump.o +obj-y += lmb.o diff --git a/test/lib/lmb.c b/test/lib/lmb.c new file mode 100644 index 0000000000..dd7ba14b34 --- /dev/null +++ b/test/lib/lmb.c @@ -0,0 +1,297 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * (C) Copyright 2018 Simon Goldschmidt + */ + +#include +#include +#include +#include + +static int check_lmb(struct unit_test_state *uts, struct lmb *lmb, + phys_addr_t ram_base, phys_size_t ram_size, + unsigned long num_reserved, + phys_addr_t base1, phys_size_t size1, + phys_addr_t base2, phys_size_t size2, + phys_addr_t base3, phys_size_t size3) +{ + ut_asserteq(lmb->memory.cnt, 1); + ut_asserteq(lmb->memory.region[0].base, ram_base); + ut_asserteq(lmb->memory.region[0].size, ram_size); + + ut_asserteq(lmb->reserved.cnt, num_reserved); + if (num_reserved > 0) { + ut_asserteq(lmb->reserved.region[0].base, base1); + ut_asserteq(lmb->reserved.region[0].size, size1); + } + if (num_reserved > 1) { + ut_asserteq(lmb->reserved.region[1].base, base2); + ut_asserteq(lmb->reserved.region[1].size, size2); + } + if (num_reserved > 2) { + ut_asserteq(lmb->reserved.region[2].base, base3); + ut_asserteq(lmb->reserved.region[2].size, size3); + } + return 0; +} + +#define ASSERT_LMB(lmb, ram_base, ram_size, num_reserved, base1, size1, \ + base2, size2, base3, size3) \ + ut_assert(!check_lmb(uts, lmb, ram_base, ram_size, \ + num_reserved, base1, size1, base2, size2, base3, \ + size3)) + +/* + * Test helper function that reserves 64 KiB somewhere in the simulated RAM and + * then does some alloc + free tests. 
+ */ +static int test_multi_alloc(struct unit_test_state *uts, + const phys_addr_t ram, const phys_size_t ram_size, + const phys_addr_t alloc_64k_addr) +{ + const phys_addr_t ram_end = ram + ram_size; + const phys_addr_t alloc_64k_end = alloc_64k_addr + 0x10000; + + struct lmb lmb; + long ret; + phys_addr_t a, a2, b, b2, c, d; + + /* check for overflow */ + ut_assert(ram_end == 0 || ram_end > ram); + ut_assert(alloc_64k_end > alloc_64k_addr); + /* check input addresses + size */ + ut_assert(alloc_64k_addr >= ram + 8); + ut_assert(alloc_64k_end <= ram_end - 8); + + lmb_init(&lmb); + + ret = lmb_add(&lmb, ram, ram_size); + ut_asserteq(ret, 0); + + /* reserve 64KiB somewhere */ + ret = lmb_reserve(&lmb, alloc_64k_addr, 0x10000); + ut_asserteq(ret, 0); + ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000, + 0, 0, 0, 0); + + /* allocate somewhere, should be at the end of RAM */ + a = lmb_alloc(&lmb, 4, 1); + ut_asserteq(a, ram_end - 4); + ASSERT_LMB(&lmb, ram, ram_size, 2, alloc_64k_addr, 0x10000, + ram_end - 4, 4, 0, 0); + /* alloc below end of reserved region -> below reserved region */ + b = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end); + ut_asserteq(b, alloc_64k_addr - 4); + ASSERT_LMB(&lmb, ram, ram_size, 2, + alloc_64k_addr - 4, 0x10000 + 4, ram_end - 4, 4, 0, 0); + + /* 2nd time */ + c = lmb_alloc(&lmb, 4, 1); + ut_asserteq(c, ram_end - 8); + ASSERT_LMB(&lmb, ram, ram_size, 2, + alloc_64k_addr - 4, 0x10000 + 4, ram_end - 8, 8, 0, 0); + d = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end); + ut_asserteq(d, alloc_64k_addr - 8); + ASSERT_LMB(&lmb, ram, ram_size, 2, + alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 8, 0, 0); + + ret = lmb_free(&lmb, a, 4); + ut_asserteq(ret, 0); + ASSERT_LMB(&lmb, ram, ram_size, 2, + alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0); + /* allocate again to ensure we get the same address */ + a2 = lmb_alloc(&lmb, 4, 1); + ut_asserteq(a, a2); + ASSERT_LMB(&lmb, ram, ram_size, 2, + alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 8, 0, 0); + ret = lmb_free(&lmb, a2, 4); + ut_asserteq(ret, 0); + ASSERT_LMB(&lmb, ram, ram_size, 2, + alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0); + + ret = lmb_free(&lmb, b, 4); + ut_asserteq(ret, 0); + ASSERT_LMB(&lmb, ram, ram_size, 3, + alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000, + ram_end - 8, 4); + /* allocate again to ensure we get the same address */ + b2 = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end); + ut_asserteq(b, b2); + ASSERT_LMB(&lmb, ram, ram_size, 2, + alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0); + ret = lmb_free(&lmb, b2, 4); + ut_asserteq(ret, 0); + ASSERT_LMB(&lmb, ram, ram_size, 3, + alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000, + ram_end - 8, 4); + + ret = lmb_free(&lmb, c, 4); + ut_asserteq(ret, 0); + ASSERT_LMB(&lmb, ram, ram_size, 2, + alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000, 0, 0); + ret = lmb_free(&lmb, d, 4); + ut_asserteq(ret, 0); + ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000, + 0, 0, 0, 0); + + return 0; +} + +static int test_multi_alloc_512mb(struct unit_test_state *uts, + const phys_addr_t ram) +{ + return test_multi_alloc(uts, ram, 0x20000000, ram + 0x10000000); +} + +/* Create a memory region with one reserved region and allocate */ +static int lib_test_lmb_simple(struct unit_test_state *uts) +{ + /* simulate 512 MiB RAM beginning at 1GiB */ + return test_multi_alloc_512mb(uts, 0x40000000); +} + +DM_TEST(lib_test_lmb_simple, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT); + +/* Simulate 512 MiB RAM, allocate some blocks that fit/don't fit */ +static int 
test_bigblock(struct unit_test_state *uts, const phys_addr_t ram) +{ + const phys_size_t ram_size = 0x20000000; + const phys_size_t big_block_size = 0x10000000; + const phys_addr_t ram_end = ram + ram_size; + const phys_addr_t alloc_64k_addr = ram + 0x10000000; + struct lmb lmb; + long ret; + phys_addr_t a, b; + + /* check for overflow */ + ut_assert(ram_end == 0 || ram_end > ram); + + lmb_init(&lmb); + + ret = lmb_add(&lmb, ram, ram_size); + ut_asserteq(ret, 0); + + /* reserve 64KiB in the middle of RAM */ + ret = lmb_reserve(&lmb, alloc_64k_addr, 0x10000); + ut_asserteq(ret, 0); + ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000, + 0, 0, 0, 0); + + /* allocate a big block, should be below reserved */ + a = lmb_alloc(&lmb, big_block_size, 1); + ut_asserteq(a, ram); + ASSERT_LMB(&lmb, ram, ram_size, 1, a, + big_block_size + 0x10000, 0, 0, 0, 0); + /* allocate 2nd big block */ + /* This should fail, printing an error */ + b = lmb_alloc(&lmb, big_block_size, 1); + ut_asserteq(b, 0); + ASSERT_LMB(&lmb, ram, ram_size, 1, a, + big_block_size + 0x10000, 0, 0, 0, 0); + + ret = lmb_free(&lmb, a, big_block_size); + ut_asserteq(ret, 0); + ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000, + 0, 0, 0, 0); + + /* allocate too big block */ + /* This should fail, printing an error */ + a = lmb_alloc(&lmb, ram_size, 1); + ut_asserteq(a, 0); + ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000, + 0, 0, 0, 0); + + return 0; +} + +static int lib_test_lmb_big(struct unit_test_state *uts) +{ + return test_bigblock(uts, 0x40000000); +} + +DM_TEST(lib_test_lmb_big, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT); + +/* Simulate 512 MiB RAM, allocate a block without previous reservation */ +static int test_noreserved(struct unit_test_state *uts, const phys_addr_t ram) +{ + const phys_size_t ram_size = 0x20000000; + const phys_addr_t ram_end = ram + ram_size; + struct lmb lmb; + long ret; + phys_addr_t a, b; + + /* check for overflow */ + ut_assert(ram_end == 0 || ram_end > ram); + + lmb_init(&lmb); + + ret = lmb_add(&lmb, ram, ram_size); + ut_asserteq(ret, 0); + + /* allocate a block */ + a = lmb_alloc(&lmb, 4, 1); + ut_assert(a != 0); + /* and free it */ + ret = lmb_free(&lmb, a, 4); + ut_asserteq(ret, 0); + + /* allocate a block with base*/ + b = lmb_alloc_base(&lmb, 4, 1, ram_end); + ut_assert(a == b); + /* and free it */ + ret = lmb_free(&lmb, b, 4); + ut_asserteq(ret, 0); + + return 0; +} + +static int lib_test_lmb_noreserved(struct unit_test_state *uts) +{ + return test_noreserved(uts, 0x40000000); +} + +DM_TEST(lib_test_lmb_noreserved, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT); + +/* + * Simulate a RAM that starts at 0 and allocate down to address 0, which must + * fail as '0' means failure for the lmb_alloc functions. 
+ */ +static int lib_test_lmb_at_0(struct unit_test_state *uts) +{ + const phys_addr_t ram = 0; + const phys_size_t ram_size = 0x20000000; + struct lmb lmb; + long ret; + phys_addr_t a, b; + + lmb_init(&lmb); + + ret = lmb_add(&lmb, ram, ram_size); + ut_asserteq(ret, 0); + + /* allocate nearly everything */ + a = lmb_alloc(&lmb, ram_size - 4, 1); + ut_asserteq(a, ram + 4); + ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4, + 0, 0, 0, 0); + /* allocate the rest */ + /* This should fail as the allocated address would be 0 */ + b = lmb_alloc(&lmb, 4, 1); + ut_asserteq(b, 0); + /* check that this was an error by checking lmb */ + ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4, + 0, 0, 0, 0); + /* check that this was an error by freeing b */ + ret = lmb_free(&lmb, b, 4); + ut_asserteq(ret, -1); + ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4, + 0, 0, 0, 0); + + ret = lmb_free(&lmb, a, ram_size - 4); + ut_asserteq(ret, 0); + ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0); + + return 0; +} + +DM_TEST(lib_test_lmb_at_0, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT); From d67f33cf4ee72fd9bc64d68cb51a77798b65cf3a Mon Sep 17 00:00:00 2001 From: Simon Goldschmidt Date: Mon, 14 Jan 2019 22:38:15 +0100 Subject: [PATCH 04/12] lmb: fix allocation at end of address range The lmb code fails if base + size of RAM overflows to zero. Fix this by calculating end as 'base + size - 1' instead of 'base + size' where appropriate. Added tests to assert this is fixed. Reviewed-by: Simon Glass Signed-off-by: Simon Goldschmidt --- lib/lmb.c | 29 ++++++++++++----------------- test/lib/lmb.c | 29 ++++++++++++++++++++++++++--- 2 files changed, 38 insertions(+), 20 deletions(-) diff --git a/lib/lmb.c b/lib/lmb.c index 1705417348..6d3dcf4e09 100644 --- a/lib/lmb.c +++ b/lib/lmb.c @@ -43,7 +43,10 @@ void lmb_dump_all(struct lmb *lmb) static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1, phys_addr_t base2, phys_size_t size2) { - return ((base1 < (base2+size2)) && (base2 < (base1+size1))); + const phys_addr_t base1_end = base1 + size1 - 1; + const phys_addr_t base2_end = base2 + size2 - 1; + + return ((base1 <= base2_end) && (base2 <= base1_end)); } static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1, @@ -89,18 +92,9 @@ static void lmb_coalesce_regions(struct lmb_region *rgn, void lmb_init(struct lmb *lmb) { - /* Create a dummy zero size LMB which will get coalesced away later. - * This simplifies the lmb_add() code below... - */ - lmb->memory.region[0].base = 0; - lmb->memory.region[0].size = 0; - lmb->memory.cnt = 1; + lmb->memory.cnt = 0; lmb->memory.size = 0; - - /* Ditto. 
*/ - lmb->reserved.region[0].base = 0; - lmb->reserved.region[0].size = 0; - lmb->reserved.cnt = 1; + lmb->reserved.cnt = 0; lmb->reserved.size = 0; } @@ -110,9 +104,10 @@ static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t unsigned long coalesced = 0; long adjacent, i; - if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) { + if (rgn->cnt == 0) { rgn->region[0].base = base; rgn->region[0].size = size; + rgn->cnt = 1; return 0; } @@ -183,7 +178,7 @@ long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size) { struct lmb_region *rgn = &(lmb->reserved); phys_addr_t rgnbegin, rgnend; - phys_addr_t end = base + size; + phys_addr_t end = base + size - 1; int i; rgnbegin = rgnend = 0; /* supress gcc warnings */ @@ -191,7 +186,7 @@ long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size) /* Find the region where (base, size) belongs to */ for (i=0; i < rgn->cnt; i++) { rgnbegin = rgn->region[i].base; - rgnend = rgnbegin + rgn->region[i].size; + rgnend = rgnbegin + rgn->region[i].size - 1; if ((rgnbegin <= base) && (end <= rgnend)) break; @@ -209,7 +204,7 @@ long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size) /* Check to see if region is matching at the front */ if (rgnbegin == base) { - rgn->region[i].base = end; + rgn->region[i].base = end + 1; rgn->region[i].size -= size; return 0; } @@ -225,7 +220,7 @@ long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size) * beginging of the hole and add the region after hole. */ rgn->region[i].size = base - rgn->region[i].base; - return lmb_add_region(rgn, end, rgnend - end); + return lmb_add_region(rgn, end + 1, rgnend - end); } long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size) diff --git a/test/lib/lmb.c b/test/lib/lmb.c index dd7ba14b34..fb7ca45ef1 100644 --- a/test/lib/lmb.c +++ b/test/lib/lmb.c @@ -146,8 +146,15 @@ static int test_multi_alloc_512mb(struct unit_test_state *uts, /* Create a memory region with one reserved region and allocate */ static int lib_test_lmb_simple(struct unit_test_state *uts) { + int ret; + /* simulate 512 MiB RAM beginning at 1GiB */ - return test_multi_alloc_512mb(uts, 0x40000000); + ret = test_multi_alloc_512mb(uts, 0x40000000); + if (ret) + return ret; + + /* simulate 512 MiB RAM beginning at 1.5GiB */ + return test_multi_alloc_512mb(uts, 0xE0000000); } DM_TEST(lib_test_lmb_simple, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT); @@ -206,7 +213,15 @@ static int test_bigblock(struct unit_test_state *uts, const phys_addr_t ram) static int lib_test_lmb_big(struct unit_test_state *uts) { - return test_bigblock(uts, 0x40000000); + int ret; + + /* simulate 512 MiB RAM beginning at 1GiB */ + ret = test_bigblock(uts, 0x40000000); + if (ret) + return ret; + + /* simulate 512 MiB RAM beginning at 1.5GiB */ + return test_bigblock(uts, 0xE0000000); } DM_TEST(lib_test_lmb_big, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT); @@ -247,7 +262,15 @@ static int test_noreserved(struct unit_test_state *uts, const phys_addr_t ram) static int lib_test_lmb_noreserved(struct unit_test_state *uts) { - return test_noreserved(uts, 0x40000000); + int ret; + + /* simulate 512 MiB RAM beginning at 1GiB */ + ret = test_noreserved(uts, 0x40000000); + if (ret) + return ret; + + /* simulate 512 MiB RAM beginning at 1.5GiB */ + return test_noreserved(uts, 0xE0000000); } DM_TEST(lib_test_lmb_noreserved, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT); From 0f7c51a676ca73f7950a7e4f9d8454e57324270c Mon Sep 17 00:00:00 2001 From: Simon Goldschmidt Date: Mon, 14 Jan 2019 22:38:16 +0100 
Subject: [PATCH 05/12] lib: lmb: reserving overlapping regions should fail lmb_add_region handles overlapping regions wrong: instead of merging or rejecting to add a new reserved region that overlaps an existing one, it just adds the new region. Since internally the same function is used for lmb_alloc, change lmb_add_region to reject overlapping regions. Also, to keep reserved memory correct after 'free', reserved entries created by allocating memory must not set their size to a multiple of alignment but to the original size. This ensures the reserved region is completely removed when the caller calls 'lmb_free', as this one takes the same size as passed to 'lmb_alloc' etc. Add test to assert this. Reviewed-by: Simon Glass Signed-off-by: Simon Goldschmidt --- lib/lmb.c | 11 +++--- test/lib/lmb.c | 95 +++++++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 91 insertions(+), 15 deletions(-) diff --git a/lib/lmb.c b/lib/lmb.c index 6d3dcf4e09..cd297f8202 100644 --- a/lib/lmb.c +++ b/lib/lmb.c @@ -131,6 +131,9 @@ static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t rgn->region[i].size += size; coalesced++; break; + } else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) { + /* regions overlap */ + return -1; } } @@ -269,11 +272,6 @@ static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size) return addr & ~(size - 1); } -static phys_addr_t lmb_align_up(phys_addr_t addr, ulong size) -{ - return (addr + (size - 1)) & ~(size - 1); -} - phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr) { long i, j; @@ -302,8 +300,7 @@ phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phy if (j < 0) { /* This area isn't reserved, take it */ if (lmb_add_region(&lmb->reserved, base, - lmb_align_up(size, - align)) < 0) + size) < 0) return 0; return base; } diff --git a/test/lib/lmb.c b/test/lib/lmb.c index fb7ca45ef1..e6acb70e76 100644 --- a/test/lib/lmb.c +++ b/test/lib/lmb.c @@ -227,13 +227,16 @@ static int lib_test_lmb_big(struct unit_test_state *uts) DM_TEST(lib_test_lmb_big, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT); /* Simulate 512 MiB RAM, allocate a block without previous reservation */ -static int test_noreserved(struct unit_test_state *uts, const phys_addr_t ram) +static int test_noreserved(struct unit_test_state *uts, const phys_addr_t ram, + const phys_addr_t alloc_size, const ulong align) { const phys_size_t ram_size = 0x20000000; const phys_addr_t ram_end = ram + ram_size; struct lmb lmb; long ret; phys_addr_t a, b; + const phys_addr_t alloc_size_aligned = (alloc_size + align - 1) & + ~(align - 1); /* check for overflow */ ut_assert(ram_end == 0 || ram_end > ram); @@ -242,20 +245,43 @@ static int test_noreserved(struct unit_test_state *uts, const phys_addr_t ram) ret = lmb_add(&lmb, ram, ram_size); ut_asserteq(ret, 0); + ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0); /* allocate a block */ - a = lmb_alloc(&lmb, 4, 1); + a = lmb_alloc(&lmb, alloc_size, align); ut_assert(a != 0); - /* and free it */ - ret = lmb_free(&lmb, a, 4); + ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned, + alloc_size, 0, 0, 0, 0); + /* allocate another block */ + b = lmb_alloc(&lmb, alloc_size, align); + ut_assert(b != 0); + if (alloc_size == alloc_size_aligned) { + ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - + (alloc_size_aligned * 2), alloc_size * 2, 0, 0, 0, + 0); + } else { + ASSERT_LMB(&lmb, ram, ram_size, 2, ram + ram_size - + (alloc_size_aligned * 2), 
alloc_size, ram + ram_size + - alloc_size_aligned, alloc_size, 0, 0); + } + /* and free them */ + ret = lmb_free(&lmb, b, alloc_size); ut_asserteq(ret, 0); + ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned, + alloc_size, 0, 0, 0, 0); + ret = lmb_free(&lmb, a, alloc_size); + ut_asserteq(ret, 0); + ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0); /* allocate a block with base*/ - b = lmb_alloc_base(&lmb, 4, 1, ram_end); + b = lmb_alloc_base(&lmb, alloc_size, align, ram_end); ut_assert(a == b); + ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned, + alloc_size, 0, 0, 0, 0); /* and free it */ - ret = lmb_free(&lmb, b, 4); + ret = lmb_free(&lmb, b, alloc_size); ut_asserteq(ret, 0); + ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0); return 0; } @@ -265,16 +291,30 @@ static int lib_test_lmb_noreserved(struct unit_test_state *uts) int ret; /* simulate 512 MiB RAM beginning at 1GiB */ - ret = test_noreserved(uts, 0x40000000); + ret = test_noreserved(uts, 0x40000000, 4, 1); if (ret) return ret; /* simulate 512 MiB RAM beginning at 1.5GiB */ - return test_noreserved(uts, 0xE0000000); + return test_noreserved(uts, 0xE0000000, 4, 1); } DM_TEST(lib_test_lmb_noreserved, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT); +static int lib_test_lmb_unaligned_size(struct unit_test_state *uts) +{ + int ret; + + /* simulate 512 MiB RAM beginning at 1GiB */ + ret = test_noreserved(uts, 0x40000000, 5, 8); + if (ret) + return ret; + + /* simulate 512 MiB RAM beginning at 1.5GiB */ + return test_noreserved(uts, 0xE0000000, 5, 8); +} + +DM_TEST(lib_test_lmb_unaligned_size, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT); /* * Simulate a RAM that starts at 0 and allocate down to address 0, which must * fail as '0' means failure for the lmb_alloc functions. @@ -318,3 +358,42 @@ static int lib_test_lmb_at_0(struct unit_test_state *uts) } DM_TEST(lib_test_lmb_at_0, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT); + +/* Check that calling lmb_reserve with overlapping regions fails. */ +static int lib_test_lmb_overlapping_reserve(struct unit_test_state *uts) +{ + const phys_addr_t ram = 0x40000000; + const phys_size_t ram_size = 0x20000000; + struct lmb lmb; + long ret; + + lmb_init(&lmb); + + ret = lmb_add(&lmb, ram, ram_size); + ut_asserteq(ret, 0); + + ret = lmb_reserve(&lmb, 0x40010000, 0x10000); + ut_asserteq(ret, 0); + ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000, + 0, 0, 0, 0); + /* allocate overlapping region should fail */ + ret = lmb_reserve(&lmb, 0x40011000, 0x10000); + ut_asserteq(ret, -1); + ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000, + 0, 0, 0, 0); + /* allocate 3nd region */ + ret = lmb_reserve(&lmb, 0x40030000, 0x10000); + ut_asserteq(ret, 0); + ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40010000, 0x10000, + 0x40030000, 0x10000, 0, 0); + /* allocate 2nd region */ + ret = lmb_reserve(&lmb, 0x40020000, 0x10000); + ut_assert(ret >= 0); + ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x30000, + 0, 0, 0, 0); + + return 0; +} + +DM_TEST(lib_test_lmb_overlapping_reserve, + DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT); From e2237a2c263783996a85b70ccbf1f967aa2ec182 Mon Sep 17 00:00:00 2001 From: Simon Goldschmidt Date: Mon, 14 Jan 2019 22:38:17 +0100 Subject: [PATCH 06/12] fdt: parse "reserved-memory" for memory reservation boot_fdt_add_mem_rsv_regions() adds reserved memory sections to an lmb struct. Currently, it only parses regions described by /memreserve/ entries. Extend this to the more commonly used scheme of the "reserved-memory" node. 
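For reference, the kind of node the code now picks up looks like the following device tree fragment (node names, addresses and sizes are purely illustrative):

	reserved-memory {
		#address-cells = <1>;
		#size-cells = <1>;
		ranges;

		framebuffer@78000000 {
			reg = <0x78000000 0x800000>;
		};
	};

Every subnode that carries a "reg" property is added to the lmb reserved list, just like a /memreserve/ entry.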
Signed-off-by: Simon Goldschmidt Reviewed-by: Simon Glass --- common/image-fdt.c | 53 +++++++++++++++++++++++++++++++++++++++------- lib/Makefile | 1 + 2 files changed, 46 insertions(+), 8 deletions(-) diff --git a/common/image-fdt.c b/common/image-fdt.c index 8ee5a13352..5988808f18 100644 --- a/common/image-fdt.c +++ b/common/image-fdt.c @@ -10,6 +10,7 @@ #include #include +#include #include #include #include @@ -67,30 +68,66 @@ static const image_header_t *image_get_fdt(ulong fdt_addr) } #endif +static void boot_fdt_reserve_region(struct lmb *lmb, uint64_t addr, + uint64_t size) +{ + int ret; + + ret = lmb_reserve(lmb, addr, size); + if (!ret) { + debug(" reserving fdt memory region: addr=%llx size=%llx\n", + (unsigned long long)addr, (unsigned long long)size); + } else { + puts("ERROR: reserving fdt memory region failed "); + printf("(addr=%llx size=%llx)\n", + (unsigned long long)addr, (unsigned long long)size); + } +} + /** - * boot_fdt_add_mem_rsv_regions - Mark the memreserve sections as unusable + * boot_fdt_add_mem_rsv_regions - Mark the memreserve and reserved-memory + * sections as unusable * @lmb: pointer to lmb handle, will be used for memory mgmt * @fdt_blob: pointer to fdt blob base address * - * Adds the memreserve regions in the dtb to the lmb block. Adding the - * memreserve regions prevents u-boot from using them to store the initrd - * or the fdt blob. + * Adds the memreserve and reserved-memory regions in the dtb to the lmb block. + * Adding these regions prevents u-boot from using them to store the + * initrd or the fdt blob. */ void boot_fdt_add_mem_rsv_regions(struct lmb *lmb, void *fdt_blob) { uint64_t addr, size; - int i, total; + int i, total, ret; + int nodeoffset, subnode; + struct fdt_resource res; if (fdt_check_header(fdt_blob) != 0) return; + /* process memreserve sections */ total = fdt_num_mem_rsv(fdt_blob); for (i = 0; i < total; i++) { if (fdt_get_mem_rsv(fdt_blob, i, &addr, &size) != 0) continue; - printf(" reserving fdt memory region: addr=%llx size=%llx\n", - (unsigned long long)addr, (unsigned long long)size); - lmb_reserve(lmb, addr, size); + boot_fdt_reserve_region(lmb, addr, size); + } + + /* process reserved-memory */ + nodeoffset = fdt_subnode_offset(fdt_blob, 0, "reserved-memory"); + if (nodeoffset >= 0) { + subnode = fdt_first_subnode(fdt_blob, nodeoffset); + while (subnode >= 0) { + /* check if this subnode has a reg property */ + ret = fdt_get_resource(fdt_blob, subnode, "reg", 0, + &res); + if (!ret) { + addr = res.start; + size = res.end - res.start + 1; + boot_fdt_reserve_region(lmb, addr, size); + } + + subnode = fdt_next_subnode(fdt_blob, subnode); + } } } diff --git a/lib/Makefile b/lib/Makefile index f06d6316d4..f5de4a851b 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -30,6 +30,7 @@ obj-y += crc7.o obj-y += crc8.o obj-y += crc16.o obj-$(CONFIG_ERRNO_STR) += errno_str.o +obj-$(CONFIG_OF_LIBFDT) += fdtdec.o obj-$(CONFIG_FIT) += fdtdec_common.o obj-$(CONFIG_TEST_FDTDEC) += fdtdec_test.o obj-$(CONFIG_GZIP_COMPRESSED) += gzip.o From 4cc8af8037ebabd674d0a6bed202b0c711dc7699 Mon Sep 17 00:00:00 2001 From: Simon Goldschmidt Date: Mon, 14 Jan 2019 22:38:18 +0100 Subject: [PATCH 07/12] lib: lmb: extend lmb for checks at load time This adds two new functions, lmb_alloc_addr and lmb_get_unreserved_size. lmb_alloc_addr behaves like lmb_alloc, but it tries to allocate a pre-specified address range. Unlike lmb_reserve, this address range must be inside one of the memory ranges that has been set up with lmb_add.
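For example (a sketch with made-up addresses, assuming RAM was added at 0x40000000):

	addr = lmb_alloc_addr(&lmb, 0x41000000, 0x2000);
	/* addr == 0x41000000 if the whole range lies in RAM and is unreserved,
	 * addr == 0 if any part of it is outside RAM or already reserved */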
lmb_get_unreserved_size returns the number of bytes that can be used up to the next reserved region or the end of valid ram. This can be 0 if the address passed is reserved. Added test for these new functions. Reviewed-by: Simon Glass Signed-off-by: Simon Goldschmidt --- include/lmb.h | 3 + lib/lmb.c | 53 +++++++++++++ test/lib/lmb.c | 202 +++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 258 insertions(+) diff --git a/include/lmb.h b/include/lmb.h index f04d058093..7d7e2a78dc 100644 --- a/include/lmb.h +++ b/include/lmb.h @@ -38,6 +38,9 @@ extern phys_addr_t lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align phys_addr_t max_addr); extern phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr); +extern phys_addr_t lmb_alloc_addr(struct lmb *lmb, phys_addr_t base, + phys_size_t size); +extern phys_size_t lmb_get_unreserved_size(struct lmb *lmb, phys_addr_t addr); extern int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr); extern long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size); diff --git a/lib/lmb.c b/lib/lmb.c index cd297f8202..e380a0a722 100644 --- a/lib/lmb.c +++ b/lib/lmb.c @@ -313,6 +313,59 @@ phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phy return 0; } +/* + * Try to allocate a specific address range: must be in defined memory but not + * reserved + */ +phys_addr_t lmb_alloc_addr(struct lmb *lmb, phys_addr_t base, phys_size_t size) +{ + long j; + + /* Check if the requested address is in one of the memory regions */ + j = lmb_overlaps_region(&lmb->memory, base, size); + if (j >= 0) { + /* + * Check if the requested end address is in the same memory + * region we found. + */ + if (lmb_addrs_overlap(lmb->memory.region[j].base, + lmb->memory.region[j].size, base + size - + 1, 1)) { + /* ok, reserve the memory */ + if (lmb_reserve(lmb, base, size) >= 0) + return base; + } + } + return 0; +} + +/* Return number of bytes from a given address that are free */ +phys_size_t lmb_get_unreserved_size(struct lmb *lmb, phys_addr_t addr) +{ + int i; + long j; + + /* check if the requested address is in the memory regions */ + j = lmb_overlaps_region(&lmb->memory, addr, 1); + if (j >= 0) { + for (i = 0; i < lmb->reserved.cnt; i++) { + if (addr < lmb->reserved.region[i].base) { + /* first reserved range > requested address */ + return lmb->reserved.region[i].base - addr; + } + if (lmb->reserved.region[i].base + + lmb->reserved.region[i].size > addr) { + /* requested addr is in this reserved range */ + return 0; + } + } + /* if we come here: no reserved ranges above requested addr */ + return lmb->memory.region[lmb->memory.cnt - 1].base + + lmb->memory.region[lmb->memory.cnt - 1].size - addr; + } + return 0; +} + int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr) { int i; diff --git a/test/lib/lmb.c b/test/lib/lmb.c index e6acb70e76..058d3c332b 100644 --- a/test/lib/lmb.c +++ b/test/lib/lmb.c @@ -397,3 +397,205 @@ static int lib_test_lmb_overlapping_reserve(struct unit_test_state *uts) DM_TEST(lib_test_lmb_overlapping_reserve, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT); + +/* + * Simulate 512 MiB RAM, reserve 3 blocks, allocate addresses in between. + * Expect addresses outside the memory range to fail. 
+ */ +static int test_alloc_addr(struct unit_test_state *uts, const phys_addr_t ram) +{ + const phys_size_t ram_size = 0x20000000; + const phys_addr_t ram_end = ram + ram_size; + const phys_size_t alloc_addr_a = ram + 0x8000000; + const phys_size_t alloc_addr_b = ram + 0x8000000 * 2; + const phys_size_t alloc_addr_c = ram + 0x8000000 * 3; + struct lmb lmb; + long ret; + phys_addr_t a, b, c, d, e; + + /* check for overflow */ + ut_assert(ram_end == 0 || ram_end > ram); + + lmb_init(&lmb); + + ret = lmb_add(&lmb, ram, ram_size); + ut_asserteq(ret, 0); + + /* reserve 3 blocks */ + ret = lmb_reserve(&lmb, alloc_addr_a, 0x10000); + ut_asserteq(ret, 0); + ret = lmb_reserve(&lmb, alloc_addr_b, 0x10000); + ut_asserteq(ret, 0); + ret = lmb_reserve(&lmb, alloc_addr_c, 0x10000); + ut_asserteq(ret, 0); + ASSERT_LMB(&lmb, ram, ram_size, 3, alloc_addr_a, 0x10000, + alloc_addr_b, 0x10000, alloc_addr_c, 0x10000); + + /* allocate blocks */ + a = lmb_alloc_addr(&lmb, ram, alloc_addr_a - ram); + ut_asserteq(a, ram); + ASSERT_LMB(&lmb, ram, ram_size, 3, ram, 0x8010000, + alloc_addr_b, 0x10000, alloc_addr_c, 0x10000); + b = lmb_alloc_addr(&lmb, alloc_addr_a + 0x10000, + alloc_addr_b - alloc_addr_a - 0x10000); + ut_asserteq(b, alloc_addr_a + 0x10000); + ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x10010000, + alloc_addr_c, 0x10000, 0, 0); + c = lmb_alloc_addr(&lmb, alloc_addr_b + 0x10000, + alloc_addr_c - alloc_addr_b - 0x10000); + ut_asserteq(c, alloc_addr_b + 0x10000); + ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000, + 0, 0, 0, 0); + d = lmb_alloc_addr(&lmb, alloc_addr_c + 0x10000, + ram_end - alloc_addr_c - 0x10000); + ut_asserteq(d, alloc_addr_c + 0x10000); + ASSERT_LMB(&lmb, ram, ram_size, 1, ram, ram_size, + 0, 0, 0, 0); + + /* allocating anything else should fail */ + e = lmb_alloc(&lmb, 1, 1); + ut_asserteq(e, 0); + ASSERT_LMB(&lmb, ram, ram_size, 1, ram, ram_size, + 0, 0, 0, 0); + + ret = lmb_free(&lmb, d, ram_end - alloc_addr_c - 0x10000); + ut_asserteq(ret, 0); + + /* allocate at 3 points in free range */ + + d = lmb_alloc_addr(&lmb, ram_end - 4, 4); + ut_asserteq(d, ram_end - 4); + ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x18010000, + d, 4, 0, 0); + ret = lmb_free(&lmb, d, 4); + ut_asserteq(ret, 0); + ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000, + 0, 0, 0, 0); + + d = lmb_alloc_addr(&lmb, ram_end - 128, 4); + ut_asserteq(d, ram_end - 128); + ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x18010000, + d, 4, 0, 0); + ret = lmb_free(&lmb, d, 4); + ut_asserteq(ret, 0); + ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000, + 0, 0, 0, 0); + + d = lmb_alloc_addr(&lmb, alloc_addr_c + 0x10000, 4); + ut_asserteq(d, alloc_addr_c + 0x10000); + ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010004, + 0, 0, 0, 0); + ret = lmb_free(&lmb, d, 4); + ut_asserteq(ret, 0); + ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000, + 0, 0, 0, 0); + + /* allocate at the bottom */ + ret = lmb_free(&lmb, a, alloc_addr_a - ram); + ut_asserteq(ret, 0); + ASSERT_LMB(&lmb, ram, ram_size, 1, ram + 0x8000000, 0x10010000, + 0, 0, 0, 0); + d = lmb_alloc_addr(&lmb, ram, 4); + ut_asserteq(d, ram); + ASSERT_LMB(&lmb, ram, ram_size, 2, d, 4, + ram + 0x8000000, 0x10010000, 0, 0); + + /* check that allocating outside memory fails */ + if (ram_end != 0) { + ret = lmb_alloc_addr(&lmb, ram_end, 1); + ut_asserteq(ret, 0); + } + if (ram != 0) { + ret = lmb_alloc_addr(&lmb, ram - 1, 1); + ut_asserteq(ret, 0); + } + + return 0; +} + +static int lib_test_lmb_alloc_addr(struct unit_test_state *uts) +{ + int ret; + + /* simulate 512 MiB RAM beginning 
at 1GiB */ + ret = test_alloc_addr(uts, 0x40000000); + if (ret) + return ret; + + /* simulate 512 MiB RAM beginning at 1.5GiB */ + return test_alloc_addr(uts, 0xE0000000); +} + +DM_TEST(lib_test_lmb_alloc_addr, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT); + +/* Simulate 512 MiB RAM, reserve 3 blocks, check addresses in between */ +static int test_get_unreserved_size(struct unit_test_state *uts, + const phys_addr_t ram) +{ + const phys_size_t ram_size = 0x20000000; + const phys_addr_t ram_end = ram + ram_size; + const phys_size_t alloc_addr_a = ram + 0x8000000; + const phys_size_t alloc_addr_b = ram + 0x8000000 * 2; + const phys_size_t alloc_addr_c = ram + 0x8000000 * 3; + struct lmb lmb; + long ret; + phys_size_t s; + + /* check for overflow */ + ut_assert(ram_end == 0 || ram_end > ram); + + lmb_init(&lmb); + + ret = lmb_add(&lmb, ram, ram_size); + ut_asserteq(ret, 0); + + /* reserve 3 blocks */ + ret = lmb_reserve(&lmb, alloc_addr_a, 0x10000); + ut_asserteq(ret, 0); + ret = lmb_reserve(&lmb, alloc_addr_b, 0x10000); + ut_asserteq(ret, 0); + ret = lmb_reserve(&lmb, alloc_addr_c, 0x10000); + ut_asserteq(ret, 0); + ASSERT_LMB(&lmb, ram, ram_size, 3, alloc_addr_a, 0x10000, + alloc_addr_b, 0x10000, alloc_addr_c, 0x10000); + + /* check addresses in between blocks */ + s = lmb_get_unreserved_size(&lmb, ram); + ut_asserteq(s, alloc_addr_a - ram); + s = lmb_get_unreserved_size(&lmb, ram + 0x10000); + ut_asserteq(s, alloc_addr_a - ram - 0x10000); + s = lmb_get_unreserved_size(&lmb, alloc_addr_a - 4); + ut_asserteq(s, 4); + + s = lmb_get_unreserved_size(&lmb, alloc_addr_a + 0x10000); + ut_asserteq(s, alloc_addr_b - alloc_addr_a - 0x10000); + s = lmb_get_unreserved_size(&lmb, alloc_addr_a + 0x20000); + ut_asserteq(s, alloc_addr_b - alloc_addr_a - 0x20000); + s = lmb_get_unreserved_size(&lmb, alloc_addr_b - 4); + ut_asserteq(s, 4); + + s = lmb_get_unreserved_size(&lmb, alloc_addr_c + 0x10000); + ut_asserteq(s, ram_end - alloc_addr_c - 0x10000); + s = lmb_get_unreserved_size(&lmb, alloc_addr_c + 0x20000); + ut_asserteq(s, ram_end - alloc_addr_c - 0x20000); + s = lmb_get_unreserved_size(&lmb, ram_end - 4); + ut_asserteq(s, 4); + + return 0; +} + +static int lib_test_lmb_get_unreserved_size(struct unit_test_state *uts) +{ + int ret; + + /* simulate 512 MiB RAM beginning at 1GiB */ + ret = test_get_unreserved_size(uts, 0x40000000); + if (ret) + return ret; + + /* simulate 512 MiB RAM beginning at 1.5GiB */ + return test_get_unreserved_size(uts, 0xE0000000); +} + +DM_TEST(lib_test_lmb_get_unreserved_size, + DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT); From aa3c609e2be5a837e7b81e308d47f55b67666bd6 Mon Sep 17 00:00:00 2001 From: Simon Goldschmidt Date: Mon, 14 Jan 2019 22:38:19 +0100 Subject: [PATCH 08/12] fs: prevent overwriting reserved memory This fixes CVE-2018-18440 ("insufficient boundary checks in filesystem image load") by using lmb to check the load size of a file against reserved memory addresses. 
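Condensed, the added check boils down to this (a sketch of the logic in fs_read_lmb_check(), not a drop-in snippet):

	lmb_init_and_reserve(&lmb, gd->bd->bi_dram[0].start,
			     gd->bd->bi_dram[0].size, (void *)gd->fdt_blob);
	if (lmb_alloc_addr(&lmb, addr, read_len) != addr)
		return -ENOSPC;	/* the load would overwrite reserved memory */

The check is only wired into the 'load' command path (do_load() now calls _fs_read() with the check enabled); direct fs_read() callers keep the old behaviour.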
Signed-off-by: Simon Goldschmidt Reviewed-by: Simon Glass --- fs/fs.c | 56 ++++++++++++++++++++++++++++++++++++++++++++++++--- include/lmb.h | 2 ++ lib/lmb.c | 13 ++++++++++++ 3 files changed, 68 insertions(+), 3 deletions(-) diff --git a/fs/fs.c b/fs/fs.c index cb265174e2..7fd22101ef 100644 --- a/fs/fs.c +++ b/fs/fs.c @@ -429,13 +429,57 @@ int fs_size(const char *filename, loff_t *size) return ret; } -int fs_read(const char *filename, ulong addr, loff_t offset, loff_t len, - loff_t *actread) +#ifdef CONFIG_LMB +/* Check if a file may be read to the given address */ +static int fs_read_lmb_check(const char *filename, ulong addr, loff_t offset, + loff_t len, struct fstype_info *info) +{ + struct lmb lmb; + int ret; + loff_t size; + loff_t read_len; + + /* get the actual size of the file */ + ret = info->size(filename, &size); + if (ret) + return ret; + if (offset >= size) { + /* offset >= EOF, no bytes will be written */ + return 0; + } + read_len = size - offset; + + /* limit to 'len' if it is smaller */ + if (len && len < read_len) + read_len = len; + + lmb_init_and_reserve(&lmb, gd->bd->bi_dram[0].start, + gd->bd->bi_dram[0].size, (void *)gd->fdt_blob); + lmb_dump_all(&lmb); + + if (lmb_alloc_addr(&lmb, addr, read_len) == addr) + return 0; + + printf("** Reading file would overwrite reserved memory **\n"); + return -ENOSPC; +} +#endif + +static int _fs_read(const char *filename, ulong addr, loff_t offset, loff_t len, + int do_lmb_check, loff_t *actread) { struct fstype_info *info = fs_get_info(fs_type); void *buf; int ret; +#ifdef CONFIG_LMB + if (do_lmb_check) { + ret = fs_read_lmb_check(filename, addr, offset, len, info); + if (ret) + return ret; + } +#endif + /* * We don't actually know how many bytes are being read, since len==0 * means read the whole file. 
@@ -452,6 +496,12 @@ int fs_read(const char *filename, ulong addr, loff_t offset, loff_t len, return ret; } +int fs_read(const char *filename, ulong addr, loff_t offset, loff_t len, + loff_t *actread) +{ + return _fs_read(filename, addr, offset, len, 0, actread); +} + int fs_write(const char *filename, ulong addr, loff_t offset, loff_t len, loff_t *actwrite) { @@ -622,7 +672,7 @@ int do_load(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[], pos = 0; time = get_timer(0); - ret = fs_read(filename, addr, pos, bytes, &len_read); + ret = _fs_read(filename, addr, pos, bytes, 1, &len_read); time = get_timer(time); if (ret < 0) return 1; diff --git a/include/lmb.h b/include/lmb.h index 7d7e2a78dc..62da85e716 100644 --- a/include/lmb.h +++ b/include/lmb.h @@ -31,6 +31,8 @@ struct lmb { extern struct lmb lmb; extern void lmb_init(struct lmb *lmb); +extern void lmb_init_and_reserve(struct lmb *lmb, phys_addr_t base, + phys_size_t size, void *fdt_blob); extern long lmb_add(struct lmb *lmb, phys_addr_t base, phys_size_t size); extern long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size); extern phys_addr_t lmb_alloc(struct lmb *lmb, phys_size_t size, ulong align); diff --git a/lib/lmb.c b/lib/lmb.c index e380a0a722..3407705fa7 100644 --- a/lib/lmb.c +++ b/lib/lmb.c @@ -98,6 +98,19 @@ void lmb_init(struct lmb *lmb) lmb->reserved.size = 0; } +/* Initialize the struct, add memory and call arch/board reserve functions */ +void lmb_init_and_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size, + void *fdt_blob) +{ + lmb_init(lmb); + lmb_add(lmb, base, size); + arch_lmb_reserve(lmb); + board_lmb_reserve(lmb); + + if (IMAGE_ENABLE_OF_LIBFDT && fdt_blob) + boot_fdt_add_mem_rsv_regions(lmb, fdt_blob); +} + /* This routine called with relocation disabled. */ static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t size) { From 5b978dab7b8b534e9bf50b97a875dbc9c15cfb54 Mon Sep 17 00:00:00 2001 From: Simon Goldschmidt Date: Mon, 14 Jan 2019 22:38:20 +0100 Subject: [PATCH 09/12] bootm: use new common function lmb_init_and_reserve This reduces duplicate code only. Signed-off-by: Simon Goldschmidt Reviewed-by: Simon Glass --- common/bootm.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/common/bootm.c b/common/bootm.c index 80f304ce9f..a4618b6d2e 100644 --- a/common/bootm.c +++ b/common/bootm.c @@ -56,15 +56,11 @@ static void boot_start_lmb(bootm_headers_t *images) ulong mem_start; phys_size_t mem_size; - lmb_init(&images->lmb); - mem_start = env_get_bootm_low(); mem_size = env_get_bootm_size(); - lmb_add(&images->lmb, (phys_addr_t)mem_start, mem_size); - - arch_lmb_reserve(&images->lmb); - board_lmb_reserve(&images->lmb); + lmb_init_and_reserve(&images->lmb, (phys_addr_t)mem_start, mem_size, + NULL); } #else #define lmb_reserve(lmb, base, size) From a85c213f4725fc097e28795d87136e05508ba70b Mon Sep 17 00:00:00 2001 From: Simon Goldschmidt Date: Mon, 14 Jan 2019 22:38:21 +0100 Subject: [PATCH 10/12] lmb: remove unused extern declaration lmb.h includes an extern declaration of "struct lmb lmb;" which is not used anywhere, so remove it. 
Reviewed-by: Simon Glass Signed-off-by: Simon Goldschmidt --- include/lmb.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/include/lmb.h b/include/lmb.h index 62da85e716..1bb003e35e 100644 --- a/include/lmb.h +++ b/include/lmb.h @@ -28,8 +28,6 @@ struct lmb { struct lmb_region reserved; }; -extern struct lmb lmb; - extern void lmb_init(struct lmb *lmb); extern void lmb_init_and_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size, void *fdt_blob); From a156c47e39ad7d007c88919103ee0ee131c6203b Mon Sep 17 00:00:00 2001 From: Simon Goldschmidt Date: Mon, 14 Jan 2019 22:38:22 +0100 Subject: [PATCH 11/12] tftp: prevent overwriting reserved memory This fixes CVE-2018-18439 ("insufficient boundary checks in network image boot") by using lmb to check for a valid range to store received blocks. Signed-off-by: Simon Goldschmidt Acked-by: Joe Hershberger [trini: Always build lib/lmb.o on LMB and lib/fdtdec.o on OF_LIBFDT] Signed-off-by: Tom Rini --- lib/Makefile | 4 +-- net/tftp.c | 71 +++++++++++++++++++++++++++++++++++++++++++++------- 2 files changed, 64 insertions(+), 11 deletions(-) diff --git a/lib/Makefile b/lib/Makefile index f5de4a851b..61d7ff0678 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -30,14 +30,12 @@ obj-y += crc7.o obj-y += crc8.o obj-y += crc16.o obj-$(CONFIG_ERRNO_STR) += errno_str.o -obj-$(CONFIG_OF_LIBFDT) += fdtdec.o obj-$(CONFIG_FIT) += fdtdec_common.o obj-$(CONFIG_TEST_FDTDEC) += fdtdec_test.o obj-$(CONFIG_GZIP_COMPRESSED) += gzip.o obj-$(CONFIG_GENERATE_SMBIOS_TABLE) += smbios.o obj-$(CONFIG_IMAGE_SPARSE) += image-sparse.o obj-y += initcall.o -obj-$(CONFIG_LMB) += lmb.o obj-y += ldiv.o obj-$(CONFIG_MD5) += md5.o obj-y += net_utils.o @@ -89,9 +87,11 @@ obj-y += crc32.o obj-$(CONFIG_CRC32C) += crc32c.o obj-y += ctype.o obj-y += div64.o +obj-$(CONFIG_OF_LIBFDT) += fdtdec.o obj-y += hang.o obj-y += linux_compat.o obj-y += linux_string.o +obj-$(CONFIG_LMB) += lmb.o obj-y += membuff.o obj-$(CONFIG_REGEX) += slre.o obj-y += string.o diff --git a/net/tftp.c b/net/tftp.c index 68ffd81414..a9335b1b7e 100644 --- a/net/tftp.c +++ b/net/tftp.c @@ -17,6 +17,8 @@ #include #endif +DECLARE_GLOBAL_DATA_PTR; + /* Well known TFTP port # */ #define WELL_KNOWN_PORT 69 /* Millisecs to timeout for lost pkt */ @@ -81,6 +83,10 @@ static ulong tftp_block_wrap; /* memory offset due to wrapping */ static ulong tftp_block_wrap_offset; static int tftp_state; +static ulong tftp_load_addr; +#ifdef CONFIG_LMB +static ulong tftp_load_size; +#endif #ifdef CONFIG_TFTP_TSIZE /* The file size reported by the server */ static int tftp_tsize; @@ -164,10 +170,11 @@ static void mcast_cleanup(void) #endif /* CONFIG_MCAST_TFTP */ -static inline void store_block(int block, uchar *src, unsigned len) +static inline int store_block(int block, uchar *src, unsigned int len) { ulong offset = block * tftp_block_size + tftp_block_wrap_offset; ulong newsize = offset + len; + ulong store_addr = tftp_load_addr + offset; #ifdef CONFIG_SYS_DIRECT_FLASH_TFTP int i, rc = 0; @@ -175,24 +182,32 @@ static inline void store_block(int block, uchar *src, unsigned len) /* start address in flash? 
*/ if (flash_info[i].flash_id == FLASH_UNKNOWN) continue; - if (load_addr + offset >= flash_info[i].start[0]) { + if (store_addr >= flash_info[i].start[0]) { rc = 1; break; } } if (rc) { /* Flash is destination for this packet */ - rc = flash_write((char *)src, (ulong)(load_addr+offset), len); + rc = flash_write((char *)src, store_addr, len); if (rc) { flash_perror(rc); - net_set_state(NETLOOP_FAIL); - return; + return rc; } } else #endif /* CONFIG_SYS_DIRECT_FLASH_TFTP */ { - void *ptr = map_sysmem(load_addr + offset, len); + void *ptr; +#ifdef CONFIG_LMB + if (store_addr < tftp_load_addr || + store_addr + len > tftp_load_addr + tftp_load_size) { + puts("\nTFTP error: "); + puts("trying to overwrite reserved memory...\n"); + return -1; + } +#endif + ptr = map_sysmem(store_addr, len); memcpy(ptr, src, len); unmap_sysmem(ptr); } @@ -203,6 +218,8 @@ static inline void store_block(int block, uchar *src, unsigned len) if (net_boot_file_size < newsize) net_boot_file_size = newsize; + + return 0; } /* Clear our state ready for a new transfer */ @@ -606,7 +623,11 @@ static void tftp_handler(uchar *pkt, unsigned dest, struct in_addr sip, timeout_count_max = tftp_timeout_count_max; net_set_timeout_handler(timeout_ms, tftp_timeout_handler); - store_block(tftp_cur_block - 1, pkt + 2, len); + if (store_block(tftp_cur_block - 1, pkt + 2, len)) { + eth_halt(); + net_set_state(NETLOOP_FAIL); + break; + } /* * Acknowledge the block just received, which will prompt @@ -695,6 +716,25 @@ static void tftp_timeout_handler(void) } } +/* Initialize tftp_load_addr and tftp_load_size from load_addr and lmb */ +static int tftp_init_load_addr(void) +{ +#ifdef CONFIG_LMB + struct lmb lmb; + phys_size_t max_size; + + lmb_init_and_reserve(&lmb, gd->bd->bi_dram[0].start, + gd->bd->bi_dram[0].size, (void *)gd->fdt_blob); + + max_size = lmb_get_unreserved_size(&lmb, load_addr); + if (!max_size) + return -1; + + tftp_load_size = max_size; +#endif + tftp_load_addr = load_addr; + return 0; +} void tftp_start(enum proto_t protocol) { @@ -791,7 +831,14 @@ void tftp_start(enum proto_t protocol) } else #endif { - printf("Load address: 0x%lx\n", load_addr); + if (tftp_init_load_addr()) { + eth_halt(); + net_set_state(NETLOOP_FAIL); + puts("\nTFTP error: "); + puts("trying to overwrite reserved memory...\n"); + return; + } + printf("Load address: 0x%lx\n", tftp_load_addr); puts("Loading: *\b"); tftp_state = STATE_SEND_RRQ; #ifdef CONFIG_CMD_BOOTEFI @@ -842,9 +889,15 @@ void tftp_start_server(void) { tftp_filename[0] = 0; + if (tftp_init_load_addr()) { + eth_halt(); + net_set_state(NETLOOP_FAIL); + puts("\nTFTP error: trying to overwrite reserved memory...\n"); + return; + } printf("Using %s device\n", eth_get_name()); printf("Listening for TFTP transfer on %pI4\n", &net_ip); - printf("Load address: 0x%lx\n", load_addr); + printf("Load address: 0x%lx\n", tftp_load_addr); puts("Loading: *\b"); From f8878da5571e5a1170f9c49214be5bafbc1b23a4 Mon Sep 17 00:00:00 2001 From: Simon Goldschmidt Date: Mon, 14 Jan 2019 22:38:23 +0100 Subject: [PATCH 12/12] arm: bootm: fix sp detection at end of address range This fixes 'arch_lmb_reserve()' for ARM that tries to detect in which DRAM bank 'sp' is in. This code failed if a bank was at the end of physical address range (i.e. size + length overflowed to 0). To fix this, calculate 'bank_end' as 'size + length - 1' so that such banks end at 0xffffffff, not 0. 
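Concretely, assuming 32-bit physical addresses: for a bank with start 0xE0000000 and size 0x20000000, the old code computed bank_end = start + size, which wraps to 0, so the 'sp >= bank_end' test was always true and the bank was skipped. With bank_end = start + size - 1 = 0xFFFFFFFF the comparison works, and the reserved length becomes bank_end - sp + 1.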
Fixes: 15751403b6 ("ARM: bootm: don't assume sp is in DRAM bank 0") Reported-by: Frank Wunderlich Signed-off-by: Simon Goldschmidt Reviewed-by: Stephen Warren --- arch/arm/lib/bootm.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/arch/arm/lib/bootm.c b/arch/arm/lib/bootm.c index c3c1d2fdfa..329f20c2bf 100644 --- a/arch/arm/lib/bootm.c +++ b/arch/arm/lib/bootm.c @@ -64,13 +64,15 @@ void arch_lmb_reserve(struct lmb *lmb) /* adjust sp by 4K to be safe */ sp -= 4096; for (bank = 0; bank < CONFIG_NR_DRAM_BANKS; bank++) { - if (sp < gd->bd->bi_dram[bank].start) + if (!gd->bd->bi_dram[bank].size || + sp < gd->bd->bi_dram[bank].start) continue; + /* Watch out for RAM at end of address space! */ bank_end = gd->bd->bi_dram[bank].start + - gd->bd->bi_dram[bank].size; - if (sp >= bank_end) + gd->bd->bi_dram[bank].size - 1; + if (sp > bank_end) continue; - lmb_reserve(lmb, sp, bank_end - sp); + lmb_reserve(lmb, sp, bank_end - sp + 1); break; } }