Merge branch '2019-01-16-master-imports'

- Fixes for CVE-2018-18440 and CVE-2018-18439
- Patch to allow disabling unneeded NAND ECC layouts
- Optimize SPI flash env read process
Tom Rini 2019-01-17 17:42:03 -05:00
commit e964df1e2a
13 changed files with 915 additions and 68 deletions
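
Several of the diffs below repeat the same caller-side pattern, so here is a minimal sketch of how the new lmb_init_and_reserve() helper (added in lib/lmb.c below) replaces the previously open-coded setup sequence; the wrapper function and its placeholder arguments are illustrative only, not part of the series:

/* Illustrative sketch: mem_start, mem_size and fdt_blob are placeholders. */
static void example_lmb_setup(struct lmb *lmb, phys_addr_t mem_start,
			      phys_size_t mem_size, void *fdt_blob)
{
	/*
	 * Before this series, every caller open-coded:
	 *   lmb_init(lmb);
	 *   lmb_add(lmb, mem_start, mem_size);
	 *   arch_lmb_reserve(lmb);
	 *   board_lmb_reserve(lmb);
	 */

	/*
	 * Now a single helper does that, and when an fdt blob is passed it
	 * also reserves the /memreserve/ and reserved-memory regions.
	 */
	lmb_init_and_reserve(lmb, mem_start, mem_size, fdt_blob);
}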


@ -64,13 +64,15 @@ void arch_lmb_reserve(struct lmb *lmb)
/* adjust sp by 4K to be safe */
sp -= 4096;
for (bank = 0; bank < CONFIG_NR_DRAM_BANKS; bank++) {
if (sp < gd->bd->bi_dram[bank].start)
if (!gd->bd->bi_dram[bank].size ||
sp < gd->bd->bi_dram[bank].start)
continue;
/* Watch out for RAM at end of address space! */
bank_end = gd->bd->bi_dram[bank].start +
gd->bd->bi_dram[bank].size;
if (sp >= bank_end)
gd->bd->bi_dram[bank].size - 1;
if (sp > bank_end)
continue;
lmb_reserve(lmb, sp, bank_end - sp);
lmb_reserve(lmb, sp, bank_end - sp + 1);
break;
}
}
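
A worked example of the end-of-address-space case this hunk guards against, using a 32-bit address type and made-up bank/stack-pointer values (plain stdint types here, not the U-Boot ones):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical bank that ends exactly at the 4 GiB boundary. */
	uint32_t start = 0xf0000000u, size = 0x10000000u, sp = 0xffff0000u;

	uint32_t old_end = start + size;	/* wraps around to 0x00000000 */
	uint32_t bank_end = start + size - 1;	/* 0xffffffff, no wrap */
	uint32_t reserve = bank_end - sp + 1;	/* 0x10000 bytes */

	/*
	 * With the old exclusive end, "sp >= bank_end" compares against 0 and
	 * the bank is wrongly skipped; the inclusive end keeps the reservation.
	 */
	printf("old_end=0x%08x bank_end=0x%08x reserve=0x%x\n",
	       old_end, bank_end, reserve);
	return 0;
}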


@ -56,15 +56,11 @@ static void boot_start_lmb(bootm_headers_t *images)
ulong mem_start;
phys_size_t mem_size;
lmb_init(&images->lmb);
mem_start = env_get_bootm_low();
mem_size = env_get_bootm_size();
lmb_add(&images->lmb, (phys_addr_t)mem_start, mem_size);
arch_lmb_reserve(&images->lmb);
board_lmb_reserve(&images->lmb);
lmb_init_and_reserve(&images->lmb, (phys_addr_t)mem_start, mem_size,
NULL);
}
#else
#define lmb_reserve(lmb, base, size)


@ -10,6 +10,7 @@
#include <common.h>
#include <fdt_support.h>
#include <fdtdec.h>
#include <errno.h>
#include <image.h>
#include <linux/libfdt.h>
@ -67,30 +68,66 @@ static const image_header_t *image_get_fdt(ulong fdt_addr)
}
#endif
static void boot_fdt_reserve_region(struct lmb *lmb, uint64_t addr,
uint64_t size)
{
int ret;
ret = lmb_reserve(lmb, addr, size);
if (!ret) {
debug(" reserving fdt memory region: addr=%llx size=%llx\n",
(unsigned long long)addr, (unsigned long long)size);
} else {
puts("ERROR: reserving fdt memory region failed ");
printf("(addr=%llx size=%llx)\n",
(unsigned long long)addr, (unsigned long long)size);
}
}
/**
* boot_fdt_add_mem_rsv_regions - Mark the memreserve sections as unusable
* boot_fdt_add_mem_rsv_regions - Mark the memreserve and reserved-memory
* sections as unusable
* @lmb: pointer to lmb handle, will be used for memory mgmt
* @fdt_blob: pointer to fdt blob base address
*
* Adds the memreserve regions in the dtb to the lmb block. Adding the
* memreserve regions prevents u-boot from using them to store the initrd
* or the fdt blob.
* Adds the memreserve and reserved-memory regions in the dtb to the lmb block.
* Adding the memreserve regions prevents u-boot from using them to store the
* initrd or the fdt blob.
*/
void boot_fdt_add_mem_rsv_regions(struct lmb *lmb, void *fdt_blob)
{
uint64_t addr, size;
int i, total;
int i, total, ret;
int nodeoffset, subnode;
struct fdt_resource res;
if (fdt_check_header(fdt_blob) != 0)
return;
/* process memreserve sections */
total = fdt_num_mem_rsv(fdt_blob);
for (i = 0; i < total; i++) {
if (fdt_get_mem_rsv(fdt_blob, i, &addr, &size) != 0)
continue;
printf(" reserving fdt memory region: addr=%llx size=%llx\n",
(unsigned long long)addr, (unsigned long long)size);
lmb_reserve(lmb, addr, size);
boot_fdt_reserve_region(lmb, addr, size);
}
/* process reserved-memory */
nodeoffset = fdt_subnode_offset(fdt_blob, 0, "reserved-memory");
if (nodeoffset >= 0) {
subnode = fdt_first_subnode(fdt_blob, nodeoffset);
while (subnode >= 0) {
/* check if this subnode has a reg property */
ret = fdt_get_resource(fdt_blob, subnode, "reg", 0,
&res);
if (!ret) {
addr = res.start;
size = res.end - res.start + 1;
boot_fdt_reserve_region(lmb, addr, size);
}
subnode = fdt_next_subnode(fdt_blob, subnode);
}
}
}


@ -9,6 +9,12 @@ config SYS_NAND_SELF_INIT
This option, if enabled, provides a more flexible and Linux-like
NAND initialization process.
config SYS_NAND_DRIVER_ECC_LAYOUT
bool
help
Omit standard ECC layouts to save space. Select this if your driver
is known to provide its own ECC layout.
config NAND_ATMEL
bool "Support Atmel NAND controller"
imply SYS_NAND_USE_FLASH_BBT
@ -81,6 +87,7 @@ config NAND_OMAP_ELM
config NAND_VF610_NFC
bool "Support for Freescale NFC for VF610"
select SYS_NAND_SELF_INIT
select SYS_NAND_DRIVER_ECC_LAYOUT
imply CMD_NAND
help
Enables support for NAND Flash Controller on some Freescale


@ -47,6 +47,7 @@
#include <linux/errno.h>
/* Define default oob placement schemes for large and small page devices */
#ifdef CONFIG_SYS_NAND_DRIVER_ECC_LAYOUT
static struct nand_ecclayout nand_oob_8 = {
.eccbytes = 3,
.eccpos = {0, 1, 2},
@ -89,6 +90,7 @@ static struct nand_ecclayout nand_oob_128 = {
{.offset = 2,
.length = 78} }
};
#endif
static int nand_get_device(struct mtd_info *mtd, int new_state);
@ -4339,6 +4341,7 @@ int nand_scan_tail(struct mtd_info *mtd)
*/
if (!ecc->layout && (ecc->mode != NAND_ECC_SOFT_BCH)) {
switch (mtd->oobsize) {
#ifdef CONFIG_SYS_NAND_DRIVER_ECC_LAYOUT
case 8:
ecc->layout = &nand_oob_8;
break;
@ -4351,6 +4354,7 @@ int nand_scan_tail(struct mtd_info *mtd)
case 128:
ecc->layout = &nand_oob_128;
break;
#endif
default:
pr_warn("No oob scheme defined for oobsize %d\n",
mtd->oobsize);

env/sf.c

@ -81,6 +81,40 @@ static int setup_flash_device(void)
return 0;
}
static int is_end(const char *addr, size_t size)
{
/* The end of env variables is marked by '\0\0' */
int i = 0;
for (i = 0; i < size - 1; ++i)
if (addr[i] == 0x0 && addr[i + 1] == 0x0)
return 1;
return 0;
}
static int spi_flash_read_env(struct spi_flash *flash, u32 offset, size_t len,
void *buf)
{
u32 addr = 0;
u32 page_size = flash->page_size;
memset(buf, 0x0, len);
for (int i = 0; i < len / page_size; ++i) {
int ret = spi_flash_read(flash, offset, page_size,
&((char *)buf)[addr]);
if (ret < 0)
return ret;
if (is_end(&((char *)buf)[addr], page_size))
return 0;
addr += page_size;
offset += page_size;
}
return 0;
}
#if defined(CONFIG_ENV_OFFSET_REDUND)
#ifdef CMD_SAVEENV
static int env_sf_save(void)
@ -116,8 +150,8 @@ static int env_sf_save(void)
ret = -ENOMEM;
goto done;
}
ret = spi_flash_read(env_flash, saved_offset,
saved_size, saved_buffer);
ret = spi_flash_read_env(env_flash, saved_offset,
saved_size, saved_buffer);
if (ret)
goto done;
}
@ -183,10 +217,10 @@ static int env_sf_load(void)
if (ret)
goto out;
read1_fail = spi_flash_read(env_flash, CONFIG_ENV_OFFSET,
CONFIG_ENV_SIZE, tmp_env1);
read2_fail = spi_flash_read(env_flash, CONFIG_ENV_OFFSET_REDUND,
CONFIG_ENV_SIZE, tmp_env2);
read1_fail = spi_flash_read_env(env_flash, CONFIG_ENV_OFFSET,
CONFIG_ENV_SIZE, tmp_env1);
read2_fail = spi_flash_read_env(env_flash, CONFIG_ENV_OFFSET_REDUND,
CONFIG_ENV_SIZE, tmp_env2);
ret = env_import_redund((char *)tmp_env1, read1_fail, (char *)tmp_env2,
read2_fail);
@ -220,8 +254,8 @@ static int env_sf_save(void)
if (!saved_buffer)
goto done;
ret = spi_flash_read(env_flash, saved_offset,
saved_size, saved_buffer);
ret = spi_flash_read_env(env_flash, saved_offset,
saved_size, saved_buffer);
if (ret)
goto done;
}
@ -277,10 +311,10 @@ static int env_sf_load(void)
if (ret)
goto out;
ret = spi_flash_read(env_flash,
CONFIG_ENV_OFFSET, CONFIG_ENV_SIZE, buf);
ret = spi_flash_read_env(env_flash, CONFIG_ENV_OFFSET, CONFIG_ENV_SIZE,
buf);
if (ret) {
set_default_env("spi_flash_read() failed", 0);
set_default_env("spi_flash_read_env() failed", 0);
goto err_read;
}
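
A rough, self-contained illustration of what the page-wise read buys, with a 256-byte page and a 64 KiB environment as placeholder numbers (neither value is taken from the patch):

#include <stdio.h>
#include <string.h>

/* Same idea as is_end() above: the environment ends at the first "\0\0". */
static int ends_here(const char *buf, size_t size)
{
	for (size_t i = 0; i + 1 < size; i++)
		if (buf[i] == '\0' && buf[i + 1] == '\0')
			return 1;
	return 0;
}

int main(void)
{
	static char env[64 * 1024];		/* stand-in for CONFIG_ENV_SIZE */
	const size_t page_size = 256;		/* stand-in for flash->page_size */
	static const char sample[] = "bootdelay=2\0bootcmd=run distro_bootcmd\0";
	size_t pages = 0;

	memset(env, 0xff, sizeof(env));
	memcpy(env, sample, sizeof(sample));	/* sample ends with "\0\0" */

	for (size_t off = 0; off < sizeof(env); off += page_size) {
		pages++;			/* one SPI read per page */
		if (ends_here(env + off, page_size))
			break;
	}

	/* One full read used to move 65536 bytes; now a single page suffices. */
	printf("pages read: %zu (%zu bytes instead of %zu)\n",
	       pages, pages * page_size, sizeof(env));
	return 0;
}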

fs/fs.c

@ -429,13 +429,57 @@ int fs_size(const char *filename, loff_t *size)
return ret;
}
int fs_read(const char *filename, ulong addr, loff_t offset, loff_t len,
loff_t *actread)
#ifdef CONFIG_LMB
/* Check if a file may be read to the given address */
static int fs_read_lmb_check(const char *filename, ulong addr, loff_t offset,
loff_t len, struct fstype_info *info)
{
struct lmb lmb;
int ret;
loff_t size;
loff_t read_len;
/* get the actual size of the file */
ret = info->size(filename, &size);
if (ret)
return ret;
if (offset >= size) {
/* offset >= EOF, no bytes will be written */
return 0;
}
read_len = size - offset;
/* limit to 'len' if it is smaller */
if (len && len < read_len)
read_len = len;
lmb_init_and_reserve(&lmb, gd->bd->bi_dram[0].start,
gd->bd->bi_dram[0].size, (void *)gd->fdt_blob);
lmb_dump_all(&lmb);
if (lmb_alloc_addr(&lmb, addr, read_len) == addr)
return 0;
printf("** Reading file would overwrite reserved memory **\n");
return -ENOSPC;
}
#endif
static int _fs_read(const char *filename, ulong addr, loff_t offset, loff_t len,
int do_lmb_check, loff_t *actread)
{
struct fstype_info *info = fs_get_info(fs_type);
void *buf;
int ret;
#ifdef CONFIG_LMB
if (do_lmb_check) {
ret = fs_read_lmb_check(filename, addr, offset, len, info);
if (ret)
return ret;
}
#endif
/*
* We don't actually know how many bytes are being read, since len==0
* means read the whole file.
@ -452,6 +496,12 @@ int fs_read(const char *filename, ulong addr, loff_t offset, loff_t len,
return ret;
}
int fs_read(const char *filename, ulong addr, loff_t offset, loff_t len,
loff_t *actread)
{
return _fs_read(filename, addr, offset, len, 0, actread);
}
int fs_write(const char *filename, ulong addr, loff_t offset, loff_t len,
loff_t *actwrite)
{
@ -622,7 +672,7 @@ int do_load(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[],
pos = 0;
time = get_timer(0);
ret = fs_read(filename, addr, pos, bytes, &len_read);
ret = _fs_read(filename, addr, pos, bytes, 1, &len_read);
time = get_timer(time);
if (ret < 0)
return 1;


@ -28,9 +28,9 @@ struct lmb {
struct lmb_region reserved;
};
extern struct lmb lmb;
extern void lmb_init(struct lmb *lmb);
extern void lmb_init_and_reserve(struct lmb *lmb, phys_addr_t base,
phys_size_t size, void *fdt_blob);
extern long lmb_add(struct lmb *lmb, phys_addr_t base, phys_size_t size);
extern long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size);
extern phys_addr_t lmb_alloc(struct lmb *lmb, phys_size_t size, ulong align);
@ -38,6 +38,9 @@ extern phys_addr_t lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align
phys_addr_t max_addr);
extern phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align,
phys_addr_t max_addr);
extern phys_addr_t lmb_alloc_addr(struct lmb *lmb, phys_addr_t base,
phys_size_t size);
extern phys_size_t lmb_get_unreserved_size(struct lmb *lmb, phys_addr_t addr);
extern int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr);
extern long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size);
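
A minimal sketch of how the two new declarations are meant to be used, mirroring what fs.c above and net/tftp.c below do; the wrapper names and error value here are illustrative, not part of the patch:

#include <common.h>
#include <lmb.h>

/* Illustrative: check whether [addr, addr + len) is inside RAM and unreserved.
 * lmb_alloc_addr() returns the base on success and 0 on failure, and reserves
 * the range as a side effect. */
static int example_check_range(struct lmb *lmb, phys_addr_t addr,
			       phys_size_t len)
{
	return lmb_alloc_addr(lmb, addr, len) == addr ? 0 : -1;
}

/* Illustrative: how many bytes are free starting at addr; tftp uses this to
 * bound a transfer whose size is not known up front. */
static phys_size_t example_free_bytes(struct lmb *lmb, phys_addr_t addr)
{
	return lmb_get_unreserved_size(lmb, addr);
}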


@ -36,7 +36,6 @@ obj-$(CONFIG_GZIP_COMPRESSED) += gzip.o
obj-$(CONFIG_GENERATE_SMBIOS_TABLE) += smbios.o
obj-$(CONFIG_IMAGE_SPARSE) += image-sparse.o
obj-y += initcall.o
obj-$(CONFIG_LMB) += lmb.o
obj-y += ldiv.o
obj-$(CONFIG_MD5) += md5.o
obj-y += net_utils.o
@ -88,9 +87,11 @@ obj-y += crc32.o
obj-$(CONFIG_CRC32C) += crc32c.o
obj-y += ctype.o
obj-y += div64.o
obj-$(CONFIG_OF_LIBFDT) += fdtdec.o
obj-y += hang.o
obj-y += linux_compat.o
obj-y += linux_string.o
obj-$(CONFIG_LMB) += lmb.o
obj-y += membuff.o
obj-$(CONFIG_REGEX) += slre.o
obj-y += string.o

lib/lmb.c

@ -43,7 +43,10 @@ void lmb_dump_all(struct lmb *lmb)
static long lmb_addrs_overlap(phys_addr_t base1,
phys_size_t size1, phys_addr_t base2, phys_size_t size2)
{
return ((base1 < (base2+size2)) && (base2 < (base1+size1)));
const phys_addr_t base1_end = base1 + size1 - 1;
const phys_addr_t base2_end = base2 + size2 - 1;
return ((base1 <= base2_end) && (base2 <= base1_end));
}
static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
@ -89,30 +92,35 @@ static void lmb_coalesce_regions(struct lmb_region *rgn,
void lmb_init(struct lmb *lmb)
{
/* Create a dummy zero size LMB which will get coalesced away later.
* This simplifies the lmb_add() code below...
*/
lmb->memory.region[0].base = 0;
lmb->memory.region[0].size = 0;
lmb->memory.cnt = 1;
lmb->memory.cnt = 0;
lmb->memory.size = 0;
/* Ditto. */
lmb->reserved.region[0].base = 0;
lmb->reserved.region[0].size = 0;
lmb->reserved.cnt = 1;
lmb->reserved.cnt = 0;
lmb->reserved.size = 0;
}
/* Initialize the struct, add memory and call arch/board reserve functions */
void lmb_init_and_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size,
void *fdt_blob)
{
lmb_init(lmb);
lmb_add(lmb, base, size);
arch_lmb_reserve(lmb);
board_lmb_reserve(lmb);
if (IMAGE_ENABLE_OF_LIBFDT && fdt_blob)
boot_fdt_add_mem_rsv_regions(lmb, fdt_blob);
}
/* This routine called with relocation disabled. */
static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t size)
{
unsigned long coalesced = 0;
long adjacent, i;
if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
if (rgn->cnt == 0) {
rgn->region[0].base = base;
rgn->region[0].size = size;
rgn->cnt = 1;
return 0;
}
@ -136,6 +144,9 @@ static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t
rgn->region[i].size += size;
coalesced++;
break;
} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
/* regions overlap */
return -1;
}
}
@ -183,7 +194,7 @@ long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
struct lmb_region *rgn = &(lmb->reserved);
phys_addr_t rgnbegin, rgnend;
phys_addr_t end = base + size;
phys_addr_t end = base + size - 1;
int i;
rgnbegin = rgnend = 0; /* suppress gcc warnings */
@ -191,7 +202,7 @@ long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
/* Find the region where (base, size) belongs to */
for (i=0; i < rgn->cnt; i++) {
rgnbegin = rgn->region[i].base;
rgnend = rgnbegin + rgn->region[i].size;
rgnend = rgnbegin + rgn->region[i].size - 1;
if ((rgnbegin <= base) && (end <= rgnend))
break;
@ -209,7 +220,7 @@ long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
/* Check to see if region is matching at the front */
if (rgnbegin == base) {
rgn->region[i].base = end;
rgn->region[i].base = end + 1;
rgn->region[i].size -= size;
return 0;
}
@ -225,7 +236,7 @@ long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
* beginning of the hole and add the region after the hole.
*/
rgn->region[i].size = base - rgn->region[i].base;
return lmb_add_region(rgn, end, rgnend - end);
return lmb_add_region(rgn, end + 1, rgnend - end);
}
long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size)
@ -274,11 +285,6 @@ static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
return addr & ~(size - 1);
}
static phys_addr_t lmb_align_up(phys_addr_t addr, ulong size)
{
return (addr + (size - 1)) & ~(size - 1);
}
phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr)
{
long i, j;
@ -307,8 +313,7 @@ phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phy
if (j < 0) {
/* This area isn't reserved, take it */
if (lmb_add_region(&lmb->reserved, base,
lmb_align_up(size,
align)) < 0)
size) < 0)
return 0;
return base;
}
@ -321,6 +326,59 @@ phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phy
return 0;
}
/*
* Try to allocate a specific address range: must be in defined memory but not
* reserved
*/
phys_addr_t lmb_alloc_addr(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
long j;
/* Check if the requested address is in one of the memory regions */
j = lmb_overlaps_region(&lmb->memory, base, size);
if (j >= 0) {
/*
* Check if the requested end address is in the same memory
* region we found.
*/
if (lmb_addrs_overlap(lmb->memory.region[j].base,
lmb->memory.region[j].size, base + size -
1, 1)) {
/* ok, reserve the memory */
if (lmb_reserve(lmb, base, size) >= 0)
return base;
}
}
return 0;
}
/* Return number of bytes from a given address that are free */
phys_size_t lmb_get_unreserved_size(struct lmb *lmb, phys_addr_t addr)
{
int i;
long j;
/* check if the requested address is in the memory regions */
j = lmb_overlaps_region(&lmb->memory, addr, 1);
if (j >= 0) {
for (i = 0; i < lmb->reserved.cnt; i++) {
if (addr < lmb->reserved.region[i].base) {
/* first reserved range > requested address */
return lmb->reserved.region[i].base - addr;
}
if (lmb->reserved.region[i].base +
lmb->reserved.region[i].size > addr) {
/* requested addr is in this reserved range */
return 0;
}
}
/* if we come here: no reserved ranges above requested addr */
return lmb->memory.region[lmb->memory.cnt - 1].base +
lmb->memory.region[lmb->memory.cnt - 1].size - addr;
}
return 0;
}
int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr)
{
int i;


@ -17,6 +17,8 @@
#include <flash.h>
#endif
DECLARE_GLOBAL_DATA_PTR;
/* Well known TFTP port # */
#define WELL_KNOWN_PORT 69
/* Millisecs to timeout for lost pkt */
@ -81,6 +83,10 @@ static ulong tftp_block_wrap;
/* memory offset due to wrapping */
static ulong tftp_block_wrap_offset;
static int tftp_state;
static ulong tftp_load_addr;
#ifdef CONFIG_LMB
static ulong tftp_load_size;
#endif
#ifdef CONFIG_TFTP_TSIZE
/* The file size reported by the server */
static int tftp_tsize;
@ -164,10 +170,11 @@ static void mcast_cleanup(void)
#endif /* CONFIG_MCAST_TFTP */
static inline void store_block(int block, uchar *src, unsigned len)
static inline int store_block(int block, uchar *src, unsigned int len)
{
ulong offset = block * tftp_block_size + tftp_block_wrap_offset;
ulong newsize = offset + len;
ulong store_addr = tftp_load_addr + offset;
#ifdef CONFIG_SYS_DIRECT_FLASH_TFTP
int i, rc = 0;
@ -175,24 +182,32 @@ static inline void store_block(int block, uchar *src, unsigned len)
/* start address in flash? */
if (flash_info[i].flash_id == FLASH_UNKNOWN)
continue;
if (load_addr + offset >= flash_info[i].start[0]) {
if (store_addr >= flash_info[i].start[0]) {
rc = 1;
break;
}
}
if (rc) { /* Flash is destination for this packet */
rc = flash_write((char *)src, (ulong)(load_addr+offset), len);
rc = flash_write((char *)src, store_addr, len);
if (rc) {
flash_perror(rc);
net_set_state(NETLOOP_FAIL);
return;
return rc;
}
} else
#endif /* CONFIG_SYS_DIRECT_FLASH_TFTP */
{
void *ptr = map_sysmem(load_addr + offset, len);
void *ptr;
#ifdef CONFIG_LMB
if (store_addr < tftp_load_addr ||
store_addr + len > tftp_load_addr + tftp_load_size) {
puts("\nTFTP error: ");
puts("trying to overwrite reserved memory...\n");
return -1;
}
#endif
ptr = map_sysmem(store_addr, len);
memcpy(ptr, src, len);
unmap_sysmem(ptr);
}
@ -203,6 +218,8 @@ static inline void store_block(int block, uchar *src, unsigned len)
if (net_boot_file_size < newsize)
net_boot_file_size = newsize;
return 0;
}
/* Clear our state ready for a new transfer */
@ -606,7 +623,11 @@ static void tftp_handler(uchar *pkt, unsigned dest, struct in_addr sip,
timeout_count_max = tftp_timeout_count_max;
net_set_timeout_handler(timeout_ms, tftp_timeout_handler);
store_block(tftp_cur_block - 1, pkt + 2, len);
if (store_block(tftp_cur_block - 1, pkt + 2, len)) {
eth_halt();
net_set_state(NETLOOP_FAIL);
break;
}
/*
* Acknowledge the block just received, which will prompt
@ -695,6 +716,25 @@ static void tftp_timeout_handler(void)
}
}
/* Initialize tftp_load_addr and tftp_load_size from load_addr and lmb */
static int tftp_init_load_addr(void)
{
#ifdef CONFIG_LMB
struct lmb lmb;
phys_size_t max_size;
lmb_init_and_reserve(&lmb, gd->bd->bi_dram[0].start,
gd->bd->bi_dram[0].size, (void *)gd->fdt_blob);
max_size = lmb_get_unreserved_size(&lmb, load_addr);
if (!max_size)
return -1;
tftp_load_size = max_size;
#endif
tftp_load_addr = load_addr;
return 0;
}
void tftp_start(enum proto_t protocol)
{
@ -791,7 +831,14 @@ void tftp_start(enum proto_t protocol)
} else
#endif
{
printf("Load address: 0x%lx\n", load_addr);
if (tftp_init_load_addr()) {
eth_halt();
net_set_state(NETLOOP_FAIL);
puts("\nTFTP error: ");
puts("trying to overwrite reserved memory...\n");
return;
}
printf("Load address: 0x%lx\n", tftp_load_addr);
puts("Loading: *\b");
tftp_state = STATE_SEND_RRQ;
#ifdef CONFIG_CMD_BOOTEFI
@ -842,9 +889,15 @@ void tftp_start_server(void)
{
tftp_filename[0] = 0;
if (tftp_init_load_addr()) {
eth_halt();
net_set_state(NETLOOP_FAIL);
puts("\nTFTP error: trying to overwrite reserved memory...\n");
return;
}
printf("Using %s device\n", eth_get_name());
printf("Listening for TFTP transfer on %pI4\n", &net_ip);
printf("Load address: 0x%lx\n", load_addr);
printf("Load address: 0x%lx\n", tftp_load_addr);
puts("Loading: *\b");


@ -3,3 +3,4 @@
# (C) Copyright 2018
# Mario Six, Guntermann & Drunck GmbH, mario.six@gdsys.cc
obj-y += hexdump.o
obj-y += lmb.o

test/lib/lmb.c (new file)

@ -0,0 +1,601 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* (C) Copyright 2018 Simon Goldschmidt
*/
#include <common.h>
#include <lmb.h>
#include <dm/test.h>
#include <test/ut.h>
static int check_lmb(struct unit_test_state *uts, struct lmb *lmb,
phys_addr_t ram_base, phys_size_t ram_size,
unsigned long num_reserved,
phys_addr_t base1, phys_size_t size1,
phys_addr_t base2, phys_size_t size2,
phys_addr_t base3, phys_size_t size3)
{
ut_asserteq(lmb->memory.cnt, 1);
ut_asserteq(lmb->memory.region[0].base, ram_base);
ut_asserteq(lmb->memory.region[0].size, ram_size);
ut_asserteq(lmb->reserved.cnt, num_reserved);
if (num_reserved > 0) {
ut_asserteq(lmb->reserved.region[0].base, base1);
ut_asserteq(lmb->reserved.region[0].size, size1);
}
if (num_reserved > 1) {
ut_asserteq(lmb->reserved.region[1].base, base2);
ut_asserteq(lmb->reserved.region[1].size, size2);
}
if (num_reserved > 2) {
ut_asserteq(lmb->reserved.region[2].base, base3);
ut_asserteq(lmb->reserved.region[2].size, size3);
}
return 0;
}
#define ASSERT_LMB(lmb, ram_base, ram_size, num_reserved, base1, size1, \
base2, size2, base3, size3) \
ut_assert(!check_lmb(uts, lmb, ram_base, ram_size, \
num_reserved, base1, size1, base2, size2, base3, \
size3))
/*
* Test helper function that reserves 64 KiB somewhere in the simulated RAM and
* then does some alloc + free tests.
*/
static int test_multi_alloc(struct unit_test_state *uts,
const phys_addr_t ram, const phys_size_t ram_size,
const phys_addr_t alloc_64k_addr)
{
const phys_addr_t ram_end = ram + ram_size;
const phys_addr_t alloc_64k_end = alloc_64k_addr + 0x10000;
struct lmb lmb;
long ret;
phys_addr_t a, a2, b, b2, c, d;
/* check for overflow */
ut_assert(ram_end == 0 || ram_end > ram);
ut_assert(alloc_64k_end > alloc_64k_addr);
/* check input addresses + size */
ut_assert(alloc_64k_addr >= ram + 8);
ut_assert(alloc_64k_end <= ram_end - 8);
lmb_init(&lmb);
ret = lmb_add(&lmb, ram, ram_size);
ut_asserteq(ret, 0);
/* reserve 64KiB somewhere */
ret = lmb_reserve(&lmb, alloc_64k_addr, 0x10000);
ut_asserteq(ret, 0);
ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
0, 0, 0, 0);
/* allocate somewhere, should be at the end of RAM */
a = lmb_alloc(&lmb, 4, 1);
ut_asserteq(a, ram_end - 4);
ASSERT_LMB(&lmb, ram, ram_size, 2, alloc_64k_addr, 0x10000,
ram_end - 4, 4, 0, 0);
/* alloc below end of reserved region -> below reserved region */
b = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
ut_asserteq(b, alloc_64k_addr - 4);
ASSERT_LMB(&lmb, ram, ram_size, 2,
alloc_64k_addr - 4, 0x10000 + 4, ram_end - 4, 4, 0, 0);
/* 2nd time */
c = lmb_alloc(&lmb, 4, 1);
ut_asserteq(c, ram_end - 8);
ASSERT_LMB(&lmb, ram, ram_size, 2,
alloc_64k_addr - 4, 0x10000 + 4, ram_end - 8, 8, 0, 0);
d = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
ut_asserteq(d, alloc_64k_addr - 8);
ASSERT_LMB(&lmb, ram, ram_size, 2,
alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 8, 0, 0);
ret = lmb_free(&lmb, a, 4);
ut_asserteq(ret, 0);
ASSERT_LMB(&lmb, ram, ram_size, 2,
alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);
/* allocate again to ensure we get the same address */
a2 = lmb_alloc(&lmb, 4, 1);
ut_asserteq(a, a2);
ASSERT_LMB(&lmb, ram, ram_size, 2,
alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 8, 0, 0);
ret = lmb_free(&lmb, a2, 4);
ut_asserteq(ret, 0);
ASSERT_LMB(&lmb, ram, ram_size, 2,
alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);
ret = lmb_free(&lmb, b, 4);
ut_asserteq(ret, 0);
ASSERT_LMB(&lmb, ram, ram_size, 3,
alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000,
ram_end - 8, 4);
/* allocate again to ensure we get the same address */
b2 = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
ut_asserteq(b, b2);
ASSERT_LMB(&lmb, ram, ram_size, 2,
alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);
ret = lmb_free(&lmb, b2, 4);
ut_asserteq(ret, 0);
ASSERT_LMB(&lmb, ram, ram_size, 3,
alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000,
ram_end - 8, 4);
ret = lmb_free(&lmb, c, 4);
ut_asserteq(ret, 0);
ASSERT_LMB(&lmb, ram, ram_size, 2,
alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000, 0, 0);
ret = lmb_free(&lmb, d, 4);
ut_asserteq(ret, 0);
ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
0, 0, 0, 0);
return 0;
}
static int test_multi_alloc_512mb(struct unit_test_state *uts,
const phys_addr_t ram)
{
return test_multi_alloc(uts, ram, 0x20000000, ram + 0x10000000);
}
/* Create a memory region with one reserved region and allocate */
static int lib_test_lmb_simple(struct unit_test_state *uts)
{
int ret;
/* simulate 512 MiB RAM beginning at 1GiB */
ret = test_multi_alloc_512mb(uts, 0x40000000);
if (ret)
return ret;
/* simulate 512 MiB RAM beginning at 1.5GiB */
return test_multi_alloc_512mb(uts, 0xE0000000);
}
DM_TEST(lib_test_lmb_simple, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
/* Simulate 512 MiB RAM, allocate some blocks that fit/don't fit */
static int test_bigblock(struct unit_test_state *uts, const phys_addr_t ram)
{
const phys_size_t ram_size = 0x20000000;
const phys_size_t big_block_size = 0x10000000;
const phys_addr_t ram_end = ram + ram_size;
const phys_addr_t alloc_64k_addr = ram + 0x10000000;
struct lmb lmb;
long ret;
phys_addr_t a, b;
/* check for overflow */
ut_assert(ram_end == 0 || ram_end > ram);
lmb_init(&lmb);
ret = lmb_add(&lmb, ram, ram_size);
ut_asserteq(ret, 0);
/* reserve 64KiB in the middle of RAM */
ret = lmb_reserve(&lmb, alloc_64k_addr, 0x10000);
ut_asserteq(ret, 0);
ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
0, 0, 0, 0);
/* allocate a big block, should be below reserved */
a = lmb_alloc(&lmb, big_block_size, 1);
ut_asserteq(a, ram);
ASSERT_LMB(&lmb, ram, ram_size, 1, a,
big_block_size + 0x10000, 0, 0, 0, 0);
/* allocate 2nd big block */
/* This should fail, printing an error */
b = lmb_alloc(&lmb, big_block_size, 1);
ut_asserteq(b, 0);
ASSERT_LMB(&lmb, ram, ram_size, 1, a,
big_block_size + 0x10000, 0, 0, 0, 0);
ret = lmb_free(&lmb, a, big_block_size);
ut_asserteq(ret, 0);
ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
0, 0, 0, 0);
/* allocate too big block */
/* This should fail, printing an error */
a = lmb_alloc(&lmb, ram_size, 1);
ut_asserteq(a, 0);
ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
0, 0, 0, 0);
return 0;
}
static int lib_test_lmb_big(struct unit_test_state *uts)
{
int ret;
/* simulate 512 MiB RAM beginning at 1GiB */
ret = test_bigblock(uts, 0x40000000);
if (ret)
return ret;
/* simulate 512 MiB RAM beginning at 1.5GiB */
return test_bigblock(uts, 0xE0000000);
}
DM_TEST(lib_test_lmb_big, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
/* Simulate 512 MiB RAM, allocate a block without previous reservation */
static int test_noreserved(struct unit_test_state *uts, const phys_addr_t ram,
const phys_addr_t alloc_size, const ulong align)
{
const phys_size_t ram_size = 0x20000000;
const phys_addr_t ram_end = ram + ram_size;
struct lmb lmb;
long ret;
phys_addr_t a, b;
const phys_addr_t alloc_size_aligned = (alloc_size + align - 1) &
~(align - 1);
/* check for overflow */
ut_assert(ram_end == 0 || ram_end > ram);
lmb_init(&lmb);
ret = lmb_add(&lmb, ram, ram_size);
ut_asserteq(ret, 0);
ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);
/* allocate a block */
a = lmb_alloc(&lmb, alloc_size, align);
ut_assert(a != 0);
ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
alloc_size, 0, 0, 0, 0);
/* allocate another block */
b = lmb_alloc(&lmb, alloc_size, align);
ut_assert(b != 0);
if (alloc_size == alloc_size_aligned) {
ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size -
(alloc_size_aligned * 2), alloc_size * 2, 0, 0, 0,
0);
} else {
ASSERT_LMB(&lmb, ram, ram_size, 2, ram + ram_size -
(alloc_size_aligned * 2), alloc_size, ram + ram_size
- alloc_size_aligned, alloc_size, 0, 0);
}
/* and free them */
ret = lmb_free(&lmb, b, alloc_size);
ut_asserteq(ret, 0);
ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
alloc_size, 0, 0, 0, 0);
ret = lmb_free(&lmb, a, alloc_size);
ut_asserteq(ret, 0);
ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);
/* allocate a block with base */
b = lmb_alloc_base(&lmb, alloc_size, align, ram_end);
ut_assert(a == b);
ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
alloc_size, 0, 0, 0, 0);
/* and free it */
ret = lmb_free(&lmb, b, alloc_size);
ut_asserteq(ret, 0);
ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);
return 0;
}
static int lib_test_lmb_noreserved(struct unit_test_state *uts)
{
int ret;
/* simulate 512 MiB RAM beginning at 1GiB */
ret = test_noreserved(uts, 0x40000000, 4, 1);
if (ret)
return ret;
/* simulate 512 MiB RAM beginning at 1.5GiB */
return test_noreserved(uts, 0xE0000000, 4, 1);
}
DM_TEST(lib_test_lmb_noreserved, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
static int lib_test_lmb_unaligned_size(struct unit_test_state *uts)
{
int ret;
/* simulate 512 MiB RAM beginning at 1GiB */
ret = test_noreserved(uts, 0x40000000, 5, 8);
if (ret)
return ret;
/* simulate 512 MiB RAM beginning at 1.5GiB */
return test_noreserved(uts, 0xE0000000, 5, 8);
}
DM_TEST(lib_test_lmb_unaligned_size, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
/*
* Simulate a RAM that starts at 0 and allocate down to address 0, which must
* fail as '0' means failure for the lmb_alloc functions.
*/
static int lib_test_lmb_at_0(struct unit_test_state *uts)
{
const phys_addr_t ram = 0;
const phys_size_t ram_size = 0x20000000;
struct lmb lmb;
long ret;
phys_addr_t a, b;
lmb_init(&lmb);
ret = lmb_add(&lmb, ram, ram_size);
ut_asserteq(ret, 0);
/* allocate nearly everything */
a = lmb_alloc(&lmb, ram_size - 4, 1);
ut_asserteq(a, ram + 4);
ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
0, 0, 0, 0);
/* allocate the rest */
/* This should fail as the allocated address would be 0 */
b = lmb_alloc(&lmb, 4, 1);
ut_asserteq(b, 0);
/* check that this was an error by checking lmb */
ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
0, 0, 0, 0);
/* check that this was an error by freeing b */
ret = lmb_free(&lmb, b, 4);
ut_asserteq(ret, -1);
ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
0, 0, 0, 0);
ret = lmb_free(&lmb, a, ram_size - 4);
ut_asserteq(ret, 0);
ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);
return 0;
}
DM_TEST(lib_test_lmb_at_0, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
/* Check that calling lmb_reserve with overlapping regions fails. */
static int lib_test_lmb_overlapping_reserve(struct unit_test_state *uts)
{
const phys_addr_t ram = 0x40000000;
const phys_size_t ram_size = 0x20000000;
struct lmb lmb;
long ret;
lmb_init(&lmb);
ret = lmb_add(&lmb, ram, ram_size);
ut_asserteq(ret, 0);
ret = lmb_reserve(&lmb, 0x40010000, 0x10000);
ut_asserteq(ret, 0);
ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
0, 0, 0, 0);
/* allocate overlapping region should fail */
ret = lmb_reserve(&lmb, 0x40011000, 0x10000);
ut_asserteq(ret, -1);
ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
0, 0, 0, 0);
/* allocate 3rd region */
ret = lmb_reserve(&lmb, 0x40030000, 0x10000);
ut_asserteq(ret, 0);
ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40010000, 0x10000,
0x40030000, 0x10000, 0, 0);
/* allocate 2nd region */
ret = lmb_reserve(&lmb, 0x40020000, 0x10000);
ut_assert(ret >= 0);
ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x30000,
0, 0, 0, 0);
return 0;
}
DM_TEST(lib_test_lmb_overlapping_reserve,
DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
/*
* Simulate 512 MiB RAM, reserve 3 blocks, allocate addresses in between.
* Expect addresses outside the memory range to fail.
*/
static int test_alloc_addr(struct unit_test_state *uts, const phys_addr_t ram)
{
const phys_size_t ram_size = 0x20000000;
const phys_addr_t ram_end = ram + ram_size;
const phys_size_t alloc_addr_a = ram + 0x8000000;
const phys_size_t alloc_addr_b = ram + 0x8000000 * 2;
const phys_size_t alloc_addr_c = ram + 0x8000000 * 3;
struct lmb lmb;
long ret;
phys_addr_t a, b, c, d, e;
/* check for overflow */
ut_assert(ram_end == 0 || ram_end > ram);
lmb_init(&lmb);
ret = lmb_add(&lmb, ram, ram_size);
ut_asserteq(ret, 0);
/* reserve 3 blocks */
ret = lmb_reserve(&lmb, alloc_addr_a, 0x10000);
ut_asserteq(ret, 0);
ret = lmb_reserve(&lmb, alloc_addr_b, 0x10000);
ut_asserteq(ret, 0);
ret = lmb_reserve(&lmb, alloc_addr_c, 0x10000);
ut_asserteq(ret, 0);
ASSERT_LMB(&lmb, ram, ram_size, 3, alloc_addr_a, 0x10000,
alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);
/* allocate blocks */
a = lmb_alloc_addr(&lmb, ram, alloc_addr_a - ram);
ut_asserteq(a, ram);
ASSERT_LMB(&lmb, ram, ram_size, 3, ram, 0x8010000,
alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);
b = lmb_alloc_addr(&lmb, alloc_addr_a + 0x10000,
alloc_addr_b - alloc_addr_a - 0x10000);
ut_asserteq(b, alloc_addr_a + 0x10000);
ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x10010000,
alloc_addr_c, 0x10000, 0, 0);
c = lmb_alloc_addr(&lmb, alloc_addr_b + 0x10000,
alloc_addr_c - alloc_addr_b - 0x10000);
ut_asserteq(c, alloc_addr_b + 0x10000);
ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
0, 0, 0, 0);
d = lmb_alloc_addr(&lmb, alloc_addr_c + 0x10000,
ram_end - alloc_addr_c - 0x10000);
ut_asserteq(d, alloc_addr_c + 0x10000);
ASSERT_LMB(&lmb, ram, ram_size, 1, ram, ram_size,
0, 0, 0, 0);
/* allocating anything else should fail */
e = lmb_alloc(&lmb, 1, 1);
ut_asserteq(e, 0);
ASSERT_LMB(&lmb, ram, ram_size, 1, ram, ram_size,
0, 0, 0, 0);
ret = lmb_free(&lmb, d, ram_end - alloc_addr_c - 0x10000);
ut_asserteq(ret, 0);
/* allocate at 3 points in free range */
d = lmb_alloc_addr(&lmb, ram_end - 4, 4);
ut_asserteq(d, ram_end - 4);
ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x18010000,
d, 4, 0, 0);
ret = lmb_free(&lmb, d, 4);
ut_asserteq(ret, 0);
ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
0, 0, 0, 0);
d = lmb_alloc_addr(&lmb, ram_end - 128, 4);
ut_asserteq(d, ram_end - 128);
ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x18010000,
d, 4, 0, 0);
ret = lmb_free(&lmb, d, 4);
ut_asserteq(ret, 0);
ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
0, 0, 0, 0);
d = lmb_alloc_addr(&lmb, alloc_addr_c + 0x10000, 4);
ut_asserteq(d, alloc_addr_c + 0x10000);
ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010004,
0, 0, 0, 0);
ret = lmb_free(&lmb, d, 4);
ut_asserteq(ret, 0);
ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
0, 0, 0, 0);
/* allocate at the bottom */
ret = lmb_free(&lmb, a, alloc_addr_a - ram);
ut_asserteq(ret, 0);
ASSERT_LMB(&lmb, ram, ram_size, 1, ram + 0x8000000, 0x10010000,
0, 0, 0, 0);
d = lmb_alloc_addr(&lmb, ram, 4);
ut_asserteq(d, ram);
ASSERT_LMB(&lmb, ram, ram_size, 2, d, 4,
ram + 0x8000000, 0x10010000, 0, 0);
/* check that allocating outside memory fails */
if (ram_end != 0) {
ret = lmb_alloc_addr(&lmb, ram_end, 1);
ut_asserteq(ret, 0);
}
if (ram != 0) {
ret = lmb_alloc_addr(&lmb, ram - 1, 1);
ut_asserteq(ret, 0);
}
return 0;
}
static int lib_test_lmb_alloc_addr(struct unit_test_state *uts)
{
int ret;
/* simulate 512 MiB RAM beginning at 1GiB */
ret = test_alloc_addr(uts, 0x40000000);
if (ret)
return ret;
/* simulate 512 MiB RAM beginning at 1.5GiB */
return test_alloc_addr(uts, 0xE0000000);
}
DM_TEST(lib_test_lmb_alloc_addr, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
/* Simulate 512 MiB RAM, reserve 3 blocks, check addresses in between */
static int test_get_unreserved_size(struct unit_test_state *uts,
const phys_addr_t ram)
{
const phys_size_t ram_size = 0x20000000;
const phys_addr_t ram_end = ram + ram_size;
const phys_size_t alloc_addr_a = ram + 0x8000000;
const phys_size_t alloc_addr_b = ram + 0x8000000 * 2;
const phys_size_t alloc_addr_c = ram + 0x8000000 * 3;
struct lmb lmb;
long ret;
phys_size_t s;
/* check for overflow */
ut_assert(ram_end == 0 || ram_end > ram);
lmb_init(&lmb);
ret = lmb_add(&lmb, ram, ram_size);
ut_asserteq(ret, 0);
/* reserve 3 blocks */
ret = lmb_reserve(&lmb, alloc_addr_a, 0x10000);
ut_asserteq(ret, 0);
ret = lmb_reserve(&lmb, alloc_addr_b, 0x10000);
ut_asserteq(ret, 0);
ret = lmb_reserve(&lmb, alloc_addr_c, 0x10000);
ut_asserteq(ret, 0);
ASSERT_LMB(&lmb, ram, ram_size, 3, alloc_addr_a, 0x10000,
alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);
/* check addresses in between blocks */
s = lmb_get_unreserved_size(&lmb, ram);
ut_asserteq(s, alloc_addr_a - ram);
s = lmb_get_unreserved_size(&lmb, ram + 0x10000);
ut_asserteq(s, alloc_addr_a - ram - 0x10000);
s = lmb_get_unreserved_size(&lmb, alloc_addr_a - 4);
ut_asserteq(s, 4);
s = lmb_get_unreserved_size(&lmb, alloc_addr_a + 0x10000);
ut_asserteq(s, alloc_addr_b - alloc_addr_a - 0x10000);
s = lmb_get_unreserved_size(&lmb, alloc_addr_a + 0x20000);
ut_asserteq(s, alloc_addr_b - alloc_addr_a - 0x20000);
s = lmb_get_unreserved_size(&lmb, alloc_addr_b - 4);
ut_asserteq(s, 4);
s = lmb_get_unreserved_size(&lmb, alloc_addr_c + 0x10000);
ut_asserteq(s, ram_end - alloc_addr_c - 0x10000);
s = lmb_get_unreserved_size(&lmb, alloc_addr_c + 0x20000);
ut_asserteq(s, ram_end - alloc_addr_c - 0x20000);
s = lmb_get_unreserved_size(&lmb, ram_end - 4);
ut_asserteq(s, 4);
return 0;
}
static int lib_test_lmb_get_unreserved_size(struct unit_test_state *uts)
{
int ret;
/* simulate 512 MiB RAM beginning at 1GiB */
ret = test_get_unreserved_size(uts, 0x40000000);
if (ret)
return ret;
/* simulate 512 MiB RAM beginning at 1.5GiB */
return test_get_unreserved_size(uts, 0xE0000000);
}
DM_TEST(lib_test_lmb_get_unreserved_size,
DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
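
Since the new tests register themselves with DM_TEST(), they are presumably run through the existing sandbox driver-model test command, e.g. "ut dm lib_test_lmb_simple" at the U-Boot prompt; this is an inference from the DM_TEST macro, not something stated in the patch.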