- Backport upstream Winbond W25N04KV Flash support. - Backport upstream GigaDevice series Flash support. - Backport pending Airoha AN8855 switch TPID value fix. - Backport Mediatek UART baudrate accuracy compensation support. - Pull mtk patchset from MTK SDK mtksoc-20250711 branch: Remove mt7622_rfb changes. The MTK SDK already dropped them. Replace Airoha ethernet PHY driver with new version. Split downstream snfi changes into independent patches. Add new Marvell CUX3410 PHY driver. Add new MediaTek built-in 2.5Gbps PHY driver. Signed-off-by: Shiji Yang <yangshiji66@outlook.com>
958 lines
22 KiB
Diff
958 lines
22 KiB
Diff
From 0056bd4ec8ac2cbde6c2a5e07cba9b4eb3b7cfa3 Mon Sep 17 00:00:00 2001
|
|
From: Weijie Gao <weijie.gao@mediatek.com>
|
|
Date: Mon, 25 Jul 2022 10:42:12 +0800
|
|
Subject: [PATCH 05/30] mtd: nmbm: add support for mtd
|
|
|
|
Add support to create NMBM based on MTD devices
|
|
|
|
Signed-off-by: Weijie Gao <weijie.gao@mediatek.com>
|
|
---
|
|
drivers/mtd/nmbm/Kconfig | 5 +
|
|
drivers/mtd/nmbm/Makefile | 1 +
|
|
drivers/mtd/nmbm/nmbm-mtd.c | 890 ++++++++++++++++++++++++++++++++++++
|
|
include/nmbm/nmbm-mtd.h | 27 ++
|
|
4 files changed, 923 insertions(+)
|
|
create mode 100644 drivers/mtd/nmbm/nmbm-mtd.c
|
|
create mode 100644 include/nmbm/nmbm-mtd.h
|
|
|
|
--- a/drivers/mtd/nmbm/Kconfig
|
|
+++ b/drivers/mtd/nmbm/Kconfig
|
|
@@ -27,3 +27,8 @@ config NMBM_LOG_LEVEL_NONE
|
|
bool "5 - None"
|
|
|
|
endchoice
|
|
+
|
|
+config NMBM_MTD
|
|
+ bool "Enable MTD based NAND mapping block management"
|
|
+ default n
|
|
+ depends on NMBM
|
|
--- a/drivers/mtd/nmbm/Makefile
|
|
+++ b/drivers/mtd/nmbm/Makefile
|
|
@@ -3,3 +3,4 @@
|
|
# (C) Copyright 2020 MediaTek Inc. All rights reserved.
|
|
|
|
obj-$(CONFIG_NMBM) += nmbm-core.o
|
|
+obj-$(CONFIG_NMBM_MTD) += nmbm-mtd.o
|
|
--- /dev/null
|
|
+++ b/drivers/mtd/nmbm/nmbm-mtd.c
|
|
@@ -0,0 +1,890 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
|
|
+ *
|
|
+ * Author: Weijie Gao <weijie.gao@mediatek.com>
|
|
+ */
|
|
+
|
|
+#include <linux/list.h>
|
|
+#include <linux/bitops.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/mtd/mtd.h>
|
|
+#include <jffs2/load_kernel.h>
|
|
+#include <watchdog.h>
|
|
+
|
|
+#include "nmbm-debug.h"
|
|
+
|
|
+#define NMBM_UPPER_MTD_NAME "nmbm"
|
|
+
|
|
+static uint32_t nmbm_id_cnt;
|
|
+static LIST_HEAD(nmbm_devs);
|
|
+
|
|
+struct nmbm_mtd {
|
|
+ struct mtd_info upper;
|
|
+ char *name;
|
|
+ uint32_t id;
|
|
+
|
|
+ struct mtd_info *lower;
|
|
+
|
|
+ struct nmbm_instance *ni;
|
|
+ uint8_t *page_cache;
|
|
+
|
|
+ struct list_head node;
|
|
+};
|
|
+
|
|
/*
 * Lower-device page read callback for the NMBM core.
 *
 * Reads one page (data and/or OOB) from the lower MTD device and mirrors
 * the lower device's ECC statistics into the upper MTD.
 *
 * Return: a positive bitflip count if bitflips reached the threshold,
 * 0 on clean read (or bitflips below threshold), negative error otherwise.
 */
static int nmbm_lower_read_page(void *arg, uint64_t addr, void *buf, void *oob,
				enum nmbm_oob_mode mode)
{
	struct nmbm_mtd *nm = arg;
	struct mtd_oob_ops ops;
	int ret;

	memset(&ops, 0, sizeof(ops));

	/* Translate the NMBM OOB mode into the equivalent MTD OOB mode */
	switch (mode) {
	case NMBM_MODE_PLACE_OOB:
		ops.mode = MTD_OPS_PLACE_OOB;
		break;
	case NMBM_MODE_AUTO_OOB:
		ops.mode = MTD_OPS_AUTO_OOB;
		break;
	case NMBM_MODE_RAW:
		ops.mode = MTD_OPS_RAW;
		break;
	default:
		pr_debug("%s: unsupported NMBM mode: %u\n", __func__, mode);
		return -ENOTSUPP;
	}

	/* Exactly one lower page of data is transferred per call */
	if (buf) {
		ops.datbuf = buf;
		ops.len = nm->lower->writesize;
	}

	if (oob) {
		ops.oobbuf = oob;
		ops.ooblen = mtd_oobavail(nm->lower, &ops);
	}

	ret = mtd_read_oob(nm->lower, addr, &ops);
	/* Propagate the lower device's ECC counters to the upper device */
	nm->upper.ecc_stats.corrected = nm->lower->ecc_stats.corrected;
	nm->upper.ecc_stats.failed = nm->lower->ecc_stats.failed;

	/* Report error on failure (including ecc error) */
	if (ret < 0 && ret != -EUCLEAN)
		return ret;

	/*
	 * Since mtd_read_oob() won't report exact bitflips, what we can know
	 * is whether bitflips exceeds the threshold.
	 * We want the -EUCLEAN to be passed to the upper layer, but not the
	 * error value itself. To achieve this, report bitflips above the
	 * threshold.
	 */

	if (ret == -EUCLEAN) {
		return min_t(u32, nm->lower->bitflip_threshold + 1,
			     nm->lower->ecc_strength);
	}

	/* For bitflips less than the threshold, return 0 */

	return 0;
}
|
|
+
|
|
+static int nmbm_lower_write_page(void *arg, uint64_t addr, const void *buf,
|
|
+ const void *oob, enum nmbm_oob_mode mode)
|
|
+{
|
|
+ struct nmbm_mtd *nm = arg;
|
|
+ struct mtd_oob_ops ops;
|
|
+
|
|
+ memset(&ops, 0, sizeof(ops));
|
|
+
|
|
+ switch (mode) {
|
|
+ case NMBM_MODE_PLACE_OOB:
|
|
+ ops.mode = MTD_OPS_PLACE_OOB;
|
|
+ break;
|
|
+ case NMBM_MODE_AUTO_OOB:
|
|
+ ops.mode = MTD_OPS_AUTO_OOB;
|
|
+ break;
|
|
+ case NMBM_MODE_RAW:
|
|
+ ops.mode = MTD_OPS_RAW;
|
|
+ break;
|
|
+ default:
|
|
+ pr_debug("%s: unsupported NMBM mode: %u\n", __func__, mode);
|
|
+ return -ENOTSUPP;
|
|
+ }
|
|
+
|
|
+ if (buf) {
|
|
+ ops.datbuf = (uint8_t *)buf;
|
|
+ ops.len = nm->lower->writesize;
|
|
+ }
|
|
+
|
|
+ if (oob) {
|
|
+ ops.oobbuf = (uint8_t *)oob;
|
|
+ ops.ooblen = mtd_oobavail(nm->lower, &ops);
|
|
+ }
|
|
+
|
|
+ return mtd_write_oob(nm->lower, addr, &ops);
|
|
+}
|
|
+
|
|
+static int nmbm_lower_erase_block(void *arg, uint64_t addr)
|
|
+{
|
|
+ struct nmbm_mtd *nm = arg;
|
|
+ struct erase_info ei;
|
|
+
|
|
+ memset(&ei, 0, sizeof(ei));
|
|
+
|
|
+ ei.mtd = nm->lower;
|
|
+ ei.addr = addr;
|
|
+ ei.len = nm->lower->erasesize;
|
|
+
|
|
+ return mtd_erase(nm->lower, &ei);
|
|
+}
|
|
+
|
|
+static int nmbm_lower_is_bad_block(void *arg, uint64_t addr)
|
|
+{
|
|
+ struct nmbm_mtd *nm = arg;
|
|
+
|
|
+ return mtd_block_isbad(nm->lower, addr);
|
|
+}
|
|
+
|
|
+static int nmbm_lower_mark_bad_block(void *arg, uint64_t addr)
|
|
+{
|
|
+ struct nmbm_mtd *nm = arg;
|
|
+
|
|
+ return mtd_block_markbad(nm->lower, addr);
|
|
+}
|
|
+
|
|
/*
 * Log output callback for the NMBM core.
 * The log level argument is ignored here: every message is printed
 * unconditionally to the console.
 */
static void nmbm_lower_log(void *arg, enum nmbm_log_category level,
			   const char *fmt, va_list ap)
{
	vprintf(fmt, ap);
}
|
|
+
|
|
+static int nmbm_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
|
|
+ size_t *retlen, u_char *buf)
|
|
+{
|
|
+ struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
|
|
+
|
|
+ /* Do not allow read past end of device */
|
|
+ if ((from + len) > mtd->size) {
|
|
+ pr_debug("%s: attempt to write beyond end of device\n",
|
|
+ __func__);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return nmbm_read_range(nm->ni, from, len, buf, MTD_OPS_PLACE_OOB,
|
|
+ retlen);
|
|
+}
|
|
+
|
|
+static int nmbm_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
|
|
+ size_t *retlen, const u_char *buf)
|
|
+{
|
|
+ struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
|
|
+
|
|
+ /* Do not allow write past end of device */
|
|
+ if ((to + len) > mtd->size) {
|
|
+ pr_debug("%s: attempt to write beyond end of device\n",
|
|
+ __func__);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return nmbm_write_range(nm->ni, to, len, buf, MTD_OPS_PLACE_OOB,
|
|
+ retlen);
|
|
+}
|
|
+
|
|
+static int nmbm_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
|
|
+{
|
|
+ struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
|
|
+ int ret;
|
|
+
|
|
+ instr->state = MTD_ERASING;
|
|
+
|
|
+ ret = nmbm_erase_block_range(nm->ni, instr->addr, instr->len,
|
|
+ &instr->fail_addr);
|
|
+ if (ret)
|
|
+ instr->state = MTD_ERASE_FAILED;
|
|
+ else
|
|
+ instr->state = MTD_ERASE_DONE;
|
|
+
|
|
+ if (!ret)
|
|
+ /* FIXME */
|
|
+ /* mtd_erase_callback(instr); */
|
|
+ return ret;
|
|
+ else
|
|
+ ret = -EIO;
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
/*
 * Read a data/OOB range page by page through the NMBM core.
 *
 * The request may start at an arbitrary (unaligned) offset: each lower
 * page is read into the bounce buffer (nm->page_cache) and only the
 * requested portion is copied out. An uncorrectable ECC error (-EBADMSG)
 * does not abort the loop, so the caller still receives as much data as
 * possible; the error is reported once at the end.
 *
 * Return: maximum bitflip count seen across all pages on success,
 * -EBADMSG if any page had an uncorrectable ECC error, or another
 * negative error code on hard failure.
 */
static int nmbm_mtd_read_data(struct nmbm_mtd *nm, uint64_t addr,
			      struct mtd_oob_ops *ops, enum nmbm_oob_mode mode)
{
	size_t len, ooblen, maxooblen, chklen;
	uint32_t col, ooboffs;
	uint8_t *datcache, *oobcache;
	bool has_ecc_err = false;
	int ret, max_bitflips = 0;

	/* Split start address into page-aligned base + in-page column */
	col = addr & nm->lower->writesize_mask;
	addr &= ~nm->lower->writesize_mask;
	maxooblen = mtd_oobavail(nm->lower, ops);
	ooboffs = ops->ooboffs;
	ooblen = ops->ooblen;
	len = ops->len;

	/* Bounce buffers: data page first, OOB area right behind it */
	datcache = len ? nm->page_cache : NULL;
	oobcache = ooblen ? nm->page_cache + nm->lower->writesize : NULL;

	ops->oobretlen = 0;
	ops->retlen = 0;

	while (len || ooblen) {
		/* Yield/kick the watchdog during long transfers (<watchdog.h>) */
		schedule();

		ret = nmbm_read_single_page(nm->ni, addr, datcache, oobcache,
					    mode);
		if (ret < 0 && ret != -EBADMSG)
			return ret;

		/* Continue reading on ecc error */
		if (ret == -EBADMSG)
			has_ecc_err = true;

		/* Record the maximum bitflips between pages */
		if (ret > max_bitflips)
			max_bitflips = ret;

		if (len) {
			/* Move data */
			chklen = nm->lower->writesize - col;
			if (chklen > len)
				chklen = len;

			memcpy(ops->datbuf + ops->retlen, datcache + col,
			       chklen);
			len -= chklen;
			/* only the first page can start mid-page */
			col = 0; /* (col + chklen) % */
			ops->retlen += chklen;
		}

		if (ooblen) {
			/* Move oob */
			chklen = maxooblen - ooboffs;
			if (chklen > ooblen)
				chklen = ooblen;

			memcpy(ops->oobbuf + ops->oobretlen, oobcache + ooboffs,
			       chklen);
			ooblen -= chklen;
			/* only the first page can start mid-OOB */
			ooboffs = 0; /* (ooboffs + chklen) % maxooblen; */
			ops->oobretlen += chklen;
		}

		addr += nm->lower->writesize;
	}

	if (has_ecc_err)
		return -EBADMSG;

	return max_bitflips;
}
|
|
+
|
|
/*
 * MTD _read_oob handler for the upper NMBM device.
 *
 * Validates the request (mode, range, OOB bounds), then either takes the
 * optimized data-only path via nmbm_read_range() or falls back to the
 * page-by-page bounce-buffer path in nmbm_mtd_read_data().
 */
static int nmbm_mtd_read_oob(struct mtd_info *mtd, loff_t from,
			     struct mtd_oob_ops *ops)
{
	struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
	uint32_t maxooblen;
	enum nmbm_oob_mode mode;

	/* No buffers at all: accept only a zero-length request */
	if (!ops->oobbuf && !ops->datbuf) {
		if (ops->ooblen || ops->len)
			return -EINVAL;

		return 0;
	}

	/* Translate the MTD OOB mode into the equivalent NMBM mode */
	switch (ops->mode) {
	case MTD_OPS_PLACE_OOB:
		mode = NMBM_MODE_PLACE_OOB;
		break;
	case MTD_OPS_AUTO_OOB:
		mode = NMBM_MODE_AUTO_OOB;
		break;
	case MTD_OPS_RAW:
		mode = NMBM_MODE_RAW;
		break;
	default:
		pr_debug("%s: unsupported oob mode: %u\n", __func__, ops->mode);
		return -ENOTSUPP;
	}

	maxooblen = mtd_oobavail(mtd, ops);

	/* Do not allow read past end of device */
	if (ops->datbuf && (from + ops->len) > mtd->size) {
		pr_debug("%s: attempt to read beyond end of device\n",
			 __func__);
		return -EINVAL;
	}

	if (!ops->oobbuf) {
		/* Optimized for reading data only */
		return nmbm_read_range(nm->ni, from, ops->len, ops->datbuf,
				       mode, &ops->retlen);
	}

	if (unlikely(ops->ooboffs >= maxooblen)) {
		pr_debug("%s: attempt to start read outside oob\n",
			 __func__);
		return -EINVAL;
	}

	/*
	 * The OOB request must fit into the OOB space of the pages between
	 * 'from' and the end of the device.
	 */
	if (unlikely(from >= mtd->size ||
	    ops->ooboffs + ops->ooblen > ((mtd->size >> mtd->writesize_shift) -
	    (from >> mtd->writesize_shift)) * maxooblen)) {
		pr_debug("%s: attempt to read beyond end of device\n",
			 __func__);
		return -EINVAL;
	}

	return nmbm_mtd_read_data(nm, from, ops, mode);
}
|
|
+
|
|
/*
 * Write a data/OOB range page by page through the NMBM core.
 *
 * The request may start at an arbitrary (unaligned) offset: for each
 * lower page, the requested portion is assembled in the bounce buffer
 * (nm->page_cache) and the untouched parts are padded with 0xff (the
 * erased-NAND value, so they are effectively left unchanged).
 *
 * Return: 0 on success, negative error code on the first failing page.
 */
static int nmbm_mtd_write_data(struct nmbm_mtd *nm, uint64_t addr,
			       struct mtd_oob_ops *ops, enum nmbm_oob_mode mode)
{
	size_t len, ooblen, maxooblen, chklen;
	uint32_t col, ooboffs;
	uint8_t *datcache, *oobcache;
	int ret;

	/* Split start address into page-aligned base + in-page column */
	col = addr & nm->lower->writesize_mask;
	addr &= ~nm->lower->writesize_mask;
	maxooblen = mtd_oobavail(nm->lower, ops);
	ooboffs = ops->ooboffs;
	ooblen = ops->ooblen;
	len = ops->len;

	/* Bounce buffers: data page first, OOB area right behind it */
	datcache = len ? nm->page_cache : NULL;
	oobcache = ooblen ? nm->page_cache + nm->lower->writesize : NULL;

	ops->oobretlen = 0;
	ops->retlen = 0;

	while (len || ooblen) {
		/* Yield/kick the watchdog during long transfers (<watchdog.h>) */
		schedule();

		if (len) {
			/* Move data */
			chklen = nm->lower->writesize - col;
			if (chklen > len)
				chklen = len;

			/* Pad areas outside the request with 0xff */
			memset(datcache, 0xff, col);
			memcpy(datcache + col, ops->datbuf + ops->retlen,
			       chklen);
			memset(datcache + col + chklen, 0xff,
			       nm->lower->writesize - col - chklen);
			len -= chklen;
			/* only the first page can start mid-page */
			col = 0; /* (col + chklen) % */
			ops->retlen += chklen;
		}

		if (ooblen) {
			/* Move oob */
			chklen = maxooblen - ooboffs;
			if (chklen > ooblen)
				chklen = ooblen;

			/* Pad areas outside the request with 0xff */
			memset(oobcache, 0xff, ooboffs);
			memcpy(oobcache + ooboffs,
			       ops->oobbuf + ops->oobretlen, chklen);
			memset(oobcache + ooboffs + chklen, 0xff,
			       nm->lower->oobsize - ooboffs - chklen);
			ooblen -= chklen;
			/* only the first page can start mid-OOB */
			ooboffs = 0; /* (ooboffs + chklen) % maxooblen; */
			ops->oobretlen += chklen;
		}

		ret = nmbm_write_single_page(nm->ni, addr, datcache, oobcache,
					     mode);
		if (ret)
			return ret;

		addr += nm->lower->writesize;
	}

	return 0;
}
|
|
+
|
|
/*
 * MTD _write_oob handler for the upper NMBM device.
 *
 * Validates the request (mode, range, OOB bounds), then either takes the
 * optimized data-only path via nmbm_write_range() or falls back to the
 * page-by-page bounce-buffer path in nmbm_mtd_write_data().
 */
static int nmbm_mtd_write_oob(struct mtd_info *mtd, loff_t to,
			      struct mtd_oob_ops *ops)
{
	struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
	enum nmbm_oob_mode mode;
	uint32_t maxooblen;

	/* No buffers at all: accept only a zero-length request */
	if (!ops->oobbuf && !ops->datbuf) {
		if (ops->ooblen || ops->len)
			return -EINVAL;

		return 0;
	}

	/* Translate the MTD OOB mode into the equivalent NMBM mode */
	switch (ops->mode) {
	case MTD_OPS_PLACE_OOB:
		mode = NMBM_MODE_PLACE_OOB;
		break;
	case MTD_OPS_AUTO_OOB:
		mode = NMBM_MODE_AUTO_OOB;
		break;
	case MTD_OPS_RAW:
		mode = NMBM_MODE_RAW;
		break;
	default:
		pr_debug("%s: unsupported oob mode: %u\n", __func__,
			 ops->mode);
		return -ENOTSUPP;
	}

	maxooblen = mtd_oobavail(mtd, ops);

	/* Do not allow write past end of device */
	if (ops->datbuf && (to + ops->len) > mtd->size) {
		pr_debug("%s: attempt to write beyond end of device\n",
			 __func__);
		return -EINVAL;
	}

	if (!ops->oobbuf) {
		/* Optimized for writing data only */
		return nmbm_write_range(nm->ni, to, ops->len, ops->datbuf,
					mode, &ops->retlen);
	}

	if (unlikely(ops->ooboffs >= maxooblen)) {
		pr_debug("%s: attempt to start write outside oob\n",
			 __func__);
		return -EINVAL;
	}

	/*
	 * The OOB request must fit into the OOB space of the pages between
	 * 'to' and the end of the device.
	 */
	if (unlikely(to >= mtd->size ||
	    ops->ooboffs + ops->ooblen > ((mtd->size >> mtd->writesize_shift) -
	    (to >> mtd->writesize_shift)) * maxooblen)) {
		pr_debug("%s: attempt to write beyond end of device\n",
			 __func__);
		return -EINVAL;
	}

	return nmbm_mtd_write_data(nm, to, ops, mode);
}
|
|
+
|
|
+static int nmbm_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
|
|
+{
|
|
+ struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
|
|
+
|
|
+ return nmbm_check_bad_block(nm->ni, offs);
|
|
+}
|
|
+
|
|
+static int nmbm_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
|
|
+{
|
|
+ struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
|
|
+
|
|
+ return nmbm_mark_bad_block(nm->ni, offs);
|
|
+}
|
|
+
|
|
/*
 * Create an NMBM upper MTD device on top of a lower NAND MTD device.
 *
 * @lower: the raw NAND MTD device to be managed
 * @flags: NMBM attach flags passed to the core
 * @max_ratio: maximum ratio of blocks reserved for management
 * @max_reserved_blocks: absolute cap of reserved blocks
 * @upper: on success, receives the newly created upper MTD device
 *
 * The nmbm_mtd struct, its name string and a one-page (+ OOB) bounce
 * buffer are carved out of a single calloc() allocation; see the pointer
 * setup below for the layout.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int nmbm_attach_mtd(struct mtd_info *lower, int flags, uint32_t max_ratio,
		    uint32_t max_reserved_blocks, struct mtd_info **upper)
{
	struct nmbm_lower_device nld;
	struct nmbm_instance *ni;
	struct mtd_info *mtd;
	struct nmbm_mtd *nm;
	size_t namelen, alloc_size;
	int ret;

	if (!lower)
		return -EINVAL;

	/* Only plain NAND devices are supported */
	if (lower->type != MTD_NANDFLASH || lower->flags != MTD_CAP_NANDFLASH)
		return -ENOTSUPP;

	/* Room for "nmbm" plus a decimal device index */
	namelen = strlen(NMBM_UPPER_MTD_NAME) + 16;

	/* Single allocation: [struct nmbm_mtd][name][page + oob cache] */
	nm = calloc(sizeof(*nm) + lower->writesize + lower->oobsize + namelen + 1, 1);
	if (!nm)
		return -ENOMEM;

	nm->lower = lower;
	nm->name = (char *)nm + sizeof(*nm);
	nm->page_cache = (uint8_t *)nm->name + namelen + 1;

	/* Device name is "nmbm<N>" with a globally increasing index */
	nm->id = nmbm_id_cnt++;
	snprintf(nm->name, namelen + 1, "%s%u", NMBM_UPPER_MTD_NAME, nm->id);

	/* Describe the lower device and its callbacks for the NMBM core */
	memset(&nld, 0, sizeof(nld));

	nld.flags = flags;
	nld.max_ratio = max_ratio;
	nld.max_reserved_blocks = max_reserved_blocks;

	nld.size = lower->size;
	nld.erasesize = lower->erasesize;
	nld.writesize = lower->writesize;
	nld.oobsize = lower->oobsize;
	nld.oobavail = lower->oobavail;

	nld.arg = nm;
	nld.read_page = nmbm_lower_read_page;
	nld.write_page = nmbm_lower_write_page;
	nld.erase_block = nmbm_lower_erase_block;
	nld.is_bad_block = nmbm_lower_is_bad_block;
	nld.mark_bad_block = nmbm_lower_mark_bad_block;

	nld.logprint = nmbm_lower_log;

	/* The core instance size depends on the lower device geometry */
	alloc_size = nmbm_calc_structure_size(&nld);
	ni = calloc(alloc_size, 1);
	if (!ni) {
		free(nm);
		return -ENOMEM;
	}

	ret = nmbm_attach(&nld, ni);
	if (ret) {
		free(ni);
		free(nm);
		return ret;
	}

	nm->ni = ni;

	/* Initialize upper mtd */
	mtd = &nm->upper;

	mtd->name = nm->name;
	mtd->type = MTD_DEV_TYPE_NMBM;
	mtd->flags = lower->flags;

	/* Upper size covers only the logical data blocks */
	mtd->size = (uint64_t)ni->data_block_count * ni->lower.erasesize;
	mtd->erasesize = lower->erasesize;
	mtd->writesize = lower->writesize;
	mtd->writebufsize = lower->writesize;
	mtd->oobsize = lower->oobsize;
	mtd->oobavail = lower->oobavail;

	mtd->erasesize_shift = lower->erasesize_shift;
	mtd->writesize_shift = lower->writesize_shift;
	mtd->erasesize_mask = lower->erasesize_mask;
	mtd->writesize_mask = lower->writesize_mask;

	mtd->bitflip_threshold = lower->bitflip_threshold;

	/* XXX: should this be duplicated? */
	mtd->ooblayout = lower->ooblayout;
	mtd->ecclayout = lower->ecclayout;

	mtd->ecc_step_size = lower->ecc_step_size;
	mtd->ecc_strength = lower->ecc_strength;

	mtd->numeraseregions = lower->numeraseregions;
	mtd->eraseregions = lower->eraseregions;

	mtd->_read = nmbm_mtd_read;
	mtd->_write = nmbm_mtd_write;
	mtd->_erase = nmbm_mtd_erase;
	mtd->_read_oob = nmbm_mtd_read_oob;
	mtd->_write_oob = nmbm_mtd_write_oob;
	mtd->_block_isbad = nmbm_mtd_block_isbad;
	mtd->_block_markbad = nmbm_mtd_block_markbad;

	*upper = mtd;

	/* Register in the global device list used by the lookup helpers */
	list_add_tail(&nm->node, &nmbm_devs);

	return 0;
}
|
|
+
|
|
+int nmbm_free_mtd(struct mtd_info *upper)
|
|
+{
|
|
+ struct nmbm_mtd *pos;
|
|
+
|
|
+ if (!upper)
|
|
+ return -EINVAL;
|
|
+
|
|
+ list_for_each_entry(pos, &nmbm_devs, node) {
|
|
+ if (&pos->upper == upper) {
|
|
+ list_del(&pos->node);
|
|
+
|
|
+ nmbm_detach(pos->ni);
|
|
+ free(pos->ni);
|
|
+ free(pos);
|
|
+
|
|
+ return 0;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return -ENODEV;
|
|
+}
|
|
+
|
|
+struct mtd_info *nmbm_mtd_get_upper_by_index(uint32_t index)
|
|
+{
|
|
+ struct nmbm_mtd *nm;
|
|
+
|
|
+ list_for_each_entry(nm, &nmbm_devs, node) {
|
|
+ if (nm->id == index)
|
|
+ return &nm->upper;
|
|
+ }
|
|
+
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+struct mtd_info *nmbm_mtd_get_upper(struct mtd_info *lower)
|
|
+{
|
|
+ struct nmbm_mtd *nm;
|
|
+
|
|
+ list_for_each_entry(nm, &nmbm_devs, node) {
|
|
+ if (nm->lower == lower)
|
|
+ return &nm->upper;
|
|
+ }
|
|
+
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+void nmbm_mtd_list_devices(void)
|
|
+{
|
|
+ struct nmbm_mtd *nm;
|
|
+
|
|
+ printf("Index NMBM device Lower device\n");
|
|
+ printf("========================================\n");
|
|
+
|
|
+ list_for_each_entry(nm, &nmbm_devs, node) {
|
|
+ printf("%-8u%-20s%s\n", nm->id, nm->name, nm->lower->name);
|
|
+ }
|
|
+}
|
|
+
|
|
/*
 * Print the management layout of the NMBM device named @name: block
 * counts, info table locations and management area boundaries.
 *
 * Return: 0 on success, -ENODEV if no device matches @name.
 */
int nmbm_mtd_print_info(const char *name)
{
	struct nmbm_mtd *nm;
	bool found = false;

	/* Find the device by its upper MTD name */
	list_for_each_entry(nm, &nmbm_devs, node) {
		if (!strcmp(nm->name, name)) {
			found = true;
			break;
		}
	}

	if (!found) {
		printf("Error: NMBM device '%s' not found\n", name);
		return -ENODEV;
	}

	printf("%s:\n", name);
	printf("Total blocks: %u\n", nm->ni->block_count);
	printf("Data blocks: %u\n", nm->ni->data_block_count);
	printf("Management start block: %u\n", nm->ni->mgmt_start_ba);
	printf("Info table size: 0x%x\n", nm->ni->info_table_size);

	/* Block address 0 is treated as "table not present" */
	if (nm->ni->main_table_ba)
		printf("Main info table start block: %u\n", nm->ni->main_table_ba);
	else
		printf("Main info table start block: Not exist\n");

	if (nm->ni->backup_table_ba)
		printf("Backup info table start block: %u\n", nm->ni->backup_table_ba);
	else
		printf("Backup info table start block: Not exist\n");

	printf("Signature block: %u\n", nm->ni->signature_ba);
	printf("Mapping blocks top address: %u\n", nm->ni->mapping_blocks_top_ba);
	printf("Mapping blocks limit address: %u\n", nm->ni->mapping_blocks_ba);

	return 0;
}
|
|
+
|
|
/*
 * One-character legend per physical block type, indexed by
 * enum nmmb_block_type; used by nmbm_mtd_print_states().
 */
static const char nmbm_block_legends[] = {
	[NMBM_BLOCK_GOOD_DATA] = '-',
	[NMBM_BLOCK_GOOD_MGMT] = '+',
	[NMBM_BLOCK_BAD] = 'B',
	[NMBM_BLOCK_MAIN_INFO_TABLE] = 'I',
	[NMBM_BLOCK_BACKUP_INFO_TABLE] = 'i',
	[NMBM_BLOCK_REMAPPED] = 'M',
	[NMBM_BLOCK_SIGNATURE] = 'S',
};
|
|
+
|
|
/*
 * Print a block-state map of the NMBM device named @name: one legend
 * character per block, 64 blocks per row, first for all physical blocks
 * and then for the logical (data) blocks.
 *
 * Return: 0 on success, -ENODEV if no device matches @name.
 */
int nmbm_mtd_print_states(const char *name)
{
	struct nmbm_mtd *nm;
	enum nmmb_block_type bt;
	bool found = false;
	uint32_t i;

	/* Find the device by its upper MTD name */
	list_for_each_entry(nm, &nmbm_devs, node) {
		if (!strcmp(nm->name, name)) {
			found = true;
			break;
		}
	}

	if (!found) {
		printf("Error: NMBM device '%s' not found\n", name);
		return -ENODEV;
	}

	printf("Physical blocks:\n");
	printf("\n");

	printf("Legends:\n");
	printf(" - Good data block\n");
	printf(" + Good management block\n");
	printf(" B Bad block\n");
	printf(" I Main info table\n");
	printf(" i Backup info table\n");
	printf(" M Remapped spare block\n");
	printf(" S Signature block\n");
	printf("\n");

	for (i = 0; i < nm->ni->block_count; i++) {
		/* 64 legend characters per row */
		if (i % 64 == 0)
			printf(" ");

		bt = nmbm_debug_get_phys_block_type(nm->ni, i);
		if (bt < __NMBM_BLOCK_TYPE_MAX)
			putc(nmbm_block_legends[bt]);
		else
			putc('?');

		if (i % 64 == 63)
			printf("\n");
	}

	printf("\n");
	printf("Logical blocks:\n");
	printf("\n");

	printf("Legends:\n");
	printf(" - Good block\n");
	printf(" + Initially remapped block\n");
	printf(" M Remapped block\n");
	printf(" B Bad/Unmapped block\n");
	printf("\n");

	for (i = 0; i < nm->ni->data_block_count; i++) {
		if (i % 64 == 0)
			printf(" ");

		/* Classify by how the logical block maps to a physical one */
		if (nm->ni->block_mapping[i] < 0)
			putc('B');	/* unmapped/bad */
		else if (nm->ni->block_mapping[i] == i)
			putc('-');	/* identity mapping: good */
		else if (nm->ni->block_mapping[i] < nm->ni->data_block_count)
			putc('+');	/* remapped within the data area */
		else if (nm->ni->block_mapping[i] > nm->ni->mapping_blocks_top_ba &&
			 nm->ni->block_mapping[i] < nm->ni->signature_ba)
			putc('M');	/* remapped into a spare block */
		else
			putc('?');

		if (i % 64 == 63)
			printf("\n");
	}

	return 0;
}
|
|
+
|
|
/*
 * List all bad (and remap-pending) physical blocks and all unmapped
 * logical blocks of the NMBM device named @name, with their byte offsets.
 *
 * Return: 0 on success, -ENODEV if no device matches @name.
 */
int nmbm_mtd_print_bad_blocks(const char *name)
{
	struct nmbm_mtd *nm;
	bool found = false;
	uint32_t i;

	/* Find the device by its upper MTD name */
	list_for_each_entry(nm, &nmbm_devs, node) {
		if (!strcmp(nm->name, name)) {
			found = true;
			break;
		}
	}

	if (!found) {
		printf("Error: NMBM device '%s' not found\n", name);
		return -ENODEV;
	}

	printf("Physical blocks:\n");

	for (i = 0; i < nm->ni->block_count; i++) {
		switch (nmbm_debug_get_block_state(nm->ni, i)) {
		case BLOCK_ST_BAD:
			printf("%-12u [0x%08llx] - Bad\n", i,
			       (uint64_t)i << nm->ni->erasesize_shift);
			break;
		case BLOCK_ST_NEED_REMAP:
			printf("%-12u [0x%08llx] - Awaiting remapping\n", i,
			       (uint64_t)i << nm->ni->erasesize_shift);
			break;
		}
	}

	printf("\n");
	printf("Logical blocks:\n");

	for (i = 0; i < nm->ni->data_block_count; i++) {
		/* A negative mapping entry means the block is unmapped */
		if (nm->ni->block_mapping[i] < 0) {
			printf("%-12u [0x%08llx] - Bad\n", i,
			       (uint64_t)i << nm->ni->erasesize_shift);
		}
	}

	return 0;
}
|
|
+
|
|
/*
 * Print the logical-to-physical block mapping table of the NMBM device
 * named @name.
 *
 * @printall: non-zero prints every logical block; zero prints only the
 * "interesting" entries (unmapped blocks and blocks remapped into the
 * spare area).
 *
 * Return: 0 on success, -ENODEV if no device matches @name.
 */
int nmbm_mtd_print_mappings(const char *name, int printall)
{
	struct nmbm_mtd *nm;
	bool found = false;
	int32_t pb;
	uint32_t i;

	/* Find the device by its upper MTD name */
	list_for_each_entry(nm, &nmbm_devs, node) {
		if (!strcmp(nm->name, name)) {
			found = true;
			break;
		}
	}

	if (!found) {
		printf("Error: NMBM device '%s' not found\n", name);
		return -ENODEV;
	}

	printf("Logical Block Physical Block\n");
	printf("==================================\n");

	if (!printall) {
		for (i = 0; i < nm->ni->data_block_count; i++) {
			pb = nm->ni->block_mapping[i];
			if (pb < 0)
				printf("%-20uUnmapped\n", i);
			/* Only show entries remapped into the spare area */
			else if ((uint32_t)pb > nm->ni->mapping_blocks_top_ba &&
				 (uint32_t)pb < nm->ni->signature_ba)
				printf("%-20u%u\n", i, pb);
		}

		return 0;
	}

	for (i = 0; i < nm->ni->data_block_count; i++) {
		pb = nm->ni->block_mapping[i];

		if (pb >= 0)
			printf("%-20u%u\n", i, pb);
		else
			printf("%-20uUnmapped\n", i);
	}

	return 0;
}
|
|
--- /dev/null
|
|
+++ b/include/nmbm/nmbm-mtd.h
|
|
@@ -0,0 +1,27 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+/*
|
|
+ * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
|
|
+ *
|
|
+ * Author: Weijie Gao <weijie.gao@mediatek.com>
|
|
+ */
|
|
+
|
|
+#ifndef _NMBM_MTD_H_
|
|
+#define _NMBM_MTD_H_
|
|
+
|
|
+#include <linux/mtd/mtd.h>
|
|
+
|
|
+int nmbm_attach_mtd(struct mtd_info *lower, int flags, uint32_t max_ratio,
|
|
+ uint32_t max_reserved_blocks, struct mtd_info **upper);
|
|
+
|
|
+int nmbm_free_mtd(struct mtd_info *upper);
|
|
+
|
|
+struct mtd_info *nmbm_mtd_get_upper_by_index(uint32_t index);
|
|
+struct mtd_info *nmbm_mtd_get_upper(struct mtd_info *lower);
|
|
+
|
|
+void nmbm_mtd_list_devices(void);
|
|
+int nmbm_mtd_print_info(const char *name);
|
|
+int nmbm_mtd_print_states(const char *name);
|
|
+int nmbm_mtd_print_bad_blocks(const char *name);
|
|
+int nmbm_mtd_print_mappings(const char *name, int printall);
|
|
+
|
|
+#endif /* _NMBM_MTD_H_ */
|