github.com/torvalds/linux.git
Diffstat (limited to 'drivers/mtd')
-rw-r--r--  drivers/mtd/chips/gen_probe.c | 9
-rw-r--r--  drivers/mtd/devices/mchp23k256.c | 20
-rw-r--r--  drivers/mtd/devices/mchp48l640.c | 16
-rw-r--r--  drivers/mtd/devices/mtd_dataflash.c | 10
-rw-r--r--  drivers/mtd/devices/phram.c | 12
-rw-r--r--  drivers/mtd/devices/spear_smi.c | 2
-rw-r--r--  drivers/mtd/devices/sst25l.c | 4
-rw-r--r--  drivers/mtd/hyperbus/Kconfig | 2
-rw-r--r--  drivers/mtd/hyperbus/rpc-if.c | 12
-rw-r--r--  drivers/mtd/maps/Kconfig | 6
-rw-r--r--  drivers/mtd/maps/Makefile | 1
-rw-r--r--  drivers/mtd/maps/rbtx4939-flash.c | 133
-rw-r--r--  drivers/mtd/mtd_blkdevs.c | 27
-rw-r--r--  drivers/mtd/mtdchar.c | 110
-rw-r--r--  drivers/mtd/mtdcore.c | 68
-rw-r--r--  drivers/mtd/mtdpart.c | 2
-rw-r--r--  drivers/mtd/mtdswap.c | 2
-rw-r--r--  drivers/mtd/nand/Kconfig | 7
-rw-r--r--  drivers/mtd/nand/Makefile | 1
-rw-r--r--  drivers/mtd/nand/core.c | 13
-rw-r--r--  drivers/mtd/nand/ecc-mxic.c | 879
-rw-r--r--  drivers/mtd/nand/ecc.c | 119
-rw-r--r--  drivers/mtd/nand/onenand/generic.c | 7
-rw-r--r--  drivers/mtd/nand/onenand/onenand_bbt.c | 4
-rw-r--r--  drivers/mtd/nand/raw/Kconfig | 25
-rw-r--r--  drivers/mtd/nand/raw/Makefile | 1
-rw-r--r--  drivers/mtd/nand/raw/atmel/nand-controller.c | 18
-rw-r--r--  drivers/mtd/nand/raw/atmel/pmecc.c | 4
-rw-r--r--  drivers/mtd/nand/raw/brcmnand/Makefile | 2
-rw-r--r--  drivers/mtd/nand/raw/brcmnand/bcma_nand.c | 132
-rw-r--r--  drivers/mtd/nand/raw/brcmnand/brcmnand.c | 164
-rw-r--r--  drivers/mtd/nand/raw/brcmnand/brcmnand.h | 29
-rw-r--r--  drivers/mtd/nand/raw/davinci_nand.c | 73
-rw-r--r--  drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 81
-rw-r--r--  drivers/mtd/nand/raw/ingenic/ingenic_ecc.c | 7
-rw-r--r--  drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c | 7
-rw-r--r--  drivers/mtd/nand/raw/ingenic/jz4780_bch.c | 2
-rw-r--r--  drivers/mtd/nand/raw/mpc5121_nfc.c | 1
-rw-r--r--  drivers/mtd/nand/raw/mtk_ecc.c | 2
-rw-r--r--  drivers/mtd/nand/raw/nand_base.c | 151
-rw-r--r--  drivers/mtd/nand/raw/nand_bbt.c | 3
-rw-r--r--  drivers/mtd/nand/raw/nandsim.c | 47
-rw-r--r--  drivers/mtd/nand/raw/omap2.c | 509
-rw-r--r--  drivers/mtd/nand/raw/omap_elm.c | 20
-rw-r--r--  drivers/mtd/nand/raw/pl35x-nand-controller.c | 2
-rw-r--r--  drivers/mtd/nand/raw/qcom_nandc.c | 28
-rw-r--r--  drivers/mtd/nand/raw/renesas-nand-controller.c | 1424
-rw-r--r--  drivers/mtd/nand/raw/rockchip-nand-controller.c | 1
-rw-r--r--  drivers/mtd/nand/raw/sh_flctl.c | 2
-rw-r--r--  drivers/mtd/nand/raw/sharpsl.c | 1
-rw-r--r--  drivers/mtd/nand/raw/stm32_fmc2_nand.c | 40
-rw-r--r--  drivers/mtd/nand/raw/tegra_nand.c | 58
-rw-r--r--  drivers/mtd/nand/spi/core.c | 51
-rw-r--r--  drivers/mtd/nand/spi/macronix.c | 2
-rw-r--r--  drivers/mtd/parsers/Kconfig | 2
-rw-r--r--  drivers/mtd/parsers/qcomsmempart.c | 36
-rw-r--r--  drivers/mtd/spi-nor/atmel.c | 138
-rw-r--r--  drivers/mtd/spi-nor/catalyst.c | 21
-rw-r--r--  drivers/mtd/spi-nor/controllers/Kconfig | 36
-rw-r--r--  drivers/mtd/spi-nor/controllers/Makefile | 3
-rw-r--r--  drivers/mtd/spi-nor/controllers/aspeed-smc.c | 15
-rw-r--r--  drivers/mtd/spi-nor/controllers/intel-spi-pci.c | 99
-rw-r--r--  drivers/mtd/spi-nor/controllers/intel-spi-platform.c | 54
-rw-r--r--  drivers/mtd/spi-nor/controllers/intel-spi.c | 968
-rw-r--r--  drivers/mtd/spi-nor/controllers/intel-spi.h | 21
-rw-r--r--  drivers/mtd/spi-nor/core.c | 784
-rw-r--r--  drivers/mtd/spi-nor/core.h | 273
-rw-r--r--  drivers/mtd/spi-nor/eon.c | 39
-rw-r--r--  drivers/mtd/spi-nor/esmt.c | 21
-rw-r--r--  drivers/mtd/spi-nor/everspin.c | 18
-rw-r--r--  drivers/mtd/spi-nor/fujitsu.c | 9
-rw-r--r--  drivers/mtd/spi-nor/gigadevice.c | 65
-rw-r--r--  drivers/mtd/spi-nor/intel.c | 18
-rw-r--r--  drivers/mtd/spi-nor/issi.c | 72
-rw-r--r--  drivers/mtd/spi-nor/macronix.c | 121
-rw-r--r--  drivers/mtd/spi-nor/micron-st.c | 433
-rw-r--r--  drivers/mtd/spi-nor/otp.c | 2
-rw-r--r--  drivers/mtd/spi-nor/sfdp.c | 20
-rw-r--r--  drivers/mtd/spi-nor/spansion.c | 302
-rw-r--r--  drivers/mtd/spi-nor/sst.c | 132
-rw-r--r--  drivers/mtd/spi-nor/swp.c | 2
-rw-r--r--  drivers/mtd/spi-nor/winbond.c | 193
-rw-r--r--  drivers/mtd/spi-nor/xilinx.c | 114
-rw-r--r--  drivers/mtd/spi-nor/xmc.c | 16
-rw-r--r--  drivers/mtd/tests/speedtest.c | 11
-rw-r--r--  drivers/mtd/ubi/block.c | 7
86 files changed, 5136 insertions, 3262 deletions
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
index e5bd3c2bc3b2..4d4f97841016 100644
--- a/drivers/mtd/chips/gen_probe.c
+++ b/drivers/mtd/chips/gen_probe.c
@@ -61,8 +61,8 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
struct cfi_private cfi;
struct cfi_private *retcfi;
unsigned long *chip_map;
- int i, j, mapsize;
int max_chips;
+ int i, j;
memset(&cfi, 0, sizeof(cfi));
@@ -111,8 +111,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
max_chips = 1;
}
- mapsize = sizeof(long) * DIV_ROUND_UP(max_chips, BITS_PER_LONG);
- chip_map = kzalloc(mapsize, GFP_KERNEL);
+ chip_map = bitmap_zalloc(max_chips, GFP_KERNEL);
if (!chip_map) {
kfree(cfi.cfiq);
return NULL;
@@ -139,7 +138,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
if (!retcfi) {
kfree(cfi.cfiq);
- kfree(chip_map);
+ bitmap_free(chip_map);
return NULL;
}
@@ -157,7 +156,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
}
}
- kfree(chip_map);
+ bitmap_free(chip_map);
return retcfi;
}
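
For reference, the two allocation styles contrasted in the hunk above look like this in isolation (a minimal sketch, not part of the patch, assuming the usual kernel headers; the function names are invented):

#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/slab.h>

/* Old style: size the chip bitmap by hand and allocate it with kzalloc(). */
static unsigned long *alloc_chip_map_old(unsigned int max_chips)
{
	size_t mapsize = sizeof(long) * DIV_ROUND_UP(max_chips, BITS_PER_LONG);

	return kzalloc(mapsize, GFP_KERNEL);		/* paired with kfree() */
}

/* New style: let the bitmap API size and allocate the map. */
static unsigned long *alloc_chip_map_new(unsigned int max_chips)
{
	return bitmap_zalloc(max_chips, GFP_KERNEL);	/* paired with bitmap_free() */
}
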
diff --git a/drivers/mtd/devices/mchp23k256.c b/drivers/mtd/devices/mchp23k256.c
index 77c872fd3d83..3a6ea7a6a30c 100644
--- a/drivers/mtd/devices/mchp23k256.c
+++ b/drivers/mtd/devices/mchp23k256.c
@@ -209,11 +209,11 @@ static int mchp23k256_probe(struct spi_device *spi)
return 0;
}
-static int mchp23k256_remove(struct spi_device *spi)
+static void mchp23k256_remove(struct spi_device *spi)
{
struct mchp23k256_flash *flash = spi_get_drvdata(spi);
- return mtd_device_unregister(&flash->mtd);
+ WARN_ON(mtd_device_unregister(&flash->mtd));
}
static const struct of_device_id mchp23k256_of_table[] = {
@@ -229,13 +229,27 @@ static const struct of_device_id mchp23k256_of_table[] = {
};
MODULE_DEVICE_TABLE(of, mchp23k256_of_table);
+static const struct spi_device_id mchp23k256_spi_ids[] = {
+ {
+ .name = "mchp23k256",
+ .driver_data = (kernel_ulong_t)&mchp23k256_caps,
+ },
+ {
+ .name = "mchp23lcv1024",
+ .driver_data = (kernel_ulong_t)&mchp23lcv1024_caps,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, mchp23k256_spi_ids);
+
static struct spi_driver mchp23k256_driver = {
.driver = {
.name = "mchp23k256",
- .of_match_table = of_match_ptr(mchp23k256_of_table),
+ .of_match_table = mchp23k256_of_table,
},
.probe = mchp23k256_probe,
.remove = mchp23k256_remove,
+ .id_table = mchp23k256_spi_ids,
};
module_spi_driver(mchp23k256_driver);
diff --git a/drivers/mtd/devices/mchp48l640.c b/drivers/mtd/devices/mchp48l640.c
index 99400d0fb8c1..40cd5041174c 100644
--- a/drivers/mtd/devices/mchp48l640.c
+++ b/drivers/mtd/devices/mchp48l640.c
@@ -341,11 +341,11 @@ static int mchp48l640_probe(struct spi_device *spi)
return 0;
}
-static int mchp48l640_remove(struct spi_device *spi)
+static void mchp48l640_remove(struct spi_device *spi)
{
struct mchp48l640_flash *flash = spi_get_drvdata(spi);
- return mtd_device_unregister(&flash->mtd);
+ WARN_ON(mtd_device_unregister(&flash->mtd));
}
static const struct of_device_id mchp48l640_of_table[] = {
@@ -357,13 +357,23 @@ static const struct of_device_id mchp48l640_of_table[] = {
};
MODULE_DEVICE_TABLE(of, mchp48l640_of_table);
+static const struct spi_device_id mchp48l640_spi_ids[] = {
+ {
+ .name = "48l640",
+ .driver_data = (kernel_ulong_t)&mchp48l640_caps,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, mchp48l640_spi_ids);
+
static struct spi_driver mchp48l640_driver = {
.driver = {
.name = "mchp48l640",
- .of_match_table = of_match_ptr(mchp48l640_of_table),
+ .of_match_table = mchp48l640_of_table,
},
.probe = mchp48l640_probe,
.remove = mchp48l640_remove,
+ .id_table = mchp48l640_spi_ids,
};
module_spi_driver(mchp48l640_driver);
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 2b317ed6c103..134e27328597 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -916,17 +916,15 @@ static int dataflash_probe(struct spi_device *spi)
return status;
}
-static int dataflash_remove(struct spi_device *spi)
+static void dataflash_remove(struct spi_device *spi)
{
struct dataflash *flash = spi_get_drvdata(spi);
- int status;
dev_dbg(&spi->dev, "remove\n");
- status = mtd_device_unregister(&flash->mtd);
- if (status == 0)
- kfree(flash);
- return status;
+ WARN_ON(mtd_device_unregister(&flash->mtd));
+
+ kfree(flash);
}
static struct spi_driver dataflash_driver = {
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 6ed6c51fac69..d503821a3e60 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -264,16 +264,20 @@ static int phram_setup(const char *val)
}
}
- if (erasesize)
- div_u64_rem(len, (uint32_t)erasesize, &rem);
-
if (len == 0 || erasesize == 0 || erasesize > len
- || erasesize > UINT_MAX || rem) {
+ || erasesize > UINT_MAX) {
parse_err("illegal erasesize or len\n");
ret = -EINVAL;
goto error;
}
+ div_u64_rem(len, (uint32_t)erasesize, &rem);
+ if (rem) {
+ parse_err("len is not multiple of erasesize\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
ret = register_device(name, start, len, (uint32_t)erasesize);
if (ret)
goto error;
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index 2e00862389dd..24073518587f 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -969,7 +969,7 @@ static int spear_smi_probe(struct platform_device *pdev)
goto err;
}
- dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_ATOMIC);
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev) {
ret = -ENOMEM;
goto err;
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index b81c3f0b85f9..8813994ce9f4 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -398,11 +398,11 @@ static int sst25l_probe(struct spi_device *spi)
return 0;
}
-static int sst25l_remove(struct spi_device *spi)
+static void sst25l_remove(struct spi_device *spi)
{
struct sst25l_flash *flash = spi_get_drvdata(spi);
- return mtd_device_unregister(&flash->mtd);
+ WARN_ON(mtd_device_unregister(&flash->mtd));
}
static struct spi_driver sst25l_driver = {
diff --git a/drivers/mtd/hyperbus/Kconfig b/drivers/mtd/hyperbus/Kconfig
index 46c7e407e378..30ffc4c16e4d 100644
--- a/drivers/mtd/hyperbus/Kconfig
+++ b/drivers/mtd/hyperbus/Kconfig
@@ -15,7 +15,7 @@ if MTD_HYPERBUS
config HBMC_AM654
tristate "HyperBus controller driver for AM65x SoC"
- depends on ARM64 || COMPILE_TEST
+ depends on ARCH_K3 || COMPILE_TEST
select MULTIPLEXER
imply MUX_MMIO
help
diff --git a/drivers/mtd/hyperbus/rpc-if.c b/drivers/mtd/hyperbus/rpc-if.c
index ecb050ba95cd..6e08ec1d4f09 100644
--- a/drivers/mtd/hyperbus/rpc-if.c
+++ b/drivers/mtd/hyperbus/rpc-if.c
@@ -124,13 +124,17 @@ static int rpcif_hb_probe(struct platform_device *pdev)
if (!hyperbus)
return -ENOMEM;
- rpcif_sw_init(&hyperbus->rpc, pdev->dev.parent);
+ error = rpcif_sw_init(&hyperbus->rpc, pdev->dev.parent);
+ if (error)
+ return error;
platform_set_drvdata(pdev, hyperbus);
rpcif_enable_rpm(&hyperbus->rpc);
- rpcif_hw_init(&hyperbus->rpc, true);
+ error = rpcif_hw_init(&hyperbus->rpc, true);
+ if (error)
+ return error;
hyperbus->hbdev.map.size = hyperbus->rpc.size;
hyperbus->hbdev.map.virt = hyperbus->rpc.dirmap;
@@ -150,9 +154,9 @@ static int rpcif_hb_remove(struct platform_device *pdev)
{
struct rpcif_hyperbus *hyperbus = platform_get_drvdata(pdev);
int error = hyperbus_unregister_device(&hyperbus->hbdev);
- struct rpcif *rpc = dev_get_drvdata(pdev->dev.parent);
- rpcif_disable_rpm(rpc);
+ rpcif_disable_rpm(&hyperbus->rpc);
+
return error;
}
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 4945caa88345..6a099bbcd8be 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -357,12 +357,6 @@ config MTD_INTEL_VR_NOR
Map driver for a NOR flash bank located on the Expansion Bus of the
Intel Vermilion Range chipset.
-config MTD_RBTX4939
- tristate "Map driver for RBTX4939 board"
- depends on TOSHIBA_RBTX4939 && MTD_CFI && MTD_COMPLEX_MAPPINGS
- help
- Map driver for NOR flash chips on RBTX4939 board.
-
config MTD_PLATRAM
tristate "Map driver for platform device RAM (mtd-ram)"
select MTD_RAM
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 11fea9c8d561..2240b100f66a 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -42,6 +42,5 @@ obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o
obj-$(CONFIG_MTD_IXP4XX) += ixp4xx.o
obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o
obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o
-obj-$(CONFIG_MTD_RBTX4939) += rbtx4939-flash.o
obj-$(CONFIG_MTD_VMU) += vmu-flash.o
obj-$(CONFIG_MTD_LANTIQ) += lantiq-flash.o
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
deleted file mode 100644
index 39c86c0b0ec1..000000000000
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ /dev/null
@@ -1,133 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * rbtx4939-flash (based on physmap.c)
- *
- * This is a simplified physmap driver with map_init callback function.
- *
- * Copyright (C) 2009 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <asm/txx9/rbtx4939.h>
-
-struct rbtx4939_flash_info {
- struct mtd_info *mtd;
- struct map_info map;
-};
-
-static int rbtx4939_flash_remove(struct platform_device *dev)
-{
- struct rbtx4939_flash_info *info;
-
- info = platform_get_drvdata(dev);
- if (!info)
- return 0;
-
- if (info->mtd) {
- mtd_device_unregister(info->mtd);
- map_destroy(info->mtd);
- }
- return 0;
-}
-
-static const char * const rom_probe_types[] = {
- "cfi_probe", "jedec_probe", NULL };
-
-static int rbtx4939_flash_probe(struct platform_device *dev)
-{
- struct rbtx4939_flash_data *pdata;
- struct rbtx4939_flash_info *info;
- struct resource *res;
- const char * const *probe_type;
- int err = 0;
- unsigned long size;
-
- pdata = dev_get_platdata(&dev->dev);
- if (!pdata)
- return -ENODEV;
-
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
- info = devm_kzalloc(&dev->dev, sizeof(struct rbtx4939_flash_info),
- GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- platform_set_drvdata(dev, info);
-
- size = resource_size(res);
- pr_notice("rbtx4939 platform flash device: %pR\n", res);
-
- if (!devm_request_mem_region(&dev->dev, res->start, size,
- dev_name(&dev->dev)))
- return -EBUSY;
-
- info->map.name = dev_name(&dev->dev);
- info->map.phys = res->start;
- info->map.size = size;
- info->map.bankwidth = pdata->width;
-
- info->map.virt = devm_ioremap(&dev->dev, info->map.phys, size);
- if (!info->map.virt)
- return -EBUSY;
-
- if (pdata->map_init)
- (*pdata->map_init)(&info->map);
- else
- simple_map_init(&info->map);
-
- probe_type = rom_probe_types;
- for (; !info->mtd && *probe_type; probe_type++)
- info->mtd = do_map_probe(*probe_type, &info->map);
- if (!info->mtd) {
- dev_err(&dev->dev, "map_probe failed\n");
- err = -ENXIO;
- goto err_out;
- }
- info->mtd->dev.parent = &dev->dev;
- err = mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts);
-
- if (err)
- goto err_out;
- return 0;
-
-err_out:
- rbtx4939_flash_remove(dev);
- return err;
-}
-
-#ifdef CONFIG_PM
-static void rbtx4939_flash_shutdown(struct platform_device *dev)
-{
- struct rbtx4939_flash_info *info = platform_get_drvdata(dev);
-
- if (mtd_suspend(info->mtd) == 0)
- mtd_resume(info->mtd);
-}
-#else
-#define rbtx4939_flash_shutdown NULL
-#endif
-
-static struct platform_driver rbtx4939_flash_driver = {
- .probe = rbtx4939_flash_probe,
- .remove = rbtx4939_flash_remove,
- .shutdown = rbtx4939_flash_shutdown,
- .driver = {
- .name = "rbtx4939-flash",
- },
-};
-
-module_platform_driver(rbtx4939_flash_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("RBTX4939 MTD map driver");
-MODULE_ALIAS("platform:rbtx4939-flash");
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 4eaba6f4ec68..64d2b093f114 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -46,23 +46,19 @@ static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
struct mtd_blktrans_dev *dev,
struct request *req)
{
+ struct req_iterator iter;
+ struct bio_vec bvec;
unsigned long block, nsect;
char *buf;
block = blk_rq_pos(req) << 9 >> tr->blkshift;
nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
- if (req_op(req) == REQ_OP_FLUSH) {
+ switch (req_op(req)) {
+ case REQ_OP_FLUSH:
if (tr->flush(dev))
return BLK_STS_IOERR;
return BLK_STS_OK;
- }
-
- if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
- get_capacity(req->rq_disk))
- return BLK_STS_IOERR;
-
- switch (req_op(req)) {
case REQ_OP_DISCARD:
if (tr->discard(dev, block, nsect))
return BLK_STS_IOERR;
@@ -76,13 +72,17 @@ static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
}
}
kunmap(bio_page(req->bio));
- rq_flush_dcache_pages(req);
+
+ rq_for_each_segment(bvec, req, iter)
+ flush_dcache_page(bvec.bv_page);
return BLK_STS_OK;
case REQ_OP_WRITE:
if (!tr->writesect)
return BLK_STS_IOERR;
- rq_flush_dcache_pages(req);
+ rq_for_each_segment(bvec, req, iter)
+ flush_dcache_page(bvec.bv_page);
+
buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
if (tr->writesect(dev, block, buf)) {
@@ -158,6 +158,7 @@ static void mtd_blktrans_work(struct mtd_blktrans_dev *dev)
}
background_done = 0;
+ cond_resched();
spin_lock_irq(&dev->queue_lock);
}
}
@@ -346,7 +347,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
gd->minors = 1 << tr->part_bits;
gd->fops = &mtd_block_ops;
- if (tr->part_bits)
+ if (tr->part_bits) {
if (new->devnum < 26)
snprintf(gd->disk_name, sizeof(gd->disk_name),
"%s%c", tr->name, 'a' + new->devnum);
@@ -355,9 +356,11 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
"%s%c%c", tr->name,
'a' - 1 + new->devnum / 26,
'a' + new->devnum % 26);
- else
+ } else {
snprintf(gd->disk_name, sizeof(gd->disk_name),
"%s%d", tr->name, new->devnum);
+ gd->flags |= GENHD_FL_NO_PART;
+ }
set_capacity(gd, ((u64)new->size * tr->blksize) >> 9);
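
The rq_flush_dcache_pages() calls dropped above used to walk the request internally; the replacement open-codes the same walk with rq_for_each_segment(). In isolation the pattern looks roughly like this (a minimal sketch, not part of the patch; the wrapper name is invented):

#include <linux/blk-mq.h>
#include <linux/highmem.h>

/* Flush the D-cache of every page backing a block request. */
static void flush_request_dcache(struct request *req)
{
	struct req_iterator iter;
	struct bio_vec bvec;

	rq_for_each_segment(bvec, req, iter)
		flush_dcache_page(bvec.bv_page);
}
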
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 155e991d9d75..d0f9c4b0285c 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -573,14 +573,32 @@ static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
}
}
+static void adjust_oob_length(struct mtd_info *mtd, uint64_t start,
+ struct mtd_oob_ops *ops)
+{
+ uint32_t start_page, end_page;
+ u32 oob_per_page;
+
+ if (ops->len == 0 || ops->ooblen == 0)
+ return;
+
+ start_page = mtd_div_by_ws(start, mtd);
+ end_page = mtd_div_by_ws(start + ops->len - 1, mtd);
+ oob_per_page = mtd_oobavail(mtd, ops);
+
+ ops->ooblen = min_t(size_t, ops->ooblen,
+ (end_page - start_page + 1) * oob_per_page);
+}
+
static int mtdchar_write_ioctl(struct mtd_info *mtd,
struct mtd_write_req __user *argp)
{
struct mtd_info *master = mtd_get_master(mtd);
struct mtd_write_req req;
- struct mtd_oob_ops ops = {};
const void __user *usr_data, *usr_oob;
- int ret;
+ uint8_t *datbuf = NULL, *oobbuf = NULL;
+ size_t datbuf_len, oobbuf_len;
+ int ret = 0;
if (copy_from_user(&req, argp, sizeof(req)))
return -EFAULT;
@@ -590,33 +608,79 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
if (!master->_write_oob)
return -EOPNOTSUPP;
- ops.mode = req.mode;
- ops.len = (size_t)req.len;
- ops.ooblen = (size_t)req.ooblen;
- ops.ooboffs = 0;
-
- if (usr_data) {
- ops.datbuf = memdup_user(usr_data, ops.len);
- if (IS_ERR(ops.datbuf))
- return PTR_ERR(ops.datbuf);
- } else {
- ops.datbuf = NULL;
+
+ if (!usr_data)
+ req.len = 0;
+
+ if (!usr_oob)
+ req.ooblen = 0;
+
+ if (req.start + req.len > mtd->size)
+ return -EINVAL;
+
+ datbuf_len = min_t(size_t, req.len, mtd->erasesize);
+ if (datbuf_len > 0) {
+ datbuf = kmalloc(datbuf_len, GFP_KERNEL);
+ if (!datbuf)
+ return -ENOMEM;
}
- if (usr_oob) {
- ops.oobbuf = memdup_user(usr_oob, ops.ooblen);
- if (IS_ERR(ops.oobbuf)) {
- kfree(ops.datbuf);
- return PTR_ERR(ops.oobbuf);
+ oobbuf_len = min_t(size_t, req.ooblen, mtd->erasesize);
+ if (oobbuf_len > 0) {
+ oobbuf = kmalloc(oobbuf_len, GFP_KERNEL);
+ if (!oobbuf) {
+ kfree(datbuf);
+ return -ENOMEM;
}
- } else {
- ops.oobbuf = NULL;
}
- ret = mtd_write_oob(mtd, (loff_t)req.start, &ops);
+ while (req.len > 0 || (!usr_data && req.ooblen > 0)) {
+ struct mtd_oob_ops ops = {
+ .mode = req.mode,
+ .len = min_t(size_t, req.len, datbuf_len),
+ .ooblen = min_t(size_t, req.ooblen, oobbuf_len),
+ .datbuf = datbuf,
+ .oobbuf = oobbuf,
+ };
- kfree(ops.datbuf);
- kfree(ops.oobbuf);
+ /*
+ * Shorten non-page-aligned, eraseblock-sized writes so that
+ * the write ends on an eraseblock boundary. This is necessary
+ * for adjust_oob_length() to properly handle non-page-aligned
+ * writes.
+ */
+ if (ops.len == mtd->erasesize)
+ ops.len -= mtd_mod_by_ws(req.start + ops.len, mtd);
+
+ /*
+ * For writes which are not OOB-only, adjust the amount of OOB
+ * data written according to the number of data pages written.
+ * This is necessary to prevent OOB data from being skipped
+ * over in data+OOB writes requiring multiple mtd_write_oob()
+ * calls to be completed.
+ */
+ adjust_oob_length(mtd, req.start, &ops);
+
+ if (copy_from_user(datbuf, usr_data, ops.len) ||
+ copy_from_user(oobbuf, usr_oob, ops.ooblen)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = mtd_write_oob(mtd, req.start, &ops);
+ if (ret)
+ break;
+
+ req.start += ops.retlen;
+ req.len -= ops.retlen;
+ usr_data += ops.retlen;
+
+ req.ooblen -= ops.oobretlen;
+ usr_oob += ops.oobretlen;
+ }
+
+ kfree(datbuf);
+ kfree(oobbuf);
return ret;
}
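
The MEMWRITE handler above now bounces user data through eraseblock-sized kernel buffers and issues mtd_write_oob() per chunk, instead of duplicating the whole request with memdup_user(). The user-space contract is unchanged; a minimal sketch of a caller (hypothetical helper, assuming the MTD UAPI header <mtd/mtd-user.h>):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <mtd/mtd-user.h>

/* Write one chunk of data plus its OOB area starting at offset 'offs'. */
static int mtd_write_with_oob(int fd, uint64_t offs,
			      const void *data, size_t datalen,
			      const void *oob, size_t ooblen)
{
	struct mtd_write_req req;

	memset(&req, 0, sizeof(req));
	req.start = offs;
	req.len = datalen;
	req.ooblen = ooblen;
	req.usr_data = (uintptr_t)data;
	req.usr_oob = (uintptr_t)oob;
	req.mode = MTD_OPS_AUTO_OOB;

	return ioctl(fd, MEMWRITE, &req);	/* the kernel now chunks this internally */
}
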
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 9186268d361b..7731796024e0 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -336,49 +336,31 @@ static const struct device_type mtd_devtype = {
.release = mtd_release,
};
-static int mtd_partid_debug_show(struct seq_file *s, void *p)
-{
- struct mtd_info *mtd = s->private;
-
- seq_printf(s, "%s\n", mtd->dbg.partid);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(mtd_partid_debug);
+static bool mtd_expert_analysis_mode;
-static int mtd_partname_debug_show(struct seq_file *s, void *p)
+#ifdef CONFIG_DEBUG_FS
+bool mtd_check_expert_analysis_mode(void)
{
- struct mtd_info *mtd = s->private;
+ const char *mtd_expert_analysis_warning =
+ "Bad block checks have been entirely disabled.\n"
+ "This is only reserved for post-mortem forensics and debug purposes.\n"
+ "Never enable this mode if you do not know what you are doing!\n";
- seq_printf(s, "%s\n", mtd->dbg.partname);
-
- return 0;
+ return WARN_ONCE(mtd_expert_analysis_mode, mtd_expert_analysis_warning);
}
-
-DEFINE_SHOW_ATTRIBUTE(mtd_partname_debug);
+EXPORT_SYMBOL_GPL(mtd_check_expert_analysis_mode);
+#endif
static struct dentry *dfs_dir_mtd;
static void mtd_debugfs_populate(struct mtd_info *mtd)
{
- struct mtd_info *master = mtd_get_master(mtd);
struct device *dev = &mtd->dev;
- struct dentry *root;
if (IS_ERR_OR_NULL(dfs_dir_mtd))
return;
- root = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
- mtd->dbg.dfs_dir = root;
-
- if (master->dbg.partid)
- debugfs_create_file("partid", 0400, root, master,
- &mtd_partid_debug_fops);
-
- if (master->dbg.partname)
- debugfs_create_file("partname", 0400, root, master,
- &mtd_partname_debug_fops);
+ mtd->dbg.dfs_dir = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
}
#ifndef CONFIG_MMU
@@ -546,6 +528,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
config.stride = 1;
config.read_only = true;
config.root_only = true;
+ config.ignore_wp = true;
config.no_of_node = !of_device_is_compatible(node, "nvmem-cells");
config.priv = mtd;
@@ -742,11 +725,13 @@ int del_mtd_device(struct mtd_info *mtd)
debugfs_remove_recursive(mtd->dbg.dfs_dir);
/* Try to remove the NVMEM provider */
- if (mtd->nvmem)
- nvmem_unregister(mtd->nvmem);
+ nvmem_unregister(mtd->nvmem);
device_unregister(&mtd->dev);
+ /* Clear dev so mtd can be safely re-registered later if desired */
+ memset(&mtd->dev, 0, sizeof(mtd->dev));
+
idr_remove(&mtd_idr, mtd->index);
of_node_put(mtd_get_of_node(mtd));
@@ -825,12 +810,12 @@ static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
/* OTP nvmem will be registered on the physical device */
config.dev = mtd->dev.parent;
- /* just reuse the compatible as name */
- config.name = compatible;
+ config.name = kasprintf(GFP_KERNEL, "%s-%s", dev_name(&mtd->dev), compatible);
config.id = NVMEM_DEVID_NONE;
config.owner = THIS_MODULE;
config.type = NVMEM_TYPE_OTP;
config.root_only = true;
+ config.ignore_wp = true;
config.reg_read = reg_read;
config.size = size;
config.of_node = np;
@@ -842,6 +827,7 @@ static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
nvmem = NULL;
of_node_put(np);
+ kfree(config.name);
return nvmem;
}
@@ -918,8 +904,7 @@ static int mtd_otp_nvmem_add(struct mtd_info *mtd)
return 0;
err:
- if (mtd->otp_user_nvmem)
- nvmem_unregister(mtd->otp_user_nvmem);
+ nvmem_unregister(mtd->otp_user_nvmem);
return err;
}
@@ -1018,14 +1003,13 @@ int mtd_device_unregister(struct mtd_info *master)
{
int err;
- if (master->_reboot)
+ if (master->_reboot) {
unregister_reboot_notifier(&master->reboot_notifier);
+ memset(&master->reboot_notifier, 0, sizeof(master->reboot_notifier));
+ }
- if (master->otp_user_nvmem)
- nvmem_unregister(master->otp_user_nvmem);
-
- if (master->otp_factory_nvmem)
- nvmem_unregister(master->otp_factory_nvmem);
+ nvmem_unregister(master->otp_user_nvmem);
+ nvmem_unregister(master->otp_factory_nvmem);
err = del_mtd_partitions(master);
if (err)
@@ -2388,6 +2372,8 @@ static int __init init_mtd(void)
goto out_procfs;
dfs_dir_mtd = debugfs_create_dir("mtd", NULL);
+ debugfs_create_bool("expert_analysis_mode", 0600, dfs_dir_mtd,
+ &mtd_expert_analysis_mode);
return 0;
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 04af12b66110..357661b62c94 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -312,7 +312,7 @@ static int __mtd_del_partition(struct mtd_info *mtd)
if (err)
return err;
- list_del(&child->part.node);
+ list_del(&mtd->part.node);
free_partition(mtd);
return 0;
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index e86b04bc1d6b..dc7f1532a37f 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -19,7 +19,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <linux/genhd.h>
+#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index b40455234cbd..9b249826ef93 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -46,6 +46,13 @@ config MTD_NAND_ECC_SW_BCH
ECC codes. They are used with NAND devices requiring more than 1 bit
of error correction.
+config MTD_NAND_ECC_MXIC
+ bool "Macronix external hardware ECC engine"
+ depends on HAS_IOMEM
+ select MTD_NAND_ECC
+ help
+ This enables support for the hardware ECC engine from Macronix.
+
endmenu
endmenu
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 1c0b46960eb1..a4e6b7ae0614 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -10,3 +10,4 @@ obj-y += spi/
nandcore-$(CONFIG_MTD_NAND_ECC) += ecc.o
nandcore-$(CONFIG_MTD_NAND_ECC_SW_HAMMING) += ecc-sw-hamming.o
nandcore-$(CONFIG_MTD_NAND_ECC_SW_BCH) += ecc-sw-bch.o
+nandcore-$(CONFIG_MTD_NAND_ECC_MXIC) += ecc-mxic.o
diff --git a/drivers/mtd/nand/core.c b/drivers/mtd/nand/core.c
index 5e13a03d2b32..dbd7b06524b3 100644
--- a/drivers/mtd/nand/core.c
+++ b/drivers/mtd/nand/core.c
@@ -21,6 +21,9 @@
*/
bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
+ if (mtd_check_expert_analysis_mode())
+ return false;
+
if (nanddev_bbt_is_initialized(nand)) {
unsigned int entry;
int status;
@@ -232,7 +235,9 @@ static int nanddev_get_ecc_engine(struct nand_device *nand)
nand->ecc.engine = nand_ecc_get_on_die_hw_engine(nand);
break;
case NAND_ECC_ENGINE_TYPE_ON_HOST:
- pr_err("On-host hardware ECC engines not supported yet\n");
+ nand->ecc.engine = nand_ecc_get_on_host_hw_engine(nand);
+ if (PTR_ERR(nand->ecc.engine) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
break;
default:
pr_err("Missing ECC engine type\n");
@@ -252,7 +257,7 @@ static int nanddev_put_ecc_engine(struct nand_device *nand)
{
switch (nand->ecc.ctx.conf.engine_type) {
case NAND_ECC_ENGINE_TYPE_ON_HOST:
- pr_err("On-host hardware ECC engines not supported yet\n");
+ nand_ecc_put_on_host_hw_engine(nand);
break;
case NAND_ECC_ENGINE_TYPE_NONE:
case NAND_ECC_ENGINE_TYPE_SOFT:
@@ -297,7 +302,9 @@ int nanddev_ecc_engine_init(struct nand_device *nand)
/* Look for the ECC engine to use */
ret = nanddev_get_ecc_engine(nand);
if (ret) {
- pr_err("No ECC engine found\n");
+ if (ret != -EPROBE_DEFER)
+ pr_err("No ECC engine found\n");
+
return ret;
}
diff --git a/drivers/mtd/nand/ecc-mxic.c b/drivers/mtd/nand/ecc-mxic.c
new file mode 100644
index 000000000000..8afdca731b87
--- /dev/null
+++ b/drivers/mtd/nand/ecc-mxic.c
@@ -0,0 +1,879 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Macronix external hardware ECC engine for NAND devices, also
+ * called DPE for Data Processing Engine.
+ *
+ * Copyright © 2019 Macronix
+ * Author: Miquel Raynal <miquel.raynal@bootlin.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand-ecc-mxic.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* DPE Configuration */
+#define DP_CONFIG 0x00
+#define ECC_EN BIT(0)
+#define ECC_TYP(idx) (((idx) << 3) & GENMASK(6, 3))
+/* DPE Interrupt Status */
+#define INTRPT_STS 0x04
+#define TRANS_CMPLT BIT(0)
+#define SDMA_MAIN BIT(1)
+#define SDMA_SPARE BIT(2)
+#define ECC_ERR BIT(3)
+#define TO_SPARE BIT(4)
+#define TO_MAIN BIT(5)
+/* DPE Interrupt Status Enable */
+#define INTRPT_STS_EN 0x08
+/* DPE Interrupt Signal Enable */
+#define INTRPT_SIG_EN 0x0C
+/* Host Controller Configuration */
+#define HC_CONFIG 0x10
+#define DEV2MEM 0 /* TRANS_TYP_DMA in the spec */
+#define MEM2MEM BIT(4) /* TRANS_TYP_IO in the spec */
+#define MAPPING BIT(5) /* TRANS_TYP_MAPPING in the spec */
+#define ECC_PACKED 0 /* LAYOUT_TYP_INTEGRATED in the spec */
+#define ECC_INTERLEAVED BIT(2) /* LAYOUT_TYP_DISTRIBUTED in the spec */
+#define BURST_TYP_FIXED 0
+#define BURST_TYP_INCREASING BIT(0)
+/* Host Controller Slave Address */
+#define HC_SLV_ADDR 0x14
+/* ECC Chunk Size */
+#define CHUNK_SIZE 0x20
+/* Main Data Size */
+#define MAIN_SIZE 0x24
+/* Spare Data Size */
+#define SPARE_SIZE 0x28
+#define META_SZ(reg) ((reg) & GENMASK(7, 0))
+#define PARITY_SZ(reg) (((reg) & GENMASK(15, 8)) >> 8)
+#define RSV_SZ(reg) (((reg) & GENMASK(23, 16)) >> 16)
+#define SPARE_SZ(reg) ((reg) >> 24)
+/* ECC Chunk Count */
+#define CHUNK_CNT 0x30
+/* SDMA Control */
+#define SDMA_CTRL 0x40
+#define WRITE_NAND 0
+#define READ_NAND BIT(1)
+#define CONT_NAND BIT(29)
+#define CONT_SYSM BIT(30) /* Continue System Memory? */
+#define SDMA_STRT BIT(31)
+/* SDMA Address of Main Data */
+#define SDMA_MAIN_ADDR 0x44
+/* SDMA Address of Spare Data */
+#define SDMA_SPARE_ADDR 0x48
+/* DPE Version Number */
+#define DP_VER 0xD0
+#define DP_VER_OFFSET 16
+
+/* Status bytes between each chunk of spare data */
+#define STAT_BYTES 4
+#define NO_ERR 0x00
+#define MAX_CORR_ERR 0x28
+#define UNCORR_ERR 0xFE
+#define ERASED_CHUNK 0xFF
+
+struct mxic_ecc_engine {
+ struct device *dev;
+ void __iomem *regs;
+ int irq;
+ struct completion complete;
+ struct nand_ecc_engine external_engine;
+ struct nand_ecc_engine pipelined_engine;
+ struct mutex lock;
+};
+
+struct mxic_ecc_ctx {
+ /* ECC machinery */
+ unsigned int data_step_sz;
+ unsigned int oob_step_sz;
+ unsigned int parity_sz;
+ unsigned int meta_sz;
+ u8 *status;
+ int steps;
+
+ /* DMA boilerplate */
+ struct nand_ecc_req_tweak_ctx req_ctx;
+ u8 *oobwithstat;
+ struct scatterlist sg[2];
+ struct nand_page_io_req *req;
+ unsigned int pageoffs;
+};
+
+static struct mxic_ecc_engine *ext_ecc_eng_to_mxic(struct nand_ecc_engine *eng)
+{
+ return container_of(eng, struct mxic_ecc_engine, external_engine);
+}
+
+static struct mxic_ecc_engine *pip_ecc_eng_to_mxic(struct nand_ecc_engine *eng)
+{
+ return container_of(eng, struct mxic_ecc_engine, pipelined_engine);
+}
+
+static struct mxic_ecc_engine *nand_to_mxic(struct nand_device *nand)
+{
+ struct nand_ecc_engine *eng = nand->ecc.engine;
+
+ if (eng->integration == NAND_ECC_ENGINE_INTEGRATION_EXTERNAL)
+ return ext_ecc_eng_to_mxic(eng);
+ else
+ return pip_ecc_eng_to_mxic(eng);
+}
+
+static int mxic_ecc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
+
+ if (section < 0 || section >= ctx->steps)
+ return -ERANGE;
+
+ oobregion->offset = (section * ctx->oob_step_sz) + ctx->meta_sz;
+ oobregion->length = ctx->parity_sz;
+
+ return 0;
+}
+
+static int mxic_ecc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
+
+ if (section < 0 || section >= ctx->steps)
+ return -ERANGE;
+
+ if (!section) {
+ oobregion->offset = 2;
+ oobregion->length = ctx->meta_sz - 2;
+ } else {
+ oobregion->offset = section * ctx->oob_step_sz;
+ oobregion->length = ctx->meta_sz;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops mxic_ecc_ooblayout_ops = {
+ .ecc = mxic_ecc_ooblayout_ecc,
+ .free = mxic_ecc_ooblayout_free,
+};
+
+static void mxic_ecc_disable_engine(struct mxic_ecc_engine *mxic)
+{
+ u32 reg;
+
+ reg = readl(mxic->regs + DP_CONFIG);
+ reg &= ~ECC_EN;
+ writel(reg, mxic->regs + DP_CONFIG);
+}
+
+static void mxic_ecc_enable_engine(struct mxic_ecc_engine *mxic)
+{
+ u32 reg;
+
+ reg = readl(mxic->regs + DP_CONFIG);
+ reg |= ECC_EN;
+ writel(reg, mxic->regs + DP_CONFIG);
+}
+
+static void mxic_ecc_disable_int(struct mxic_ecc_engine *mxic)
+{
+ writel(0, mxic->regs + INTRPT_SIG_EN);
+}
+
+static void mxic_ecc_enable_int(struct mxic_ecc_engine *mxic)
+{
+ writel(TRANS_CMPLT, mxic->regs + INTRPT_SIG_EN);
+}
+
+static irqreturn_t mxic_ecc_isr(int irq, void *dev_id)
+{
+ struct mxic_ecc_engine *mxic = dev_id;
+ u32 sts;
+
+ sts = readl(mxic->regs + INTRPT_STS);
+ if (!sts)
+ return IRQ_NONE;
+
+ if (sts & TRANS_CMPLT)
+ complete(&mxic->complete);
+
+ writel(sts, mxic->regs + INTRPT_STS);
+
+ return IRQ_HANDLED;
+}
+
+static int mxic_ecc_init_ctx(struct nand_device *nand, struct device *dev)
+{
+ struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
+ struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
+ struct nand_ecc_props *reqs = &nand->ecc.requirements;
+ struct nand_ecc_props *user = &nand->ecc.user_conf;
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ int step_size = 0, strength = 0, desired_correction = 0, steps, idx;
+ static const int possible_strength[] = {4, 8, 40, 48};
+ static const int spare_size[] = {32, 32, 96, 96};
+ struct mxic_ecc_ctx *ctx;
+ u32 spare_reg;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ nand->ecc.ctx.priv = ctx;
+
+ /* Only large page NAND chips may use BCH */
+ if (mtd->oobsize < 64) {
+ pr_err("BCH cannot be used with small page NAND chips\n");
+ return -EINVAL;
+ }
+
+ mtd_set_ooblayout(mtd, &mxic_ecc_ooblayout_ops);
+
+ /* Enable all status bits */
+ writel(TRANS_CMPLT | SDMA_MAIN | SDMA_SPARE | ECC_ERR |
+ TO_SPARE | TO_MAIN, mxic->regs + INTRPT_STS_EN);
+
+ /* Configure the correction depending on the NAND device topology */
+ if (user->step_size && user->strength) {
+ step_size = user->step_size;
+ strength = user->strength;
+ } else if (reqs->step_size && reqs->strength) {
+ step_size = reqs->step_size;
+ strength = reqs->strength;
+ }
+
+ if (step_size && strength) {
+ steps = mtd->writesize / step_size;
+ desired_correction = steps * strength;
+ }
+
+ /* Step size is fixed to 1kiB, strength may vary (4 possible values) */
+ conf->step_size = SZ_1K;
+ steps = mtd->writesize / conf->step_size;
+
+ ctx->status = devm_kzalloc(dev, steps * sizeof(u8), GFP_KERNEL);
+ if (!ctx->status)
+ return -ENOMEM;
+
+ if (desired_correction) {
+ strength = desired_correction / steps;
+
+ for (idx = 0; idx < ARRAY_SIZE(possible_strength); idx++)
+ if (possible_strength[idx] >= strength)
+ break;
+
+ idx = min_t(unsigned int, idx,
+ ARRAY_SIZE(possible_strength) - 1);
+ } else {
+ /* Missing data, maximize the correction */
+ idx = ARRAY_SIZE(possible_strength) - 1;
+ }
+
+ /* Tune the selected strength until it fits in the OOB area */
+ for (; idx >= 0; idx--) {
+ if (spare_size[idx] * steps <= mtd->oobsize)
+ break;
+ }
+
+ /* This engine cannot be used with this NAND device */
+ if (idx < 0)
+ return -EINVAL;
+
+ /* Configure the engine for the desired strength */
+ writel(ECC_TYP(idx), mxic->regs + DP_CONFIG);
+ conf->strength = possible_strength[idx];
+ spare_reg = readl(mxic->regs + SPARE_SIZE);
+
+ ctx->steps = steps;
+ ctx->data_step_sz = mtd->writesize / steps;
+ ctx->oob_step_sz = mtd->oobsize / steps;
+ ctx->parity_sz = PARITY_SZ(spare_reg);
+ ctx->meta_sz = META_SZ(spare_reg);
+
+ /* Ensure buffers will contain enough bytes to store the STAT_BYTES */
+ ctx->req_ctx.oob_buffer_size = nanddev_per_page_oobsize(nand) +
+ (ctx->steps * STAT_BYTES);
+ ret = nand_ecc_init_req_tweaking(&ctx->req_ctx, nand);
+ if (ret)
+ return ret;
+
+ ctx->oobwithstat = kmalloc(mtd->oobsize + (ctx->steps * STAT_BYTES),
+ GFP_KERNEL);
+ if (!ctx->oobwithstat) {
+ ret = -ENOMEM;
+ goto cleanup_req_tweak;
+ }
+
+ sg_init_table(ctx->sg, 2);
+
+ /* Configuration dump and sanity checks */
+ dev_err(dev, "DPE version number: %d\n",
+ readl(mxic->regs + DP_VER) >> DP_VER_OFFSET);
+ dev_err(dev, "Chunk size: %d\n", readl(mxic->regs + CHUNK_SIZE));
+ dev_err(dev, "Main size: %d\n", readl(mxic->regs + MAIN_SIZE));
+ dev_err(dev, "Spare size: %d\n", SPARE_SZ(spare_reg));
+ dev_err(dev, "Rsv size: %ld\n", RSV_SZ(spare_reg));
+ dev_err(dev, "Parity size: %d\n", ctx->parity_sz);
+ dev_err(dev, "Meta size: %d\n", ctx->meta_sz);
+
+ if ((ctx->meta_sz + ctx->parity_sz + RSV_SZ(spare_reg)) !=
+ SPARE_SZ(spare_reg)) {
+ dev_err(dev, "Wrong OOB configuration: %d + %d + %ld != %d\n",
+ ctx->meta_sz, ctx->parity_sz, RSV_SZ(spare_reg),
+ SPARE_SZ(spare_reg));
+ ret = -EINVAL;
+ goto free_oobwithstat;
+ }
+
+ if (ctx->oob_step_sz != SPARE_SZ(spare_reg)) {
+ dev_err(dev, "Wrong OOB configuration: %d != %d\n",
+ ctx->oob_step_sz, SPARE_SZ(spare_reg));
+ ret = -EINVAL;
+ goto free_oobwithstat;
+ }
+
+ return 0;
+
+free_oobwithstat:
+ kfree(ctx->oobwithstat);
+cleanup_req_tweak:
+ nand_ecc_cleanup_req_tweaking(&ctx->req_ctx);
+
+ return ret;
+}
+
+static int mxic_ecc_init_ctx_external(struct nand_device *nand)
+{
+ struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
+ struct device *dev = nand->ecc.engine->dev;
+ int ret;
+
+ dev_info(dev, "Macronix ECC engine in external mode\n");
+
+ ret = mxic_ecc_init_ctx(nand, dev);
+ if (ret)
+ return ret;
+
+ /* Trigger each step manually */
+ writel(1, mxic->regs + CHUNK_CNT);
+ writel(BURST_TYP_INCREASING | ECC_PACKED | MEM2MEM,
+ mxic->regs + HC_CONFIG);
+
+ return 0;
+}
+
+static int mxic_ecc_init_ctx_pipelined(struct nand_device *nand)
+{
+ struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
+ struct mxic_ecc_ctx *ctx;
+ struct device *dev;
+ int ret;
+
+ dev = nand_ecc_get_engine_dev(nand->ecc.engine->dev);
+ if (!dev)
+ return -EINVAL;
+
+ dev_info(dev, "Macronix ECC engine in pipelined/mapping mode\n");
+
+ ret = mxic_ecc_init_ctx(nand, dev);
+ if (ret)
+ return ret;
+
+ ctx = nand_to_ecc_ctx(nand);
+
+ /* All steps should be handled in one go directly by the internal DMA */
+ writel(ctx->steps, mxic->regs + CHUNK_CNT);
+
+ /*
+ * Interleaved ECC scheme cannot be used otherwise factory bad block
+ * markers would be lost. A packed layout is mandatory.
+ */
+ writel(BURST_TYP_INCREASING | ECC_PACKED | MAPPING,
+ mxic->regs + HC_CONFIG);
+
+ return 0;
+}
+
+static void mxic_ecc_cleanup_ctx(struct nand_device *nand)
+{
+ struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
+
+ if (ctx) {
+ nand_ecc_cleanup_req_tweaking(&ctx->req_ctx);
+ kfree(ctx->oobwithstat);
+ }
+}
+
+static int mxic_ecc_data_xfer_wait_for_completion(struct mxic_ecc_engine *mxic)
+{
+ u32 val;
+ int ret;
+
+ if (mxic->irq) {
+ reinit_completion(&mxic->complete);
+ mxic_ecc_enable_int(mxic);
+ ret = wait_for_completion_timeout(&mxic->complete,
+ msecs_to_jiffies(1000));
+ mxic_ecc_disable_int(mxic);
+ } else {
+ ret = readl_poll_timeout(mxic->regs + INTRPT_STS, val,
+ val & TRANS_CMPLT, 10, USEC_PER_SEC);
+ writel(val, mxic->regs + INTRPT_STS);
+ }
+
+ if (ret) {
+ dev_err(mxic->dev, "Timeout on data xfer completion\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int mxic_ecc_process_data(struct mxic_ecc_engine *mxic,
+ unsigned int direction)
+{
+ unsigned int dir = (direction == NAND_PAGE_READ) ?
+ READ_NAND : WRITE_NAND;
+ int ret;
+
+ mxic_ecc_enable_engine(mxic);
+
+ /* Trigger processing */
+ writel(SDMA_STRT | dir, mxic->regs + SDMA_CTRL);
+
+ /* Wait for completion */
+ ret = mxic_ecc_data_xfer_wait_for_completion(mxic);
+
+ mxic_ecc_disable_engine(mxic);
+
+ return ret;
+}
+
+int mxic_ecc_process_data_pipelined(struct nand_ecc_engine *eng,
+ unsigned int direction, dma_addr_t dirmap)
+{
+ struct mxic_ecc_engine *mxic = pip_ecc_eng_to_mxic(eng);
+
+ if (dirmap)
+ writel(dirmap, mxic->regs + HC_SLV_ADDR);
+
+ return mxic_ecc_process_data(mxic, direction);
+}
+EXPORT_SYMBOL_GPL(mxic_ecc_process_data_pipelined);
+
+static void mxic_ecc_extract_status_bytes(struct mxic_ecc_ctx *ctx)
+{
+ u8 *buf = ctx->oobwithstat;
+ int next_stat_pos;
+ int step;
+
+ /* Extract the ECC status */
+ for (step = 0; step < ctx->steps; step++) {
+ next_stat_pos = ctx->oob_step_sz +
+ ((STAT_BYTES + ctx->oob_step_sz) * step);
+
+ ctx->status[step] = buf[next_stat_pos];
+ }
+}
+
+static void mxic_ecc_reconstruct_oobbuf(struct mxic_ecc_ctx *ctx,
+ u8 *dst, const u8 *src)
+{
+ int step;
+
+ /* Reconstruct the OOB buffer linearly (without the ECC status bytes) */
+ for (step = 0; step < ctx->steps; step++)
+ memcpy(dst + (step * ctx->oob_step_sz),
+ src + (step * (ctx->oob_step_sz + STAT_BYTES)),
+ ctx->oob_step_sz);
+}
+
+static void mxic_ecc_add_room_in_oobbuf(struct mxic_ecc_ctx *ctx,
+ u8 *dst, const u8 *src)
+{
+ int step;
+
+ /* Add some space in the OOB buffer for the status bytes */
+ for (step = 0; step < ctx->steps; step++)
+ memcpy(dst + (step * (ctx->oob_step_sz + STAT_BYTES)),
+ src + (step * ctx->oob_step_sz),
+ ctx->oob_step_sz);
+}
+
+static int mxic_ecc_count_biterrs(struct mxic_ecc_engine *mxic,
+ struct nand_device *nand)
+{
+ struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ struct device *dev = mxic->dev;
+ unsigned int max_bf = 0;
+ bool failure = false;
+ int step;
+
+ for (step = 0; step < ctx->steps; step++) {
+ u8 stat = ctx->status[step];
+
+ if (stat == NO_ERR) {
+ dev_dbg(dev, "ECC step %d: no error\n", step);
+ } else if (stat == ERASED_CHUNK) {
+ dev_dbg(dev, "ECC step %d: erased\n", step);
+ } else if (stat == UNCORR_ERR || stat > MAX_CORR_ERR) {
+ dev_dbg(dev, "ECC step %d: uncorrectable\n", step);
+ mtd->ecc_stats.failed++;
+ failure = true;
+ } else {
+ dev_dbg(dev, "ECC step %d: %d bits corrected\n",
+ step, stat);
+ max_bf = max_t(unsigned int, max_bf, stat);
+ mtd->ecc_stats.corrected += stat;
+ }
+ }
+
+ return failure ? -EBADMSG : max_bf;
+}
+
+/* External ECC engine helpers */
+static int mxic_ecc_prepare_io_req_external(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
+ struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ int offset, nents, step, ret;
+
+ if (req->mode == MTD_OPS_RAW)
+ return 0;
+
+ nand_ecc_tweak_req(&ctx->req_ctx, req);
+ ctx->req = req;
+
+ if (req->type == NAND_PAGE_READ)
+ return 0;
+
+ mxic_ecc_add_room_in_oobbuf(ctx, ctx->oobwithstat,
+ ctx->req->oobbuf.out);
+
+ sg_set_buf(&ctx->sg[0], req->databuf.out, req->datalen);
+ sg_set_buf(&ctx->sg[1], ctx->oobwithstat,
+ req->ooblen + (ctx->steps * STAT_BYTES));
+
+ nents = dma_map_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
+ if (!nents)
+ return -EINVAL;
+
+ mutex_lock(&mxic->lock);
+
+ for (step = 0; step < ctx->steps; step++) {
+ writel(sg_dma_address(&ctx->sg[0]) + (step * ctx->data_step_sz),
+ mxic->regs + SDMA_MAIN_ADDR);
+ writel(sg_dma_address(&ctx->sg[1]) + (step * (ctx->oob_step_sz + STAT_BYTES)),
+ mxic->regs + SDMA_SPARE_ADDR);
+ ret = mxic_ecc_process_data(mxic, ctx->req->type);
+ if (ret)
+ break;
+ }
+
+ mutex_unlock(&mxic->lock);
+
+ dma_unmap_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
+
+ if (ret)
+ return ret;
+
+ /* Retrieve the calculated ECC bytes */
+ for (step = 0; step < ctx->steps; step++) {
+ offset = ctx->meta_sz + (step * ctx->oob_step_sz);
+ mtd_ooblayout_get_eccbytes(mtd,
+ (u8 *)ctx->req->oobbuf.out + offset,
+ ctx->oobwithstat + (step * STAT_BYTES),
+ step * ctx->parity_sz,
+ ctx->parity_sz);
+ }
+
+ return 0;
+}
+
+static int mxic_ecc_finish_io_req_external(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
+ struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
+ int nents, step, ret;
+
+ if (req->mode == MTD_OPS_RAW)
+ return 0;
+
+ if (req->type == NAND_PAGE_WRITE) {
+ nand_ecc_restore_req(&ctx->req_ctx, req);
+ return 0;
+ }
+
+ /* Copy the OOB buffer and add room for the ECC engine status bytes */
+ mxic_ecc_add_room_in_oobbuf(ctx, ctx->oobwithstat, ctx->req->oobbuf.in);
+
+ sg_set_buf(&ctx->sg[0], req->databuf.in, req->datalen);
+ sg_set_buf(&ctx->sg[1], ctx->oobwithstat,
+ req->ooblen + (ctx->steps * STAT_BYTES));
+ nents = dma_map_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
+ if (!nents)
+ return -EINVAL;
+
+ mutex_lock(&mxic->lock);
+
+ for (step = 0; step < ctx->steps; step++) {
+ writel(sg_dma_address(&ctx->sg[0]) + (step * ctx->data_step_sz),
+ mxic->regs + SDMA_MAIN_ADDR);
+ writel(sg_dma_address(&ctx->sg[1]) + (step * (ctx->oob_step_sz + STAT_BYTES)),
+ mxic->regs + SDMA_SPARE_ADDR);
+ ret = mxic_ecc_process_data(mxic, ctx->req->type);
+ if (ret)
+ break;
+ }
+
+ mutex_unlock(&mxic->lock);
+
+ dma_unmap_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
+
+ if (ret) {
+ nand_ecc_restore_req(&ctx->req_ctx, req);
+ return ret;
+ }
+
+ /* Extract the status bytes and reconstruct the buffer */
+ mxic_ecc_extract_status_bytes(ctx);
+ mxic_ecc_reconstruct_oobbuf(ctx, ctx->req->oobbuf.in, ctx->oobwithstat);
+
+ nand_ecc_restore_req(&ctx->req_ctx, req);
+
+ return mxic_ecc_count_biterrs(mxic, nand);
+}
+
+/* Pipelined ECC engine helpers */
+static int mxic_ecc_prepare_io_req_pipelined(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
+ struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
+ int nents;
+
+ if (req->mode == MTD_OPS_RAW)
+ return 0;
+
+ nand_ecc_tweak_req(&ctx->req_ctx, req);
+ ctx->req = req;
+
+ /* Copy the OOB buffer and add room for the ECC engine status bytes */
+ mxic_ecc_add_room_in_oobbuf(ctx, ctx->oobwithstat, ctx->req->oobbuf.in);
+
+ sg_set_buf(&ctx->sg[0], req->databuf.in, req->datalen);
+ sg_set_buf(&ctx->sg[1], ctx->oobwithstat,
+ req->ooblen + (ctx->steps * STAT_BYTES));
+
+ nents = dma_map_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
+ if (!nents)
+ return -EINVAL;
+
+ mutex_lock(&mxic->lock);
+
+ writel(sg_dma_address(&ctx->sg[0]), mxic->regs + SDMA_MAIN_ADDR);
+ writel(sg_dma_address(&ctx->sg[1]), mxic->regs + SDMA_SPARE_ADDR);
+
+ return 0;
+}
+
+static int mxic_ecc_finish_io_req_pipelined(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
+ struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
+ int ret = 0;
+
+ if (req->mode == MTD_OPS_RAW)
+ return 0;
+
+ mutex_unlock(&mxic->lock);
+
+ dma_unmap_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
+
+ if (req->type == NAND_PAGE_READ) {
+ mxic_ecc_extract_status_bytes(ctx);
+ mxic_ecc_reconstruct_oobbuf(ctx, ctx->req->oobbuf.in,
+ ctx->oobwithstat);
+ ret = mxic_ecc_count_biterrs(mxic, nand);
+ }
+
+ nand_ecc_restore_req(&ctx->req_ctx, req);
+
+ return ret;
+}
+
+static struct nand_ecc_engine_ops mxic_ecc_engine_external_ops = {
+ .init_ctx = mxic_ecc_init_ctx_external,
+ .cleanup_ctx = mxic_ecc_cleanup_ctx,
+ .prepare_io_req = mxic_ecc_prepare_io_req_external,
+ .finish_io_req = mxic_ecc_finish_io_req_external,
+};
+
+static struct nand_ecc_engine_ops mxic_ecc_engine_pipelined_ops = {
+ .init_ctx = mxic_ecc_init_ctx_pipelined,
+ .cleanup_ctx = mxic_ecc_cleanup_ctx,
+ .prepare_io_req = mxic_ecc_prepare_io_req_pipelined,
+ .finish_io_req = mxic_ecc_finish_io_req_pipelined,
+};
+
+struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void)
+{
+ return &mxic_ecc_engine_pipelined_ops;
+}
+EXPORT_SYMBOL_GPL(mxic_ecc_get_pipelined_ops);
+
+static struct platform_device *
+mxic_ecc_get_pdev(struct platform_device *spi_pdev)
+{
+ struct platform_device *eng_pdev;
+ struct device_node *np;
+
+ /* Retrieve the nand-ecc-engine phandle */
+ np = of_parse_phandle(spi_pdev->dev.of_node, "nand-ecc-engine", 0);
+ if (!np)
+ return NULL;
+
+ /* Jump to the engine's device node */
+ eng_pdev = of_find_device_by_node(np);
+ of_node_put(np);
+
+ return eng_pdev;
+}
+
+void mxic_ecc_put_pipelined_engine(struct nand_ecc_engine *eng)
+{
+ struct mxic_ecc_engine *mxic = pip_ecc_eng_to_mxic(eng);
+
+ platform_device_put(to_platform_device(mxic->dev));
+}
+EXPORT_SYMBOL_GPL(mxic_ecc_put_pipelined_engine);
+
+struct nand_ecc_engine *
+mxic_ecc_get_pipelined_engine(struct platform_device *spi_pdev)
+{
+ struct platform_device *eng_pdev;
+ struct mxic_ecc_engine *mxic;
+
+ eng_pdev = mxic_ecc_get_pdev(spi_pdev);
+ if (!eng_pdev)
+ return ERR_PTR(-ENODEV);
+
+ mxic = platform_get_drvdata(eng_pdev);
+ if (!mxic) {
+ platform_device_put(eng_pdev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ return &mxic->pipelined_engine;
+}
+EXPORT_SYMBOL_GPL(mxic_ecc_get_pipelined_engine);
+
+/*
+ * Only the external ECC engine is exported as the pipelined is SoC specific, so
+ * it is registered directly by the drivers that wrap it.
+ */
+static int mxic_ecc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mxic_ecc_engine *mxic;
+ int ret;
+
+ mxic = devm_kzalloc(&pdev->dev, sizeof(*mxic), GFP_KERNEL);
+ if (!mxic)
+ return -ENOMEM;
+
+ mxic->dev = &pdev->dev;
+
+ /*
+ * Both memory regions for the ECC engine itself and the AXI slave
+ * address are mandatory.
+ */
+ mxic->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mxic->regs)) {
+ dev_err(&pdev->dev, "Missing memory region\n");
+ return PTR_ERR(mxic->regs);
+ }
+
+ mxic_ecc_disable_engine(mxic);
+ mxic_ecc_disable_int(mxic);
+
+ /* IRQ is optional yet much more efficient */
+ mxic->irq = platform_get_irq_byname_optional(pdev, "ecc-engine");
+ if (mxic->irq > 0) {
+ ret = devm_request_irq(&pdev->dev, mxic->irq, mxic_ecc_isr, 0,
+ "mxic-ecc", mxic);
+ if (ret)
+ return ret;
+ } else {
+ dev_info(dev, "Invalid or missing IRQ, fallback to polling\n");
+ mxic->irq = 0;
+ }
+
+ mutex_init(&mxic->lock);
+
+ /*
+ * In external mode, the device is the ECC engine. In pipelined mode,
+ * the device is the host controller. The device is used to match the
+ * right ECC engine based on the DT properties.
+ */
+ mxic->external_engine.dev = &pdev->dev;
+ mxic->external_engine.integration = NAND_ECC_ENGINE_INTEGRATION_EXTERNAL;
+ mxic->external_engine.ops = &mxic_ecc_engine_external_ops;
+
+ nand_ecc_register_on_host_hw_engine(&mxic->external_engine);
+
+ platform_set_drvdata(pdev, mxic);
+
+ return 0;
+}
+
+static int mxic_ecc_remove(struct platform_device *pdev)
+{
+ struct mxic_ecc_engine *mxic = platform_get_drvdata(pdev);
+
+ nand_ecc_unregister_on_host_hw_engine(&mxic->external_engine);
+
+ return 0;
+}
+
+static const struct of_device_id mxic_ecc_of_ids[] = {
+ {
+ .compatible = "mxicy,nand-ecc-engine-rev3",
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mxic_ecc_of_ids);
+
+static struct platform_driver mxic_ecc_driver = {
+ .driver = {
+ .name = "mxic-nand-ecc-engine",
+ .of_match_table = mxic_ecc_of_ids,
+ },
+ .probe = mxic_ecc_probe,
+ .remove = mxic_ecc_remove,
+};
+module_platform_driver(mxic_ecc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
+MODULE_DESCRIPTION("Macronix NAND hardware ECC controller");
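
ecc-mxic.c only registers the external flavour of the engine through the platform driver above; the pipelined flavour is meant to be resolved by a host controller via its nand-ecc-engine phandle and driven through the exported helpers. A minimal sketch of such glue (hypothetical host driver, names invented for illustration; the exact wiring in the real consumer may differ):

#include <linux/mtd/nand-ecc-mxic.h>
#include <linux/platform_device.h>

struct my_host {
	struct nand_ecc_engine *ecc;	/* pipelined Macronix DPE */
};

static int my_host_attach_ecc(struct platform_device *pdev, struct my_host *host)
{
	/* Follows the nand-ecc-engine phandle; may return -EPROBE_DEFER. */
	host->ecc = mxic_ecc_get_pipelined_engine(pdev);
	if (IS_ERR(host->ecc))
		return PTR_ERR(host->ecc);

	/* The host fills in the pipelined engine it just borrowed. */
	host->ecc->dev = &pdev->dev;
	host->ecc->integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED;
	host->ecc->ops = mxic_ecc_get_pipelined_ops();

	return 0;
}

static int my_host_page_xfer(struct my_host *host, unsigned int direction,
			     dma_addr_t dirmap)
{
	/* Kick the DPE once the NAND page transfer has been set up. */
	return mxic_ecc_process_data_pipelined(host->ecc, direction, dirmap);
}

static void my_host_detach_ecc(struct my_host *host)
{
	mxic_ecc_put_pipelined_engine(host->ecc);
}
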
diff --git a/drivers/mtd/nand/ecc.c b/drivers/mtd/nand/ecc.c
index 6c43dfda01d4..5250764cedee 100644
--- a/drivers/mtd/nand/ecc.c
+++ b/drivers/mtd/nand/ecc.c
@@ -96,6 +96,12 @@
#include <linux/module.h>
#include <linux/mtd/nand.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+
+static LIST_HEAD(on_host_hw_engines);
+static DEFINE_MUTEX(on_host_hw_engines_mutex);
/**
* nand_ecc_init_ctx - Init the ECC engine context
@@ -611,6 +617,119 @@ struct nand_ecc_engine *nand_ecc_get_on_die_hw_engine(struct nand_device *nand)
}
EXPORT_SYMBOL(nand_ecc_get_on_die_hw_engine);
+int nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine)
+{
+ struct nand_ecc_engine *item;
+
+ if (!engine)
+ return -EINVAL;
+
+ /* Prevent multiple registrations of one engine */
+ list_for_each_entry(item, &on_host_hw_engines, node)
+ if (item == engine)
+ return 0;
+
+ mutex_lock(&on_host_hw_engines_mutex);
+ list_add_tail(&engine->node, &on_host_hw_engines);
+ mutex_unlock(&on_host_hw_engines_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(nand_ecc_register_on_host_hw_engine);
+
+int nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine)
+{
+ if (!engine)
+ return -EINVAL;
+
+ mutex_lock(&on_host_hw_engines_mutex);
+ list_del(&engine->node);
+ mutex_unlock(&on_host_hw_engines_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(nand_ecc_unregister_on_host_hw_engine);
+
+static struct nand_ecc_engine *nand_ecc_match_on_host_hw_engine(struct device *dev)
+{
+ struct nand_ecc_engine *item;
+
+ list_for_each_entry(item, &on_host_hw_engines, node)
+ if (item->dev == dev)
+ return item;
+
+ return NULL;
+}
+
+struct nand_ecc_engine *nand_ecc_get_on_host_hw_engine(struct nand_device *nand)
+{
+ struct nand_ecc_engine *engine = NULL;
+ struct device *dev = &nand->mtd.dev;
+ struct platform_device *pdev;
+ struct device_node *np;
+
+ if (list_empty(&on_host_hw_engines))
+ return NULL;
+
+ /* Check for an explicit nand-ecc-engine property */
+ np = of_parse_phandle(dev->of_node, "nand-ecc-engine", 0);
+ if (np) {
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ engine = nand_ecc_match_on_host_hw_engine(&pdev->dev);
+ platform_device_put(pdev);
+ of_node_put(np);
+
+ if (!engine)
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ if (engine)
+ get_device(engine->dev);
+
+ return engine;
+}
+EXPORT_SYMBOL(nand_ecc_get_on_host_hw_engine);
+
+void nand_ecc_put_on_host_hw_engine(struct nand_device *nand)
+{
+ put_device(nand->ecc.engine->dev);
+}
+EXPORT_SYMBOL(nand_ecc_put_on_host_hw_engine);
+
+/*
+ * In the case of a pipelined engine, the device registering the ECC
+ * engine is not necessarily the ECC engine itself but may be a host controller.
+ * It is then useful to provide a helper to retrieve the right device object
+ * which actually represents the ECC engine.
+ */
+struct device *nand_ecc_get_engine_dev(struct device *host)
+{
+ struct platform_device *ecc_pdev;
+ struct device_node *np;
+
+ /*
+ * If the device node contains this property, it means we need to follow
+ * it in order to get the right ECC engine device we are looking for.
+ */
+ np = of_parse_phandle(host->of_node, "nand-ecc-engine", 0);
+ if (!np)
+ return host;
+
+ ecc_pdev = of_find_device_by_node(np);
+ if (!ecc_pdev) {
+ of_node_put(np);
+ return NULL;
+ }
+
+ platform_device_put(ecc_pdev);
+ of_node_put(np);
+
+ return &ecc_pdev->dev;
+}
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
MODULE_DESCRIPTION("Generic ECC engine");
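
The registration helpers added above give on-host ECC engine drivers a place to publish themselves, so that nanddev_get_ecc_engine() (see the core.c hunk earlier) can resolve them from a nand-ecc-engine phandle and defer probing until the provider shows up. A minimal provider-side sketch (hypothetical driver; the ops are assumed to be defined elsewhere):

#include <linux/mtd/nand.h>
#include <linux/platform_device.h>

static struct nand_ecc_engine_ops my_ecc_ops;	/* init_ctx/prepare/finish hooks */

static int my_ecc_probe(struct platform_device *pdev)
{
	struct nand_ecc_engine *eng;

	eng = devm_kzalloc(&pdev->dev, sizeof(*eng), GFP_KERNEL);
	if (!eng)
		return -ENOMEM;

	eng->dev = &pdev->dev;
	eng->integration = NAND_ECC_ENGINE_INTEGRATION_EXTERNAL;
	eng->ops = &my_ecc_ops;

	platform_set_drvdata(pdev, eng);

	/* Makes the engine discoverable via the nand-ecc-engine phandle. */
	return nand_ecc_register_on_host_hw_engine(eng);
}

static int my_ecc_remove(struct platform_device *pdev)
{
	struct nand_ecc_engine *eng = platform_get_drvdata(pdev);

	return nand_ecc_unregister_on_host_hw_engine(eng);
}
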
diff --git a/drivers/mtd/nand/onenand/generic.c b/drivers/mtd/nand/onenand/generic.c
index 8b6f4da5d720..a4b8b65fe15f 100644
--- a/drivers/mtd/nand/onenand/generic.c
+++ b/drivers/mtd/nand/onenand/generic.c
@@ -53,7 +53,12 @@ static int generic_onenand_probe(struct platform_device *pdev)
}
info->onenand.mmcontrol = pdata ? pdata->mmcontrol : NULL;
- info->onenand.irq = platform_get_irq(pdev, 0);
+
+ err = platform_get_irq(pdev, 0);
+ if (err < 0)
+ goto out_iounmap;
+
+ info->onenand.irq = err;
info->mtd.dev.parent = &pdev->dev;
info->mtd.priv = &info->onenand;
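The error handling introduced here follows the usual platform_get_irq() pattern: the call returns either a valid IRQ number or a negative errno (including -EPROBE_DEFER), so the value must be checked before it is stored. A minimal form of the pattern, for illustration only:

	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* propagates -EPROBE_DEFER and other errors */
	info->onenand.irq = irq;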
diff --git a/drivers/mtd/nand/onenand/onenand_bbt.c b/drivers/mtd/nand/onenand/onenand_bbt.c
index def89f108007..b17315f8e1d4 100644
--- a/drivers/mtd/nand/onenand/onenand_bbt.c
+++ b/drivers/mtd/nand/onenand/onenand_bbt.c
@@ -60,7 +60,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
int i, j, numblocks, len, scanlen;
int startblock;
loff_t from;
- size_t readlen, ooblen;
+ size_t readlen;
struct mtd_oob_ops ops;
int rgn;
@@ -69,7 +69,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
len = 2;
/* We need only read few bytes from the OOB area */
- scanlen = ooblen = 0;
+ scanlen = 0;
readlen = bd->len;
/* chip == -1 case only */
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 0a45d3c6c15b..9b078e78f3fa 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -40,8 +40,9 @@ config MTD_NAND_AMS_DELTA
config MTD_NAND_OMAP2
tristate "OMAP2, OMAP3, OMAP4 and Keystone NAND controller"
- depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST
+ depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
depends on HAS_IOMEM
+ depends on OMAP_GPMC
help
Support for NAND flash on Texas Instruments OMAP2, OMAP3, OMAP4
and Keystone platforms.
@@ -208,6 +209,19 @@ config MTD_NAND_BRCMNAND
originally designed for Set-Top Box but is used on various BCM7xxx,
BCM3xxx, BCM63xxx, iProc/Cygnus and more.
+if MTD_NAND_BRCMNAND
+
+config MTD_NAND_BRCMNAND_BCMA
+ tristate "Broadcom BCMA NAND controller"
+ depends on BCMA_NFLASH
+ depends on BCMA
+ help
+ Enables the BRCMNAND controller over BCMA on BCM47186/BCM5358 SoCs.
+ The glue driver will take care of performing the low-level I/O
+ operations to interface the BRCMNAND controller over the BCMA bus.
+
+endif # MTD_NAND_BRCMNAND
+
config MTD_NAND_BCM47XXNFLASH
tristate "BCM4706 BCMA NAND controller"
depends on BCMA_NFLASH
@@ -308,7 +322,7 @@ config MTD_NAND_DAVINCI
config MTD_NAND_TXX9NDFMC
tristate "TXx9 NAND controller"
- depends on SOC_TX4938 || SOC_TX4939 || COMPILE_TEST
+ depends on SOC_TX4938 || COMPILE_TEST
depends on HAS_IOMEM
help
This enables the NAND flash controller on the TXx9 SoCs.
@@ -461,6 +475,13 @@ config MTD_NAND_PL35X
Enables support for PrimeCell SMC PL351 and PL353 NAND
controller found on Zynq7000.
+config MTD_NAND_RENESAS
+ tristate "Renesas R-Car Gen3 & RZ/N1 NAND controller"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ help
+ Enables support for the NAND controller found on Renesas R-Car
+ Gen3 and RZ/N1 SoC families.
+
comment "Misc"
config MTD_SM_COMMON
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index 2f97958c3a33..88a566513c56 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_MTD_NAND_ARASAN) += arasan-nand-controller.o
obj-$(CONFIG_MTD_NAND_INTEL_LGM) += intel-nand-controller.o
obj-$(CONFIG_MTD_NAND_ROCKCHIP) += rockchip-nand-controller.o
obj-$(CONFIG_MTD_NAND_PL35X) += pl35x-nand-controller.o
+obj-$(CONFIG_MTD_NAND_RENESAS) += renesas-nand-controller.o
nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_onfi.o
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index f3276ee9e4fe..6ef14442c71a 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -1938,7 +1938,7 @@ static const struct atmel_smc_nand_ebi_csa_cfg sam9x60_ebi_csa = {
.nfd0_on_d16 = AT91_SFR_CCFG_NFD0_ON_D16,
};
-static const struct of_device_id atmel_ebi_csa_regmap_of_ids[] = {
+static const struct of_device_id __maybe_unused atmel_ebi_csa_regmap_of_ids[] = {
{
.compatible = "atmel,at91sam9260-matrix",
.data = &at91sam9260_ebi_csa,
@@ -2060,13 +2060,15 @@ static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
nc->mck = of_clk_get(dev->parent->of_node, 0);
if (IS_ERR(nc->mck)) {
dev_err(dev, "Failed to retrieve MCK clk\n");
- return PTR_ERR(nc->mck);
+ ret = PTR_ERR(nc->mck);
+ goto out_release_dma;
}
np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0);
if (!np) {
dev_err(dev, "Missing or invalid atmel,smc property\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_release_dma;
}
nc->smc = syscon_node_to_regmap(np);
@@ -2074,10 +2076,16 @@ static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
if (IS_ERR(nc->smc)) {
ret = PTR_ERR(nc->smc);
dev_err(dev, "Could not get SMC regmap (err = %d)\n", ret);
- return ret;
+ goto out_release_dma;
}
return 0;
+
+out_release_dma:
+ if (nc->dmac)
+ dma_release_channel(nc->dmac);
+
+ return ret;
}
static int
@@ -2648,7 +2656,7 @@ static SIMPLE_DEV_PM_OPS(atmel_nand_controller_pm_ops, NULL,
static struct platform_driver atmel_nand_controller_driver = {
.driver = {
.name = "atmel-nand-controller",
- .of_match_table = of_match_ptr(atmel_nand_controller_of_ids),
+ .of_match_table = atmel_nand_controller_of_ids,
.pm = &atmel_nand_controller_pm_ops,
},
.probe = atmel_nand_controller_probe,
diff --git a/drivers/mtd/nand/raw/atmel/pmecc.c b/drivers/mtd/nand/raw/atmel/pmecc.c
index 498e41ccabbd..4d7dc8a9c373 100644
--- a/drivers/mtd/nand/raw/atmel/pmecc.c
+++ b/drivers/mtd/nand/raw/atmel/pmecc.c
@@ -920,7 +920,7 @@ static struct atmel_pmecc_caps sama5d2_caps = {
.correct_erased_chunks = true,
};
-static const struct of_device_id atmel_pmecc_legacy_match[] = {
+static const struct of_device_id __maybe_unused atmel_pmecc_legacy_match[] = {
{ .compatible = "atmel,sama5d4-nand", &sama5d4_caps },
{ .compatible = "atmel,sama5d2-nand", &sama5d2_caps },
{ /* sentinel */ }
@@ -1003,7 +1003,7 @@ static int atmel_pmecc_probe(struct platform_device *pdev)
static struct platform_driver atmel_pmecc_driver = {
.driver = {
.name = "atmel-pmecc",
- .of_match_table = of_match_ptr(atmel_pmecc_match),
+ .of_match_table = atmel_pmecc_match,
},
.probe = atmel_pmecc_probe,
};
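The of_match_ptr() removals in the two Atmel hunks above (and the similar Ingenic, MediaTek and brcmnand annotations later in this diff) all address the same problem: with CONFIG_OF=n the macro expands to NULL, leaving the match table unreferenced and triggering an unused-variable warning. For reference, this is how include/linux/of.h defines it:

	#ifdef CONFIG_OF
	#define of_match_ptr(_ptr)	(_ptr)
	#else
	#define of_match_ptr(_ptr)	NULL
	#endif

Referencing the table directly keeps it used in every configuration; tables that remain conditionally used are tagged __maybe_unused instead.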
diff --git a/drivers/mtd/nand/raw/brcmnand/Makefile b/drivers/mtd/nand/raw/brcmnand/Makefile
index 195b845e48b8..16dc7254200e 100644
--- a/drivers/mtd/nand/raw/brcmnand/Makefile
+++ b/drivers/mtd/nand/raw/brcmnand/Makefile
@@ -6,3 +6,5 @@ obj-$(CONFIG_MTD_NAND_BRCMNAND) += bcm63138_nand.o
obj-$(CONFIG_MTD_NAND_BRCMNAND) += bcm6368_nand.o
obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmstb_nand.o
obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand.o
+
+obj-$(CONFIG_MTD_NAND_BRCMNAND_BCMA) += bcma_nand.o
diff --git a/drivers/mtd/nand/raw/brcmnand/bcma_nand.c b/drivers/mtd/nand/raw/brcmnand/bcma_nand.c
new file mode 100644
index 000000000000..dd27977919fb
--- /dev/null
+++ b/drivers/mtd/nand/raw/brcmnand/bcma_nand.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2021 Broadcom
+ */
+#include <linux/bcma/bcma.h>
+#include <linux/bcma/bcma_driver_chipcommon.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "brcmnand.h"
+
+struct brcmnand_bcma_soc {
+ struct brcmnand_soc soc;
+ struct bcma_drv_cc *cc;
+};
+
+static inline bool brcmnand_bcma_needs_swapping(u32 offset)
+{
+ switch (offset) {
+ case BCMA_CC_NAND_SPARE_RD0:
+ case BCMA_CC_NAND_SPARE_RD4:
+ case BCMA_CC_NAND_SPARE_RD8:
+ case BCMA_CC_NAND_SPARE_RD12:
+ case BCMA_CC_NAND_SPARE_WR0:
+ case BCMA_CC_NAND_SPARE_WR4:
+ case BCMA_CC_NAND_SPARE_WR8:
+ case BCMA_CC_NAND_SPARE_WR12:
+ case BCMA_CC_NAND_DEVID:
+ case BCMA_CC_NAND_DEVID_X:
+ case BCMA_CC_NAND_SPARE_RD16:
+ case BCMA_CC_NAND_SPARE_RD20:
+ case BCMA_CC_NAND_SPARE_RD24:
+ case BCMA_CC_NAND_SPARE_RD28:
+ return true;
+ }
+
+ return false;
+}
+
+static inline struct brcmnand_bcma_soc *to_bcma_soc(struct brcmnand_soc *soc)
+{
+ return container_of(soc, struct brcmnand_bcma_soc, soc);
+}
+
+static u32 brcmnand_bcma_read_reg(struct brcmnand_soc *soc, u32 offset)
+{
+ struct brcmnand_bcma_soc *sc = to_bcma_soc(soc);
+ u32 val;
+
+ /* Offset into the NAND block and deal with the flash cache separately */
+ if (offset == BRCMNAND_NON_MMIO_FC_ADDR)
+ offset = BCMA_CC_NAND_CACHE_DATA;
+ else
+ offset += BCMA_CC_NAND_REVISION;
+
+ val = bcma_cc_read32(sc->cc, offset);
+
+ /* Swap if necessary */
+ if (brcmnand_bcma_needs_swapping(offset))
+ val = be32_to_cpu((__force __be32)val);
+ return val;
+}
+
+static void brcmnand_bcma_write_reg(struct brcmnand_soc *soc, u32 val,
+ u32 offset)
+{
+ struct brcmnand_bcma_soc *sc = to_bcma_soc(soc);
+
+ /* Offset into the NAND block */
+ if (offset == BRCMNAND_NON_MMIO_FC_ADDR)
+ offset = BCMA_CC_NAND_CACHE_DATA;
+ else
+ offset += BCMA_CC_NAND_REVISION;
+
+ /* Swap if necessary */
+ if (brcmnand_bcma_needs_swapping(offset))
+ val = (__force u32)cpu_to_be32(val);
+
+ bcma_cc_write32(sc->cc, offset, val);
+}
+
+static struct brcmnand_io_ops brcmnand_bcma_io_ops = {
+ .read_reg = brcmnand_bcma_read_reg,
+ .write_reg = brcmnand_bcma_write_reg,
+};
+
+static void brcmnand_bcma_prepare_data_bus(struct brcmnand_soc *soc, bool prepare,
+ bool is_param)
+{
+ struct brcmnand_bcma_soc *sc = to_bcma_soc(soc);
+
+	/* Reset the cache address to ensure we start from the beginning of a
+	 * sub-page.
+	 */
+ bcma_cc_write32(sc->cc, BCMA_CC_NAND_CACHE_ADDR, 0);
+}
+
+static int brcmnand_bcma_nand_probe(struct platform_device *pdev)
+{
+ struct bcma_nflash *nflash = dev_get_platdata(&pdev->dev);
+ struct brcmnand_bcma_soc *soc;
+
+ soc = devm_kzalloc(&pdev->dev, sizeof(*soc), GFP_KERNEL);
+ if (!soc)
+ return -ENOMEM;
+
+ soc->cc = container_of(nflash, struct bcma_drv_cc, nflash);
+ soc->soc.prepare_data_bus = brcmnand_bcma_prepare_data_bus;
+ soc->soc.ops = &brcmnand_bcma_io_ops;
+
+ if (soc->cc->core->bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
+ dev_err(&pdev->dev, "Use bcm47xxnflash for 4706!\n");
+ return -ENODEV;
+ }
+
+ return brcmnand_probe(pdev, &soc->soc);
+}
+
+static struct platform_driver brcmnand_bcma_nand_driver = {
+ .probe = brcmnand_bcma_nand_probe,
+ .remove = brcmnand_remove,
+ .driver = {
+ .name = "bcma_brcmnand",
+ .pm = &brcmnand_pm_ops,
+ }
+};
+module_platform_driver(brcmnand_bcma_nand_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("NAND controller driver glue for BCMA chips");
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index f75929783b94..2e9c2e2d9c9f 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -9,6 +9,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/platform_device.h>
+#include <linux/platform_data/brcmnand.h>
#include <linux/err.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
@@ -25,6 +26,7 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
+#include <linux/static_key.h>
#include <linux/list.h>
#include <linux/log2.h>
@@ -207,13 +209,15 @@ enum {
struct brcmnand_host;
+static DEFINE_STATIC_KEY_FALSE(brcmnand_soc_has_ops_key);
+
struct brcmnand_controller {
struct device *dev;
struct nand_controller controller;
void __iomem *nand_base;
void __iomem *nand_fc; /* flash cache */
void __iomem *flash_dma_base;
- unsigned int irq;
+ int irq;
unsigned int dma_irq;
int nand_version;
@@ -592,15 +596,29 @@ enum {
INTFC_CTLR_READY = BIT(31),
};
+static inline bool brcmnand_non_mmio_ops(struct brcmnand_controller *ctrl)
+{
+#if IS_ENABLED(CONFIG_MTD_NAND_BRCMNAND_BCMA)
+ return static_branch_unlikely(&brcmnand_soc_has_ops_key);
+#else
+ return false;
+#endif
+}
+
static inline u32 nand_readreg(struct brcmnand_controller *ctrl, u32 offs)
{
+ if (brcmnand_non_mmio_ops(ctrl))
+ return brcmnand_soc_read(ctrl->soc, offs);
return brcmnand_readl(ctrl->nand_base + offs);
}
static inline void nand_writereg(struct brcmnand_controller *ctrl, u32 offs,
u32 val)
{
- brcmnand_writel(val, ctrl->nand_base + offs);
+ if (brcmnand_non_mmio_ops(ctrl))
+ brcmnand_soc_write(ctrl->soc, val, offs);
+ else
+ brcmnand_writel(val, ctrl->nand_base + offs);
}
static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
@@ -766,13 +784,18 @@ static inline void brcmnand_rmw_reg(struct brcmnand_controller *ctrl,
static inline u32 brcmnand_read_fc(struct brcmnand_controller *ctrl, int word)
{
+ if (brcmnand_non_mmio_ops(ctrl))
+ return brcmnand_soc_read(ctrl->soc, BRCMNAND_NON_MMIO_FC_ADDR);
return __raw_readl(ctrl->nand_fc + word * 4);
}
static inline void brcmnand_write_fc(struct brcmnand_controller *ctrl,
int word, u32 val)
{
- __raw_writel(val, ctrl->nand_fc + word * 4);
+ if (brcmnand_non_mmio_ops(ctrl))
+ brcmnand_soc_write(ctrl->soc, val, BRCMNAND_NON_MMIO_FC_ADDR);
+ else
+ __raw_writel(val, ctrl->nand_fc + word * 4);
}
static inline void edu_writel(struct brcmnand_controller *ctrl,
@@ -897,6 +920,12 @@ static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val)
static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl)
{
+ /* Kludge for the BCMA-based NAND controller which does not actually
+ * shift the command
+ */
+ if (ctrl->nand_version == 0x0304 && brcmnand_non_mmio_ops(ctrl))
+ return 0;
+
if (ctrl->nand_version < 0x0602)
return 24;
return 0;
@@ -1592,7 +1621,7 @@ static bool brcmstb_nand_wait_for_completion(struct nand_chip *chip)
bool err = false;
int sts;
- if (mtd->oops_panic_write) {
+ if (mtd->oops_panic_write || ctrl->irq < 0) {
/* switch to interrupt polling and PIO mode */
disable_ctrl_irqs(ctrl);
sts = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY,
@@ -2106,7 +2135,7 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
mtd->oobsize / trans,
host->hwcfg.sector_size_1k);
- if (!ret) {
+ if (ret != -EBADMSG) {
*err_addr = brcmnand_get_uncorrecc_addr(ctrl);
if (*err_addr)
@@ -2750,33 +2779,27 @@ static const struct nand_controller_ops brcmnand_controller_ops = {
.attach_chip = brcmnand_attach_chip,
};
-static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
+static int brcmnand_init_cs(struct brcmnand_host *host,
+ const char * const *part_probe_types)
{
struct brcmnand_controller *ctrl = host->ctrl;
- struct platform_device *pdev = host->pdev;
+ struct device *dev = ctrl->dev;
struct mtd_info *mtd;
struct nand_chip *chip;
int ret;
u16 cfg_offs;
- ret = of_property_read_u32(dn, "reg", &host->cs);
- if (ret) {
- dev_err(&pdev->dev, "can't get chip-select\n");
- return -ENXIO;
- }
-
mtd = nand_to_mtd(&host->chip);
chip = &host->chip;
- nand_set_flash_node(chip, dn);
nand_set_controller_data(chip, host);
- mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "brcmnand.%d",
+ mtd->name = devm_kasprintf(dev, GFP_KERNEL, "brcmnand.%d",
host->cs);
if (!mtd->name)
return -ENOMEM;
mtd->owner = THIS_MODULE;
- mtd->dev.parent = &pdev->dev;
+ mtd->dev.parent = dev;
chip->legacy.cmd_ctrl = brcmnand_cmd_ctrl;
chip->legacy.cmdfunc = brcmnand_cmdfunc;
@@ -2810,7 +2833,7 @@ static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
if (ret)
return ret;
- ret = mtd_device_register(mtd, NULL, 0);
+ ret = mtd_device_parse_register(mtd, part_probe_types, NULL, NULL, 0);
if (ret)
nand_cleanup(chip);
@@ -2914,7 +2937,7 @@ const struct dev_pm_ops brcmnand_pm_ops = {
};
EXPORT_SYMBOL_GPL(brcmnand_pm_ops);
-static const struct of_device_id brcmnand_of_match[] = {
+static const struct of_device_id __maybe_unused brcmnand_of_match[] = {
{ .compatible = "brcm,brcmnand-v2.1" },
{ .compatible = "brcm,brcmnand-v2.2" },
{ .compatible = "brcm,brcmnand-v4.0" },
@@ -2979,17 +3002,15 @@ static int brcmnand_edu_setup(struct platform_device *pdev)
int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
{
+ struct brcmnand_platform_data *pd = dev_get_platdata(&pdev->dev);
struct device *dev = &pdev->dev;
struct device_node *dn = dev->of_node, *child;
struct brcmnand_controller *ctrl;
+ struct brcmnand_host *host;
struct resource *res;
int ret;
- /* We only support device-tree instantiation */
- if (!dn)
- return -ENODEV;
-
- if (!of_match_node(brcmnand_of_match, dn))
+ if (dn && !of_match_node(brcmnand_of_match, dn))
return -ENODEV;
ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
@@ -2998,6 +3019,13 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
dev_set_drvdata(dev, ctrl);
ctrl->dev = dev;
+ ctrl->soc = soc;
+
+	/* Enable the static key if the SoC provides I/O operations, which
+	 * indicates that a non-memory-mapped I/O access path must be used.
+	 */
+ if (brcmnand_soc_has_ops(ctrl->soc))
+ static_branch_enable(&brcmnand_soc_has_ops_key);
init_completion(&ctrl->done);
init_completion(&ctrl->dma_done);
@@ -3009,7 +3037,7 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
/* NAND register range */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ctrl->nand_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(ctrl->nand_base))
+ if (IS_ERR(ctrl->nand_base) && !brcmnand_soc_has_ops(soc))
return PTR_ERR(ctrl->nand_base);
/* Enable clock before using NAND registers */
@@ -3126,40 +3154,33 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
}
/* IRQ */
- ctrl->irq = platform_get_irq(pdev, 0);
- if ((int)ctrl->irq < 0) {
- dev_err(dev, "no IRQ defined\n");
- ret = -ENODEV;
- goto err;
- }
-
- /*
- * Some SoCs integrate this controller (e.g., its interrupt bits) in
- * interesting ways
- */
- if (soc) {
- ctrl->soc = soc;
-
- ret = devm_request_irq(dev, ctrl->irq, brcmnand_irq, 0,
- DRV_NAME, ctrl);
+ ctrl->irq = platform_get_irq_optional(pdev, 0);
+ if (ctrl->irq > 0) {
+ /*
+ * Some SoCs integrate this controller (e.g., its interrupt bits) in
+ * interesting ways
+ */
+ if (soc) {
+ ret = devm_request_irq(dev, ctrl->irq, brcmnand_irq, 0,
+ DRV_NAME, ctrl);
- /* Enable interrupt */
- ctrl->soc->ctlrdy_ack(ctrl->soc);
- ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
- } else {
- /* Use standard interrupt infrastructure */
- ret = devm_request_irq(dev, ctrl->irq, brcmnand_ctlrdy_irq, 0,
- DRV_NAME, ctrl);
- }
- if (ret < 0) {
- dev_err(dev, "can't allocate IRQ %d: error %d\n",
- ctrl->irq, ret);
- goto err;
+ /* Enable interrupt */
+ ctrl->soc->ctlrdy_ack(ctrl->soc);
+ ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
+ } else {
+ /* Use standard interrupt infrastructure */
+ ret = devm_request_irq(dev, ctrl->irq, brcmnand_ctlrdy_irq, 0,
+ DRV_NAME, ctrl);
+ }
+ if (ret < 0) {
+ dev_err(dev, "can't allocate IRQ %d: error %d\n",
+ ctrl->irq, ret);
+ goto err;
+ }
}
for_each_available_child_of_node(dn, child) {
if (of_device_is_compatible(child, "brcm,nandcs")) {
- struct brcmnand_host *host;
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host) {
@@ -3170,7 +3191,16 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
host->pdev = pdev;
host->ctrl = ctrl;
- ret = brcmnand_init_cs(host, child);
+ ret = of_property_read_u32(child, "reg", &host->cs);
+ if (ret) {
+ dev_err(dev, "can't get chip-select\n");
+ devm_kfree(dev, host);
+ continue;
+ }
+
+ nand_set_flash_node(&host->chip, child);
+
+ ret = brcmnand_init_cs(host, NULL);
if (ret) {
devm_kfree(dev, host);
continue; /* Try all chip-selects */
@@ -3180,6 +3210,32 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
}
}
+ if (!list_empty(&ctrl->host_list))
+ return 0;
+
+ if (!pd) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+	/* If we get here, we must be probing via platform data */
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ host->pdev = pdev;
+ host->ctrl = ctrl;
+ host->cs = pd->chip_select;
+ host->chip.ecc.size = pd->ecc_stepsize;
+ host->chip.ecc.strength = pd->ecc_strength;
+
+ ret = brcmnand_init_cs(host, pd->part_probe_types);
+ if (ret)
+ goto err;
+
+ list_add_tail(&host->node, &ctrl->host_list);
+
/* No chip-selects could initialize properly */
if (list_empty(&ctrl->host_list)) {
ret = -ENODEV;
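To illustrate the new probing path added above: a board or bus glue that has no device tree node for the chip can now pass a struct brcmnand_platform_data instead. The field names below are taken from the hunk above (the header itself is added elsewhere in this series); the values are made up for illustration.

	static const char * const my_part_probes[] = { "cmdlinepart", NULL };

	static struct brcmnand_platform_data my_nand_pd = {
		.chip_select	  = 0,
		.part_probe_types = my_part_probes,
		.ecc_stepsize	  = 512,	/* bytes per ECC step */
		.ecc_strength	  = 1,		/* correctable bits per step */
	};

When no "brcm,nandcs" child nodes are found, brcmnand_probe() falls back to this data and registers a single chip-select.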
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.h b/drivers/mtd/nand/raw/brcmnand/brcmnand.h
index eb498fbe505e..f1f93d85f50d 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.h
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.h
@@ -11,12 +11,25 @@
struct platform_device;
struct dev_pm_ops;
+struct brcmnand_io_ops;
+
+/* Special register offset constant to intercept a non-MMIO access
+ * to the flash cache register space. The value is intentionally large
+ * so that it cannot overlap with any existing register offset.
+ */
+#define BRCMNAND_NON_MMIO_FC_ADDR 0xffffffff
struct brcmnand_soc {
bool (*ctlrdy_ack)(struct brcmnand_soc *soc);
void (*ctlrdy_set_enabled)(struct brcmnand_soc *soc, bool en);
void (*prepare_data_bus)(struct brcmnand_soc *soc, bool prepare,
bool is_param);
+ const struct brcmnand_io_ops *ops;
+};
+
+struct brcmnand_io_ops {
+ u32 (*read_reg)(struct brcmnand_soc *soc, u32 offset);
+ void (*write_reg)(struct brcmnand_soc *soc, u32 val, u32 offset);
};
static inline void brcmnand_soc_data_bus_prepare(struct brcmnand_soc *soc,
@@ -58,6 +71,22 @@ static inline void brcmnand_writel(u32 val, void __iomem *addr)
writel_relaxed(val, addr);
}
+static inline bool brcmnand_soc_has_ops(struct brcmnand_soc *soc)
+{
+ return soc && soc->ops && soc->ops->read_reg && soc->ops->write_reg;
+}
+
+static inline u32 brcmnand_soc_read(struct brcmnand_soc *soc, u32 offset)
+{
+ return soc->ops->read_reg(soc, offset);
+}
+
+static inline void brcmnand_soc_write(struct brcmnand_soc *soc, u32 val,
+ u32 offset)
+{
+ soc->ops->write_reg(soc, val, offset);
+}
+
int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc);
int brcmnand_remove(struct platform_device *pdev);
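A sketch of how another SoC glue driver would opt in to the register hooks declared above; the function names are hypothetical and the wiring simply mirrors bcma_nand.c earlier in this diff.

	static u32 my_soc_read(struct brcmnand_soc *soc, u32 offset)
	{
		/* translate "offset" and read it over the SoC-specific bus */
		return 0;
	}

	static void my_soc_write(struct brcmnand_soc *soc, u32 val, u32 offset)
	{
		/* write counterpart of my_soc_read() */
	}

	static const struct brcmnand_io_ops my_soc_io_ops = {
		.read_reg  = my_soc_read,
		.write_reg = my_soc_write,
	};

Assigning soc->ops = &my_soc_io_ops before calling brcmnand_probe() makes brcmnand_soc_has_ops() return true, which enables the static key in brcmnand.c and routes all register and flash-cache accesses through these callbacks.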
diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c
index 118da9944e3b..45fec8c192ab 100644
--- a/drivers/mtd/nand/raw/davinci_nand.c
+++ b/drivers/mtd/nand/raw/davinci_nand.c
@@ -371,77 +371,6 @@ correct:
return corrected;
}
-/**
- * nand_read_page_hwecc_oob_first - hw ecc, read oob first
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- * @oob_required: caller requires OOB data read to chip->oob_poi
- * @page: page number to read
- *
- * Hardware ECC for large page chips, require OOB to be read first. For this
- * ECC mode, the write_page method is re-used from ECC_HW. These methods
- * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with
- * multiple ECC steps, follows the "infix ECC" scheme and reads/writes ECC from
- * the data area, by overwriting the NAND manufacturer bad block markings.
- */
-static int nand_davinci_read_page_hwecc_oob_first(struct nand_chip *chip,
- uint8_t *buf,
- int oob_required, int page)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- int i, eccsize = chip->ecc.size, ret;
- int eccbytes = chip->ecc.bytes;
- int eccsteps = chip->ecc.steps;
- uint8_t *p = buf;
- uint8_t *ecc_code = chip->ecc.code_buf;
- uint8_t *ecc_calc = chip->ecc.calc_buf;
- unsigned int max_bitflips = 0;
-
- /* Read the OOB area first */
- ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
- if (ret)
- return ret;
-
- ret = nand_read_page_op(chip, page, 0, NULL, 0);
- if (ret)
- return ret;
-
- ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
- chip->ecc.total);
- if (ret)
- return ret;
-
- for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
- int stat;
-
- chip->ecc.hwctl(chip, NAND_ECC_READ);
-
- ret = nand_read_data_op(chip, p, eccsize, false, false);
- if (ret)
- return ret;
-
- chip->ecc.calculate(chip, p, &ecc_calc[i]);
-
- stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
- if (stat == -EBADMSG &&
- (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
- /* check for empty pages with bitflips */
- stat = nand_check_erased_ecc_chunk(p, eccsize,
- &ecc_code[i],
- eccbytes, NULL, 0,
- chip->ecc.strength);
- }
-
- if (stat < 0) {
- mtd->ecc_stats.failed++;
- } else {
- mtd->ecc_stats.corrected += stat;
- max_bitflips = max_t(unsigned int, max_bitflips, stat);
- }
- }
- return max_bitflips;
-}
-
/*----------------------------------------------------------------------*/
/* An ECC layout for using 4-bit ECC with small-page flash, storing
@@ -651,7 +580,7 @@ static int davinci_nand_attach_chip(struct nand_chip *chip)
} else if (chunks == 4 || chunks == 8) {
mtd_set_ooblayout(mtd,
nand_get_large_page_ooblayout());
- chip->ecc.read_page = nand_davinci_read_page_hwecc_oob_first;
+ chip->ecc.read_page = nand_read_page_hwecc_oob_first;
} else {
return -EIO;
}
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index 10cc71829dcb..44b14c9dc9a7 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -644,10 +644,11 @@ err_out:
* RDN_DELAY = ----------------------- {3}
* RP
*/
-static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
- const struct nand_sdr_timings *sdr)
+static int gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
+ const struct nand_sdr_timings *sdr)
{
struct gpmi_nfc_hardware_timing *hw = &this->hw;
+ struct resources *r = &this->resources;
unsigned int dll_threshold_ps = this->devdata->max_chain_delay;
unsigned int period_ps, reference_period_ps;
unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles;
@@ -656,21 +657,33 @@ static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
int sample_delay_ps, sample_delay_factor;
u16 busy_timeout_cycles;
u8 wrn_dly_sel;
+ unsigned long clk_rate, min_rate;
if (sdr->tRC_min >= 30000) {
/* ONFI non-EDO modes [0-3] */
hw->clk_rate = 22000000;
+ min_rate = 0;
wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
} else if (sdr->tRC_min >= 25000) {
/* ONFI EDO mode 4 */
hw->clk_rate = 80000000;
+ min_rate = 22000000;
wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
} else {
/* ONFI EDO mode 5 */
hw->clk_rate = 100000000;
+ min_rate = 80000000;
wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
}
+ clk_rate = clk_round_rate(r->clock[0], hw->clk_rate);
+ if (clk_rate <= min_rate) {
+ dev_err(this->dev, "clock setting: expected %ld, got %ld\n",
+ hw->clk_rate, clk_rate);
+ return -ENOTSUPP;
+ }
+
+ hw->clk_rate = clk_rate;
/* SDR core timings are given in picoseconds */
period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate);
@@ -711,16 +724,35 @@ static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
hw->ctrl1n |= BF_GPMI_CTRL1_RDN_DELAY(sample_delay_factor) |
BM_GPMI_CTRL1_DLL_ENABLE |
(use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0);
+ return 0;
}
-static void gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
+static int gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
{
struct gpmi_nfc_hardware_timing *hw = &this->hw;
struct resources *r = &this->resources;
void __iomem *gpmi_regs = r->gpmi_regs;
unsigned int dll_wait_time_us;
+ int ret;
+
+	/* Clock dividers do NOT guarantee a clean clock signal on their output
+	 * while the divide factor is being changed on i.MX6Q/UL/SX. On i.MX7/8,
+	 * all clock dividers provide this guarantee.
+	 */
+ if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this))
+ clk_disable_unprepare(r->clock[0]);
- clk_set_rate(r->clock[0], hw->clk_rate);
+ ret = clk_set_rate(r->clock[0], hw->clk_rate);
+ if (ret) {
+ dev_err(this->dev, "cannot set clock rate to %lu Hz: %d\n", hw->clk_rate, ret);
+ return ret;
+ }
+
+ if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this)) {
+ ret = clk_prepare_enable(r->clock[0]);
+ if (ret)
+ return ret;
+ }
writel(hw->timing0, gpmi_regs + HW_GPMI_TIMING0);
writel(hw->timing1, gpmi_regs + HW_GPMI_TIMING1);
@@ -739,6 +771,8 @@ static void gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
/* Wait for the DLL to settle. */
udelay(dll_wait_time_us);
+
+ return 0;
}
static int gpmi_setup_interface(struct nand_chip *chip, int chipnr,
@@ -746,14 +780,15 @@ static int gpmi_setup_interface(struct nand_chip *chip, int chipnr,
{
struct gpmi_nand_data *this = nand_get_controller_data(chip);
const struct nand_sdr_timings *sdr;
+ int ret;
/* Retrieve required NAND timings */
sdr = nand_get_sdr_timings(conf);
if (IS_ERR(sdr))
return PTR_ERR(sdr);
- /* Only MX6 GPMI controller can reach EDO timings */
- if (sdr->tRC_min <= 25000 && !GPMI_IS_MX6(this))
+ /* Only MX28/MX6 GPMI controller can reach EDO timings */
+ if (sdr->tRC_min <= 25000 && !GPMI_IS_MX28(this) && !GPMI_IS_MX6(this))
return -ENOTSUPP;
/* Stop here if this call was just a check */
@@ -761,7 +796,9 @@ static int gpmi_setup_interface(struct nand_chip *chip, int chipnr,
return 0;
/* Do the actual derivation of the controller timings */
- gpmi_nfc_compute_timings(this, sdr);
+ ret = gpmi_nfc_compute_timings(this, sdr);
+ if (ret)
+ return ret;
this->hw.must_apply_timings = true;
@@ -971,16 +1008,13 @@ static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
{
struct platform_device *pdev = this->pdev;
const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
- struct resource *r;
int err;
- r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
- if (!r) {
- dev_err(this->dev, "Can't get resource for %s\n", res_name);
- return -ENODEV;
- }
+ err = platform_get_irq_byname(pdev, res_name);
+ if (err < 0)
+ return err;
- err = devm_request_irq(this->dev, r->start, irq_h, 0, res_name, this);
+ err = devm_request_irq(this->dev, err, irq_h, 0, res_name, this);
if (err)
dev_err(this->dev, "error requesting BCH IRQ\n");
@@ -1032,15 +1066,6 @@ static int gpmi_get_clks(struct gpmi_nand_data *this)
r->clock[i] = clk;
}
- if (GPMI_IS_MX6(this))
- /*
- * Set the default value for the gpmi clock.
- *
- * If you want to use the ONFI nand which is in the
- * Synchronous Mode, you should change the clock as you need.
- */
- clk_set_rate(r->clock[0], 22000000);
-
return 0;
err_clock:
@@ -1425,7 +1450,6 @@ static int gpmi_ecc_write_page(struct nand_chip *chip, const uint8_t *buf,
struct mtd_info *mtd = nand_to_mtd(chip);
struct gpmi_nand_data *this = nand_get_controller_data(chip);
struct bch_geometry *nfc_geo = &this->bch_geometry;
- int ret;
dev_dbg(this->dev, "ecc write page.\n");
@@ -1445,9 +1469,7 @@ static int gpmi_ecc_write_page(struct nand_chip *chip, const uint8_t *buf,
this->auxiliary_virt);
}
- ret = nand_prog_page_op(chip, page, 0, buf, nfc_geo->page_size);
-
- return ret;
+ return nand_prog_page_op(chip, page, 0, buf, nfc_geo->page_size);
}
/*
@@ -2278,7 +2300,9 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip,
*/
if (this->hw.must_apply_timings) {
this->hw.must_apply_timings = false;
- gpmi_nfc_apply_timings(this);
+ ret = gpmi_nfc_apply_timings(this);
+ if (ret)
+ goto out_pm;
}
dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);
@@ -2407,6 +2431,7 @@ unmap:
this->bch = false;
+out_pm:
pm_runtime_mark_last_busy(this->dev);
pm_runtime_put_autosuspend(this->dev);
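The clock handling added above combines two common clk API patterns: clk_round_rate() is queried while computing timings to learn what the parent clock can actually deliver (rates at or below the previous mode's ceiling are rejected), and the rate is then changed with the divider gated on the SoCs whose dividers glitch while being reprogrammed. A condensed sketch of that second pattern, with error handling simplified:

	static int set_rate_gated(struct clk *clk, unsigned long rate)
	{
		int ret;

		clk_disable_unprepare(clk);		/* divider output may glitch while it is reprogrammed */
		ret = clk_set_rate(clk, rate);
		if (!ret)
			ret = clk_prepare_enable(clk);	/* ungate again once the divider is stable */

		return ret;
	}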
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
index efe0ffe4f1ab..9054559e52dd 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
@@ -68,9 +68,14 @@ static struct ingenic_ecc *ingenic_ecc_get(struct device_node *np)
struct ingenic_ecc *ecc;
pdev = of_find_device_by_node(np);
- if (!pdev || !platform_get_drvdata(pdev))
+ if (!pdev)
return ERR_PTR(-EPROBE_DEFER);
+ if (!platform_get_drvdata(pdev)) {
+ put_device(&pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
ecc = platform_get_drvdata(pdev);
clk_prepare_enable(ecc->clk);
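The fix above is purely about reference counting: of_find_device_by_node() takes a reference on the device it returns, so bailing out with -EPROBE_DEFER without a matching put_device() would leak it. The general shape of the pattern (illustration, not the driver code verbatim):

	struct platform_device *pdev = of_find_device_by_node(np);

	if (!pdev)
		return ERR_PTR(-EPROBE_DEFER);
	if (!platform_get_drvdata(pdev)) {
		put_device(&pdev->dev);		/* balance the reference taken above */
		return ERR_PTR(-EPROBE_DEFER);
	}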
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
index 0e9d426fe4f2..ff26c10f295d 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
@@ -32,6 +32,7 @@ struct jz_soc_info {
unsigned long addr_offset;
unsigned long cmd_offset;
const struct mtd_ooblayout_ops *oob_layout;
+ bool oob_first;
};
struct ingenic_nand_cs {
@@ -240,6 +241,9 @@ static int ingenic_nand_attach_chip(struct nand_chip *chip)
if (chip->bbt_options & NAND_BBT_USE_FLASH)
chip->bbt_options |= NAND_BBT_NO_OOB;
+ if (nfc->soc_info->oob_first)
+ chip->ecc.read_page = nand_read_page_hwecc_oob_first;
+
/* For legacy reasons we use a different layout on the qi,lb60 board. */
if (of_machine_is_compatible("qi,lb60"))
mtd_set_ooblayout(mtd, &qi_lb60_ooblayout_ops);
@@ -534,6 +538,7 @@ static const struct jz_soc_info jz4740_soc_info = {
.data_offset = 0x00000000,
.cmd_offset = 0x00008000,
.addr_offset = 0x00010000,
+ .oob_first = true,
};
static const struct jz_soc_info jz4725b_soc_info = {
@@ -562,7 +567,7 @@ static struct platform_driver ingenic_nand_driver = {
.remove = ingenic_nand_remove,
.driver = {
.name = DRV_NAME,
- .of_match_table = of_match_ptr(ingenic_nand_dt_match),
+ .of_match_table = ingenic_nand_dt_match,
},
};
module_platform_driver(ingenic_nand_driver);
diff --git a/drivers/mtd/nand/raw/ingenic/jz4780_bch.c b/drivers/mtd/nand/raw/ingenic/jz4780_bch.c
index d67dbfff76cc..12b5b0484fe9 100644
--- a/drivers/mtd/nand/raw/ingenic/jz4780_bch.c
+++ b/drivers/mtd/nand/raw/ingenic/jz4780_bch.c
@@ -260,7 +260,7 @@ static struct platform_driver jz4780_bch_driver = {
.probe = jz4780_bch_probe,
.driver = {
.name = "jz4780-bch",
- .of_match_table = of_match_ptr(jz4780_bch_dt_match),
+ .of_match_table = jz4780_bch_dt_match,
},
};
module_platform_driver(jz4780_bch_driver);
diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c
index cb293c50acb8..5b9271b9c326 100644
--- a/drivers/mtd/nand/raw/mpc5121_nfc.c
+++ b/drivers/mtd/nand/raw/mpc5121_nfc.c
@@ -291,7 +291,6 @@ static int ads5121_chipselect_init(struct mtd_info *mtd)
/* Control chips select signal on ADS5121 board */
static void ads5121_select_chip(struct nand_chip *nand, int chip)
{
- struct mtd_info *mtd = nand_to_mtd(nand);
struct mpc5121_nfc_prv *prv = nand_get_controller_data(nand);
u8 v;
diff --git a/drivers/mtd/nand/raw/mtk_ecc.c b/drivers/mtd/nand/raw/mtk_ecc.c
index 1b47964cb6da..e7df3dac705e 100644
--- a/drivers/mtd/nand/raw/mtk_ecc.c
+++ b/drivers/mtd/nand/raw/mtk_ecc.c
@@ -579,7 +579,7 @@ static struct platform_driver mtk_ecc_driver = {
.probe = mtk_ecc_probe,
.driver = {
.name = "mtk-ecc",
- .of_match_table = of_match_ptr(mtk_ecc_dt_match),
+ .of_match_table = mtk_ecc_dt_match,
#ifdef CONFIG_PM_SLEEP
.pm = &mtk_ecc_pm_ops,
#endif
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index a130320de412..284fff62ac49 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -321,6 +321,9 @@ static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
if (nand_region_is_secured(chip, ofs, mtd->erasesize))
return -EIO;
+ if (mtd_check_expert_analysis_mode())
+ return 0;
+
if (chip->legacy.block_bad)
return chip->legacy.block_bad(chip, ofs);
@@ -335,16 +338,19 @@ static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
*
* Return: -EBUSY if the chip has been suspended, 0 otherwise
*/
-static int nand_get_device(struct nand_chip *chip)
+static void nand_get_device(struct nand_chip *chip)
{
- mutex_lock(&chip->lock);
- if (chip->suspended) {
+ /* Wait until the device is resumed. */
+ while (1) {
+ mutex_lock(&chip->lock);
+ if (!chip->suspended) {
+ mutex_lock(&chip->controller->lock);
+ return;
+ }
mutex_unlock(&chip->lock);
- return -EBUSY;
- }
- mutex_lock(&chip->controller->lock);
- return 0;
+ wait_event(chip->resume_wq, !chip->suspended);
+ }
}
/**
@@ -573,9 +579,7 @@ static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
nand_erase_nand(chip, &einfo, 0);
/* Write bad block marker to OOB */
- ret = nand_get_device(chip);
- if (ret)
- return ret;
+ nand_get_device(chip);
ret = nand_markbad_bbm(chip, ofs);
nand_release_device(chip);
@@ -3161,6 +3165,73 @@ static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
}
/**
+ * nand_read_page_hwecc_oob_first - Hardware ECC page read with ECC
+ * data read from OOB area
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Hardware ECC for large page chips, which requires the ECC data to be
+ * extracted from the OOB before the actual data is read.
+ */
+int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int i, eccsize = chip->ecc.size, ret;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
+ unsigned int max_bitflips = 0;
+
+ /* Read the OOB area first */
+ ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+ if (ret)
+ return ret;
+
+ /* Move read cursor to start of page */
+ ret = nand_change_read_column_op(chip, 0, NULL, 0, false);
+ if (ret)
+ return ret;
+
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ chip->ecc.hwctl(chip, NAND_ECC_READ);
+
+ ret = nand_read_data_op(chip, p, eccsize, false, false);
+ if (ret)
+ return ret;
+
+ stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
+ if (stat == -EBADMSG &&
+ (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
+ /* check for empty pages with bitflips */
+ stat = nand_check_erased_ecc_chunk(p, eccsize,
+ &ecc_code[i],
+ eccbytes, NULL, 0,
+ chip->ecc.strength);
+ }
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+ return max_bitflips;
+}
+EXPORT_SYMBOL_GPL(nand_read_page_hwecc_oob_first);
+
+/**
* nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
* @chip: nand chip info structure
* @buf: buffer to store read data
@@ -3756,9 +3827,7 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
ops->mode != MTD_OPS_RAW)
return -ENOTSUPP;
- ret = nand_get_device(chip);
- if (ret)
- return ret;
+ nand_get_device(chip);
if (!ops->datbuf)
ret = nand_do_read_oob(chip, from, ops);
@@ -4345,13 +4414,11 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
struct nand_chip *chip = mtd_to_nand(mtd);
- int ret;
+ int ret = 0;
ops->retlen = 0;
- ret = nand_get_device(chip);
- if (ret)
- return ret;
+ nand_get_device(chip);
switch (ops->mode) {
case MTD_OPS_PLACE_OOB:
@@ -4411,9 +4478,7 @@ int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
return -EIO;
/* Grab the lock and see if the device is available */
- ret = nand_get_device(chip);
- if (ret)
- return ret;
+ nand_get_device(chip);
/* Shift to get first page */
page = (int)(instr->addr >> chip->page_shift);
@@ -4500,7 +4565,7 @@ static void nand_sync(struct mtd_info *mtd)
pr_debug("%s: called\n", __func__);
/* Grab the lock and see if the device is available */
- WARN_ON(nand_get_device(chip));
+ nand_get_device(chip);
/* Release it and go back */
nand_release_device(chip);
}
@@ -4517,9 +4582,7 @@ static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
int ret;
/* Select the NAND device */
- ret = nand_get_device(chip);
- if (ret)
- return ret;
+ nand_get_device(chip);
nand_select_target(chip, chipnr);
@@ -4590,6 +4653,8 @@ static void nand_resume(struct mtd_info *mtd)
__func__);
}
mutex_unlock(&chip->lock);
+
+ wake_up_all(&chip->resume_wq);
}
/**
@@ -5204,25 +5269,24 @@ static void of_get_nand_ecc_legacy_user_config(struct nand_chip *chip)
user_conf->placement = of_get_rawnand_ecc_placement_legacy(dn);
}
-static int of_get_nand_bus_width(struct device_node *np)
+static int of_get_nand_bus_width(struct nand_chip *chip)
{
+ struct device_node *dn = nand_get_flash_node(chip);
u32 val;
+ int ret;
- if (of_property_read_u32(np, "nand-bus-width", &val))
- return 8;
-
- switch (val) {
- case 8:
- case 16:
- return val;
- default:
- return -EIO;
- }
-}
+ ret = of_property_read_u32(dn, "nand-bus-width", &val);
+ if (ret == -EINVAL)
+		/* Buswidth defaults to 8 if the property does not exist. */
+ return 0;
+ else if (ret)
+ return ret;
-static bool of_get_nand_on_flash_bbt(struct device_node *np)
-{
- return of_property_read_bool(np, "nand-on-flash-bbt");
+ if (val == 16)
+ chip->options |= NAND_BUSWIDTH_16;
+ else if (val != 8)
+ return -EINVAL;
+ return 0;
}
static int of_get_nand_secure_regions(struct nand_chip *chip)
@@ -5298,17 +5362,19 @@ static int rawnand_dt_init(struct nand_chip *chip)
{
struct nand_device *nand = mtd_to_nanddev(nand_to_mtd(chip));
struct device_node *dn = nand_get_flash_node(chip);
+ int ret;
if (!dn)
return 0;
- if (of_get_nand_bus_width(dn) == 16)
- chip->options |= NAND_BUSWIDTH_16;
+ ret = of_get_nand_bus_width(chip);
+ if (ret)
+ return ret;
if (of_property_read_bool(dn, "nand-is-boot-medium"))
chip->options |= NAND_IS_BOOT_MEDIUM;
- if (of_get_nand_on_flash_bbt(dn))
+ if (of_property_read_bool(dn, "nand-on-flash-bbt"))
chip->bbt_options |= NAND_BBT_USE_FLASH;
of_get_nand_ecc_user_config(nand);
@@ -5367,6 +5433,7 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
chip->cur_cs = -1;
mutex_init(&chip->lock);
+ init_waitqueue_head(&chip->resume_wq);
/* Enforce the right timings for reset/detection */
chip->current_interface_config = nand_get_reset_interface_config();
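Two of the additions above are meant to be consumed by controller drivers: nand_read_page_hwecc_oob_first() becomes a generic, exported ->read_page() implementation (the davinci and ingenic hunks in this diff switch over to it), and nand_get_device() now sleeps on resume_wq instead of returning -EBUSY while the chip is suspended. Opting in to the former looks like this (sketch only; the attach_chip hook name is just an example):

	static int my_attach_chip(struct nand_chip *chip)
	{
		/* this controller needs the ECC bytes from the OOB before the data */
		chip->ecc.read_page = nand_read_page_hwecc_oob_first;

		return 0;
	}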
diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c
index b7ad030225f8..a3723da2e0a0 100644
--- a/drivers/mtd/nand/raw/nand_bbt.c
+++ b/drivers/mtd/nand/raw/nand_bbt.c
@@ -1455,6 +1455,9 @@ int nand_isbad_bbt(struct nand_chip *this, loff_t offs, int allowbbt)
pr_debug("nand_isbad_bbt(): bbt info for offs 0x%08x: (block %d) 0x%02x\n",
(unsigned int)offs, block, res);
+ if (mtd_check_expert_analysis_mode())
+ return 0;
+
switch (res) {
case BBT_BLOCK_GOOD:
return 0;
diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c
index 0750121ac371..24beade95c7f 100644
--- a/drivers/mtd/nand/raw/nandsim.c
+++ b/drivers/mtd/nand/raw/nandsim.c
@@ -201,6 +201,9 @@ MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should "
/* Calculate the OOB offset in flash RAM image by (row, column) address */
#define NS_RAW_OFFSET_OOB(ns) (NS_RAW_OFFSET(ns) + ns->geom.pgsz)
+/* Calculate the byte shift in the next page to access */
+#define NS_PAGE_BYTE_SHIFT(ns) ((ns)->regs.column + (ns)->regs.off)
+
/* After a command is input, the simulator goes to one of the following states */
#define STATE_CMD_READ0 0x00000001 /* read data from the beginning of page */
#define STATE_CMD_READ1 0x00000002 /* read data from the second half of page */
@@ -979,15 +982,8 @@ static int ns_read_error(unsigned int page_no)
static int ns_setup_wear_reporting(struct mtd_info *mtd)
{
- size_t mem;
-
wear_eb_count = div_u64(mtd->size, mtd->erasesize);
- mem = wear_eb_count * sizeof(unsigned long);
- if (mem / sizeof(unsigned long) != wear_eb_count) {
- NS_ERR("Too many erase blocks for wear reporting\n");
- return -ENOMEM;
- }
- erase_block_wear = kzalloc(mem, GFP_KERNEL);
+ erase_block_wear = kcalloc(wear_eb_count, sizeof(unsigned long), GFP_KERNEL);
if (!erase_block_wear) {
NS_ERR("Too many erase blocks for wear reporting\n");
return -ENOMEM;
@@ -1389,7 +1385,7 @@ static inline union ns_mem *NS_GET_PAGE(struct nandsim *ns)
*/
static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns)
{
- return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off;
+ return NS_GET_PAGE(ns)->byte + NS_PAGE_BYTE_SHIFT(ns);
}
static int ns_do_read_error(struct nandsim *ns, int num)
@@ -1415,7 +1411,7 @@ static void ns_do_bit_flips(struct nandsim *ns, int num)
ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
NS_WARN("read_page: flipping bit %d in page %d "
"reading from %d ecc: corrected=%u failed=%u\n",
- pos, ns->regs.row, ns->regs.column + ns->regs.off,
+ pos, ns->regs.row, NS_PAGE_BYTE_SHIFT(ns),
nsmtd->ecc_stats.corrected, nsmtd->ecc_stats.failed);
}
}
@@ -1437,7 +1433,7 @@ static void ns_read_page(struct nandsim *ns, int num)
ssize_t tx;
NS_DBG("read_page: page %d written, reading from %d\n",
- ns->regs.row, ns->regs.column + ns->regs.off);
+ ns->regs.row, NS_PAGE_BYTE_SHIFT(ns));
if (ns_do_read_error(ns, num))
return;
pos = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off;
@@ -1458,7 +1454,7 @@ static void ns_read_page(struct nandsim *ns, int num)
memset(ns->buf.byte, 0xFF, num);
} else {
NS_DBG("read_page: page %d allocated, reading from %d\n",
- ns->regs.row, ns->regs.column + ns->regs.off);
+ ns->regs.row, NS_PAGE_BYTE_SHIFT(ns));
if (ns_do_read_error(ns, num))
return;
memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num);
@@ -1509,7 +1505,7 @@ static int ns_prog_page(struct nandsim *ns, int num)
int all;
NS_DBG("prog_page: writing page %d\n", ns->regs.row);
- pg_off = ns->file_buf + ns->regs.column + ns->regs.off;
+ pg_off = ns->file_buf + NS_PAGE_BYTE_SHIFT(ns);
off = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off;
if (!test_bit(ns->regs.row, ns->pages_written)) {
all = 1;
@@ -1598,7 +1594,7 @@ static int ns_do_state_action(struct nandsim *ns, uint32_t action)
NS_ERR("do_state_action: column number is too large\n");
break;
}
- num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
+ num = ns->geom.pgszoob - NS_PAGE_BYTE_SHIFT(ns);
ns_read_page(ns, num);
NS_DBG("do_state_action: (ACTION_CPY:) copy %d bytes to int buf, raw offset %d\n",
@@ -1666,7 +1662,7 @@ static int ns_do_state_action(struct nandsim *ns, uint32_t action)
return -1;
}
- num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
+ num = ns->geom.pgszoob - NS_PAGE_BYTE_SHIFT(ns);
if (num != ns->regs.count) {
NS_ERR("do_state_action: too few bytes were input (%d instead of %d)\n",
ns->regs.count, num);
@@ -1738,14 +1734,6 @@ static void ns_switch_state(struct nandsim *ns)
"state: %s, nxstate: %s\n",
ns_get_state_name(ns->state),
ns_get_state_name(ns->nxstate));
-
- /* See, whether we need to do some action */
- if ((ns->state & ACTION_MASK) &&
- ns_do_state_action(ns, ns->state) < 0) {
- ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
- return;
- }
-
} else {
/*
* We don't yet know which operation we perform.
@@ -1762,12 +1750,13 @@ static void ns_switch_state(struct nandsim *ns)
if (ns_find_operation(ns, 0))
return;
+ }
- if ((ns->state & ACTION_MASK) &&
- ns_do_state_action(ns, ns->state) < 0) {
- ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
- return;
- }
+ /* See, whether we need to do some action */
+ if ((ns->state & ACTION_MASK) &&
+ ns_do_state_action(ns, ns->state) < 0) {
+ ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
}
/* For 16x devices column means the page offset in words */
@@ -1817,7 +1806,7 @@ static void ns_switch_state(struct nandsim *ns)
switch (NS_STATE(ns->state)) {
case STATE_DATAIN:
case STATE_DATAOUT:
- ns->regs.num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
+ ns->regs.num = ns->geom.pgszoob - NS_PAGE_BYTE_SHIFT(ns);
break;
case STATE_DATAOUT_ID:
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index b26d4947af02..58c32a11792e 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -19,7 +19,7 @@
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/omap-dma.h>
-#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -148,7 +148,6 @@ struct omap_nand_info {
int gpmc_cs;
bool dev_ready;
enum nand_io xfer_type;
- int devsize;
enum omap_ecc ecc_opt;
struct device_node *elm_of_node;
@@ -164,6 +163,7 @@ struct omap_nand_info {
u_char *buf;
int buf_len;
/* Interface to GPMC */
+ void __iomem *fifo;
struct gpmc_nand_regs reg;
struct gpmc_nand_ops *ops;
bool flash_bbt;
@@ -175,6 +175,11 @@ struct omap_nand_info {
unsigned int nsteps_per_eccpg;
unsigned int eccpg_size;
unsigned int eccpg_bytes;
+ void (*data_in)(struct nand_chip *chip, void *buf,
+ unsigned int len, bool force_8bit);
+ void (*data_out)(struct nand_chip *chip,
+ const void *buf, unsigned int len,
+ bool force_8bit);
};
static inline struct omap_nand_info *mtd_to_omap(struct mtd_info *mtd)
@@ -182,6 +187,13 @@ static inline struct omap_nand_info *mtd_to_omap(struct mtd_info *mtd)
return container_of(mtd_to_nand(mtd), struct omap_nand_info, nand);
}
+static void omap_nand_data_in(struct nand_chip *chip, void *buf,
+ unsigned int len, bool force_8bit);
+
+static void omap_nand_data_out(struct nand_chip *chip,
+ const void *buf, unsigned int len,
+ bool force_8bit);
+
/**
* omap_prefetch_enable - configures and starts prefetch transfer
* @cs: cs (chip select) number
@@ -241,169 +253,70 @@ static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
}
/**
- * omap_hwcontrol - hardware specific access to control-lines
- * @chip: NAND chip object
- * @cmd: command to device
- * @ctrl:
- * NAND_NCE: bit 0 -> don't care
- * NAND_CLE: bit 1 -> Command Latch
- * NAND_ALE: bit 2 -> Address Latch
- *
- * NOTE: boards may use different bits for these!!
+ * omap_nand_data_in_pref - NAND data in using prefetch engine
*/
-static void omap_hwcontrol(struct nand_chip *chip, int cmd, unsigned int ctrl)
+static void omap_nand_data_in_pref(struct nand_chip *chip, void *buf,
+ unsigned int len, bool force_8bit)
{
struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
-
- if (cmd != NAND_CMD_NONE) {
- if (ctrl & NAND_CLE)
- writeb(cmd, info->reg.gpmc_nand_command);
-
- else if (ctrl & NAND_ALE)
- writeb(cmd, info->reg.gpmc_nand_address);
-
- else /* NAND_NCE */
- writeb(cmd, info->reg.gpmc_nand_data);
- }
-}
-
-/**
- * omap_read_buf8 - read data from NAND controller into buffer
- * @mtd: MTD device structure
- * @buf: buffer to store date
- * @len: number of bytes to read
- */
-static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
-{
- struct nand_chip *nand = mtd_to_nand(mtd);
-
- ioread8_rep(nand->legacy.IO_ADDR_R, buf, len);
-}
-
-/**
- * omap_write_buf8 - write buffer to NAND controller
- * @mtd: MTD device structure
- * @buf: data buffer
- * @len: number of bytes to write
- */
-static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
-{
- struct omap_nand_info *info = mtd_to_omap(mtd);
- u_char *p = (u_char *)buf;
- bool status;
-
- while (len--) {
- iowrite8(*p++, info->nand.legacy.IO_ADDR_W);
- /* wait until buffer is available for write */
- do {
- status = info->ops->nand_writebuffer_empty();
- } while (!status);
- }
-}
-
-/**
- * omap_read_buf16 - read data from NAND controller into buffer
- * @mtd: MTD device structure
- * @buf: buffer to store date
- * @len: number of bytes to read
- */
-static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
-{
- struct nand_chip *nand = mtd_to_nand(mtd);
-
- ioread16_rep(nand->legacy.IO_ADDR_R, buf, len / 2);
-}
-
-/**
- * omap_write_buf16 - write buffer to NAND controller
- * @mtd: MTD device structure
- * @buf: data buffer
- * @len: number of bytes to write
- */
-static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
-{
- struct omap_nand_info *info = mtd_to_omap(mtd);
- u16 *p = (u16 *) buf;
- bool status;
- /* FIXME try bursts of writesw() or DMA ... */
- len >>= 1;
-
- while (len--) {
- iowrite16(*p++, info->nand.legacy.IO_ADDR_W);
- /* wait until buffer is available for write */
- do {
- status = info->ops->nand_writebuffer_empty();
- } while (!status);
- }
-}
-
-/**
- * omap_read_buf_pref - read data from NAND controller into buffer
- * @chip: NAND chip object
- * @buf: buffer to store date
- * @len: number of bytes to read
- */
-static void omap_read_buf_pref(struct nand_chip *chip, u_char *buf, int len)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct omap_nand_info *info = mtd_to_omap(mtd);
uint32_t r_count = 0;
int ret = 0;
u32 *p = (u32 *)buf;
+ unsigned int pref_len;
- /* take care of subpage reads */
- if (len % 4) {
- if (info->nand.options & NAND_BUSWIDTH_16)
- omap_read_buf16(mtd, buf, len % 4);
- else
- omap_read_buf8(mtd, buf, len % 4);
- p = (u32 *) (buf + len % 4);
- len -= len % 4;
+ if (force_8bit) {
+ omap_nand_data_in(chip, buf, len, force_8bit);
+ return;
}
+ /* read 32-bit words using prefetch and remaining bytes normally */
+
/* configure and start prefetch transfer */
+ pref_len = len - (len & 3);
ret = omap_prefetch_enable(info->gpmc_cs,
- PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0, info);
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x0, pref_len, 0x0, info);
if (ret) {
- /* PFPW engine is busy, use cpu copy method */
- if (info->nand.options & NAND_BUSWIDTH_16)
- omap_read_buf16(mtd, (u_char *)p, len);
- else
- omap_read_buf8(mtd, (u_char *)p, len);
+ /* prefetch engine is busy, use CPU copy method */
+ omap_nand_data_in(chip, buf, len, false);
} else {
do {
r_count = readl(info->reg.gpmc_prefetch_status);
r_count = PREFETCH_STATUS_FIFO_CNT(r_count);
r_count = r_count >> 2;
- ioread32_rep(info->nand.legacy.IO_ADDR_R, p, r_count);
+ ioread32_rep(info->fifo, p, r_count);
p += r_count;
- len -= r_count << 2;
- } while (len);
- /* disable and stop the PFPW engine */
+ pref_len -= r_count << 2;
+ } while (pref_len);
+ /* disable and stop the Prefetch engine */
omap_prefetch_reset(info->gpmc_cs, info);
+ /* fetch any remaining bytes */
+ if (len & 3)
+ omap_nand_data_in(chip, p, len & 3, false);
}
}
/**
- * omap_write_buf_pref - write buffer to NAND controller
- * @chip: NAND chip object
- * @buf: data buffer
- * @len: number of bytes to write
+ * omap_nand_data_out_pref - NAND data out using Write Posting engine
*/
-static void omap_write_buf_pref(struct nand_chip *chip, const u_char *buf,
- int len)
+static void omap_nand_data_out_pref(struct nand_chip *chip,
+ const void *buf, unsigned int len,
+ bool force_8bit)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct omap_nand_info *info = mtd_to_omap(mtd);
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
uint32_t w_count = 0;
int i = 0, ret = 0;
u16 *p = (u16 *)buf;
unsigned long tim, limit;
u32 val;
+ if (force_8bit) {
+ omap_nand_data_out(chip, buf, len, force_8bit);
+ return;
+ }
+
/* take care of subpage writes */
if (len % 2 != 0) {
- writeb(*buf, info->nand.legacy.IO_ADDR_W);
+ writeb(*(u8 *)buf, info->fifo);
p = (u16 *)(buf + 1);
len--;
}
@@ -412,18 +325,15 @@ static void omap_write_buf_pref(struct nand_chip *chip, const u_char *buf,
ret = omap_prefetch_enable(info->gpmc_cs,
PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1, info);
if (ret) {
- /* PFPW engine is busy, use cpu copy method */
- if (info->nand.options & NAND_BUSWIDTH_16)
- omap_write_buf16(mtd, (u_char *)p, len);
- else
- omap_write_buf8(mtd, (u_char *)p, len);
+ /* write posting engine is busy, use CPU copy method */
+ omap_nand_data_out(chip, buf, len, false);
} else {
while (len) {
w_count = readl(info->reg.gpmc_prefetch_status);
w_count = PREFETCH_STATUS_FIFO_CNT(w_count);
w_count = w_count >> 1;
for (i = 0; (i < w_count) && len; i++, len -= 2)
- iowrite16(*p++, info->nand.legacy.IO_ADDR_W);
+ iowrite16(*p++, info->fifo);
}
/* wait for data to flushed-out before reset the prefetch */
tim = 0;
@@ -451,15 +361,16 @@ static void omap_nand_dma_callback(void *data)
/*
* omap_nand_dma_transfer: configure and start dma transfer
- * @mtd: MTD device structure
+ * @chip: nand chip structure
* @addr: virtual address in RAM of source/destination
* @len: number of data bytes to be transferred
* @is_write: flag for read/write operation
*/
-static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
- unsigned int len, int is_write)
+static inline int omap_nand_dma_transfer(struct nand_chip *chip,
+ const void *addr, unsigned int len,
+ int is_write)
{
- struct omap_nand_info *info = mtd_to_omap(mtd);
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
struct dma_async_tx_descriptor *tx;
enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
DMA_FROM_DEVICE;
@@ -521,49 +432,51 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
out_copy_unmap:
dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
out_copy:
- if (info->nand.options & NAND_BUSWIDTH_16)
- is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
- : omap_write_buf16(mtd, (u_char *) addr, len);
- else
- is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
- : omap_write_buf8(mtd, (u_char *) addr, len);
+ is_write == 0 ? omap_nand_data_in(chip, (void *)addr, len, false)
+ : omap_nand_data_out(chip, addr, len, false);
+
return 0;
}
/**
- * omap_read_buf_dma_pref - read data from NAND controller into buffer
- * @chip: NAND chip object
- * @buf: buffer to store date
- * @len: number of bytes to read
+ * omap_nand_data_in_dma_pref - NAND data in using DMA and Prefetch
*/
-static void omap_read_buf_dma_pref(struct nand_chip *chip, u_char *buf,
- int len)
+static void omap_nand_data_in_dma_pref(struct nand_chip *chip, void *buf,
+ unsigned int len, bool force_8bit)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ if (force_8bit) {
+ omap_nand_data_in(chip, buf, len, force_8bit);
+ return;
+ }
+
if (len <= mtd->oobsize)
- omap_read_buf_pref(chip, buf, len);
+ omap_nand_data_in_pref(chip, buf, len, false);
else
/* start transfer in DMA mode */
- omap_nand_dma_transfer(mtd, buf, len, 0x0);
+ omap_nand_dma_transfer(chip, buf, len, 0x0);
}
/**
- * omap_write_buf_dma_pref - write buffer to NAND controller
- * @chip: NAND chip object
- * @buf: data buffer
- * @len: number of bytes to write
+ * omap_nand_data_out_dma_pref - NAND data out using DMA and write posting
*/
-static void omap_write_buf_dma_pref(struct nand_chip *chip, const u_char *buf,
- int len)
+static void omap_nand_data_out_dma_pref(struct nand_chip *chip,
+ const void *buf, unsigned int len,
+ bool force_8bit)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ if (force_8bit) {
+ omap_nand_data_out(chip, buf, len, force_8bit);
+ return;
+ }
+
if (len <= mtd->oobsize)
- omap_write_buf_pref(chip, buf, len);
+ omap_nand_data_out_pref(chip, buf, len, false);
else
/* start transfer in DMA mode */
- omap_nand_dma_transfer(mtd, (u_char *)buf, len, 0x1);
+ omap_nand_dma_transfer(chip, buf, len, 0x1);
}
/*
@@ -587,13 +500,13 @@ static irqreturn_t omap_nand_irq(int this_irq, void *dev)
bytes = info->buf_len;
else if (!info->buf_len)
bytes = 0;
- iowrite32_rep(info->nand.legacy.IO_ADDR_W, (u32 *)info->buf,
+ iowrite32_rep(info->fifo, (u32 *)info->buf,
bytes >> 2);
info->buf = info->buf + bytes;
info->buf_len -= bytes;
} else {
- ioread32_rep(info->nand.legacy.IO_ADDR_R, (u32 *)info->buf,
+ ioread32_rep(info->fifo, (u32 *)info->buf,
bytes >> 2);
info->buf = info->buf + bytes;
@@ -613,20 +526,17 @@ done:
}
/*
- * omap_read_buf_irq_pref - read data from NAND controller into buffer
- * @chip: NAND chip object
- * @buf: buffer to store date
- * @len: number of bytes to read
+ * omap_nand_data_in_irq_pref - NAND data in using Prefetch and IRQ
*/
-static void omap_read_buf_irq_pref(struct nand_chip *chip, u_char *buf,
- int len)
+static void omap_nand_data_in_irq_pref(struct nand_chip *chip, void *buf,
+ unsigned int len, bool force_8bit)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct omap_nand_info *info = mtd_to_omap(mtd);
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ struct mtd_info *mtd = nand_to_mtd(&info->nand);
int ret = 0;
- if (len <= mtd->oobsize) {
- omap_read_buf_pref(chip, buf, len);
+ if (len <= mtd->oobsize || force_8bit) {
+ omap_nand_data_in(chip, buf, len, force_8bit);
return;
}
@@ -637,9 +547,11 @@ static void omap_read_buf_irq_pref(struct nand_chip *chip, u_char *buf,
/* configure and start prefetch transfer */
ret = omap_prefetch_enable(info->gpmc_cs,
PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0, info);
- if (ret)
+ if (ret) {
/* PFPW engine is busy, use cpu copy method */
- goto out_copy;
+ omap_nand_data_in(chip, buf, len, false);
+ return;
+ }
info->buf_len = len;
@@ -652,31 +564,23 @@ static void omap_read_buf_irq_pref(struct nand_chip *chip, u_char *buf,
/* disable and stop the PFPW engine */
omap_prefetch_reset(info->gpmc_cs, info);
return;
-
-out_copy:
- if (info->nand.options & NAND_BUSWIDTH_16)
- omap_read_buf16(mtd, buf, len);
- else
- omap_read_buf8(mtd, buf, len);
}
/*
- * omap_write_buf_irq_pref - write buffer to NAND controller
- * @chip: NAND chip object
- * @buf: data buffer
- * @len: number of bytes to write
+ * omap_nand_data_out_irq_pref - NAND data out using write posting and IRQ
*/
-static void omap_write_buf_irq_pref(struct nand_chip *chip, const u_char *buf,
- int len)
+static void omap_nand_data_out_irq_pref(struct nand_chip *chip,
+ const void *buf, unsigned int len,
+ bool force_8bit)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct omap_nand_info *info = mtd_to_omap(mtd);
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ struct mtd_info *mtd = nand_to_mtd(&info->nand);
int ret = 0;
unsigned long tim, limit;
u32 val;
- if (len <= mtd->oobsize) {
- omap_write_buf_pref(chip, buf, len);
+ if (len <= mtd->oobsize || force_8bit) {
+ omap_nand_data_out(chip, buf, len, force_8bit);
return;
}
@@ -687,9 +591,11 @@ static void omap_write_buf_irq_pref(struct nand_chip *chip, const u_char *buf,
/* configure and start prefetch transfer : size=24 */
ret = omap_prefetch_enable(info->gpmc_cs,
(PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1, info);
- if (ret)
+ if (ret) {
/* PFPW engine is busy, use cpu copy method */
- goto out_copy;
+ omap_nand_data_out(chip, buf, len, false);
+ return;
+ }
info->buf_len = len;
@@ -711,12 +617,6 @@ static void omap_write_buf_irq_pref(struct nand_chip *chip, const u_char *buf,
/* disable and stop the PFPW engine */
omap_prefetch_reset(info->gpmc_cs, info);
return;
-
-out_copy:
- if (info->nand.options & NAND_BUSWIDTH_16)
- omap_write_buf16(mtd, buf, len);
- else
- omap_write_buf8(mtd, buf, len);
}
/**
@@ -982,50 +882,6 @@ static void omap_enable_hwecc(struct nand_chip *chip, int mode)
}
/**
- * omap_wait - wait until the command is done
- * @this: NAND Chip structure
- *
- * Wait function is called during Program and erase operations and
- * the way it is called from MTD layer, we should wait till the NAND
- * chip is ready after the programming/erase operation has completed.
- *
- * Erase can take up to 400ms and program up to 20ms according to
- * general NAND and SmartMedia specs
- */
-static int omap_wait(struct nand_chip *this)
-{
- struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(this));
- unsigned long timeo = jiffies;
- int status;
-
- timeo += msecs_to_jiffies(400);
-
- writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command);
- while (time_before(jiffies, timeo)) {
- status = readb(info->reg.gpmc_nand_data);
- if (status & NAND_STATUS_READY)
- break;
- cond_resched();
- }
-
- status = readb(info->reg.gpmc_nand_data);
- return status;
-}
-
-/**
- * omap_dev_ready - checks the NAND Ready GPIO line
- * @chip: NAND chip object
- *
- * Returns true if ready and false if busy.
- */
-static int omap_dev_ready(struct nand_chip *chip)
-{
- struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
-
- return gpiod_get_value(info->ready_gpiod);
-}
-
-/**
* omap_enable_hwecc_bch - Program GPMC to perform BCH ECC calculation
* @chip: NAND chip object
* @mode: Read/Write mode
@@ -1543,8 +1399,8 @@ static int omap_write_page_bch(struct nand_chip *chip, const uint8_t *buf,
chip->ecc.hwctl(chip, NAND_ECC_WRITE);
/* Write data */
- chip->legacy.write_buf(chip, buf + (eccpg * info->eccpg_size),
- info->eccpg_size);
+ info->data_out(chip, buf + (eccpg * info->eccpg_size),
+ info->eccpg_size, false);
/* Update ecc vector from GPMC result registers */
ret = omap_calculate_ecc_bch_multi(mtd,
@@ -1562,7 +1418,7 @@ static int omap_write_page_bch(struct nand_chip *chip, const uint8_t *buf,
}
/* Write ecc vector to OOB area */
- chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
+ info->data_out(chip, chip->oob_poi, mtd->oobsize, false);
return nand_prog_page_end_op(chip);
}
@@ -1607,8 +1463,8 @@ static int omap_write_subpage_bch(struct nand_chip *chip, u32 offset,
chip->ecc.hwctl(chip, NAND_ECC_WRITE);
/* Write data */
- chip->legacy.write_buf(chip, buf + (eccpg * info->eccpg_size),
- info->eccpg_size);
+ info->data_out(chip, buf + (eccpg * info->eccpg_size),
+ info->eccpg_size, false);
for (step = 0; step < info->nsteps_per_eccpg; step++) {
unsigned int base_step = eccpg * info->nsteps_per_eccpg;
@@ -1641,7 +1497,7 @@ static int omap_write_subpage_bch(struct nand_chip *chip, u32 offset,
}
/* write OOB buffer to NAND device */
- chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
+ info->data_out(chip, chip->oob_poi, mtd->oobsize, false);
return nand_prog_page_end_op(chip);
}
@@ -1984,8 +1840,8 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
/* Re-populate low-level callbacks based on xfer modes */
switch (info->xfer_type) {
case NAND_OMAP_PREFETCH_POLLED:
- chip->legacy.read_buf = omap_read_buf_pref;
- chip->legacy.write_buf = omap_write_buf_pref;
+ info->data_in = omap_nand_data_in_pref;
+ info->data_out = omap_nand_data_out_pref;
break;
case NAND_OMAP_POLLED:
@@ -2017,8 +1873,9 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
err);
return err;
}
- chip->legacy.read_buf = omap_read_buf_dma_pref;
- chip->legacy.write_buf = omap_write_buf_dma_pref;
+
+ info->data_in = omap_nand_data_in_dma_pref;
+ info->data_out = omap_nand_data_out_dma_pref;
}
break;
@@ -2049,9 +1906,8 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
return err;
}
- chip->legacy.read_buf = omap_read_buf_irq_pref;
- chip->legacy.write_buf = omap_write_buf_irq_pref;
-
+ info->data_in = omap_nand_data_in_irq_pref;
+ info->data_out = omap_nand_data_out_irq_pref;
break;
default:
@@ -2217,8 +2073,105 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
return 0;
}
+static void omap_nand_data_in(struct nand_chip *chip, void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ u32 alignment = ((uintptr_t)buf | len) & 3;
+
+ if (force_8bit || (alignment & 1))
+ ioread8_rep(info->fifo, buf, len);
+ else if (alignment & 3)
+ ioread16_rep(info->fifo, buf, len >> 1);
+ else
+ ioread32_rep(info->fifo, buf, len >> 2);
+}
+
+static void omap_nand_data_out(struct nand_chip *chip,
+ const void *buf, unsigned int len,
+ bool force_8bit)
+{
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ u32 alignment = ((uintptr_t)buf | len) & 3;
+
+ if (force_8bit || (alignment & 1))
+ iowrite8_rep(info->fifo, buf, len);
+ else if (alignment & 3)
+ iowrite16_rep(info->fifo, buf, len >> 1);
+ else
+ iowrite32_rep(info->fifo, buf, len >> 2);
+}
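An aside on the access-width selection just above (illustrative only, not part of the patch): the "((uintptr_t)buf | len) & 3" trick folds the buffer address and the transfer length into one alignment value, so any odd address or odd length forces byte accesses, a 2-byte-aligned combination gets 16-bit accesses, and a fully 4-byte-aligned one gets 32-bit accesses. The stand-alone sketch below only mirrors that dispatch; the addresses and lengths are made-up example values.

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the width selection of omap_nand_data_in()/_out() above. */
    static const char *fifo_access_width(uintptr_t buf, unsigned int len,
                                         int force_8bit)
    {
            uint32_t alignment = (buf | len) & 3;

            if (force_8bit || (alignment & 1))
                    return "8-bit";
            else if (alignment & 3)
                    return "16-bit";
            else
                    return "32-bit";
    }

    int main(void)
    {
            printf("%s\n", fifo_access_width(0x1000, 2048, 0)); /* 32-bit */
            printf("%s\n", fifo_access_width(0x1002, 2048, 0)); /* 16-bit */
            printf("%s\n", fifo_access_width(0x1000, 2045, 0)); /* 8-bit */
            return 0;
    }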
+
+static int omap_nand_exec_instr(struct nand_chip *chip,
+ const struct nand_op_instr *instr)
+{
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ unsigned int i;
+ int ret;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ iowrite8(instr->ctx.cmd.opcode,
+ info->reg.gpmc_nand_command);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+ iowrite8(instr->ctx.addr.addrs[i],
+ info->reg.gpmc_nand_address);
+ }
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ info->data_in(chip, instr->ctx.data.buf.in,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ info->data_out(chip, instr->ctx.data.buf.out,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ ret = info->ready_gpiod ?
+ nand_gpio_waitrdy(chip, info->ready_gpiod, instr->ctx.waitrdy.timeout_ms) :
+ nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms);
+ if (ret)
+ return ret;
+ break;
+ }
+
+ if (instr->delay_ns)
+ ndelay(instr->delay_ns);
+
+ return 0;
+}
+
+static int omap_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ unsigned int i;
+
+ if (check_only)
+ return 0;
+
+ for (i = 0; i < op->ninstrs; i++) {
+ int ret;
+
+ ret = omap_nand_exec_instr(chip, &op->instrs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
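For context, a hedged sketch of the kind of instruction sequence the raw NAND core builds (for instance from nand_read_page_op()) and hands to ->exec_op(), which omap_nand_exec_op() above then walks instruction by instruction. The opcodes, the 5-cycle address and the 100 ms timeout are generic illustrative values, not anything taken from this patch.

    #include <linux/mtd/rawnand.h>

    /* Illustrative only: a READ0/READSTART page-read operation expressed
     * with the instruction helpers from rawnand.h. */
    static int example_page_read_op(struct nand_chip *chip, u8 *buf,
                                    unsigned int len, const u8 addrs[5])
    {
            struct nand_op_instr instrs[] = {
                    NAND_OP_CMD(NAND_CMD_READ0, 0),
                    NAND_OP_ADDR(5, addrs, 0),
                    NAND_OP_CMD(NAND_CMD_READSTART, 0),
                    NAND_OP_WAIT_RDY(100, 0),        /* hypothetical timeout */
                    NAND_OP_DATA_IN(len, buf, 0),
            };
            struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

            /* The core normally performs this dispatch itself. */
            return chip->controller->ops->exec_op(chip, &op, false);
    }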
+
static const struct nand_controller_ops omap_nand_controller_ops = {
.attach_chip = omap_nand_attach_chip,
+ .exec_op = omap_nand_exec_op,
};
/* Shared among all NAND instances to synchronize access to the ECC Engine */
@@ -2233,6 +2186,7 @@ static int omap_nand_probe(struct platform_device *pdev)
int err;
struct resource *res;
struct device *dev = &pdev->dev;
+ void __iomem *vaddr;
info = devm_kzalloc(&pdev->dev, sizeof(struct omap_nand_info),
GFP_KERNEL);
@@ -2266,10 +2220,11 @@ static int omap_nand_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- nand_chip->legacy.IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(nand_chip->legacy.IO_ADDR_R))
- return PTR_ERR(nand_chip->legacy.IO_ADDR_R);
+ vaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+ info->fifo = vaddr;
info->phys_base = res->start;
if (!omap_gpmc_controller_initialized) {
@@ -2280,9 +2235,6 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->controller = &omap_gpmc_controller;
- nand_chip->legacy.IO_ADDR_W = nand_chip->legacy.IO_ADDR_R;
- nand_chip->legacy.cmd_ctrl = omap_hwcontrol;
-
info->ready_gpiod = devm_gpiod_get_optional(&pdev->dev, "rb",
GPIOD_IN);
if (IS_ERR(info->ready_gpiod)) {
@@ -2290,26 +2242,12 @@ static int omap_nand_probe(struct platform_device *pdev)
return PTR_ERR(info->ready_gpiod);
}
- /*
- * If RDY/BSY line is connected to OMAP then use the omap ready
- * function and the generic nand_wait function which reads the status
- * register after monitoring the RDY/BSY line. Otherwise use a standard
- * chip delay which is slightly more than tR (AC Timing) of the NAND
- * device and read status register until you get a failure or success
- */
- if (info->ready_gpiod) {
- nand_chip->legacy.dev_ready = omap_dev_ready;
- nand_chip->legacy.chip_delay = 0;
- } else {
- nand_chip->legacy.waitfunc = omap_wait;
- nand_chip->legacy.chip_delay = 50;
- }
-
if (info->flash_bbt)
nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
- /* scan NAND device connected to chip controller */
- nand_chip->options |= info->devsize & NAND_BUSWIDTH_16;
+ /* default operations */
+ info->data_in = omap_nand_data_in;
+ info->data_out = omap_nand_data_out;
err = nand_scan(nand_chip, 1);
if (err)
@@ -2352,10 +2290,7 @@ static int omap_nand_remove(struct platform_device *pdev)
return ret;
}
-static const struct of_device_id omap_nand_ids[] = {
- { .compatible = "ti,omap2-nand", },
- {},
-};
+/* omap_nand_ids defined in linux/platform_data/mtd-nand-omap2.h */
MODULE_DEVICE_TABLE(of, omap_nand_ids);
static struct platform_driver omap_nand_driver = {
@@ -2363,7 +2298,7 @@ static struct platform_driver omap_nand_driver = {
.remove = omap_nand_remove,
.driver = {
.name = DRIVER_NAME,
- .of_match_table = of_match_ptr(omap_nand_ids),
+ .of_match_table = omap_nand_ids,
},
};
diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c
index 8bab753211e9..893e9979c4a2 100644
--- a/drivers/mtd/nand/raw/omap_elm.c
+++ b/drivers/mtd/nand/raw/omap_elm.c
@@ -282,7 +282,7 @@ static void elm_start_processing(struct elm_info *info,
static void elm_error_correction(struct elm_info *info,
struct elm_errorvec *err_vec)
{
- int i, j, errors = 0;
+ int i, j;
int offset;
u32 reg_val;
@@ -312,8 +312,6 @@ static void elm_error_correction(struct elm_info *info,
/* Update error location register */
offset += 4;
}
-
- errors += err_vec[i].error_count;
} else {
err_vec[i].error_uncorrectable = true;
}
@@ -384,8 +382,8 @@ static irqreturn_t elm_isr(int this_irq, void *dev_id)
static int elm_probe(struct platform_device *pdev)
{
int ret = 0;
- struct resource *irq;
struct elm_info *info;
+ int irq;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
@@ -393,20 +391,18 @@ static int elm_probe(struct platform_device *pdev)
info->dev = &pdev->dev;
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq) {
- dev_err(&pdev->dev, "no irq resource defined\n");
- return -ENODEV;
- }
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
info->elm_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(info->elm_base))
return PTR_ERR(info->elm_base);
- ret = devm_request_irq(&pdev->dev, irq->start, elm_isr, 0,
- pdev->name, info);
+ ret = devm_request_irq(&pdev->dev, irq, elm_isr, 0,
+ pdev->name, info);
if (ret) {
- dev_err(&pdev->dev, "failure requesting %pr\n", irq);
+ dev_err(&pdev->dev, "failure requesting %d\n", irq);
return ret;
}
diff --git a/drivers/mtd/nand/raw/pl35x-nand-controller.c b/drivers/mtd/nand/raw/pl35x-nand-controller.c
index 8a91e069ee2e..3c6f6aff649f 100644
--- a/drivers/mtd/nand/raw/pl35x-nand-controller.c
+++ b/drivers/mtd/nand/raw/pl35x-nand-controller.c
@@ -1062,7 +1062,7 @@ static int pl35x_nand_chip_init(struct pl35x_nandc *nfc,
chip->controller = &nfc->controller;
mtd = nand_to_mtd(chip);
mtd->dev.parent = nfc->dev;
- nand_set_flash_node(chip, nfc->dev->of_node);
+ nand_set_flash_node(chip, np);
if (!mtd->name) {
mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL,
"%s", PL35X_NANDC_DRIVER_NAME);
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 04e6f7b26706..1a77542c6d67 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2,10 +2,10 @@
/*
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*/
-
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
+#include <linux/dma/qcom_adm.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
@@ -952,6 +952,7 @@ static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
struct dma_async_tx_descriptor *dma_desc;
struct scatterlist *sgl;
struct dma_slave_config slave_conf;
+ struct qcom_adm_peripheral_config periph_conf = {};
enum dma_transfer_direction dir_eng;
int ret;
@@ -983,11 +984,19 @@ static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
if (read) {
slave_conf.src_maxburst = 16;
slave_conf.src_addr = nandc->base_dma + reg_off;
- slave_conf.slave_id = nandc->data_crci;
+ if (nandc->data_crci) {
+ periph_conf.crci = nandc->data_crci;
+ slave_conf.peripheral_config = &periph_conf;
+ slave_conf.peripheral_size = sizeof(periph_conf);
+ }
} else {
slave_conf.dst_maxburst = 16;
slave_conf.dst_addr = nandc->base_dma + reg_off;
- slave_conf.slave_id = nandc->cmd_crci;
+ if (nandc->cmd_crci) {
+ periph_conf.crci = nandc->cmd_crci;
+ slave_conf.peripheral_config = &periph_conf;
+ slave_conf.peripheral_size = sizeof(periph_conf);
+ }
}
ret = dmaengine_slave_config(nandc->chan, &slave_conf);
@@ -3063,10 +3072,6 @@ static int qcom_nandc_probe(struct platform_device *pdev)
if (dma_mapping_error(dev, nandc->base_dma))
return -ENXIO;
- ret = qcom_nandc_alloc(nandc);
- if (ret)
- goto err_nandc_alloc;
-
ret = clk_prepare_enable(nandc->core_clk);
if (ret)
goto err_core_clk;
@@ -3075,6 +3080,10 @@ static int qcom_nandc_probe(struct platform_device *pdev)
if (ret)
goto err_aon_clk;
+ ret = qcom_nandc_alloc(nandc);
+ if (ret)
+ goto err_nandc_alloc;
+
ret = qcom_nandc_setup(nandc);
if (ret)
goto err_setup;
@@ -3086,15 +3095,14 @@ static int qcom_nandc_probe(struct platform_device *pdev)
return 0;
err_setup:
+ qcom_nandc_unalloc(nandc);
+err_nandc_alloc:
clk_disable_unprepare(nandc->aon_clk);
err_aon_clk:
clk_disable_unprepare(nandc->core_clk);
err_core_clk:
- qcom_nandc_unalloc(nandc);
-err_nandc_alloc:
dma_unmap_resource(dev, res->start, resource_size(res),
DMA_BIDIRECTIONAL, 0);
-
return ret;
}
diff --git a/drivers/mtd/nand/raw/renesas-nand-controller.c b/drivers/mtd/nand/raw/renesas-nand-controller.c
new file mode 100644
index 000000000000..6db063b230a9
--- /dev/null
+++ b/drivers/mtd/nand/raw/renesas-nand-controller.c
@@ -0,0 +1,1424 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Evatronix/Renesas R-Car Gen3, RZ/N1D, RZ/N1S, RZ/N1L NAND controller driver
+ *
+ * Copyright (C) 2021 Schneider Electric
+ * Author: Miquel RAYNAL <miquel.raynal@bootlin.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define COMMAND_REG 0x00
+#define COMMAND_SEQ(x) FIELD_PREP(GENMASK(5, 0), (x))
+#define COMMAND_SEQ_10 COMMAND_SEQ(0x2A)
+#define COMMAND_SEQ_12 COMMAND_SEQ(0x0C)
+#define COMMAND_SEQ_18 COMMAND_SEQ(0x32)
+#define COMMAND_SEQ_19 COMMAND_SEQ(0x13)
+#define COMMAND_SEQ_GEN_IN COMMAND_SEQ_18
+#define COMMAND_SEQ_GEN_OUT COMMAND_SEQ_19
+#define COMMAND_SEQ_READ_PAGE COMMAND_SEQ_10
+#define COMMAND_SEQ_WRITE_PAGE COMMAND_SEQ_12
+#define COMMAND_INPUT_SEL_AHBS 0
+#define COMMAND_INPUT_SEL_DMA BIT(6)
+#define COMMAND_FIFO_SEL 0
+#define COMMAND_DATA_SEL BIT(7)
+#define COMMAND_0(x) FIELD_PREP(GENMASK(15, 8), (x))
+#define COMMAND_1(x) FIELD_PREP(GENMASK(23, 16), (x))
+#define COMMAND_2(x) FIELD_PREP(GENMASK(31, 24), (x))
+
+#define CONTROL_REG 0x04
+#define CONTROL_CHECK_RB_LINE 0
+#define CONTROL_ECC_BLOCK_SIZE(x) FIELD_PREP(GENMASK(2, 1), (x))
+#define CONTROL_ECC_BLOCK_SIZE_256 CONTROL_ECC_BLOCK_SIZE(0)
+#define CONTROL_ECC_BLOCK_SIZE_512 CONTROL_ECC_BLOCK_SIZE(1)
+#define CONTROL_ECC_BLOCK_SIZE_1024 CONTROL_ECC_BLOCK_SIZE(2)
+#define CONTROL_INT_EN BIT(4)
+#define CONTROL_ECC_EN BIT(5)
+#define CONTROL_BLOCK_SIZE(x) FIELD_PREP(GENMASK(7, 6), (x))
+#define CONTROL_BLOCK_SIZE_32P CONTROL_BLOCK_SIZE(0)
+#define CONTROL_BLOCK_SIZE_64P CONTROL_BLOCK_SIZE(1)
+#define CONTROL_BLOCK_SIZE_128P CONTROL_BLOCK_SIZE(2)
+#define CONTROL_BLOCK_SIZE_256P CONTROL_BLOCK_SIZE(3)
+
+#define STATUS_REG 0x8
+#define MEM_RDY(cs, reg) (FIELD_GET(GENMASK(3, 0), (reg)) & BIT(cs))
+#define CTRL_RDY(reg) (FIELD_GET(BIT(8), (reg)) == 0)
+
+#define ECC_CTRL_REG 0x18
+#define ECC_CTRL_CAP(x) FIELD_PREP(GENMASK(2, 0), (x))
+#define ECC_CTRL_CAP_2B ECC_CTRL_CAP(0)
+#define ECC_CTRL_CAP_4B ECC_CTRL_CAP(1)
+#define ECC_CTRL_CAP_8B ECC_CTRL_CAP(2)
+#define ECC_CTRL_CAP_16B ECC_CTRL_CAP(3)
+#define ECC_CTRL_CAP_24B ECC_CTRL_CAP(4)
+#define ECC_CTRL_CAP_32B ECC_CTRL_CAP(5)
+#define ECC_CTRL_ERR_THRESHOLD(x) FIELD_PREP(GENMASK(13, 8), (x))
+
+#define INT_MASK_REG 0x10
+#define INT_STATUS_REG 0x14
+#define INT_CMD_END BIT(1)
+#define INT_DMA_END BIT(3)
+#define INT_MEM_RDY(cs) FIELD_PREP(GENMASK(11, 8), BIT(cs))
+#define INT_DMA_ENDED BIT(3)
+#define MEM_IS_RDY(cs, reg) (FIELD_GET(GENMASK(11, 8), (reg)) & BIT(cs))
+#define DMA_HAS_ENDED(reg) FIELD_GET(BIT(3), (reg))
+
+#define ECC_OFFSET_REG 0x1C
+#define ECC_OFFSET(x) FIELD_PREP(GENMASK(15, 0), (x))
+
+#define ECC_STAT_REG 0x20
+#define ECC_STAT_CORRECTABLE(cs, reg) (FIELD_GET(GENMASK(3, 0), (reg)) & BIT(cs))
+#define ECC_STAT_UNCORRECTABLE(cs, reg) (FIELD_GET(GENMASK(11, 8), (reg)) & BIT(cs))
+
+#define ADDR0_COL_REG 0x24
+#define ADDR0_COL(x) FIELD_PREP(GENMASK(15, 0), (x))
+
+#define ADDR0_ROW_REG 0x28
+#define ADDR0_ROW(x) FIELD_PREP(GENMASK(23, 0), (x))
+
+#define ADDR1_COL_REG 0x2C
+#define ADDR1_COL(x) FIELD_PREP(GENMASK(15, 0), (x))
+
+#define ADDR1_ROW_REG 0x30
+#define ADDR1_ROW(x) FIELD_PREP(GENMASK(23, 0), (x))
+
+#define FIFO_DATA_REG 0x38
+
+#define DATA_REG 0x3C
+
+#define DATA_REG_SIZE_REG 0x40
+
+#define DMA_ADDR_LOW_REG 0x64
+
+#define DMA_ADDR_HIGH_REG 0x68
+
+#define DMA_CNT_REG 0x6C
+
+#define DMA_CTRL_REG 0x70
+#define DMA_CTRL_INCREMENT_BURST_4 0
+#define DMA_CTRL_REGISTER_MANAGED_MODE 0
+#define DMA_CTRL_START BIT(7)
+
+#define MEM_CTRL_REG 0x80
+#define MEM_CTRL_CS(cs) FIELD_PREP(GENMASK(1, 0), (cs))
+#define MEM_CTRL_DIS_WP(cs) FIELD_PREP(GENMASK(11, 8), BIT((cs)))
+
+#define DATA_SIZE_REG 0x84
+#define DATA_SIZE(x) FIELD_PREP(GENMASK(14, 0), (x))
+
+#define TIMINGS_ASYN_REG 0x88
+#define TIMINGS_ASYN_TRWP(x) FIELD_PREP(GENMASK(3, 0), max((x), 1U) - 1)
+#define TIMINGS_ASYN_TRWH(x) FIELD_PREP(GENMASK(7, 4), max((x), 1U) - 1)
+
+#define TIM_SEQ0_REG 0x90
+#define TIM_SEQ0_TCCS(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
+#define TIM_SEQ0_TADL(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
+#define TIM_SEQ0_TRHW(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
+#define TIM_SEQ0_TWHR(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)
+
+#define TIM_SEQ1_REG 0x94
+#define TIM_SEQ1_TWB(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
+#define TIM_SEQ1_TRR(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
+#define TIM_SEQ1_TWW(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
+
+#define TIM_GEN_SEQ0_REG 0x98
+#define TIM_GEN_SEQ0_D0(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
+#define TIM_GEN_SEQ0_D1(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
+#define TIM_GEN_SEQ0_D2(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
+#define TIM_GEN_SEQ0_D3(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)
+
+#define TIM_GEN_SEQ1_REG 0x9c
+#define TIM_GEN_SEQ1_D4(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
+#define TIM_GEN_SEQ1_D5(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
+#define TIM_GEN_SEQ1_D6(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
+#define TIM_GEN_SEQ1_D7(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)
+
+#define TIM_GEN_SEQ2_REG 0xA0
+#define TIM_GEN_SEQ2_D8(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
+#define TIM_GEN_SEQ2_D9(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
+#define TIM_GEN_SEQ2_D10(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
+#define TIM_GEN_SEQ2_D11(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)
+
+#define FIFO_INIT_REG 0xB4
+#define FIFO_INIT BIT(0)
+
+#define FIFO_STATE_REG 0xB4
+#define FIFO_STATE_R_EMPTY(reg) FIELD_GET(BIT(0), (reg))
+#define FIFO_STATE_W_FULL(reg) FIELD_GET(BIT(1), (reg))
+#define FIFO_STATE_C_EMPTY(reg) FIELD_GET(BIT(2), (reg))
+#define FIFO_STATE_R_FULL(reg) FIELD_GET(BIT(6), (reg))
+#define FIFO_STATE_W_EMPTY(reg) FIELD_GET(BIT(7), (reg))
+
+#define GEN_SEQ_CTRL_REG 0xB8
+#define GEN_SEQ_CMD0_EN BIT(0)
+#define GEN_SEQ_CMD1_EN BIT(1)
+#define GEN_SEQ_CMD2_EN BIT(2)
+#define GEN_SEQ_CMD3_EN BIT(3)
+#define GEN_SEQ_COL_A0(x) FIELD_PREP(GENMASK(5, 4), min((x), 2U))
+#define GEN_SEQ_COL_A1(x) FIELD_PREP(GENMASK(7, 6), min((x), 2U))
+#define GEN_SEQ_ROW_A0(x) FIELD_PREP(GENMASK(9, 8), min((x), 3U))
+#define GEN_SEQ_ROW_A1(x) FIELD_PREP(GENMASK(11, 10), min((x), 3U))
+#define GEN_SEQ_DATA_EN BIT(12)
+#define GEN_SEQ_DELAY_EN(x) FIELD_PREP(GENMASK(14, 13), (x))
+#define GEN_SEQ_DELAY0_EN GEN_SEQ_DELAY_EN(1)
+#define GEN_SEQ_DELAY1_EN GEN_SEQ_DELAY_EN(2)
+#define GEN_SEQ_IMD_SEQ BIT(15)
+#define GEN_SEQ_COMMAND_3(x) FIELD_PREP(GENMASK(26, 16), (x))
+
+#define DMA_TLVL_REG 0x114
+#define DMA_TLVL(x) FIELD_PREP(GENMASK(7, 0), (x))
+#define DMA_TLVL_MAX DMA_TLVL(0xFF)
+
+#define TIM_GEN_SEQ3_REG 0x134
+#define TIM_GEN_SEQ3_D12(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
+
+#define ECC_CNT_REG 0x14C
+#define ECC_CNT(cs, reg) FIELD_GET(GENMASK(5, 0), (reg) >> ((cs) * 8))
+
+#define RNANDC_CS_NUM 4
+
+#define TO_CYCLES64(ps, period_ns) ((unsigned int)DIV_ROUND_UP_ULL(div_u64(ps, 1000), \
+ period_ns))
+
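A worked example of the TO_CYCLES64() conversion used by rnandc_setup_interface() further down (the 150 MHz eclk rate and the 100 ns tWB are assumed values, purely for illustration): period_ns = 1000000000 / 150000000 = 6, and tWB_max = 100000 ps becomes DIV_ROUND_UP(100, 6) = 17 cycles. The stand-alone program below just repeats that arithmetic.

    #include <stdio.h>

    /* Picoseconds are reduced to nanoseconds, then divided by the clock
     * period with a round-up, exactly as TO_CYCLES64() does. */
    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long long tWB_max_ps = 100000;           /* 100 ns */
            unsigned int period_ns = 1000000000 / 150000000;  /* 6 ns */
            unsigned long long cycles = DIV_ROUND_UP(tWB_max_ps / 1000,
                                                     period_ns);

            printf("tWB: %llu cycles\n", cycles);             /* prints 17 */
            return 0;
    }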
+struct rnand_chip_sel {
+ unsigned int cs;
+};
+
+struct rnand_chip {
+ struct nand_chip chip;
+ struct list_head node;
+ int selected_die;
+ u32 ctrl;
+ unsigned int nsels;
+ u32 control;
+ u32 ecc_ctrl;
+ u32 timings_asyn;
+ u32 tim_seq0;
+ u32 tim_seq1;
+ u32 tim_gen_seq0;
+ u32 tim_gen_seq1;
+ u32 tim_gen_seq2;
+ u32 tim_gen_seq3;
+ struct rnand_chip_sel sels[];
+};
+
+struct rnandc {
+ struct nand_controller controller;
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *hclk;
+ struct clk *eclk;
+ unsigned long assigned_cs;
+ struct list_head chips;
+ struct nand_chip *selected_chip;
+ struct completion complete;
+ bool use_polling;
+ u8 *buf;
+ unsigned int buf_sz;
+};
+
+struct rnandc_op {
+ u32 command;
+ u32 addr0_col;
+ u32 addr0_row;
+ u32 addr1_col;
+ u32 addr1_row;
+ u32 data_size;
+ u32 ecc_offset;
+ u32 gen_seq_ctrl;
+ u8 *buf;
+ bool read;
+ unsigned int len;
+};
+
+static inline struct rnandc *to_rnandc(struct nand_controller *ctrl)
+{
+ return container_of(ctrl, struct rnandc, controller);
+}
+
+static inline struct rnand_chip *to_rnand(struct nand_chip *chip)
+{
+ return container_of(chip, struct rnand_chip, chip);
+}
+
+static inline unsigned int to_rnandc_cs(struct rnand_chip *nand)
+{
+ return nand->sels[nand->selected_die].cs;
+}
+
+static void rnandc_dis_correction(struct rnandc *rnandc)
+{
+ u32 control;
+
+ control = readl_relaxed(rnandc->regs + CONTROL_REG);
+ control &= ~CONTROL_ECC_EN;
+ writel_relaxed(control, rnandc->regs + CONTROL_REG);
+}
+
+static void rnandc_en_correction(struct rnandc *rnandc)
+{
+ u32 control;
+
+ control = readl_relaxed(rnandc->regs + CONTROL_REG);
+ control |= CONTROL_ECC_EN;
+ writel_relaxed(control, rnandc->regs + CONTROL_REG);
+}
+
+static void rnandc_clear_status(struct rnandc *rnandc)
+{
+ writel_relaxed(0, rnandc->regs + INT_STATUS_REG);
+ writel_relaxed(0, rnandc->regs + ECC_STAT_REG);
+ writel_relaxed(0, rnandc->regs + ECC_CNT_REG);
+}
+
+static void rnandc_dis_interrupts(struct rnandc *rnandc)
+{
+ writel_relaxed(0, rnandc->regs + INT_MASK_REG);
+}
+
+static void rnandc_en_interrupts(struct rnandc *rnandc, u32 val)
+{
+ if (!rnandc->use_polling)
+ writel_relaxed(val, rnandc->regs + INT_MASK_REG);
+}
+
+static void rnandc_clear_fifo(struct rnandc *rnandc)
+{
+ writel_relaxed(FIFO_INIT, rnandc->regs + FIFO_INIT_REG);
+}
+
+static void rnandc_select_target(struct nand_chip *chip, int die_nr)
+{
+ struct rnand_chip *rnand = to_rnand(chip);
+ struct rnandc *rnandc = to_rnandc(chip->controller);
+ unsigned int cs = rnand->sels[die_nr].cs;
+
+ if (chip == rnandc->selected_chip && die_nr == rnand->selected_die)
+ return;
+
+ rnandc_clear_status(rnandc);
+ writel_relaxed(MEM_CTRL_CS(cs) | MEM_CTRL_DIS_WP(cs), rnandc->regs + MEM_CTRL_REG);
+ writel_relaxed(rnand->control, rnandc->regs + CONTROL_REG);
+ writel_relaxed(rnand->ecc_ctrl, rnandc->regs + ECC_CTRL_REG);
+ writel_relaxed(rnand->timings_asyn, rnandc->regs + TIMINGS_ASYN_REG);
+ writel_relaxed(rnand->tim_seq0, rnandc->regs + TIM_SEQ0_REG);
+ writel_relaxed(rnand->tim_seq1, rnandc->regs + TIM_SEQ1_REG);
+ writel_relaxed(rnand->tim_gen_seq0, rnandc->regs + TIM_GEN_SEQ0_REG);
+ writel_relaxed(rnand->tim_gen_seq1, rnandc->regs + TIM_GEN_SEQ1_REG);
+ writel_relaxed(rnand->tim_gen_seq2, rnandc->regs + TIM_GEN_SEQ2_REG);
+ writel_relaxed(rnand->tim_gen_seq3, rnandc->regs + TIM_GEN_SEQ3_REG);
+
+ rnandc->selected_chip = chip;
+ rnand->selected_die = die_nr;
+}
+
+static void rnandc_trigger_op(struct rnandc *rnandc, struct rnandc_op *rop)
+{
+ writel_relaxed(rop->addr0_col, rnandc->regs + ADDR0_COL_REG);
+ writel_relaxed(rop->addr0_row, rnandc->regs + ADDR0_ROW_REG);
+ writel_relaxed(rop->addr1_col, rnandc->regs + ADDR1_COL_REG);
+ writel_relaxed(rop->addr1_row, rnandc->regs + ADDR1_ROW_REG);
+ writel_relaxed(rop->ecc_offset, rnandc->regs + ECC_OFFSET_REG);
+ writel_relaxed(rop->gen_seq_ctrl, rnandc->regs + GEN_SEQ_CTRL_REG);
+ writel_relaxed(DATA_SIZE(rop->len), rnandc->regs + DATA_SIZE_REG);
+ writel_relaxed(rop->command, rnandc->regs + COMMAND_REG);
+}
+
+static void rnandc_trigger_dma(struct rnandc *rnandc)
+{
+ writel_relaxed(DMA_CTRL_INCREMENT_BURST_4 |
+ DMA_CTRL_REGISTER_MANAGED_MODE |
+ DMA_CTRL_START, rnandc->regs + DMA_CTRL_REG);
+}
+
+static irqreturn_t rnandc_irq_handler(int irq, void *private)
+{
+ struct rnandc *rnandc = private;
+
+ rnandc_dis_interrupts(rnandc);
+ complete(&rnandc->complete);
+
+ return IRQ_HANDLED;
+}
+
+static int rnandc_wait_end_of_op(struct rnandc *rnandc,
+ struct nand_chip *chip)
+{
+ struct rnand_chip *rnand = to_rnand(chip);
+ unsigned int cs = to_rnandc_cs(rnand);
+ u32 status;
+ int ret;
+
+ ret = readl_poll_timeout(rnandc->regs + STATUS_REG, status,
+ MEM_RDY(cs, status) && CTRL_RDY(status),
+ 1, 100000);
+ if (ret)
+ dev_err(rnandc->dev, "Operation timed out, status: 0x%08x\n",
+ status);
+
+ return ret;
+}
+
+static int rnandc_wait_end_of_io(struct rnandc *rnandc,
+ struct nand_chip *chip)
+{
+ int timeout_ms = 1000;
+ int ret;
+
+ if (rnandc->use_polling) {
+ struct rnand_chip *rnand = to_rnand(chip);
+ unsigned int cs = to_rnandc_cs(rnand);
+ u32 status;
+
+ ret = readl_poll_timeout(rnandc->regs + INT_STATUS_REG, status,
+ MEM_IS_RDY(cs, status) &
+ DMA_HAS_ENDED(status),
+ 0, timeout_ms * 1000);
+ } else {
+ ret = wait_for_completion_timeout(&rnandc->complete,
+ msecs_to_jiffies(timeout_ms));
+ if (!ret)
+ ret = -ETIMEDOUT;
+ else
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int rnandc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct rnandc *rnandc = to_rnandc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct rnand_chip *rnand = to_rnand(chip);
+ unsigned int cs = to_rnandc_cs(rnand);
+ struct rnandc_op rop = {
+ .command = COMMAND_INPUT_SEL_DMA | COMMAND_0(NAND_CMD_READ0) |
+ COMMAND_2(NAND_CMD_READSTART) | COMMAND_FIFO_SEL |
+ COMMAND_SEQ_READ_PAGE,
+ .addr0_row = page,
+ .len = mtd->writesize,
+ .ecc_offset = ECC_OFFSET(mtd->writesize + 2),
+ };
+ unsigned int max_bitflips = 0;
+ dma_addr_t dma_addr;
+ u32 ecc_stat;
+ int bf, ret, i;
+
+ /* Prepare controller */
+ rnandc_select_target(chip, chip->cur_cs);
+ rnandc_clear_status(rnandc);
+ reinit_completion(&rnandc->complete);
+ rnandc_en_interrupts(rnandc, INT_DMA_ENDED);
+ rnandc_en_correction(rnandc);
+
+ /* Configure DMA */
+ dma_addr = dma_map_single(rnandc->dev, rnandc->buf, mtd->writesize,
+ DMA_FROM_DEVICE);
+ writel(dma_addr, rnandc->regs + DMA_ADDR_LOW_REG);
+ writel(mtd->writesize, rnandc->regs + DMA_CNT_REG);
+ writel(DMA_TLVL_MAX, rnandc->regs + DMA_TLVL_REG);
+
+ rnandc_trigger_op(rnandc, &rop);
+ rnandc_trigger_dma(rnandc);
+
+ ret = rnandc_wait_end_of_io(rnandc, chip);
+ dma_unmap_single(rnandc->dev, dma_addr, mtd->writesize, DMA_FROM_DEVICE);
+ rnandc_dis_correction(rnandc);
+ if (ret) {
+ dev_err(rnandc->dev, "Read page operation never ending\n");
+ return ret;
+ }
+
+ ecc_stat = readl_relaxed(rnandc->regs + ECC_STAT_REG);
+
+ if (oob_required || ECC_STAT_UNCORRECTABLE(cs, ecc_stat)) {
+ ret = nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ if (ECC_STAT_UNCORRECTABLE(cs, ecc_stat)) {
+ for (i = 0; i < chip->ecc.steps; i++) {
+ unsigned int off = i * chip->ecc.size;
+ unsigned int eccoff = i * chip->ecc.bytes;
+
+ bf = nand_check_erased_ecc_chunk(rnandc->buf + off,
+ chip->ecc.size,
+ chip->oob_poi + 2 + eccoff,
+ chip->ecc.bytes,
+ NULL, 0,
+ chip->ecc.strength);
+ if (bf < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += bf;
+ max_bitflips = max_t(unsigned int, max_bitflips, bf);
+ }
+ }
+ } else if (ECC_STAT_CORRECTABLE(cs, ecc_stat)) {
+ bf = ECC_CNT(cs, readl_relaxed(rnandc->regs + ECC_CNT_REG));
+ /*
+		 * The number of bitflips is an approximation, as this
+		 * controller does not report per-chunk details and only
+		 * gives statistics for the entire page.
+ */
+ mtd->ecc_stats.corrected += bf;
+ }
+
+ memcpy(buf, rnandc->buf, mtd->writesize);
+
+ return 0;
+}
+
+static int rnandc_read_subpage_hw_ecc(struct nand_chip *chip, u32 req_offset,
+ u32 req_len, u8 *bufpoi, int page)
+{
+ struct rnandc *rnandc = to_rnandc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct rnand_chip *rnand = to_rnand(chip);
+ unsigned int cs = to_rnandc_cs(rnand);
+ unsigned int page_off = round_down(req_offset, chip->ecc.size);
+ unsigned int real_len = round_up(req_offset + req_len - page_off,
+ chip->ecc.size);
+ unsigned int start_chunk = page_off / chip->ecc.size;
+ unsigned int nchunks = real_len / chip->ecc.size;
+ unsigned int ecc_off = 2 + (start_chunk * chip->ecc.bytes);
+ struct rnandc_op rop = {
+ .command = COMMAND_INPUT_SEL_AHBS | COMMAND_0(NAND_CMD_READ0) |
+ COMMAND_2(NAND_CMD_READSTART) | COMMAND_FIFO_SEL |
+ COMMAND_SEQ_READ_PAGE,
+ .addr0_row = page,
+ .addr0_col = page_off,
+ .len = real_len,
+ .ecc_offset = ECC_OFFSET(mtd->writesize + ecc_off),
+ };
+ unsigned int max_bitflips = 0, i;
+ u32 ecc_stat;
+ int bf, ret;
+
+ /* Prepare controller */
+ rnandc_select_target(chip, chip->cur_cs);
+ rnandc_clear_status(rnandc);
+ rnandc_en_correction(rnandc);
+ rnandc_trigger_op(rnandc, &rop);
+
+ while (!FIFO_STATE_C_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
+ cpu_relax();
+
+ while (FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
+ cpu_relax();
+
+ ioread32_rep(rnandc->regs + FIFO_DATA_REG, bufpoi + page_off,
+ real_len / 4);
+
+ if (!FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG))) {
+ dev_err(rnandc->dev, "Clearing residual data in the read FIFO\n");
+ rnandc_clear_fifo(rnandc);
+ }
+
+ ret = rnandc_wait_end_of_op(rnandc, chip);
+ rnandc_dis_correction(rnandc);
+ if (ret) {
+ dev_err(rnandc->dev, "Read subpage operation never ending\n");
+ return ret;
+ }
+
+ ecc_stat = readl_relaxed(rnandc->regs + ECC_STAT_REG);
+
+ if (ECC_STAT_UNCORRECTABLE(cs, ecc_stat)) {
+ ret = nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+
+ for (i = start_chunk; i < nchunks; i++) {
+ unsigned int dataoff = i * chip->ecc.size;
+ unsigned int eccoff = 2 + (i * chip->ecc.bytes);
+
+ bf = nand_check_erased_ecc_chunk(bufpoi + dataoff,
+ chip->ecc.size,
+ chip->oob_poi + eccoff,
+ chip->ecc.bytes,
+ NULL, 0,
+ chip->ecc.strength);
+ if (bf < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += bf;
+ max_bitflips = max_t(unsigned int, max_bitflips, bf);
+ }
+ }
+ } else if (ECC_STAT_CORRECTABLE(cs, ecc_stat)) {
+ bf = ECC_CNT(cs, readl_relaxed(rnandc->regs + ECC_CNT_REG));
+ /*
+		 * The number of bitflips is an approximation, as this
+		 * controller does not report per-chunk details and only
+		 * gives statistics for the entire page.
+ */
+ mtd->ecc_stats.corrected += bf;
+ }
+
+ return 0;
+}
+
+static int rnandc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ struct rnandc *rnandc = to_rnandc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct rnand_chip *rnand = to_rnand(chip);
+ unsigned int cs = to_rnandc_cs(rnand);
+ struct rnandc_op rop = {
+ .command = COMMAND_INPUT_SEL_DMA | COMMAND_0(NAND_CMD_SEQIN) |
+ COMMAND_1(NAND_CMD_PAGEPROG) | COMMAND_FIFO_SEL |
+ COMMAND_SEQ_WRITE_PAGE,
+ .addr0_row = page,
+ .len = mtd->writesize,
+ .ecc_offset = ECC_OFFSET(mtd->writesize + 2),
+ };
+ dma_addr_t dma_addr;
+ int ret;
+
+ memcpy(rnandc->buf, buf, mtd->writesize);
+
+ /* Prepare controller */
+ rnandc_select_target(chip, chip->cur_cs);
+ rnandc_clear_status(rnandc);
+ reinit_completion(&rnandc->complete);
+ rnandc_en_interrupts(rnandc, INT_MEM_RDY(cs));
+ rnandc_en_correction(rnandc);
+
+ /* Configure DMA */
+ dma_addr = dma_map_single(rnandc->dev, (void *)rnandc->buf, mtd->writesize,
+ DMA_TO_DEVICE);
+ writel(dma_addr, rnandc->regs + DMA_ADDR_LOW_REG);
+ writel(mtd->writesize, rnandc->regs + DMA_CNT_REG);
+ writel(DMA_TLVL_MAX, rnandc->regs + DMA_TLVL_REG);
+
+ rnandc_trigger_op(rnandc, &rop);
+ rnandc_trigger_dma(rnandc);
+
+ ret = rnandc_wait_end_of_io(rnandc, chip);
+ dma_unmap_single(rnandc->dev, dma_addr, mtd->writesize, DMA_TO_DEVICE);
+ rnandc_dis_correction(rnandc);
+ if (ret) {
+ dev_err(rnandc->dev, "Write page operation never ending\n");
+ return ret;
+ }
+
+ if (!oob_required)
+ return 0;
+
+ return nand_change_write_column_op(chip, mtd->writesize, chip->oob_poi,
+ mtd->oobsize, false);
+}
+
+static int rnandc_write_subpage_hw_ecc(struct nand_chip *chip, u32 req_offset,
+ u32 req_len, const u8 *bufpoi,
+ int oob_required, int page)
+{
+ struct rnandc *rnandc = to_rnandc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int page_off = round_down(req_offset, chip->ecc.size);
+ unsigned int real_len = round_up(req_offset + req_len - page_off,
+ chip->ecc.size);
+ unsigned int start_chunk = page_off / chip->ecc.size;
+ unsigned int ecc_off = 2 + (start_chunk * chip->ecc.bytes);
+ struct rnandc_op rop = {
+ .command = COMMAND_INPUT_SEL_AHBS | COMMAND_0(NAND_CMD_SEQIN) |
+ COMMAND_1(NAND_CMD_PAGEPROG) | COMMAND_FIFO_SEL |
+ COMMAND_SEQ_WRITE_PAGE,
+ .addr0_row = page,
+ .addr0_col = page_off,
+ .len = real_len,
+ .ecc_offset = ECC_OFFSET(mtd->writesize + ecc_off),
+ };
+ int ret;
+
+ /* Prepare controller */
+ rnandc_select_target(chip, chip->cur_cs);
+ rnandc_clear_status(rnandc);
+ rnandc_en_correction(rnandc);
+ rnandc_trigger_op(rnandc, &rop);
+
+ while (FIFO_STATE_W_FULL(readl(rnandc->regs + FIFO_STATE_REG)))
+ cpu_relax();
+
+ iowrite32_rep(rnandc->regs + FIFO_DATA_REG, bufpoi + page_off,
+ real_len / 4);
+
+ while (!FIFO_STATE_W_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
+ cpu_relax();
+
+ ret = rnandc_wait_end_of_op(rnandc, chip);
+ rnandc_dis_correction(rnandc);
+ if (ret) {
+ dev_err(rnandc->dev, "Write subpage operation never ending\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * This controller is simple enough that it does not need the instruction
+ * parser provided by the core; instead, every situation is handled here.
+ */
+static int rnandc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op, bool check_only)
+{
+ struct rnandc *rnandc = to_rnandc(chip->controller);
+ const struct nand_op_instr *instr = NULL;
+ struct rnandc_op rop = {
+ .command = COMMAND_INPUT_SEL_AHBS,
+ .gen_seq_ctrl = GEN_SEQ_IMD_SEQ,
+ };
+ unsigned int cmd_phase = 0, addr_phase = 0, data_phase = 0,
+ delay_phase = 0, delays = 0;
+ unsigned int op_id, col_addrs, row_addrs, naddrs, remainder, words, i;
+ const u8 *addrs;
+ u32 last_bytes;
+ int ret;
+
+ if (!check_only)
+ rnandc_select_target(chip, op->cs);
+
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ instr = &op->instrs[op_id];
+
+ nand_op_trace(" ", instr);
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ switch (cmd_phase++) {
+ case 0:
+ rop.command |= COMMAND_0(instr->ctx.cmd.opcode);
+ rop.gen_seq_ctrl |= GEN_SEQ_CMD0_EN;
+ break;
+ case 1:
+ rop.gen_seq_ctrl |= GEN_SEQ_COMMAND_3(instr->ctx.cmd.opcode);
+ rop.gen_seq_ctrl |= GEN_SEQ_CMD3_EN;
+ if (addr_phase == 0)
+ addr_phase = 1;
+ break;
+ case 2:
+ rop.command |= COMMAND_2(instr->ctx.cmd.opcode);
+ rop.gen_seq_ctrl |= GEN_SEQ_CMD2_EN;
+ if (addr_phase <= 1)
+ addr_phase = 2;
+ break;
+ case 3:
+ rop.command |= COMMAND_1(instr->ctx.cmd.opcode);
+ rop.gen_seq_ctrl |= GEN_SEQ_CMD1_EN;
+ if (addr_phase <= 1)
+ addr_phase = 2;
+ if (delay_phase == 0)
+ delay_phase = 1;
+ if (data_phase == 0)
+ data_phase = 1;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ addrs = instr->ctx.addr.addrs;
+ naddrs = instr->ctx.addr.naddrs;
+ if (naddrs > 5)
+ return -EOPNOTSUPP;
+
+ col_addrs = min(2U, naddrs);
+ row_addrs = naddrs > 2 ? naddrs - col_addrs : 0;
+
+ switch (addr_phase++) {
+ case 0:
+ for (i = 0; i < col_addrs; i++)
+ rop.addr0_col |= addrs[i] << (i * 8);
+ rop.gen_seq_ctrl |= GEN_SEQ_COL_A0(col_addrs);
+
+ for (i = 0; i < row_addrs; i++)
+ rop.addr0_row |= addrs[2 + i] << (i * 8);
+ rop.gen_seq_ctrl |= GEN_SEQ_ROW_A0(row_addrs);
+
+ if (cmd_phase == 0)
+ cmd_phase = 1;
+ break;
+ case 1:
+ for (i = 0; i < col_addrs; i++)
+ rop.addr1_col |= addrs[i] << (i * 8);
+ rop.gen_seq_ctrl |= GEN_SEQ_COL_A1(col_addrs);
+
+ for (i = 0; i < row_addrs; i++)
+ rop.addr1_row |= addrs[2 + i] << (i * 8);
+ rop.gen_seq_ctrl |= GEN_SEQ_ROW_A1(row_addrs);
+
+ if (cmd_phase <= 1)
+ cmd_phase = 2;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ rop.read = true;
+ fallthrough;
+ case NAND_OP_DATA_OUT_INSTR:
+ rop.gen_seq_ctrl |= GEN_SEQ_DATA_EN;
+ rop.buf = instr->ctx.data.buf.in;
+ rop.len = instr->ctx.data.len;
+ rop.command |= COMMAND_FIFO_SEL;
+
+ switch (data_phase++) {
+ case 0:
+ if (cmd_phase <= 2)
+ cmd_phase = 3;
+ if (addr_phase <= 1)
+ addr_phase = 2;
+ if (delay_phase == 0)
+ delay_phase = 1;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ switch (delay_phase++) {
+ case 0:
+ rop.gen_seq_ctrl |= GEN_SEQ_DELAY0_EN;
+
+ if (cmd_phase <= 2)
+ cmd_phase = 3;
+ break;
+ case 1:
+ rop.gen_seq_ctrl |= GEN_SEQ_DELAY1_EN;
+
+ if (cmd_phase <= 3)
+ cmd_phase = 4;
+ if (data_phase == 0)
+ data_phase = 1;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ }
+ }
+
+ /*
+ * Sequence 19 is generic and dedicated to write operations.
+ * Sequence 18 is also generic and works for all other operations.
+ */
+ if (rop.buf && !rop.read)
+ rop.command |= COMMAND_SEQ_GEN_OUT;
+ else
+ rop.command |= COMMAND_SEQ_GEN_IN;
+
+ if (delays > 1) {
+ dev_err(rnandc->dev, "Cannot handle more than one wait delay\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (check_only)
+ return 0;
+
+ rnandc_trigger_op(rnandc, &rop);
+
+ words = rop.len / sizeof(u32);
+ remainder = rop.len % sizeof(u32);
+ if (rop.buf && rop.read) {
+ while (!FIFO_STATE_C_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
+ cpu_relax();
+
+ while (FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
+ cpu_relax();
+
+ ioread32_rep(rnandc->regs + FIFO_DATA_REG, rop.buf, words);
+ if (remainder) {
+ last_bytes = readl_relaxed(rnandc->regs + FIFO_DATA_REG);
+ memcpy(rop.buf + (words * sizeof(u32)), &last_bytes,
+ remainder);
+ }
+
+ if (!FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG))) {
+ dev_warn(rnandc->dev,
+ "Clearing residual data in the read FIFO\n");
+ rnandc_clear_fifo(rnandc);
+ }
+ } else if (rop.len && !rop.read) {
+ while (FIFO_STATE_W_FULL(readl(rnandc->regs + FIFO_STATE_REG)))
+ cpu_relax();
+
+ iowrite32_rep(rnandc->regs + FIFO_DATA_REG, rop.buf,
+ DIV_ROUND_UP(rop.len, 4));
+
+ if (remainder) {
+ last_bytes = 0;
+ memcpy(&last_bytes, rop.buf + (words * sizeof(u32)), remainder);
+ writel_relaxed(last_bytes, rnandc->regs + FIFO_DATA_REG);
+ }
+
+ while (!FIFO_STATE_W_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
+ cpu_relax();
+ }
+
+ ret = rnandc_wait_end_of_op(rnandc, chip);
+ if (ret)
+ return ret;
+
+ return 0;
+}
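To make the generic-sequence parsing above concrete, here is a hedged illustration derived only by tracing the loop (not from any datasheet): a READID-style operation, i.e. command 0x90, one zero address byte, then a data-in phase, ends up with the register image below. Sequence 18 (COMMAND_SEQ_GEN_IN) is selected because the operation carries a read data phase.

    /* Illustrative only: the rnandc_op fields rnandc_exec_op() would
     * compute for CMD(0x90) + ADDR(0x00) + DATA_IN. */
    static void example_readid_rop(struct rnandc_op *rop)
    {
            rop->command = COMMAND_INPUT_SEL_AHBS | COMMAND_FIFO_SEL |
                           COMMAND_0(0x90) | COMMAND_SEQ_GEN_IN;
            rop->gen_seq_ctrl = GEN_SEQ_IMD_SEQ | GEN_SEQ_CMD0_EN |
                                GEN_SEQ_COL_A0(1) | GEN_SEQ_ROW_A0(0) |
                                GEN_SEQ_DATA_EN;
            rop->addr0_col = 0x00;
            rop->read = true;
            /* rop->buf and rop->len come from the DATA_IN instruction;
             * the CPU then drains FIFO_DATA_REG word by word. */
    }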
+
+static int rnandc_setup_interface(struct nand_chip *chip, int chipnr,
+ const struct nand_interface_config *conf)
+{
+ struct rnand_chip *rnand = to_rnand(chip);
+ struct rnandc *rnandc = to_rnandc(chip->controller);
+ unsigned int period_ns = 1000000000 / clk_get_rate(rnandc->eclk);
+ const struct nand_sdr_timings *sdr;
+ unsigned int cyc, cle, ale, bef_dly, ca_to_data;
+
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ if (sdr->tRP_min != sdr->tWP_min || sdr->tREH_min != sdr->tWH_min) {
+ dev_err(rnandc->dev, "Read and write hold times must be identical\n");
+ return -EINVAL;
+ }
+
+ if (chipnr < 0)
+ return 0;
+
+ rnand->timings_asyn =
+ TIMINGS_ASYN_TRWP(TO_CYCLES64(sdr->tRP_min, period_ns)) |
+ TIMINGS_ASYN_TRWH(TO_CYCLES64(sdr->tREH_min, period_ns));
+ rnand->tim_seq0 =
+ TIM_SEQ0_TCCS(TO_CYCLES64(sdr->tCCS_min, period_ns)) |
+ TIM_SEQ0_TADL(TO_CYCLES64(sdr->tADL_min, period_ns)) |
+ TIM_SEQ0_TRHW(TO_CYCLES64(sdr->tRHW_min, period_ns)) |
+ TIM_SEQ0_TWHR(TO_CYCLES64(sdr->tWHR_min, period_ns));
+ rnand->tim_seq1 =
+ TIM_SEQ1_TWB(TO_CYCLES64(sdr->tWB_max, period_ns)) |
+ TIM_SEQ1_TRR(TO_CYCLES64(sdr->tRR_min, period_ns)) |
+ TIM_SEQ1_TWW(TO_CYCLES64(sdr->tWW_min, period_ns));
+
+ cyc = sdr->tDS_min + sdr->tDH_min;
+ cle = sdr->tCLH_min + sdr->tCLS_min;
+ ale = sdr->tALH_min + sdr->tALS_min;
+ bef_dly = sdr->tWB_max - sdr->tDH_min;
+ ca_to_data = sdr->tWHR_min + sdr->tREA_max - sdr->tDH_min;
+
+ /*
+ * D0 = CMD -> ADDR = tCLH + tCLS - 1 cycle
+ * D1 = CMD -> CMD = tCLH + tCLS - 1 cycle
+ * D2 = CMD -> DLY = tWB - tDH
+ * D3 = CMD -> DATA = tWHR + tREA - tDH
+ */
+ rnand->tim_gen_seq0 =
+ TIM_GEN_SEQ0_D0(TO_CYCLES64(cle - cyc, period_ns)) |
+ TIM_GEN_SEQ0_D1(TO_CYCLES64(cle - cyc, period_ns)) |
+ TIM_GEN_SEQ0_D2(TO_CYCLES64(bef_dly, period_ns)) |
+ TIM_GEN_SEQ0_D3(TO_CYCLES64(ca_to_data, period_ns));
+
+ /*
+	 * D4 = ADDR -> CMD = tALH + tALS - 1 cycle
+	 * D5 = ADDR -> ADDR = tALH + tALS - 1 cycle
+ * D6 = ADDR -> DLY = tWB - tDH
+ * D7 = ADDR -> DATA = tWHR + tREA - tDH
+ */
+ rnand->tim_gen_seq1 =
+ TIM_GEN_SEQ1_D4(TO_CYCLES64(ale - cyc, period_ns)) |
+ TIM_GEN_SEQ1_D5(TO_CYCLES64(ale - cyc, period_ns)) |
+ TIM_GEN_SEQ1_D6(TO_CYCLES64(bef_dly, period_ns)) |
+ TIM_GEN_SEQ1_D7(TO_CYCLES64(ca_to_data, period_ns));
+
+ /*
+ * D8 = DLY -> DATA = tRR + tREA
+ * D9 = DLY -> CMD = tRR
+ * D10 = DATA -> CMD = tCLH + tCLS - 1 cycle
+ * D11 = DATA -> DLY = tWB - tDH
+ */
+ rnand->tim_gen_seq2 =
+ TIM_GEN_SEQ2_D8(TO_CYCLES64(sdr->tRR_min + sdr->tREA_max, period_ns)) |
+ TIM_GEN_SEQ2_D9(TO_CYCLES64(sdr->tRR_min, period_ns)) |
+ TIM_GEN_SEQ2_D10(TO_CYCLES64(cle - cyc, period_ns)) |
+ TIM_GEN_SEQ2_D11(TO_CYCLES64(bef_dly, period_ns));
+
+ /* D12 = DATA -> END = tCLH - tDH */
+ rnand->tim_gen_seq3 =
+ TIM_GEN_SEQ3_D12(TO_CYCLES64(sdr->tCLH_min - sdr->tDH_min, period_ns));
+
+ return 0;
+}
+
+static int rnandc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ unsigned int eccbytes = round_up(chip->ecc.bytes, 4) * chip->ecc.steps;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 2;
+ oobregion->length = eccbytes;
+
+ return 0;
+}
+
+static int rnandc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ unsigned int eccbytes = round_up(chip->ecc.bytes, 4) * chip->ecc.steps;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 2 + eccbytes;
+ oobregion->length = mtd->oobsize - oobregion->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops rnandc_ooblayout_ops = {
+ .ecc = rnandc_ooblayout_ecc,
+ .free = rnandc_ooblayout_free,
+};
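To make the layout above concrete, take a hypothetical 2 KiB page with 128 bytes of OOB, 512-byte ECC steps and 8-bit strength (an assumed geometry, not one taken from the patch): chip->ecc.bytes is 14, so eccbytes = round_up(14, 4) * 4 = 16 * 4 = 64. The ECC region then covers OOB bytes 2..65 and the free region bytes 66..127 (62 bytes), with the first two OOB bytes skipped, which matches the ECC_OFFSET(mtd->writesize + 2) programmed in the page read/write helpers.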
+
+static int rnandc_hw_ecc_controller_init(struct nand_chip *chip)
+{
+ struct rnand_chip *rnand = to_rnand(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct rnandc *rnandc = to_rnandc(chip->controller);
+
+ if (mtd->writesize > SZ_16K) {
+ dev_err(rnandc->dev, "Unsupported page size\n");
+ return -EINVAL;
+ }
+
+ switch (chip->ecc.size) {
+ case SZ_256:
+ rnand->control |= CONTROL_ECC_BLOCK_SIZE_256;
+ break;
+ case SZ_512:
+ rnand->control |= CONTROL_ECC_BLOCK_SIZE_512;
+ break;
+ case SZ_1K:
+ rnand->control |= CONTROL_ECC_BLOCK_SIZE_1024;
+ break;
+ default:
+ dev_err(rnandc->dev, "Unsupported ECC chunk size\n");
+ return -EINVAL;
+ }
+
+ switch (chip->ecc.strength) {
+ case 2:
+ chip->ecc.bytes = 4;
+ rnand->ecc_ctrl |= ECC_CTRL_CAP_2B;
+ break;
+ case 4:
+ chip->ecc.bytes = 7;
+ rnand->ecc_ctrl |= ECC_CTRL_CAP_4B;
+ break;
+ case 8:
+ chip->ecc.bytes = 14;
+ rnand->ecc_ctrl |= ECC_CTRL_CAP_8B;
+ break;
+ case 16:
+ chip->ecc.bytes = 28;
+ rnand->ecc_ctrl |= ECC_CTRL_CAP_16B;
+ break;
+ case 24:
+ chip->ecc.bytes = 42;
+ rnand->ecc_ctrl |= ECC_CTRL_CAP_24B;
+ break;
+ case 32:
+ chip->ecc.bytes = 56;
+ rnand->ecc_ctrl |= ECC_CTRL_CAP_32B;
+ break;
+ default:
+ dev_err(rnandc->dev, "Unsupported ECC strength\n");
+ return -EINVAL;
+ }
+
+ rnand->ecc_ctrl |= ECC_CTRL_ERR_THRESHOLD(chip->ecc.strength);
+
+ mtd_set_ooblayout(mtd, &rnandc_ooblayout_ops);
+ chip->ecc.steps = mtd->writesize / chip->ecc.size;
+ chip->ecc.read_page = rnandc_read_page_hw_ecc;
+ chip->ecc.read_subpage = rnandc_read_subpage_hw_ecc;
+ chip->ecc.write_page = rnandc_write_page_hw_ecc;
+ chip->ecc.write_subpage = rnandc_write_subpage_hw_ecc;
+
+ return 0;
+}
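One pattern worth noting in the strength switch above (an observation, not something the driver states): every ecc.bytes value equals DIV_ROUND_UP(strength * 14, 8), i.e. 14 parity bits per correctable bit, which would be consistent with a BCH code over GF(2^14). A minimal sketch of that relation, kept separate since the driver deliberately keeps the explicit switch:

    #include <linux/math.h>

    /* Reproduces the ecc.bytes column above: 2 -> 4, 4 -> 7, 8 -> 14,
     * 16 -> 28, 24 -> 42, 32 -> 56. The GF(2^14) reading is an assumption. */
    static unsigned int rnandc_example_ecc_bytes(unsigned int strength)
    {
            return DIV_ROUND_UP(strength * 14, 8);
    }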
+
+static int rnandc_ecc_init(struct nand_chip *chip)
+{
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&chip->base);
+ struct rnandc *rnandc = to_rnandc(chip->controller);
+ int ret;
+
+ if (ecc->engine_type != NAND_ECC_ENGINE_TYPE_NONE &&
+ (!ecc->size || !ecc->strength)) {
+ if (requirements->step_size && requirements->strength) {
+ ecc->size = requirements->step_size;
+ ecc->strength = requirements->strength;
+ } else {
+ dev_err(rnandc->dev, "No minimum ECC strength\n");
+ return -EINVAL;
+ }
+ }
+
+ switch (ecc->engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ ret = rnandc_hw_ecc_controller_init(chip);
+ if (ret)
+ return ret;
+ break;
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rnandc_attach_chip(struct nand_chip *chip)
+{
+ struct rnand_chip *rnand = to_rnand(chip);
+ struct rnandc *rnandc = to_rnandc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg = nanddev_get_memorg(&chip->base);
+ int ret;
+
+ /* Do not store BBT bits in the OOB section as it is not protected */
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ if (mtd->writesize <= 512) {
+ dev_err(rnandc->dev, "Small page devices not supported\n");
+ return -EINVAL;
+ }
+
+ rnand->control |= CONTROL_CHECK_RB_LINE | CONTROL_INT_EN;
+
+ switch (memorg->pages_per_eraseblock) {
+ case 32:
+ rnand->control |= CONTROL_BLOCK_SIZE_32P;
+ break;
+ case 64:
+ rnand->control |= CONTROL_BLOCK_SIZE_64P;
+ break;
+ case 128:
+ rnand->control |= CONTROL_BLOCK_SIZE_128P;
+ break;
+ case 256:
+ rnand->control |= CONTROL_BLOCK_SIZE_256P;
+ break;
+ default:
+ dev_err(rnandc->dev, "Unsupported memory organization\n");
+ return -EINVAL;
+ }
+
+ chip->options |= NAND_SUBPAGE_READ;
+
+ ret = rnandc_ecc_init(chip);
+ if (ret) {
+ dev_err(rnandc->dev, "ECC initialization failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* Force an update of the configuration registers */
+ rnand->selected_die = -1;
+
+ return 0;
+}
+
+static const struct nand_controller_ops rnandc_ops = {
+ .attach_chip = rnandc_attach_chip,
+ .exec_op = rnandc_exec_op,
+ .setup_interface = rnandc_setup_interface,
+};
+
+static int rnandc_alloc_dma_buf(struct rnandc *rnandc,
+ struct mtd_info *new_mtd)
+{
+ unsigned int max_len = new_mtd->writesize + new_mtd->oobsize;
+ struct rnand_chip *entry, *temp;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+
+ list_for_each_entry_safe(entry, temp, &rnandc->chips, node) {
+ chip = &entry->chip;
+ mtd = nand_to_mtd(chip);
+ max_len = max(max_len, mtd->writesize + mtd->oobsize);
+ }
+
+ if (rnandc->buf && rnandc->buf_sz < max_len) {
+ devm_kfree(rnandc->dev, rnandc->buf);
+ rnandc->buf = NULL;
+ }
+
+ if (!rnandc->buf) {
+ rnandc->buf_sz = max_len;
+ rnandc->buf = devm_kmalloc(rnandc->dev, max_len,
+ GFP_KERNEL | GFP_DMA);
+ if (!rnandc->buf)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int rnandc_chip_init(struct rnandc *rnandc, struct device_node *np)
+{
+ struct rnand_chip *rnand;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ int nsels, ret, i;
+ u32 cs;
+
+ nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
+ if (nsels <= 0) {
+ ret = (nsels < 0) ? nsels : -EINVAL;
+ dev_err(rnandc->dev, "Invalid reg property (%d)\n", ret);
+ return ret;
+ }
+
+ /* Alloc the driver's NAND chip structure */
+ rnand = devm_kzalloc(rnandc->dev, struct_size(rnand, sels, nsels),
+ GFP_KERNEL);
+ if (!rnand)
+ return -ENOMEM;
+
+ rnand->nsels = nsels;
+ rnand->selected_die = -1;
+
+ for (i = 0; i < nsels; i++) {
+ ret = of_property_read_u32_index(np, "reg", i, &cs);
+ if (ret) {
+ dev_err(rnandc->dev, "Incomplete reg property (%d)\n", ret);
+ return ret;
+ }
+
+ if (cs >= RNANDC_CS_NUM) {
+ dev_err(rnandc->dev, "Invalid reg property (%d)\n", cs);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(cs, &rnandc->assigned_cs)) {
+ dev_err(rnandc->dev, "CS %d already assigned\n", cs);
+ return -EINVAL;
+ }
+
+ /*
+		 * No need to check for RB or WP properties: there is a
+		 * mandatory 1:1 mapping with the CS.
+ */
+ rnand->sels[i].cs = cs;
+ }
+
+ chip = &rnand->chip;
+ chip->controller = &rnandc->controller;
+ nand_set_flash_node(chip, np);
+
+ mtd = nand_to_mtd(chip);
+ mtd->dev.parent = rnandc->dev;
+ if (!mtd->name) {
+ dev_err(rnandc->dev, "Missing MTD label\n");
+ return -EINVAL;
+ }
+
+ ret = nand_scan(chip, rnand->nsels);
+ if (ret) {
+ dev_err(rnandc->dev, "Failed to scan the NAND chip (%d)\n", ret);
+ return ret;
+ }
+
+ ret = rnandc_alloc_dma_buf(rnandc, mtd);
+ if (ret)
+ goto cleanup_nand;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(rnandc->dev, "Failed to register MTD device (%d)\n", ret);
+ goto cleanup_nand;
+ }
+
+ list_add_tail(&rnand->node, &rnandc->chips);
+
+ return 0;
+
+cleanup_nand:
+ nand_cleanup(chip);
+
+ return ret;
+}
+
+static void rnandc_chips_cleanup(struct rnandc *rnandc)
+{
+ struct rnand_chip *entry, *temp;
+ struct nand_chip *chip;
+ int ret;
+
+ list_for_each_entry_safe(entry, temp, &rnandc->chips, node) {
+ chip = &entry->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ list_del(&entry->node);
+ }
+}
+
+static int rnandc_chips_init(struct rnandc *rnandc)
+{
+ struct device_node *np;
+ int ret;
+
+ for_each_child_of_node(rnandc->dev->of_node, np) {
+ ret = rnandc_chip_init(rnandc, np);
+ if (ret) {
+ of_node_put(np);
+ goto cleanup_chips;
+ }
+ }
+
+ return 0;
+
+cleanup_chips:
+ rnandc_chips_cleanup(rnandc);
+
+ return ret;
+}
+
+static int rnandc_probe(struct platform_device *pdev)
+{
+ struct rnandc *rnandc;
+ int irq, ret;
+
+ rnandc = devm_kzalloc(&pdev->dev, sizeof(*rnandc), GFP_KERNEL);
+ if (!rnandc)
+ return -ENOMEM;
+
+ rnandc->dev = &pdev->dev;
+ nand_controller_init(&rnandc->controller);
+ rnandc->controller.ops = &rnandc_ops;
+ INIT_LIST_HEAD(&rnandc->chips);
+ init_completion(&rnandc->complete);
+
+ rnandc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rnandc->regs))
+ return PTR_ERR(rnandc->regs);
+
+ /* APB clock */
+ rnandc->hclk = devm_clk_get(&pdev->dev, "hclk");
+ if (IS_ERR(rnandc->hclk))
+ return PTR_ERR(rnandc->hclk);
+
+ /* External NAND bus clock */
+ rnandc->eclk = devm_clk_get(&pdev->dev, "eclk");
+ if (IS_ERR(rnandc->eclk))
+ return PTR_ERR(rnandc->eclk);
+
+ ret = clk_prepare_enable(rnandc->hclk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(rnandc->eclk);
+ if (ret)
+ goto disable_hclk;
+
+ rnandc_dis_interrupts(rnandc);
+ irq = platform_get_irq_optional(pdev, 0);
+ if (irq == -EPROBE_DEFER) {
+ ret = irq;
+ goto disable_eclk;
+ } else if (irq < 0) {
+ dev_info(&pdev->dev, "No IRQ found, fallback to polling\n");
+ rnandc->use_polling = true;
+ } else {
+ ret = devm_request_irq(&pdev->dev, irq, rnandc_irq_handler, 0,
+ "renesas-nand-controller", rnandc);
+ if (ret < 0)
+ goto disable_eclk;
+ }
+
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto disable_eclk;
+
+ rnandc_clear_fifo(rnandc);
+
+ platform_set_drvdata(pdev, rnandc);
+
+ ret = rnandc_chips_init(rnandc);
+ if (ret)
+ goto disable_eclk;
+
+ return 0;
+
+disable_eclk:
+ clk_disable_unprepare(rnandc->eclk);
+disable_hclk:
+ clk_disable_unprepare(rnandc->hclk);
+
+ return ret;
+}
+
+static int rnandc_remove(struct platform_device *pdev)
+{
+ struct rnandc *rnandc = platform_get_drvdata(pdev);
+
+ rnandc_chips_cleanup(rnandc);
+
+ clk_disable_unprepare(rnandc->eclk);
+ clk_disable_unprepare(rnandc->hclk);
+
+ return 0;
+}
+
+static const struct of_device_id rnandc_id_table[] = {
+ { .compatible = "renesas,rcar-gen3-nandc" },
+ { .compatible = "renesas,rzn1-nandc" },
+ {} /* sentinel */
+};
+MODULE_DEVICE_TABLE(of, rnandc_id_table);
+
+static struct platform_driver rnandc_driver = {
+ .driver = {
+ .name = "renesas-nandc",
+ .of_match_table = rnandc_id_table,
+ },
+ .probe = rnandc_probe,
+ .remove = rnandc_remove,
+};
+module_platform_driver(rnandc_driver);
+
+MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
+MODULE_DESCRIPTION("Renesas R-Car Gen3 & RZ/N1 NAND controller driver");
+MODULE_LICENSE("GPL v2");
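Editorial note: the probe path added above defers on -EPROBE_DEFER, falls back to polling when no interrupt line is described, and otherwise requests the IRQ. Below is a minimal standalone sketch of that optional-IRQ pattern, not the driver's exact code; the demo_* names and the bare-bones handler are placeholders.

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t demo_irq_handler(int irq, void *data)
{
	/* Acknowledge and record the controller event here. */
	return IRQ_HANDLED;
}

static int demo_setup_irq(struct platform_device *pdev, bool *use_polling)
{
	int irq;

	*use_polling = false;

	irq = platform_get_irq_optional(pdev, 0);
	if (irq == -EPROBE_DEFER)
		return irq;		/* irqchip not ready yet, retry probe later */

	if (irq < 0) {
		*use_polling = true;	/* no IRQ described, poll status bits instead */
		return 0;
	}

	return devm_request_irq(&pdev->dev, irq, demo_irq_handler, 0,
				dev_name(&pdev->dev), pdev);
}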
diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c
index b5405bc7ca3a..cbaa4f1c83da 100644
--- a/drivers/mtd/nand/raw/rockchip-nand-controller.c
+++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c
@@ -1403,7 +1403,6 @@ static int rk_nfc_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(dev, "no NFC irq resource\n");
ret = -EINVAL;
goto clk_disable;
}
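Editorial note: platform_get_irq() already prints its own message when the IRQ lookup fails, which is why the redundant dev_err() is dropped above. A small demo-only sketch of the resulting idiom (demo_get_irq is a hypothetical helper, not this driver's code):

#include <linux/platform_device.h>

static int demo_get_irq(struct platform_device *pdev)
{
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* failure already logged by platform_get_irq() */

	return irq;
}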
diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c
index 13df4bdf792a..b85b9c6fcc42 100644
--- a/drivers/mtd/nand/raw/sh_flctl.c
+++ b/drivers/mtd/nand/raw/sh_flctl.c
@@ -1220,7 +1220,7 @@ static struct platform_driver flctl_driver = {
.remove = flctl_remove,
.driver = {
.name = "sh_flctl",
- .of_match_table = of_match_ptr(of_flctl_match),
+ .of_match_table = of_flctl_match,
},
};
diff --git a/drivers/mtd/nand/raw/sharpsl.c b/drivers/mtd/nand/raw/sharpsl.c
index 5612ee628425..52ce5162538a 100644
--- a/drivers/mtd/nand/raw/sharpsl.c
+++ b/drivers/mtd/nand/raw/sharpsl.c
@@ -6,7 +6,6 @@
* Based on Sharp's NAND driver sharp_sl.c
*/
-#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/delay.h>
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
index 97b4e02e43e4..87c1c7dd97eb 100644
--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
@@ -9,6 +9,7 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
@@ -231,6 +232,7 @@ struct stm32_fmc2_timings {
struct stm32_fmc2_nand {
struct nand_chip chip;
+ struct gpio_desc *wp_gpio;
struct stm32_fmc2_timings timings;
int ncs;
int cs_used[FMC2_MAX_CE];
@@ -1747,6 +1749,18 @@ static const struct nand_controller_ops stm32_fmc2_nfc_controller_ops = {
.setup_interface = stm32_fmc2_nfc_setup_interface,
};
+static void stm32_fmc2_nfc_wp_enable(struct stm32_fmc2_nand *nand)
+{
+ if (nand->wp_gpio)
+ gpiod_set_value(nand->wp_gpio, 1);
+}
+
+static void stm32_fmc2_nfc_wp_disable(struct stm32_fmc2_nand *nand)
+{
+ if (nand->wp_gpio)
+ gpiod_set_value(nand->wp_gpio, 0);
+}
+
static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc,
struct device_node *dn)
{
@@ -1785,6 +1799,18 @@ static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc,
nand->cs_used[i] = cs;
}
+ nand->wp_gpio = devm_gpiod_get_from_of_node(nfc->dev, dn,
+ "wp-gpios", 0,
+ GPIOD_OUT_HIGH, "wp");
+ if (IS_ERR(nand->wp_gpio)) {
+ ret = PTR_ERR(nand->wp_gpio);
+ if (ret != -ENOENT)
+ return dev_err_probe(nfc->dev, ret,
+ "failed to request WP GPIO\n");
+
+ nand->wp_gpio = NULL;
+ }
+
nand_set_flash_node(&nand->chip, dn);
return 0;
@@ -1956,10 +1982,12 @@ static int stm32_fmc2_nfc_probe(struct platform_device *pdev)
chip->options |= NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE |
NAND_USES_DMA;
+ stm32_fmc2_nfc_wp_disable(nand);
+
/* Scan to find existence of the device */
ret = nand_scan(chip, nand->ncs);
if (ret)
- goto err_release_dma;
+ goto err_wp_enable;
ret = mtd_device_register(mtd, NULL, 0);
if (ret)
@@ -1972,6 +2000,9 @@ static int stm32_fmc2_nfc_probe(struct platform_device *pdev)
err_nand_cleanup:
nand_cleanup(chip);
+err_wp_enable:
+ stm32_fmc2_nfc_wp_enable(nand);
+
err_release_dma:
if (nfc->dma_ecc_ch)
dma_release_channel(nfc->dma_ecc_ch);
@@ -2012,15 +2043,20 @@ static int stm32_fmc2_nfc_remove(struct platform_device *pdev)
clk_disable_unprepare(nfc->clk);
+ stm32_fmc2_nfc_wp_enable(nand);
+
return 0;
}
static int __maybe_unused stm32_fmc2_nfc_suspend(struct device *dev)
{
struct stm32_fmc2_nfc *nfc = dev_get_drvdata(dev);
+ struct stm32_fmc2_nand *nand = &nfc->nand;
clk_disable_unprepare(nfc->clk);
+ stm32_fmc2_nfc_wp_enable(nand);
+
pinctrl_pm_select_sleep_state(dev);
return 0;
@@ -2042,6 +2078,8 @@ static int __maybe_unused stm32_fmc2_nfc_resume(struct device *dev)
stm32_fmc2_nfc_init(nfc);
+ stm32_fmc2_nfc_wp_disable(nand);
+
for (chip_cs = 0; chip_cs < FMC2_MAX_CE; chip_cs++) {
if (!(nfc->cs_assigned & BIT(chip_cs)))
continue;
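Editorial note: the hunks above add optional write-protect GPIO handling, requested per NAND child node and deasserted around probe/resume. A minimal sketch of the same pattern using the generic gpiod consumer API against the controller device rather than the child node; the demo_* names are placeholders, "wp" maps to a wp-gpios property, and the polarity convention (1 = array protected) is assumed to match the driver above.

#include <linux/device.h>
#include <linux/gpio/consumer.h>

struct demo_chip {
	struct gpio_desc *wp_gpio;
};

static int demo_get_wp_gpio(struct device *dev, struct demo_chip *chip)
{
	/* Request as output, asserted: start with the array protected. */
	chip->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_HIGH);
	if (IS_ERR(chip->wp_gpio))
		return dev_err_probe(dev, PTR_ERR(chip->wp_gpio),
				     "failed to request WP GPIO\n");

	return 0;
}

static void demo_wp_set(struct demo_chip *chip, bool assert)
{
	/* A NULL descriptor means no wp-gpios property: nothing to do. */
	if (chip->wp_gpio)
		gpiod_set_value(chip->wp_gpio, assert);
}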
diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c
index 32431bbe69b8..b36e5260ae27 100644
--- a/drivers/mtd/nand/raw/tegra_nand.c
+++ b/drivers/mtd/nand/raw/tegra_nand.c
@@ -17,8 +17,11 @@
#include <linux/mtd/rawnand.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include <soc/tegra/common.h>
+
#define COMMAND 0x00
#define COMMAND_GO BIT(31)
#define COMMAND_CLE BIT(30)
@@ -1151,6 +1154,7 @@ static int tegra_nand_probe(struct platform_device *pdev)
return -ENOMEM;
ctrl->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ctrl);
nand_controller_init(&ctrl->controller);
ctrl->controller.ops = &tegra_nand_controller_ops;
@@ -1166,14 +1170,23 @@ static int tegra_nand_probe(struct platform_device *pdev)
if (IS_ERR(ctrl->clk))
return PTR_ERR(ctrl->clk);
- err = clk_prepare_enable(ctrl->clk);
+ err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
+ if (err)
+ return err;
+
+ /*
+ * This driver doesn't support active power management yet,
+ * so we simply keep the device resumed.
+ */
+ pm_runtime_enable(&pdev->dev);
+ err = pm_runtime_resume_and_get(&pdev->dev);
if (err)
return err;
err = reset_control_reset(rst);
if (err) {
dev_err(ctrl->dev, "Failed to reset HW: %d\n", err);
- goto err_disable_clk;
+ goto err_put_pm;
}
writel_relaxed(HWSTATUS_CMD_DEFAULT, ctrl->regs + HWSTATUS_CMD);
@@ -1188,21 +1201,20 @@ static int tegra_nand_probe(struct platform_device *pdev)
dev_name(&pdev->dev), ctrl);
if (err) {
dev_err(ctrl->dev, "Failed to get IRQ: %d\n", err);
- goto err_disable_clk;
+ goto err_put_pm;
}
writel_relaxed(DMA_MST_CTRL_IS_DONE, ctrl->regs + DMA_MST_CTRL);
err = tegra_nand_chips_init(ctrl->dev, ctrl);
if (err)
- goto err_disable_clk;
-
- platform_set_drvdata(pdev, ctrl);
+ goto err_put_pm;
return 0;
-err_disable_clk:
- clk_disable_unprepare(ctrl->clk);
+err_put_pm:
+ pm_runtime_put_sync_suspend(ctrl->dev);
+ pm_runtime_force_suspend(ctrl->dev);
return err;
}
@@ -1219,11 +1231,40 @@ static int tegra_nand_remove(struct platform_device *pdev)
nand_cleanup(chip);
+ pm_runtime_put_sync_suspend(ctrl->dev);
+ pm_runtime_force_suspend(ctrl->dev);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_nand_runtime_resume(struct device *dev)
+{
+ struct tegra_nand_controller *ctrl = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(ctrl->clk);
+ if (err) {
+ dev_err(dev, "Failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused tegra_nand_runtime_suspend(struct device *dev)
+{
+ struct tegra_nand_controller *ctrl = dev_get_drvdata(dev);
+
clk_disable_unprepare(ctrl->clk);
return 0;
}
+static const struct dev_pm_ops tegra_nand_pm = {
+ SET_RUNTIME_PM_OPS(tegra_nand_runtime_suspend, tegra_nand_runtime_resume,
+ NULL)
+};
+
static const struct of_device_id tegra_nand_of_match[] = {
{ .compatible = "nvidia,tegra20-nand" },
{ /* sentinel */ }
@@ -1234,6 +1275,7 @@ static struct platform_driver tegra_nand_driver = {
.driver = {
.name = "tegra-nand",
.of_match_table = tegra_nand_of_match,
+ .pm = &tegra_nand_pm,
},
.probe = tegra_nand_probe,
.remove = tegra_nand_remove,
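Editorial note: the conversion above replaces open-coded clk_prepare_enable()/clk_disable_unprepare() in probe/remove with runtime-PM callbacks that gate the module clock, while the device is kept resumed for the driver's lifetime. A minimal sketch of that shape with placeholder demo_* names (not Tegra code):

#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

struct demo_ctrl {
	struct clk *clk;
};

static int demo_runtime_resume(struct device *dev)
{
	struct demo_ctrl *ctrl = dev_get_drvdata(dev);

	return clk_prepare_enable(ctrl->clk);	/* ungate the clock before use */
}

static int demo_runtime_suspend(struct device *dev)
{
	struct demo_ctrl *ctrl = dev_get_drvdata(dev);

	clk_disable_unprepare(ctrl->clk);	/* gate the clock while idle */
	return 0;
}

static int demo_probe_pm(struct platform_device *pdev)
{
	/* Keep the device resumed as long as the driver is bound. */
	pm_runtime_enable(&pdev->dev);
	return pm_runtime_resume_and_get(&pdev->dev);
}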
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 2c8685f1f2fa..ff8336870bc0 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -381,7 +381,10 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
}
}
- rdesc = spinand->dirmaps[req->pos.plane].rdesc;
+ if (req->mode == MTD_OPS_RAW)
+ rdesc = spinand->dirmaps[req->pos.plane].rdesc;
+ else
+ rdesc = spinand->dirmaps[req->pos.plane].rdesc_ecc;
while (nbytes) {
ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
@@ -452,7 +455,10 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
req->ooblen);
}
- wdesc = spinand->dirmaps[req->pos.plane].wdesc;
+ if (req->mode == MTD_OPS_RAW)
+ wdesc = spinand->dirmaps[req->pos.plane].wdesc;
+ else
+ wdesc = spinand->dirmaps[req->pos.plane].wdesc_ecc;
while (nbytes) {
ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
@@ -865,6 +871,31 @@ static int spinand_create_dirmap(struct spinand_device *spinand,
spinand->dirmaps[plane].rdesc = desc;
+ if (nand->ecc.engine->integration != NAND_ECC_ENGINE_INTEGRATION_PIPELINED) {
+ spinand->dirmaps[plane].wdesc_ecc = spinand->dirmaps[plane].wdesc;
+ spinand->dirmaps[plane].rdesc_ecc = spinand->dirmaps[plane].rdesc;
+
+ return 0;
+ }
+
+ info.op_tmpl = *spinand->op_templates.update_cache;
+ info.op_tmpl.data.ecc = true;
+ desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
+ spinand->spimem, &info);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ spinand->dirmaps[plane].wdesc_ecc = desc;
+
+ info.op_tmpl = *spinand->op_templates.read_cache;
+ info.op_tmpl.data.ecc = true;
+ desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
+ spinand->spimem, &info);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ spinand->dirmaps[plane].rdesc_ecc = desc;
+
return 0;
}
@@ -1208,14 +1239,6 @@ static int spinand_init(struct spinand_device *spinand)
if (ret)
goto err_free_bufs;
- ret = spinand_create_dirmaps(spinand);
- if (ret) {
- dev_err(dev,
- "Failed to create direct mappings for read/write operations (err = %d)\n",
- ret);
- goto err_manuf_cleanup;
- }
-
ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
if (ret)
goto err_manuf_cleanup;
@@ -1250,6 +1273,14 @@ static int spinand_init(struct spinand_device *spinand)
mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;
+ ret = spinand_create_dirmaps(spinand);
+ if (ret) {
+ dev_err(dev,
+ "Failed to create direct mappings for read/write operations (err = %d)\n",
+ ret);
+ goto err_cleanup_ecc_engine;
+ }
+
return 0;
err_cleanup_ecc_engine:
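Editorial note: the hunks above add a second pair of direct-mapping descriptors with data.ecc set, selected whenever the access is not MTD_OPS_RAW, and only create the descriptors once the ECC engine is initialized, since the new code checks the engine's integration type. A hedged sketch of creating and using one spi-mem dirmap read descriptor; the opcode/address layout and demo_* names are illustrative only, not the SPI-NAND core's exact code.

#include <linux/spi/spi-mem.h>

static int demo_create_read_dirmap(struct spi_mem *mem, u64 len,
				   struct spi_mem_dirmap_desc **out)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
				      SPI_MEM_OP_ADDR(2, 0, 1),
				      SPI_MEM_OP_NO_DUMMY,
				      SPI_MEM_OP_DATA_IN(0, NULL, 1)),
		.offset = 0,
		.length = len,
	};
	struct spi_mem_dirmap_desc *desc;

	desc = devm_spi_mem_dirmap_create(&mem->spi->dev, mem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	*out = desc;
	return 0;
}

static ssize_t demo_dirmap_read(struct spi_mem_dirmap_desc *desc,
				u64 offs, size_t len, void *buf)
{
	/* The controller maps the access; data length comes from @len. */
	return spi_mem_dirmap_read(desc, offs, len, buf);
}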
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index 3f31f1381a62..dce835132a1e 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -20,7 +20,7 @@ static SPINAND_OP_VARIANTS(read_cache_variants,
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig
index 337ea8b9a4c3..23763d16e4f9 100644
--- a/drivers/mtd/parsers/Kconfig
+++ b/drivers/mtd/parsers/Kconfig
@@ -115,7 +115,7 @@ config MTD_AFS_PARTS
config MTD_PARSER_TRX
tristate "Parser for TRX format partitions"
- depends on MTD && (BCM47XX || ARCH_BCM_5301X || ARCH_MEDIATEK || COMPILE_TEST)
+ depends on MTD && (BCM47XX || ARCH_BCM_5301X || ARCH_MEDIATEK || RALINK || COMPILE_TEST)
help
TRX is a firmware format used by Broadcom on their devices. It
may contain up to 3/4 partitions (depending on the version).
diff --git a/drivers/mtd/parsers/qcomsmempart.c b/drivers/mtd/parsers/qcomsmempart.c
index 06a818cd2433..4311b89d8df0 100644
--- a/drivers/mtd/parsers/qcomsmempart.c
+++ b/drivers/mtd/parsers/qcomsmempart.c
@@ -58,11 +58,11 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
const struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
+ size_t len = SMEM_FLASH_PTABLE_HDR_LEN;
+ int ret, i, j, tmpparts, numparts = 0;
struct smem_flash_pentry *pentry;
struct smem_flash_ptable *ptable;
- size_t len = SMEM_FLASH_PTABLE_HDR_LEN;
struct mtd_partition *parts;
- int ret, i, numparts;
char *name, *c;
if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS)
@@ -75,7 +75,8 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
pr_debug("Parsing partition table info from SMEM\n");
ptable = qcom_smem_get(SMEM_APPS, SMEM_AARM_PARTITION_TABLE, &len);
if (IS_ERR(ptable)) {
- pr_err("Error reading partition table header\n");
+ if (PTR_ERR(ptable) != -EPROBE_DEFER)
+ pr_err("Error reading partition table header\n");
return PTR_ERR(ptable);
}
@@ -87,8 +88,8 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
}
/* Ensure that # of partitions is less than the max we have allocated */
- numparts = le32_to_cpu(ptable->numparts);
- if (numparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) {
+ tmpparts = le32_to_cpu(ptable->numparts);
+ if (tmpparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) {
pr_err("Partition numbers exceed the max limit\n");
return -EINVAL;
}
@@ -116,11 +117,17 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
return PTR_ERR(ptable);
}
+ for (i = 0; i < tmpparts; i++) {
+ pentry = &ptable->pentry[i];
+ if (pentry->name[0] != '\0')
+ numparts++;
+ }
+
parts = kcalloc(numparts, sizeof(*parts), GFP_KERNEL);
if (!parts)
return -ENOMEM;
- for (i = 0; i < numparts; i++) {
+ for (i = 0, j = 0; i < tmpparts; i++) {
pentry = &ptable->pentry[i];
if (pentry->name[0] == '\0')
continue;
@@ -135,24 +142,25 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
for (c = name; *c != '\0'; c++)
*c = tolower(*c);
- parts[i].name = name;
- parts[i].offset = le32_to_cpu(pentry->offset) * mtd->erasesize;
- parts[i].mask_flags = pentry->attr;
- parts[i].size = le32_to_cpu(pentry->length) * mtd->erasesize;
+ parts[j].name = name;
+ parts[j].offset = le32_to_cpu(pentry->offset) * mtd->erasesize;
+ parts[j].mask_flags = pentry->attr;
+ parts[j].size = le32_to_cpu(pentry->length) * mtd->erasesize;
pr_debug("%d: %s offs=0x%08x size=0x%08x attr:0x%08x\n",
i, pentry->name, le32_to_cpu(pentry->offset),
le32_to_cpu(pentry->length), pentry->attr);
+ j++;
}
pr_debug("SMEM partition table found: ver: %d len: %d\n",
- le32_to_cpu(ptable->version), numparts);
+ le32_to_cpu(ptable->version), tmpparts);
*pparts = parts;
return numparts;
out_free_parts:
- while (--i >= 0)
- kfree(parts[i].name);
+ while (--j >= 0)
+ kfree(parts[j].name);
kfree(parts);
*pparts = NULL;
@@ -166,6 +174,8 @@ static void parse_qcomsmem_cleanup(const struct mtd_partition *pparts,
for (i = 0; i < nr_parts; i++)
kfree(pparts[i].name);
+
+ kfree(pparts);
}
static const struct of_device_id qcomsmem_of_match_table[] = {
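Editorial note: the parser fix above counts only non-empty SMEM entries before allocating, then fills the array with a separate output index so the returned partition list has no holes. A compact sketch of that two-index pattern in isolation, using hypothetical demo types rather than the parser's structures:

#include <linux/slab.h>

struct demo_entry { char name[16]; };
struct demo_part  { const char *name; };

static int demo_pack_parts(const struct demo_entry *in, int total,
			   struct demo_part **out)
{
	struct demo_part *parts;
	int i, j, used = 0;

	for (i = 0; i < total; i++)		/* first pass: count valid entries */
		if (in[i].name[0] != '\0')
			used++;

	parts = kcalloc(used, sizeof(*parts), GFP_KERNEL);
	if (!parts)
		return -ENOMEM;

	for (i = 0, j = 0; i < total; i++) {	/* second pass: fill densely */
		if (in[i].name[0] == '\0')
			continue;
		parts[j++].name = in[i].name;
	}

	*out = parts;
	return used;
}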
diff --git a/drivers/mtd/spi-nor/atmel.c b/drivers/mtd/spi-nor/atmel.c
index 1fea5cab492c..656dd80a0be7 100644
--- a/drivers/mtd/spi-nor/atmel.c
+++ b/drivers/mtd/spi-nor/atmel.c
@@ -16,12 +16,12 @@
* is to unlock the whole flash array on startup. Therefore, we have to support
* exactly this operation.
*/
-static int atmel_at25fs_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int at25fs_nor_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
return -EOPNOTSUPP;
}
-static int atmel_at25fs_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int at25fs_nor_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
int ret;
@@ -37,28 +37,28 @@ static int atmel_at25fs_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
return ret;
}
-static int atmel_at25fs_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int at25fs_nor_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
return -EOPNOTSUPP;
}
-static const struct spi_nor_locking_ops atmel_at25fs_locking_ops = {
- .lock = atmel_at25fs_lock,
- .unlock = atmel_at25fs_unlock,
- .is_locked = atmel_at25fs_is_locked,
+static const struct spi_nor_locking_ops at25fs_nor_locking_ops = {
+ .lock = at25fs_nor_lock,
+ .unlock = at25fs_nor_unlock,
+ .is_locked = at25fs_nor_is_locked,
};
-static void atmel_at25fs_default_init(struct spi_nor *nor)
+static void at25fs_nor_late_init(struct spi_nor *nor)
{
- nor->params->locking_ops = &atmel_at25fs_locking_ops;
+ nor->params->locking_ops = &at25fs_nor_locking_ops;
}
-static const struct spi_nor_fixups atmel_at25fs_fixups = {
- .default_init = atmel_at25fs_default_init,
+static const struct spi_nor_fixups at25fs_nor_fixups = {
+ .late_init = at25fs_nor_late_init,
};
/**
- * atmel_set_global_protection - Do a Global Protect or Unprotect command
+ * atmel_nor_set_global_protection - Do a Global Protect or Unprotect command
* @nor: pointer to 'struct spi_nor'
* @ofs: offset in bytes
* @len: len in bytes
@@ -66,8 +66,8 @@ static const struct spi_nor_fixups atmel_at25fs_fixups = {
*
* Return: 0 on success, -error otherwise.
*/
-static int atmel_set_global_protection(struct spi_nor *nor, loff_t ofs,
- uint64_t len, bool is_protect)
+static int atmel_nor_set_global_protection(struct spi_nor *nor, loff_t ofs,
+ uint64_t len, bool is_protect)
{
int ret;
u8 sr;
@@ -116,17 +116,20 @@ static int atmel_set_global_protection(struct spi_nor *nor, loff_t ofs,
return spi_nor_write_sr(nor, nor->bouncebuf, 1);
}
-static int atmel_global_protect(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int atmel_nor_global_protect(struct spi_nor *nor, loff_t ofs,
+ uint64_t len)
{
- return atmel_set_global_protection(nor, ofs, len, true);
+ return atmel_nor_set_global_protection(nor, ofs, len, true);
}
-static int atmel_global_unprotect(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int atmel_nor_global_unprotect(struct spi_nor *nor, loff_t ofs,
+ uint64_t len)
{
- return atmel_set_global_protection(nor, ofs, len, false);
+ return atmel_nor_set_global_protection(nor, ofs, len, false);
}
-static int atmel_is_global_protected(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int atmel_nor_is_global_protected(struct spi_nor *nor, loff_t ofs,
+ uint64_t len)
{
int ret;
@@ -140,60 +143,69 @@ static int atmel_is_global_protected(struct spi_nor *nor, loff_t ofs, uint64_t l
return ((nor->bouncebuf[0] & ATMEL_SR_GLOBAL_PROTECT_MASK) == ATMEL_SR_GLOBAL_PROTECT_MASK);
}
-static const struct spi_nor_locking_ops atmel_global_protection_ops = {
- .lock = atmel_global_protect,
- .unlock = atmel_global_unprotect,
- .is_locked = atmel_is_global_protected,
+static const struct spi_nor_locking_ops atmel_nor_global_protection_ops = {
+ .lock = atmel_nor_global_protect,
+ .unlock = atmel_nor_global_unprotect,
+ .is_locked = atmel_nor_is_global_protected,
};
-static void atmel_global_protection_default_init(struct spi_nor *nor)
+static void atmel_nor_global_protection_late_init(struct spi_nor *nor)
{
- nor->params->locking_ops = &atmel_global_protection_ops;
+ nor->params->locking_ops = &atmel_nor_global_protection_ops;
}
-static const struct spi_nor_fixups atmel_global_protection_fixups = {
- .default_init = atmel_global_protection_default_init,
+static const struct spi_nor_fixups atmel_nor_global_protection_fixups = {
+ .late_init = atmel_nor_global_protection_late_init,
};
-static const struct flash_info atmel_parts[] = {
+static const struct flash_info atmel_nor_parts[] = {
/* Atmel -- some are (confusingly) marketed as "DataFlash" */
- { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K | SPI_NOR_HAS_LOCK)
- .fixups = &atmel_at25fs_fixups },
- { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_HAS_LOCK)
- .fixups = &atmel_at25fs_fixups },
-
- { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8,
- SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- .fixups = &atmel_global_protection_fixups },
- { "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- .fixups = &atmel_global_protection_fixups },
- { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- .fixups = &atmel_global_protection_fixups },
- { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- .fixups = &atmel_global_protection_fixups },
-
- { "at25sl321", INFO(0x1f4216, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-
- { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
- { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16,
- SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- .fixups = &atmel_global_protection_fixups },
- { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- .fixups = &atmel_global_protection_fixups },
- { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- .fixups = &atmel_global_protection_fixups },
-
- { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
+ { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4)
+ FLAGS(SPI_NOR_HAS_LOCK)
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &at25fs_nor_fixups },
+ { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8)
+ FLAGS(SPI_NOR_HAS_LOCK)
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &at25fs_nor_fixups },
+ { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &atmel_nor_global_protection_fixups },
+ { "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &atmel_nor_global_protection_fixups },
+ { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &atmel_nor_global_protection_fixups },
+ { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &atmel_nor_global_protection_fixups },
+ { "at25sl321", INFO(0x1f4216, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &atmel_nor_global_protection_fixups },
+ { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &atmel_nor_global_protection_fixups },
+ { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &atmel_nor_global_protection_fixups },
+ { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16)
+ NO_SFDP_FLAGS(SECT_4K) },
};
const struct spi_nor_manufacturer spi_nor_atmel = {
.name = "atmel",
- .parts = atmel_parts,
- .nparts = ARRAY_SIZE(atmel_parts),
+ .parts = atmel_nor_parts,
+ .nparts = ARRAY_SIZE(atmel_nor_parts),
};
diff --git a/drivers/mtd/spi-nor/catalyst.c b/drivers/mtd/spi-nor/catalyst.c
index 011b83e99e95..6d310815fb12 100644
--- a/drivers/mtd/spi-nor/catalyst.c
+++ b/drivers/mtd/spi-nor/catalyst.c
@@ -8,22 +8,17 @@
#include "core.h"
-static const struct flash_info catalyst_parts[] = {
+static const struct flash_info catalyst_nor_parts[] = {
/* Catalyst / On Semiconductor -- non-JEDEC */
- { "cat25c11", CAT25_INFO(16, 8, 16, 1,
- SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "cat25c03", CAT25_INFO(32, 8, 16, 2,
- SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "cat25c09", CAT25_INFO(128, 8, 32, 2,
- SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "cat25c17", CAT25_INFO(256, 8, 32, 2,
- SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "cat25128", CAT25_INFO(2048, 8, 64, 2,
- SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "cat25c11", CAT25_INFO(16, 8, 16, 1) },
+ { "cat25c03", CAT25_INFO(32, 8, 16, 2) },
+ { "cat25c09", CAT25_INFO(128, 8, 32, 2) },
+ { "cat25c17", CAT25_INFO(256, 8, 32, 2) },
+ { "cat25128", CAT25_INFO(2048, 8, 64, 2) },
};
const struct spi_nor_manufacturer spi_nor_catalyst = {
.name = "catalyst",
- .parts = catalyst_parts,
- .nparts = ARRAY_SIZE(catalyst_parts),
+ .parts = catalyst_nor_parts,
+ .nparts = ARRAY_SIZE(catalyst_nor_parts),
};
diff --git a/drivers/mtd/spi-nor/controllers/Kconfig b/drivers/mtd/spi-nor/controllers/Kconfig
index 5c0e0ec2e6d1..50f4f3484d42 100644
--- a/drivers/mtd/spi-nor/controllers/Kconfig
+++ b/drivers/mtd/spi-nor/controllers/Kconfig
@@ -26,39 +26,3 @@ config SPI_NXP_SPIFI
SPIFI is a specialized controller for connecting serial SPI
Flash. Enable this option if you have a device with a SPIFI
controller and want to access the Flash as a mtd device.
-
-config SPI_INTEL_SPI
- tristate
-
-config SPI_INTEL_SPI_PCI
- tristate "Intel PCH/PCU SPI flash PCI driver (DANGEROUS)"
- depends on X86 && PCI
- select SPI_INTEL_SPI
- help
- This enables PCI support for the Intel PCH/PCU SPI controller in
- master mode. This controller is present in modern Intel hardware
- and is used to hold BIOS and other persistent settings. Using
- this driver it is possible to upgrade BIOS directly from Linux.
-
- Say N here unless you know what you are doing. Overwriting the
- SPI flash may render the system unbootable.
-
- To compile this driver as a module, choose M here: the module
- will be called intel-spi-pci.
-
-config SPI_INTEL_SPI_PLATFORM
- tristate "Intel PCH/PCU SPI flash platform driver (DANGEROUS)"
- depends on X86
- select SPI_INTEL_SPI
- help
- This enables platform support for the Intel PCH/PCU SPI
- controller in master mode. This controller is present in modern
- Intel hardware and is used to hold BIOS and other persistent
- settings. Using this driver it is possible to upgrade BIOS
- directly from Linux.
-
- Say N here unless you know what you are doing. Overwriting the
- SPI flash may render the system unbootable.
-
- To compile this driver as a module, choose M here: the module
- will be called intel-spi-platform.
diff --git a/drivers/mtd/spi-nor/controllers/Makefile b/drivers/mtd/spi-nor/controllers/Makefile
index e7abba491d98..6e2a1dc68466 100644
--- a/drivers/mtd/spi-nor/controllers/Makefile
+++ b/drivers/mtd/spi-nor/controllers/Makefile
@@ -2,6 +2,3 @@
obj-$(CONFIG_SPI_ASPEED_SMC) += aspeed-smc.o
obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o
obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o
-obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o
-obj-$(CONFIG_SPI_INTEL_SPI_PCI) += intel-spi-pci.o
-obj-$(CONFIG_SPI_INTEL_SPI_PLATFORM) += intel-spi-platform.o
diff --git a/drivers/mtd/spi-nor/controllers/aspeed-smc.c b/drivers/mtd/spi-nor/controllers/aspeed-smc.c
index 7225870e8b18..acfe010f9dd7 100644
--- a/drivers/mtd/spi-nor/controllers/aspeed-smc.c
+++ b/drivers/mtd/spi-nor/controllers/aspeed-smc.c
@@ -769,6 +769,7 @@ static int aspeed_smc_setup_flash(struct aspeed_smc_controller *controller,
struct device_node *child;
unsigned int cs;
int ret = -ENODEV;
+ bool found_one = false;
for_each_available_child_of_node(np, child) {
struct aspeed_smc_chip *chip;
@@ -827,8 +828,17 @@ static int aspeed_smc_setup_flash(struct aspeed_smc_controller *controller,
* by of property.
*/
ret = spi_nor_scan(nor, NULL, &hwcaps);
- if (ret)
- break;
+ /*
+ * If we fail to scan the device, it might not be present or it
+ * might be broken. Don't fail the whole controller if other chips work.
+ */
+ if (ret) {
+ if (found_one)
+ ret = 0;
+
+ devm_kfree(controller->dev, chip);
+ continue;
+ }
ret = aspeed_smc_chip_setup_finish(chip);
if (ret)
@@ -839,6 +849,7 @@ static int aspeed_smc_setup_flash(struct aspeed_smc_controller *controller,
break;
controller->chips[cs] = chip;
+ found_one = true;
}
if (ret) {
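Editorial note: the change above turns a per-child spi_nor_scan() failure into a soft error: free that chip's allocation, keep iterating, and only report failure if no chip came up at all. A hedged sketch of that loop shape with placeholder demo_* names and a stubbed per-child probe (not the controller's code):

#include <linux/device.h>
#include <linux/of.h>

static int demo_probe_one(struct device *dev, struct device_node *child)
{
	/* Scan and register one flash chip here; stubbed for the sketch. */
	return 0;
}

static int demo_probe_children(struct device *dev)
{
	struct device_node *child;
	bool found_one = false;
	int ret = -ENODEV;

	for_each_available_child_of_node(dev->of_node, child) {
		ret = demo_probe_one(dev, child);
		if (ret) {
			/* Skip a missing/broken chip, keep the rest alive. */
			if (found_one)
				ret = 0;
			continue;
		}
		found_one = true;
	}

	return ret;
}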
diff --git a/drivers/mtd/spi-nor/controllers/intel-spi-pci.c b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c
deleted file mode 100644
index 1bc53b8bb88a..000000000000
--- a/drivers/mtd/spi-nor/controllers/intel-spi-pci.c
+++ /dev/null
@@ -1,99 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel PCH/PCU SPI flash PCI driver.
- *
- * Copyright (C) 2016, Intel Corporation
- * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- */
-
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include "intel-spi.h"
-
-#define BCR 0xdc
-#define BCR_WPD BIT(0)
-
-static const struct intel_spi_boardinfo bxt_info = {
- .type = INTEL_SPI_BXT,
-};
-
-static const struct intel_spi_boardinfo cnl_info = {
- .type = INTEL_SPI_CNL,
-};
-
-static int intel_spi_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
-{
- struct intel_spi_boardinfo *info;
- struct intel_spi *ispi;
- u32 bcr;
- int ret;
-
- ret = pcim_enable_device(pdev);
- if (ret)
- return ret;
-
- info = devm_kmemdup(&pdev->dev, (void *)id->driver_data, sizeof(*info),
- GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- /* Try to make the chip read/write */
- pci_read_config_dword(pdev, BCR, &bcr);
- if (!(bcr & BCR_WPD)) {
- bcr |= BCR_WPD;
- pci_write_config_dword(pdev, BCR, bcr);
- pci_read_config_dword(pdev, BCR, &bcr);
- }
- info->writeable = !!(bcr & BCR_WPD);
-
- ispi = intel_spi_probe(&pdev->dev, &pdev->resource[0], info);
- if (IS_ERR(ispi))
- return PTR_ERR(ispi);
-
- pci_set_drvdata(pdev, ispi);
- return 0;
-}
-
-static void intel_spi_pci_remove(struct pci_dev *pdev)
-{
- intel_spi_remove(pci_get_drvdata(pdev));
-}
-
-static const struct pci_device_id intel_spi_pci_ids[] = {
- { PCI_VDEVICE(INTEL, 0x02a4), (unsigned long)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x06a4), (unsigned long)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x1bca), (unsigned long)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x43a4), (unsigned long)&cnl_info },
- { PCI_VDEVICE(INTEL, 0x4b24), (unsigned long)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x51a4), (unsigned long)&cnl_info },
- { PCI_VDEVICE(INTEL, 0x54a4), (unsigned long)&cnl_info },
- { PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info },
- { PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info },
- { PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
- { PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info },
- { PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info },
- { PCI_VDEVICE(INTEL, 0xa3a4), (unsigned long)&bxt_info },
- { },
-};
-MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids);
-
-static struct pci_driver intel_spi_pci_driver = {
- .name = "intel-spi",
- .id_table = intel_spi_pci_ids,
- .probe = intel_spi_pci_probe,
- .remove = intel_spi_pci_remove,
-};
-
-module_pci_driver(intel_spi_pci_driver);
-
-MODULE_DESCRIPTION("Intel PCH/PCU SPI flash PCI driver");
-MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/spi-nor/controllers/intel-spi-platform.c b/drivers/mtd/spi-nor/controllers/intel-spi-platform.c
deleted file mode 100644
index f80f1086f928..000000000000
--- a/drivers/mtd/spi-nor/controllers/intel-spi-platform.c
+++ /dev/null
@@ -1,54 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel PCH/PCU SPI flash platform driver.
- *
- * Copyright (C) 2016, Intel Corporation
- * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- */
-
-#include <linux/ioport.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-#include "intel-spi.h"
-
-static int intel_spi_platform_probe(struct platform_device *pdev)
-{
- struct intel_spi_boardinfo *info;
- struct intel_spi *ispi;
- struct resource *mem;
-
- info = dev_get_platdata(&pdev->dev);
- if (!info)
- return -EINVAL;
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ispi = intel_spi_probe(&pdev->dev, mem, info);
- if (IS_ERR(ispi))
- return PTR_ERR(ispi);
-
- platform_set_drvdata(pdev, ispi);
- return 0;
-}
-
-static int intel_spi_platform_remove(struct platform_device *pdev)
-{
- struct intel_spi *ispi = platform_get_drvdata(pdev);
-
- return intel_spi_remove(ispi);
-}
-
-static struct platform_driver intel_spi_platform_driver = {
- .probe = intel_spi_platform_probe,
- .remove = intel_spi_platform_remove,
- .driver = {
- .name = "intel-spi",
- },
-};
-
-module_platform_driver(intel_spi_platform_driver);
-
-MODULE_DESCRIPTION("Intel PCH/PCU SPI flash platform driver");
-MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:intel-spi");
diff --git a/drivers/mtd/spi-nor/controllers/intel-spi.c b/drivers/mtd/spi-nor/controllers/intel-spi.c
deleted file mode 100644
index a413892ff449..000000000000
--- a/drivers/mtd/spi-nor/controllers/intel-spi.c
+++ /dev/null
@@ -1,968 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel PCH/PCU SPI flash driver.
- *
- * Copyright (C) 2016, Intel Corporation
- * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- */
-
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/iopoll.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/sizes.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/spi-nor.h>
-
-#include "intel-spi.h"
-
-/* Offsets are from @ispi->base */
-#define BFPREG 0x00
-
-#define HSFSTS_CTL 0x04
-#define HSFSTS_CTL_FSMIE BIT(31)
-#define HSFSTS_CTL_FDBC_SHIFT 24
-#define HSFSTS_CTL_FDBC_MASK (0x3f << HSFSTS_CTL_FDBC_SHIFT)
-
-#define HSFSTS_CTL_FCYCLE_SHIFT 17
-#define HSFSTS_CTL_FCYCLE_MASK (0x0f << HSFSTS_CTL_FCYCLE_SHIFT)
-/* HW sequencer opcodes */
-#define HSFSTS_CTL_FCYCLE_READ (0x00 << HSFSTS_CTL_FCYCLE_SHIFT)
-#define HSFSTS_CTL_FCYCLE_WRITE (0x02 << HSFSTS_CTL_FCYCLE_SHIFT)
-#define HSFSTS_CTL_FCYCLE_ERASE (0x03 << HSFSTS_CTL_FCYCLE_SHIFT)
-#define HSFSTS_CTL_FCYCLE_ERASE_64K (0x04 << HSFSTS_CTL_FCYCLE_SHIFT)
-#define HSFSTS_CTL_FCYCLE_RDID (0x06 << HSFSTS_CTL_FCYCLE_SHIFT)
-#define HSFSTS_CTL_FCYCLE_WRSR (0x07 << HSFSTS_CTL_FCYCLE_SHIFT)
-#define HSFSTS_CTL_FCYCLE_RDSR (0x08 << HSFSTS_CTL_FCYCLE_SHIFT)
-
-#define HSFSTS_CTL_FGO BIT(16)
-#define HSFSTS_CTL_FLOCKDN BIT(15)
-#define HSFSTS_CTL_FDV BIT(14)
-#define HSFSTS_CTL_SCIP BIT(5)
-#define HSFSTS_CTL_AEL BIT(2)
-#define HSFSTS_CTL_FCERR BIT(1)
-#define HSFSTS_CTL_FDONE BIT(0)
-
-#define FADDR 0x08
-#define DLOCK 0x0c
-#define FDATA(n) (0x10 + ((n) * 4))
-
-#define FRACC 0x50
-
-#define FREG(n) (0x54 + ((n) * 4))
-#define FREG_BASE_MASK 0x3fff
-#define FREG_LIMIT_SHIFT 16
-#define FREG_LIMIT_MASK (0x03fff << FREG_LIMIT_SHIFT)
-
-/* Offset is from @ispi->pregs */
-#define PR(n) ((n) * 4)
-#define PR_WPE BIT(31)
-#define PR_LIMIT_SHIFT 16
-#define PR_LIMIT_MASK (0x3fff << PR_LIMIT_SHIFT)
-#define PR_RPE BIT(15)
-#define PR_BASE_MASK 0x3fff
-
-/* Offsets are from @ispi->sregs */
-#define SSFSTS_CTL 0x00
-#define SSFSTS_CTL_FSMIE BIT(23)
-#define SSFSTS_CTL_DS BIT(22)
-#define SSFSTS_CTL_DBC_SHIFT 16
-#define SSFSTS_CTL_SPOP BIT(11)
-#define SSFSTS_CTL_ACS BIT(10)
-#define SSFSTS_CTL_SCGO BIT(9)
-#define SSFSTS_CTL_COP_SHIFT 12
-#define SSFSTS_CTL_FRS BIT(7)
-#define SSFSTS_CTL_DOFRS BIT(6)
-#define SSFSTS_CTL_AEL BIT(4)
-#define SSFSTS_CTL_FCERR BIT(3)
-#define SSFSTS_CTL_FDONE BIT(2)
-#define SSFSTS_CTL_SCIP BIT(0)
-
-#define PREOP_OPTYPE 0x04
-#define OPMENU0 0x08
-#define OPMENU1 0x0c
-
-#define OPTYPE_READ_NO_ADDR 0
-#define OPTYPE_WRITE_NO_ADDR 1
-#define OPTYPE_READ_WITH_ADDR 2
-#define OPTYPE_WRITE_WITH_ADDR 3
-
-/* CPU specifics */
-#define BYT_PR 0x74
-#define BYT_SSFSTS_CTL 0x90
-#define BYT_BCR 0xfc
-#define BYT_BCR_WPD BIT(0)
-#define BYT_FREG_NUM 5
-#define BYT_PR_NUM 5
-
-#define LPT_PR 0x74
-#define LPT_SSFSTS_CTL 0x90
-#define LPT_FREG_NUM 5
-#define LPT_PR_NUM 5
-
-#define BXT_PR 0x84
-#define BXT_SSFSTS_CTL 0xa0
-#define BXT_FREG_NUM 12
-#define BXT_PR_NUM 6
-
-#define CNL_PR 0x84
-#define CNL_FREG_NUM 6
-#define CNL_PR_NUM 5
-
-#define LVSCC 0xc4
-#define UVSCC 0xc8
-#define ERASE_OPCODE_SHIFT 8
-#define ERASE_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT)
-#define ERASE_64K_OPCODE_SHIFT 16
-#define ERASE_64K_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT)
-
-#define INTEL_SPI_TIMEOUT 5000 /* ms */
-#define INTEL_SPI_FIFO_SZ 64
-
-/**
- * struct intel_spi - Driver private data
- * @dev: Device pointer
- * @info: Pointer to board specific info
- * @nor: SPI NOR layer structure
- * @base: Beginning of MMIO space
- * @pregs: Start of protection registers
- * @sregs: Start of software sequencer registers
- * @nregions: Maximum number of regions
- * @pr_num: Maximum number of protected range registers
- * @writeable: Is the chip writeable
- * @locked: Is SPI setting locked
- * @swseq_reg: Use SW sequencer in register reads/writes
- * @swseq_erase: Use SW sequencer in erase operation
- * @erase_64k: 64k erase supported
- * @atomic_preopcode: Holds preopcode when atomic sequence is requested
- * @opcodes: Opcodes which are supported. This are programmed by BIOS
- * before it locks down the controller.
- */
-struct intel_spi {
- struct device *dev;
- const struct intel_spi_boardinfo *info;
- struct spi_nor nor;
- void __iomem *base;
- void __iomem *pregs;
- void __iomem *sregs;
- size_t nregions;
- size_t pr_num;
- bool writeable;
- bool locked;
- bool swseq_reg;
- bool swseq_erase;
- bool erase_64k;
- u8 atomic_preopcode;
- u8 opcodes[8];
-};
-
-static bool writeable;
-module_param(writeable, bool, 0);
-MODULE_PARM_DESC(writeable, "Enable write access to SPI flash chip (default=0)");
-
-static void intel_spi_dump_regs(struct intel_spi *ispi)
-{
- u32 value;
- int i;
-
- dev_dbg(ispi->dev, "BFPREG=0x%08x\n", readl(ispi->base + BFPREG));
-
- value = readl(ispi->base + HSFSTS_CTL);
- dev_dbg(ispi->dev, "HSFSTS_CTL=0x%08x\n", value);
- if (value & HSFSTS_CTL_FLOCKDN)
- dev_dbg(ispi->dev, "-> Locked\n");
-
- dev_dbg(ispi->dev, "FADDR=0x%08x\n", readl(ispi->base + FADDR));
- dev_dbg(ispi->dev, "DLOCK=0x%08x\n", readl(ispi->base + DLOCK));
-
- for (i = 0; i < 16; i++)
- dev_dbg(ispi->dev, "FDATA(%d)=0x%08x\n",
- i, readl(ispi->base + FDATA(i)));
-
- dev_dbg(ispi->dev, "FRACC=0x%08x\n", readl(ispi->base + FRACC));
-
- for (i = 0; i < ispi->nregions; i++)
- dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i,
- readl(ispi->base + FREG(i)));
- for (i = 0; i < ispi->pr_num; i++)
- dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i,
- readl(ispi->pregs + PR(i)));
-
- if (ispi->sregs) {
- value = readl(ispi->sregs + SSFSTS_CTL);
- dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value);
- dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n",
- readl(ispi->sregs + PREOP_OPTYPE));
- dev_dbg(ispi->dev, "OPMENU0=0x%08x\n",
- readl(ispi->sregs + OPMENU0));
- dev_dbg(ispi->dev, "OPMENU1=0x%08x\n",
- readl(ispi->sregs + OPMENU1));
- }
-
- if (ispi->info->type == INTEL_SPI_BYT)
- dev_dbg(ispi->dev, "BCR=0x%08x\n", readl(ispi->base + BYT_BCR));
-
- dev_dbg(ispi->dev, "LVSCC=0x%08x\n", readl(ispi->base + LVSCC));
- dev_dbg(ispi->dev, "UVSCC=0x%08x\n", readl(ispi->base + UVSCC));
-
- dev_dbg(ispi->dev, "Protected regions:\n");
- for (i = 0; i < ispi->pr_num; i++) {
- u32 base, limit;
-
- value = readl(ispi->pregs + PR(i));
- if (!(value & (PR_WPE | PR_RPE)))
- continue;
-
- limit = (value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
- base = value & PR_BASE_MASK;
-
- dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x [%c%c]\n",
- i, base << 12, (limit << 12) | 0xfff,
- value & PR_WPE ? 'W' : '.',
- value & PR_RPE ? 'R' : '.');
- }
-
- dev_dbg(ispi->dev, "Flash regions:\n");
- for (i = 0; i < ispi->nregions; i++) {
- u32 region, base, limit;
-
- region = readl(ispi->base + FREG(i));
- base = region & FREG_BASE_MASK;
- limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;
-
- if (base >= limit || (i > 0 && limit == 0))
- dev_dbg(ispi->dev, " %02d disabled\n", i);
- else
- dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x\n",
- i, base << 12, (limit << 12) | 0xfff);
- }
-
- dev_dbg(ispi->dev, "Using %cW sequencer for register access\n",
- ispi->swseq_reg ? 'S' : 'H');
- dev_dbg(ispi->dev, "Using %cW sequencer for erase operation\n",
- ispi->swseq_erase ? 'S' : 'H');
-}
-
-/* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */
-static int intel_spi_read_block(struct intel_spi *ispi, void *buf, size_t size)
-{
- size_t bytes;
- int i = 0;
-
- if (size > INTEL_SPI_FIFO_SZ)
- return -EINVAL;
-
- while (size > 0) {
- bytes = min_t(size_t, size, 4);
- memcpy_fromio(buf, ispi->base + FDATA(i), bytes);
- size -= bytes;
- buf += bytes;
- i++;
- }
-
- return 0;
-}
-
-/* Writes max INTEL_SPI_FIFO_SZ bytes to the device fifo */
-static int intel_spi_write_block(struct intel_spi *ispi, const void *buf,
- size_t size)
-{
- size_t bytes;
- int i = 0;
-
- if (size > INTEL_SPI_FIFO_SZ)
- return -EINVAL;
-
- while (size > 0) {
- bytes = min_t(size_t, size, 4);
- memcpy_toio(ispi->base + FDATA(i), buf, bytes);
- size -= bytes;
- buf += bytes;
- i++;
- }
-
- return 0;
-}
-
-static int intel_spi_wait_hw_busy(struct intel_spi *ispi)
-{
- u32 val;
-
- return readl_poll_timeout(ispi->base + HSFSTS_CTL, val,
- !(val & HSFSTS_CTL_SCIP), 0,
- INTEL_SPI_TIMEOUT * 1000);
-}
-
-static int intel_spi_wait_sw_busy(struct intel_spi *ispi)
-{
- u32 val;
-
- return readl_poll_timeout(ispi->sregs + SSFSTS_CTL, val,
- !(val & SSFSTS_CTL_SCIP), 0,
- INTEL_SPI_TIMEOUT * 1000);
-}
-
-static int intel_spi_init(struct intel_spi *ispi)
-{
- u32 opmenu0, opmenu1, lvscc, uvscc, val;
- int i;
-
- switch (ispi->info->type) {
- case INTEL_SPI_BYT:
- ispi->sregs = ispi->base + BYT_SSFSTS_CTL;
- ispi->pregs = ispi->base + BYT_PR;
- ispi->nregions = BYT_FREG_NUM;
- ispi->pr_num = BYT_PR_NUM;
- ispi->swseq_reg = true;
-
- if (writeable) {
- /* Disable write protection */
- val = readl(ispi->base + BYT_BCR);
- if (!(val & BYT_BCR_WPD)) {
- val |= BYT_BCR_WPD;
- writel(val, ispi->base + BYT_BCR);
- val = readl(ispi->base + BYT_BCR);
- }
-
- ispi->writeable = !!(val & BYT_BCR_WPD);
- }
-
- break;
-
- case INTEL_SPI_LPT:
- ispi->sregs = ispi->base + LPT_SSFSTS_CTL;
- ispi->pregs = ispi->base + LPT_PR;
- ispi->nregions = LPT_FREG_NUM;
- ispi->pr_num = LPT_PR_NUM;
- ispi->swseq_reg = true;
- break;
-
- case INTEL_SPI_BXT:
- ispi->sregs = ispi->base + BXT_SSFSTS_CTL;
- ispi->pregs = ispi->base + BXT_PR;
- ispi->nregions = BXT_FREG_NUM;
- ispi->pr_num = BXT_PR_NUM;
- ispi->erase_64k = true;
- break;
-
- case INTEL_SPI_CNL:
- ispi->sregs = NULL;
- ispi->pregs = ispi->base + CNL_PR;
- ispi->nregions = CNL_FREG_NUM;
- ispi->pr_num = CNL_PR_NUM;
- break;
-
- default:
- return -EINVAL;
- }
-
- /* Disable #SMI generation from HW sequencer */
- val = readl(ispi->base + HSFSTS_CTL);
- val &= ~HSFSTS_CTL_FSMIE;
- writel(val, ispi->base + HSFSTS_CTL);
-
- /*
- * Determine whether erase operation should use HW or SW sequencer.
- *
- * The HW sequencer has a predefined list of opcodes, with only the
- * erase opcode being programmable in LVSCC and UVSCC registers.
- * If these registers don't contain a valid erase opcode, erase
- * cannot be done using HW sequencer.
- */
- lvscc = readl(ispi->base + LVSCC);
- uvscc = readl(ispi->base + UVSCC);
- if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK))
- ispi->swseq_erase = true;
- /* SPI controller on Intel BXT supports 64K erase opcode */
- if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase)
- if (!(lvscc & ERASE_64K_OPCODE_MASK) ||
- !(uvscc & ERASE_64K_OPCODE_MASK))
- ispi->erase_64k = false;
-
- if (ispi->sregs == NULL && (ispi->swseq_reg || ispi->swseq_erase)) {
- dev_err(ispi->dev, "software sequencer not supported, but required\n");
- return -EINVAL;
- }
-
- /*
- * Some controllers can only do basic operations using hardware
- * sequencer. All other operations are supposed to be carried out
- * using software sequencer.
- */
- if (ispi->swseq_reg) {
- /* Disable #SMI generation from SW sequencer */
- val = readl(ispi->sregs + SSFSTS_CTL);
- val &= ~SSFSTS_CTL_FSMIE;
- writel(val, ispi->sregs + SSFSTS_CTL);
- }
-
- /* Check controller's lock status */
- val = readl(ispi->base + HSFSTS_CTL);
- ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN);
-
- if (ispi->locked && ispi->sregs) {
- /*
- * BIOS programs allowed opcodes and then locks down the
- * register. So read back what opcodes it decided to support.
- * That's the set we are going to support as well.
- */
- opmenu0 = readl(ispi->sregs + OPMENU0);
- opmenu1 = readl(ispi->sregs + OPMENU1);
-
- if (opmenu0 && opmenu1) {
- for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
- ispi->opcodes[i] = opmenu0 >> i * 8;
- ispi->opcodes[i + 4] = opmenu1 >> i * 8;
- }
- }
- }
-
- intel_spi_dump_regs(ispi);
-
- return 0;
-}
-
-static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype)
-{
- int i;
- int preop;
-
- if (ispi->locked) {
- for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++)
- if (ispi->opcodes[i] == opcode)
- return i;
-
- return -EINVAL;
- }
-
- /* The lock is off, so just use index 0 */
- writel(opcode, ispi->sregs + OPMENU0);
- preop = readw(ispi->sregs + PREOP_OPTYPE);
- writel(optype << 16 | preop, ispi->sregs + PREOP_OPTYPE);
-
- return 0;
-}
-
-static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, size_t len)
-{
- u32 val, status;
- int ret;
-
- val = readl(ispi->base + HSFSTS_CTL);
- val &= ~(HSFSTS_CTL_FCYCLE_MASK | HSFSTS_CTL_FDBC_MASK);
-
- switch (opcode) {
- case SPINOR_OP_RDID:
- val |= HSFSTS_CTL_FCYCLE_RDID;
- break;
- case SPINOR_OP_WRSR:
- val |= HSFSTS_CTL_FCYCLE_WRSR;
- break;
- case SPINOR_OP_RDSR:
- val |= HSFSTS_CTL_FCYCLE_RDSR;
- break;
- default:
- return -EINVAL;
- }
-
- if (len > INTEL_SPI_FIFO_SZ)
- return -EINVAL;
-
- val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT;
- val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
- val |= HSFSTS_CTL_FGO;
- writel(val, ispi->base + HSFSTS_CTL);
-
- ret = intel_spi_wait_hw_busy(ispi);
- if (ret)
- return ret;
-
- status = readl(ispi->base + HSFSTS_CTL);
- if (status & HSFSTS_CTL_FCERR)
- return -EIO;
- else if (status & HSFSTS_CTL_AEL)
- return -EACCES;
-
- return 0;
-}
-
-static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, size_t len,
- int optype)
-{
- u32 val = 0, status;
- u8 atomic_preopcode;
- int ret;
-
- ret = intel_spi_opcode_index(ispi, opcode, optype);
- if (ret < 0)
- return ret;
-
- if (len > INTEL_SPI_FIFO_SZ)
- return -EINVAL;
-
- /*
- * Always clear it after each SW sequencer operation regardless
- * of whether it is successful or not.
- */
- atomic_preopcode = ispi->atomic_preopcode;
- ispi->atomic_preopcode = 0;
-
- /* Only mark 'Data Cycle' bit when there is data to be transferred */
- if (len > 0)
- val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
- val |= ret << SSFSTS_CTL_COP_SHIFT;
- val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE;
- val |= SSFSTS_CTL_SCGO;
- if (atomic_preopcode) {
- u16 preop;
-
- switch (optype) {
- case OPTYPE_WRITE_NO_ADDR:
- case OPTYPE_WRITE_WITH_ADDR:
- /* Pick matching preopcode for the atomic sequence */
- preop = readw(ispi->sregs + PREOP_OPTYPE);
- if ((preop & 0xff) == atomic_preopcode)
- ; /* Do nothing */
- else if ((preop >> 8) == atomic_preopcode)
- val |= SSFSTS_CTL_SPOP;
- else
- return -EINVAL;
-
- /* Enable atomic sequence */
- val |= SSFSTS_CTL_ACS;
- break;
-
- default:
- return -EINVAL;
- }
-
- }
- writel(val, ispi->sregs + SSFSTS_CTL);
-
- ret = intel_spi_wait_sw_busy(ispi);
- if (ret)
- return ret;
-
- status = readl(ispi->sregs + SSFSTS_CTL);
- if (status & SSFSTS_CTL_FCERR)
- return -EIO;
- else if (status & SSFSTS_CTL_AEL)
- return -EACCES;
-
- return 0;
-}
-
-static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
- size_t len)
-{
- struct intel_spi *ispi = nor->priv;
- int ret;
-
- /* Address of the first chip */
- writel(0, ispi->base + FADDR);
-
- if (ispi->swseq_reg)
- ret = intel_spi_sw_cycle(ispi, opcode, len,
- OPTYPE_READ_NO_ADDR);
- else
- ret = intel_spi_hw_cycle(ispi, opcode, len);
-
- if (ret)
- return ret;
-
- return intel_spi_read_block(ispi, buf, len);
-}
-
-static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
- size_t len)
-{
- struct intel_spi *ispi = nor->priv;
- int ret;
-
- /*
- * This is handled with atomic operation and preop code in Intel
- * controller so we only verify that it is available. If the
- * controller is not locked, program the opcode to the PREOP
- * register for later use.
- *
- * When hardware sequencer is used there is no need to program
- * any opcodes (it handles them automatically as part of a command).
- */
- if (opcode == SPINOR_OP_WREN) {
- u16 preop;
-
- if (!ispi->swseq_reg)
- return 0;
-
- preop = readw(ispi->sregs + PREOP_OPTYPE);
- if ((preop & 0xff) != opcode && (preop >> 8) != opcode) {
- if (ispi->locked)
- return -EINVAL;
- writel(opcode, ispi->sregs + PREOP_OPTYPE);
- }
-
- /*
- * This enables atomic sequence on next SW sycle. Will
- * be cleared after next operation.
- */
- ispi->atomic_preopcode = opcode;
- return 0;
- }
-
- /*
- * We hope that HW sequencer will do the right thing automatically and
- * with the SW sequencer we cannot use preopcode anyway, so just ignore
- * the Write Disable operation and pretend it was completed
- * successfully.
- */
- if (opcode == SPINOR_OP_WRDI)
- return 0;
-
- writel(0, ispi->base + FADDR);
-
- /* Write the value beforehand */
- ret = intel_spi_write_block(ispi, buf, len);
- if (ret)
- return ret;
-
- if (ispi->swseq_reg)
- return intel_spi_sw_cycle(ispi, opcode, len,
- OPTYPE_WRITE_NO_ADDR);
- return intel_spi_hw_cycle(ispi, opcode, len);
-}
-
-static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
- u_char *read_buf)
-{
- struct intel_spi *ispi = nor->priv;
- size_t block_size, retlen = 0;
- u32 val, status;
- ssize_t ret;
-
- /*
- * Atomic sequence is not expected with HW sequencer reads. Make
- * sure it is cleared regardless.
- */
- if (WARN_ON_ONCE(ispi->atomic_preopcode))
- ispi->atomic_preopcode = 0;
-
- switch (nor->read_opcode) {
- case SPINOR_OP_READ:
- case SPINOR_OP_READ_FAST:
- case SPINOR_OP_READ_4B:
- case SPINOR_OP_READ_FAST_4B:
- break;
- default:
- return -EINVAL;
- }
-
- while (len > 0) {
- block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
-
- /* Read cannot cross 4K boundary */
- block_size = min_t(loff_t, from + block_size,
- round_up(from + 1, SZ_4K)) - from;
-
- writel(from, ispi->base + FADDR);
-
- val = readl(ispi->base + HSFSTS_CTL);
- val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
- val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
- val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
- val |= HSFSTS_CTL_FCYCLE_READ;
- val |= HSFSTS_CTL_FGO;
- writel(val, ispi->base + HSFSTS_CTL);
-
- ret = intel_spi_wait_hw_busy(ispi);
- if (ret)
- return ret;
-
- status = readl(ispi->base + HSFSTS_CTL);
- if (status & HSFSTS_CTL_FCERR)
- ret = -EIO;
- else if (status & HSFSTS_CTL_AEL)
- ret = -EACCES;
-
- if (ret < 0) {
- dev_err(ispi->dev, "read error: %llx: %#x\n", from,
- status);
- return ret;
- }
-
- ret = intel_spi_read_block(ispi, read_buf, block_size);
- if (ret)
- return ret;
-
- len -= block_size;
- from += block_size;
- retlen += block_size;
- read_buf += block_size;
- }
-
- return retlen;
-}
-
-static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
- const u_char *write_buf)
-{
- struct intel_spi *ispi = nor->priv;
- size_t block_size, retlen = 0;
- u32 val, status;
- ssize_t ret;
-
- /* Not needed with HW sequencer write, make sure it is cleared */
- ispi->atomic_preopcode = 0;
-
- while (len > 0) {
- block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
-
- /* Write cannot cross 4K boundary */
- block_size = min_t(loff_t, to + block_size,
- round_up(to + 1, SZ_4K)) - to;
-
- writel(to, ispi->base + FADDR);
-
- val = readl(ispi->base + HSFSTS_CTL);
- val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
- val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
- val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
- val |= HSFSTS_CTL_FCYCLE_WRITE;
-
- ret = intel_spi_write_block(ispi, write_buf, block_size);
- if (ret) {
- dev_err(ispi->dev, "failed to write block\n");
- return ret;
- }
-
- /* Start the write now */
- val |= HSFSTS_CTL_FGO;
- writel(val, ispi->base + HSFSTS_CTL);
-
- ret = intel_spi_wait_hw_busy(ispi);
- if (ret) {
- dev_err(ispi->dev, "timeout\n");
- return ret;
- }
-
- status = readl(ispi->base + HSFSTS_CTL);
- if (status & HSFSTS_CTL_FCERR)
- ret = -EIO;
- else if (status & HSFSTS_CTL_AEL)
- ret = -EACCES;
-
- if (ret < 0) {
- dev_err(ispi->dev, "write error: %llx: %#x\n", to,
- status);
- return ret;
- }
-
- len -= block_size;
- to += block_size;
- retlen += block_size;
- write_buf += block_size;
- }
-
- return retlen;
-}
-
-static int intel_spi_erase(struct spi_nor *nor, loff_t offs)
-{
- size_t erase_size, len = nor->mtd.erasesize;
- struct intel_spi *ispi = nor->priv;
- u32 val, status, cmd;
- int ret;
-
- /* If the hardware can do 64k erase use that when possible */
- if (len >= SZ_64K && ispi->erase_64k) {
- cmd = HSFSTS_CTL_FCYCLE_ERASE_64K;
- erase_size = SZ_64K;
- } else {
- cmd = HSFSTS_CTL_FCYCLE_ERASE;
- erase_size = SZ_4K;
- }
-
- if (ispi->swseq_erase) {
- while (len > 0) {
- writel(offs, ispi->base + FADDR);
-
- ret = intel_spi_sw_cycle(ispi, nor->erase_opcode,
- 0, OPTYPE_WRITE_WITH_ADDR);
- if (ret)
- return ret;
-
- offs += erase_size;
- len -= erase_size;
- }
-
- return 0;
- }
-
- /* Not needed with HW sequencer erase, make sure it is cleared */
- ispi->atomic_preopcode = 0;
-
- while (len > 0) {
- writel(offs, ispi->base + FADDR);
-
- val = readl(ispi->base + HSFSTS_CTL);
- val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
- val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
- val |= cmd;
- val |= HSFSTS_CTL_FGO;
- writel(val, ispi->base + HSFSTS_CTL);
-
- ret = intel_spi_wait_hw_busy(ispi);
- if (ret)
- return ret;
-
- status = readl(ispi->base + HSFSTS_CTL);
- if (status & HSFSTS_CTL_FCERR)
- return -EIO;
- else if (status & HSFSTS_CTL_AEL)
- return -EACCES;
-
- offs += erase_size;
- len -= erase_size;
- }
-
- return 0;
-}
-
-static bool intel_spi_is_protected(const struct intel_spi *ispi,
- unsigned int base, unsigned int limit)
-{
- int i;
-
- for (i = 0; i < ispi->pr_num; i++) {
- u32 pr_base, pr_limit, pr_value;
-
- pr_value = readl(ispi->pregs + PR(i));
- if (!(pr_value & (PR_WPE | PR_RPE)))
- continue;
-
- pr_limit = (pr_value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
- pr_base = pr_value & PR_BASE_MASK;
-
- if (pr_base >= base && pr_limit <= limit)
- return true;
- }
-
- return false;
-}
-
-/*
- * There will be a single partition holding all enabled flash regions. We
- * call this "BIOS".
- */
-static void intel_spi_fill_partition(struct intel_spi *ispi,
- struct mtd_partition *part)
-{
- u64 end;
- int i;
-
- memset(part, 0, sizeof(*part));
-
- /* Start from the mandatory descriptor region */
- part->size = 4096;
- part->name = "BIOS";
-
- /*
- * Now try to find where this partition ends based on the flash
- * region registers.
- */
- for (i = 1; i < ispi->nregions; i++) {
- u32 region, base, limit;
-
- region = readl(ispi->base + FREG(i));
- base = region & FREG_BASE_MASK;
- limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;
-
- if (base >= limit || limit == 0)
- continue;
-
- /*
- * If any of the regions have protection bits set, make the
- * whole partition read-only to be on the safe side.
- */
- if (intel_spi_is_protected(ispi, base, limit))
- ispi->writeable = false;
-
- end = (limit << 12) + 4096;
- if (end > part->size)
- part->size = end;
- }
-}
-
-static const struct spi_nor_controller_ops intel_spi_controller_ops = {
- .read_reg = intel_spi_read_reg,
- .write_reg = intel_spi_write_reg,
- .read = intel_spi_read,
- .write = intel_spi_write,
- .erase = intel_spi_erase,
-};
-
-struct intel_spi *intel_spi_probe(struct device *dev,
- struct resource *mem, const struct intel_spi_boardinfo *info)
-{
- const struct spi_nor_hwcaps hwcaps = {
- .mask = SNOR_HWCAPS_READ |
- SNOR_HWCAPS_READ_FAST |
- SNOR_HWCAPS_PP,
- };
- struct mtd_partition part;
- struct intel_spi *ispi;
- int ret;
-
- if (!info || !mem)
- return ERR_PTR(-EINVAL);
-
- ispi = devm_kzalloc(dev, sizeof(*ispi), GFP_KERNEL);
- if (!ispi)
- return ERR_PTR(-ENOMEM);
-
- ispi->base = devm_ioremap_resource(dev, mem);
- if (IS_ERR(ispi->base))
- return ERR_CAST(ispi->base);
-
- ispi->dev = dev;
- ispi->info = info;
- ispi->writeable = info->writeable;
-
- ret = intel_spi_init(ispi);
- if (ret)
- return ERR_PTR(ret);
-
- ispi->nor.dev = ispi->dev;
- ispi->nor.priv = ispi;
- ispi->nor.controller_ops = &intel_spi_controller_ops;
-
- ret = spi_nor_scan(&ispi->nor, NULL, &hwcaps);
- if (ret) {
- dev_info(dev, "failed to locate the chip\n");
- return ERR_PTR(ret);
- }
-
- intel_spi_fill_partition(ispi, &part);
-
- /* Prevent writes if not explicitly enabled */
- if (!ispi->writeable || !writeable)
- ispi->nor.mtd.flags &= ~MTD_WRITEABLE;
-
- ret = mtd_device_register(&ispi->nor.mtd, &part, 1);
- if (ret)
- return ERR_PTR(ret);
-
- return ispi;
-}
-EXPORT_SYMBOL_GPL(intel_spi_probe);
-
-int intel_spi_remove(struct intel_spi *ispi)
-{
- return mtd_device_unregister(&ispi->nor.mtd);
-}
-EXPORT_SYMBOL_GPL(intel_spi_remove);
-
-MODULE_DESCRIPTION("Intel PCH/PCU SPI flash core driver");
-MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/spi-nor/controllers/intel-spi.h b/drivers/mtd/spi-nor/controllers/intel-spi.h
deleted file mode 100644
index f2871179fd34..000000000000
--- a/drivers/mtd/spi-nor/controllers/intel-spi.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel PCH/PCU SPI flash driver.
- *
- * Copyright (C) 2016, Intel Corporation
- * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- */
-
-#ifndef INTEL_SPI_H
-#define INTEL_SPI_H
-
-#include <linux/platform_data/x86/intel-spi.h>
-
-struct intel_spi;
-struct resource;
-
-struct intel_spi *intel_spi_probe(struct device *dev,
- struct resource *mem, const struct intel_spi_boardinfo *info);
-int intel_spi_remove(struct intel_spi *ispi);
-
-#endif /* INTEL_SPI_H */
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index cc08bd707378..b4f141ad9c9c 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -157,8 +157,8 @@ static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op)
return spi_mem_exec_op(nor->spimem, op);
}
-static int spi_nor_controller_ops_read_reg(struct spi_nor *nor, u8 opcode,
- u8 *buf, size_t len)
+int spi_nor_controller_ops_read_reg(struct spi_nor *nor, u8 opcode,
+ u8 *buf, size_t len)
{
if (spi_nor_protocol_is_dtr(nor->reg_proto))
return -EOPNOTSUPP;
@@ -166,8 +166,8 @@ static int spi_nor_controller_ops_read_reg(struct spi_nor *nor, u8 opcode,
return nor->controller_ops->read_reg(nor, opcode, buf, len);
}
-static int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode,
- const u8 *buf, size_t len)
+int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode,
+ const u8 *buf, size_t len)
{
if (spi_nor_protocol_is_dtr(nor->reg_proto))
return -EOPNOTSUPP;
@@ -413,50 +413,6 @@ int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
}
/**
- * spi_nor_read_fsr() - Read the Flag Status Register.
- * @nor: pointer to 'struct spi_nor'
- * @fsr: pointer to a DMA-able buffer where the value of the
- * Flag Status Register will be written. Should be at least 2
- * bytes.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_read_fsr(struct spi_nor *nor, u8 *fsr)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 0),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, fsr, 0));
-
- if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
- op.addr.nbytes = nor->params->rdsr_addr_nbytes;
- op.dummy.nbytes = nor->params->rdsr_dummy;
- /*
- * We don't want to read only one byte in DTR mode. So,
- * read 2 and then discard the second byte.
- */
- op.data.nbytes = 2;
- }
-
- spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDFSR, fsr,
- 1);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d reading FSR\n", ret);
-
- return ret;
-}
-
-/**
* spi_nor_read_cr() - Read the Configuration Register using the
* SPINOR_OP_RDCR (35h) command.
* @nor: pointer to 'struct spi_nor'
@@ -599,189 +555,21 @@ int spi_nor_write_ear(struct spi_nor *nor, u8 ear)
}
/**
- * spi_nor_xread_sr() - Read the Status Register on S3AN flashes.
- * @nor: pointer to 'struct spi_nor'.
- * @sr: pointer to a DMA-able buffer where the value of the
- * Status Register will be written.
- *
- * Return: 0 on success, -errno otherwise.
- */
-int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 0),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, sr, 0));
-
- spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_XRDSR, sr,
- 1);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d reading XRDSR\n", ret);
-
- return ret;
-}
-
-/**
- * spi_nor_xsr_ready() - Query the Status Register of the S3AN flash to see if
- * the flash is ready for new commands.
- * @nor: pointer to 'struct spi_nor'.
- *
- * Return: 1 if ready, 0 if not ready, -errno on errors.
- */
-static int spi_nor_xsr_ready(struct spi_nor *nor)
-{
- int ret;
-
- ret = spi_nor_xread_sr(nor, nor->bouncebuf);
- if (ret)
- return ret;
-
- return !!(nor->bouncebuf[0] & XSR_RDY);
-}
-
-/**
- * spi_nor_clear_sr() - Clear the Status Register.
- * @nor: pointer to 'struct spi_nor'.
- */
-static void spi_nor_clear_sr(struct spi_nor *nor)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 0),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_NO_DATA);
-
- spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLSR,
- NULL, 0);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d clearing SR\n", ret);
-}
-
-/**
* spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
* for new commands.
* @nor: pointer to 'struct spi_nor'.
*
* Return: 1 if ready, 0 if not ready, -errno on errors.
*/
-static int spi_nor_sr_ready(struct spi_nor *nor)
-{
- int ret = spi_nor_read_sr(nor, nor->bouncebuf);
-
- if (ret)
- return ret;
-
- if (nor->flags & SNOR_F_USE_CLSR &&
- nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) {
- if (nor->bouncebuf[0] & SR_E_ERR)
- dev_err(nor->dev, "Erase Error occurred\n");
- else
- dev_err(nor->dev, "Programming Error occurred\n");
-
- spi_nor_clear_sr(nor);
-
- /*
- * WEL bit remains set to one when an erase or page program
- * error occurs. Issue a Write Disable command to protect
- * against inadvertent writes that can possibly corrupt the
- * contents of the memory.
- */
- ret = spi_nor_write_disable(nor);
- if (ret)
- return ret;
-
- return -EIO;
- }
-
- return !(nor->bouncebuf[0] & SR_WIP);
-}
-
-/**
- * spi_nor_clear_fsr() - Clear the Flag Status Register.
- * @nor: pointer to 'struct spi_nor'.
- */
-static void spi_nor_clear_fsr(struct spi_nor *nor)
+int spi_nor_sr_ready(struct spi_nor *nor)
{
int ret;
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 0),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_NO_DATA);
-
- spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLFSR,
- NULL, 0);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d clearing FSR\n", ret);
-}
-
-/**
- * spi_nor_fsr_ready() - Query the Flag Status Register to see if the flash is
- * ready for new commands.
- * @nor: pointer to 'struct spi_nor'.
- *
- * Return: 1 if ready, 0 if not ready, -errno on errors.
- */
-static int spi_nor_fsr_ready(struct spi_nor *nor)
-{
- int ret = spi_nor_read_fsr(nor, nor->bouncebuf);
-
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
if (ret)
return ret;
- if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) {
- if (nor->bouncebuf[0] & FSR_E_ERR)
- dev_err(nor->dev, "Erase operation failed.\n");
- else
- dev_err(nor->dev, "Program operation failed.\n");
-
- if (nor->bouncebuf[0] & FSR_PT_ERR)
- dev_err(nor->dev,
- "Attempted to modify a protected sector.\n");
-
- spi_nor_clear_fsr(nor);
-
- /*
- * WEL bit remains set to one when an erase or page program
- * error occurs. Issue a Write Disable command to protect
- * against inadvertent writes that can possibly corrupt the
- * contents of the memory.
- */
- ret = spi_nor_write_disable(nor);
- if (ret)
- return ret;
-
- return -EIO;
- }
-
- return !!(nor->bouncebuf[0] & FSR_READY);
+ return !(nor->bouncebuf[0] & SR_WIP);
}
/**
@@ -792,18 +580,11 @@ static int spi_nor_fsr_ready(struct spi_nor *nor)
*/
static int spi_nor_ready(struct spi_nor *nor)
{
- int sr, fsr;
+ /* Flashes might override the standard routine. */
+ if (nor->params->ready)
+ return nor->params->ready(nor);
- if (nor->flags & SNOR_F_READY_XSR_RDY)
- sr = spi_nor_xsr_ready(nor);
- else
- sr = spi_nor_sr_ready(nor);
- if (sr < 0)
- return sr;
- fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
- if (fsr < 0)
- return fsr;
- return sr && fsr;
+ return spi_nor_sr_ready(nor);
}
/**
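
With the FSR/XSR handling dropped from the core (see the SNOR_F flag removals in core.h below), any readiness mechanism other than the plain Status Register is expected to come through the new params->ready() hook, typically installed from a late_init() fixup. A minimal sketch under that assumption; every example_* name and the "SR bit 7 means idle" convention are invented for illustration, not part of this patch:

static int example_nor_ready(struct spi_nor *nor)
{
	int ret = spi_nor_read_sr(nor, nor->bouncebuf);

	if (ret)
		return ret;

	/* Hypothetical part: bit 7 of the Status Register means "idle". */
	return !!(nor->bouncebuf[0] & BIT(7));
}

static void example_nor_late_init(struct spi_nor *nor)
{
	nor->params->ready = example_nor_ready;
}

static const struct spi_nor_fixups example_nor_fixups = {
	.late_init = example_nor_late_init,
};

spi_nor_ready() then calls example_nor_ready() instead of the default spi_nor_sr_ready(), and the fixups struct would be referenced from a flash_info entry via .fixups.
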
@@ -1952,6 +1733,7 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
struct spi_nor *nor = mtd_to_spi_nor(mtd);
size_t page_offset, page_remain, i;
ssize_t ret;
+ u32 page_size = nor->params->page_size;
dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
@@ -1968,16 +1750,15 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
* calculated with an AND operation. On the other cases we
* need to do a modulus operation (more expensive).
*/
- if (is_power_of_2(nor->page_size)) {
- page_offset = addr & (nor->page_size - 1);
+ if (is_power_of_2(page_size)) {
+ page_offset = addr & (page_size - 1);
} else {
uint64_t aux = addr;
- page_offset = do_div(aux, nor->page_size);
+ page_offset = do_div(aux, page_size);
}
/* the size of data remaining on the first page */
- page_remain = min_t(size_t,
- nor->page_size - page_offset, len - i);
+ page_remain = min_t(size_t, page_size - page_offset, len - i);
addr = spi_nor_convert_addr(nor, addr);
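
The comment in the hunk above leans on the identity addr % 2^k == addr & (2^k - 1), which only holds for power-of-two page sizes; a tiny standalone check with arbitrary values (plain userspace C, purely for illustration):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t addr = 0x12345;

	/* 256 is a power of two, so the AND shortcut matches the modulus. */
	assert((addr & (256 - 1)) == addr % 256);

	/* 528 is not a power of two; for this addr the AND form is wrong,
	 * which is why the code falls back to do_div(). */
	assert((addr & (528 - 1)) != addr % 528);
	return 0;
}
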
@@ -2115,7 +1896,7 @@ static int spi_nor_spimem_check_op(struct spi_nor *nor,
*/
op->addr.nbytes = 4;
if (!spi_mem_supports_op(nor->spimem, op)) {
- if (nor->mtd.size > SZ_16M)
+ if (nor->params->size > SZ_16M)
return -EOPNOTSUPP;
/* If flash size <= 16MB, 3 address bytes are sufficient */
@@ -2141,7 +1922,7 @@ static int spi_nor_spimem_check_readop(struct spi_nor *nor,
struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 0),
SPI_MEM_OP_ADDR(3, 0, 0),
SPI_MEM_OP_DUMMY(1, 0),
- SPI_MEM_OP_DATA_IN(1, NULL, 0));
+ SPI_MEM_OP_DATA_IN(2, NULL, 0));
spi_nor_spimem_setup_op(nor, &op, read->proto);
@@ -2167,7 +1948,7 @@ static int spi_nor_spimem_check_pp(struct spi_nor *nor,
struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 0),
SPI_MEM_OP_ADDR(3, 0, 0),
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(1, NULL, 0));
+ SPI_MEM_OP_DATA_OUT(2, NULL, 0));
spi_nor_spimem_setup_op(nor, &op, pp->proto);
@@ -2484,13 +2265,62 @@ static int spi_nor_default_setup(struct spi_nor *nor,
return 0;
}
+static int spi_nor_set_addr_width(struct spi_nor *nor)
+{
+ if (nor->addr_width) {
+ /* already configured from SFDP */
+ } else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) {
+ /*
+ * In 8D-8D-8D mode, one byte takes half a cycle to transfer. So
+ * in this protocol an odd address width cannot be used because
+ * then the address phase would only span a cycle and a half.
+ * Half a cycle would be left over. We would then have to start
+ * the dummy phase in the middle of a cycle and so too the data
+ * phase, and we will end the transaction with half a cycle left
+ * over.
+ *
+ * Force all 8D-8D-8D flashes to use an address width of 4 to
+ * avoid this situation.
+ */
+ nor->addr_width = 4;
+ } else if (nor->info->addr_width) {
+ nor->addr_width = nor->info->addr_width;
+ } else {
+ nor->addr_width = 3;
+ }
+
+ if (nor->addr_width == 3 && nor->params->size > 0x1000000) {
+ /* enable 4-byte addressing if the device exceeds 16MiB */
+ nor->addr_width = 4;
+ }
+
+ if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
+ dev_dbg(nor->dev, "address width is too large: %u\n",
+ nor->addr_width);
+ return -EINVAL;
+ }
+
+ /* Set 4byte opcodes when possible. */
+ if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
+ !(nor->flags & SNOR_F_HAS_4BAIT))
+ spi_nor_set_4byte_opcodes(nor);
+
+ return 0;
+}
+
static int spi_nor_setup(struct spi_nor *nor,
const struct spi_nor_hwcaps *hwcaps)
{
- if (!nor->params->setup)
- return 0;
+ int ret;
- return nor->params->setup(nor, hwcaps);
+ if (nor->params->setup)
+ ret = nor->params->setup(nor, hwcaps);
+ else
+ ret = spi_nor_default_setup(nor, hwcaps);
+ if (ret)
+ return ret;
+
+ return spi_nor_set_addr_width(nor);
}
/**
@@ -2509,107 +2339,50 @@ static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
}
/**
- * spi_nor_sfdp_init_params() - Initialize the flash's parameters and settings
- * based on JESD216 SFDP standard.
+ * spi_nor_no_sfdp_init_params() - Initialize the flash's parameters and
+ * settings based on nor->info->no_sfdp_flags. This method should be called
+ * only by flashes that do not define SFDP tables. If the flash supports SFDP
+ * but the information is wrong and the settings from this function cannot be
+ * retrieved by parsing SFDP, one should instead use the fixup hooks and update
+ * the wrong bits.
* @nor: pointer to a 'struct spi_nor'.
- *
- * The method has a roll-back mechanism: in case the SFDP parsing fails, the
- * legacy flash parameters and settings will be restored.
*/
-static void spi_nor_sfdp_init_params(struct spi_nor *nor)
-{
- struct spi_nor_flash_parameter sfdp_params;
-
- memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));
-
- if (spi_nor_parse_sfdp(nor)) {
- memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
- nor->addr_width = 0;
- nor->flags &= ~SNOR_F_4B_OPCODES;
- }
-}
-
-/**
- * spi_nor_info_init_params() - Initialize the flash's parameters and settings
- * based on nor->info data.
- * @nor: pointer to a 'struct spi_nor'.
- */
-static void spi_nor_info_init_params(struct spi_nor *nor)
+static void spi_nor_no_sfdp_init_params(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
struct spi_nor_erase_map *map = &params->erase_map;
- const struct flash_info *info = nor->info;
- struct device_node *np = spi_nor_get_flash_node(nor);
+ const u8 no_sfdp_flags = nor->info->no_sfdp_flags;
u8 i, erase_mask;
- /* Initialize default flash parameters and settings. */
- params->quad_enable = spi_nor_sr2_bit1_quad_enable;
- params->set_4byte_addr_mode = spansion_set_4byte_addr_mode;
- params->setup = spi_nor_default_setup;
- params->otp.org = &info->otp_org;
-
- /* Default to 16-bit Write Status (01h) Command */
- nor->flags |= SNOR_F_HAS_16BIT_SR;
-
- /* Set SPI NOR sizes. */
- params->writesize = 1;
- params->size = (u64)info->sector_size * info->n_sectors;
- params->page_size = info->page_size;
-
- if (!(info->flags & SPI_NOR_NO_FR)) {
- /* Default to Fast Read for DT and non-DT platform devices. */
- params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
-
- /* Mask out Fast Read if not requested at DT instantiation. */
- if (np && !of_property_read_bool(np, "m25p,fast-read"))
- params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
- }
-
- /* (Fast) Read settings. */
- params->hwcaps.mask |= SNOR_HWCAPS_READ;
- spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
- 0, 0, SPINOR_OP_READ,
- SNOR_PROTO_1_1_1);
-
- if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
- spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
- 0, 8, SPINOR_OP_READ_FAST,
- SNOR_PROTO_1_1_1);
-
- if (info->flags & SPI_NOR_DUAL_READ) {
+ if (no_sfdp_flags & SPI_NOR_DUAL_READ) {
params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
0, 8, SPINOR_OP_READ_1_1_2,
SNOR_PROTO_1_1_2);
}
- if (info->flags & SPI_NOR_QUAD_READ) {
+ if (no_sfdp_flags & SPI_NOR_QUAD_READ) {
params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
0, 8, SPINOR_OP_READ_1_1_4,
SNOR_PROTO_1_1_4);
}
- if (info->flags & SPI_NOR_OCTAL_READ) {
+ if (no_sfdp_flags & SPI_NOR_OCTAL_READ) {
params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
0, 8, SPINOR_OP_READ_1_1_8,
SNOR_PROTO_1_1_8);
}
- if (info->flags & SPI_NOR_OCTAL_DTR_READ) {
+ if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_READ) {
params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR;
spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_8_8_8_DTR],
0, 20, SPINOR_OP_READ_FAST,
SNOR_PROTO_8_8_8_DTR);
}
- /* Page Program settings. */
- params->hwcaps.mask |= SNOR_HWCAPS_PP;
- spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
- SPINOR_OP_PP, SNOR_PROTO_1_1_1);
-
- if (info->flags & SPI_NOR_OCTAL_DTR_PP) {
+ if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_PP) {
params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR;
/*
* Since xSPI Page Program opcode is backward compatible with
@@ -2625,52 +2398,97 @@ static void spi_nor_info_init_params(struct spi_nor *nor)
*/
erase_mask = 0;
i = 0;
- if (info->flags & SECT_4K_PMC) {
+ if (no_sfdp_flags & SECT_4K_PMC) {
erase_mask |= BIT(i);
spi_nor_set_erase_type(&map->erase_type[i], 4096u,
SPINOR_OP_BE_4K_PMC);
i++;
- } else if (info->flags & SECT_4K) {
+ } else if (no_sfdp_flags & SECT_4K) {
erase_mask |= BIT(i);
spi_nor_set_erase_type(&map->erase_type[i], 4096u,
SPINOR_OP_BE_4K);
i++;
}
erase_mask |= BIT(i);
- spi_nor_set_erase_type(&map->erase_type[i], info->sector_size,
+ spi_nor_set_erase_type(&map->erase_type[i], nor->info->sector_size,
SPINOR_OP_SE);
spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
}
/**
- * spi_nor_post_sfdp_fixups() - Updates the flash's parameters and settings
- * after SFDP has been parsed (is also called for SPI NORs that do not
- * support RDSFDP).
+ * spi_nor_init_flags() - Initialize NOR flags for settings that are not defined
+ * in the JESD216 SFDP standard and thus cannot be retrieved when parsing SFDP.
* @nor: pointer to a 'struct spi_nor'
- *
- * Typically used to tweak various parameters that could not be extracted by
- * other means (i.e. when information provided by the SFDP/flash_info tables
- * are incomplete or wrong).
*/
-static void spi_nor_post_sfdp_fixups(struct spi_nor *nor)
+static void spi_nor_init_flags(struct spi_nor *nor)
{
- if (nor->manufacturer && nor->manufacturer->fixups &&
- nor->manufacturer->fixups->post_sfdp)
- nor->manufacturer->fixups->post_sfdp(nor);
+ struct device_node *np = spi_nor_get_flash_node(nor);
+ const u16 flags = nor->info->flags;
+
+ if (of_property_read_bool(np, "broken-flash-reset"))
+ nor->flags |= SNOR_F_BROKEN_RESET;
+
+ if (flags & SPI_NOR_SWP_IS_VOLATILE)
+ nor->flags |= SNOR_F_SWP_IS_VOLATILE;
+
+ if (flags & SPI_NOR_HAS_LOCK)
+ nor->flags |= SNOR_F_HAS_LOCK;
+
+ if (flags & SPI_NOR_HAS_TB) {
+ nor->flags |= SNOR_F_HAS_SR_TB;
+ if (flags & SPI_NOR_TB_SR_BIT6)
+ nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
+ }
+
+ if (flags & SPI_NOR_4BIT_BP) {
+ nor->flags |= SNOR_F_HAS_4BIT_BP;
+ if (flags & SPI_NOR_BP3_SR_BIT6)
+ nor->flags |= SNOR_F_HAS_SR_BP3_BIT6;
+ }
+
+ if (flags & NO_CHIP_ERASE)
+ nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
+}
+
+/**
+ * spi_nor_init_fixup_flags() - Initialize NOR flags for settings that cannot
+ * be discovered by SFDP for this particular flash because the SFDP table that
+ * indicates this support is not defined in the flash. In case the table for
+ * this support is defined but has wrong values, one should instead use a
+ * post_sfdp() hook to set the SNOR_F equivalent flag.
+ * @nor: pointer to a 'struct spi_nor'
+ */
+static void spi_nor_init_fixup_flags(struct spi_nor *nor)
+{
+ const u8 fixup_flags = nor->info->fixup_flags;
- if (nor->info->fixups && nor->info->fixups->post_sfdp)
- nor->info->fixups->post_sfdp(nor);
+ if (fixup_flags & SPI_NOR_4B_OPCODES)
+ nor->flags |= SNOR_F_4B_OPCODES;
+
+ if (fixup_flags & SPI_NOR_IO_MODE_EN_VOLATILE)
+ nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;
}
/**
* spi_nor_late_init_params() - Late initialization of default flash parameters.
* @nor: pointer to a 'struct spi_nor'
*
- * Used to set default flash parameters and settings when the ->default_init()
- * hook or the SFDP parser let voids.
+ * Used to initialize flash parameters that are not declared in the JESD216
+ * SFDP standard, or where SFDP tables are not defined at all.
+ * Will replace the spi_nor_manufacturer_init_params() method.
*/
static void spi_nor_late_init_params(struct spi_nor *nor)
{
+ if (nor->manufacturer && nor->manufacturer->fixups &&
+ nor->manufacturer->fixups->late_init)
+ nor->manufacturer->fixups->late_init(nor);
+
+ if (nor->info->fixups && nor->info->fixups->late_init)
+ nor->info->fixups->late_init(nor);
+
+ spi_nor_init_flags(nor);
+ spi_nor_init_fixup_flags(nor);
+
/*
* NOR protection support. When locking_ops are not provided, we pick
* the default ones.
@@ -2680,6 +2498,98 @@ static void spi_nor_late_init_params(struct spi_nor *nor)
}
/**
+ * spi_nor_sfdp_init_params_deprecated() - Deprecated way of initializing flash
+ * parameters and settings based on JESD216 SFDP standard.
+ * @nor: pointer to a 'struct spi_nor'.
+ *
+ * The method has a roll-back mechanism: in case the SFDP parsing fails, the
+ * legacy flash parameters and settings will be restored.
+ */
+static void spi_nor_sfdp_init_params_deprecated(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter sfdp_params;
+
+ memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));
+
+ if (spi_nor_parse_sfdp(nor)) {
+ memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
+ nor->addr_width = 0;
+ nor->flags &= ~SNOR_F_4B_OPCODES;
+ }
+}
+
+/**
+ * spi_nor_init_params_deprecated() - Deprecated way of initializing flash
+ * parameters and settings.
+ * @nor: pointer to a 'struct spi_nor'.
+ *
+ * The method assumes the flash does not support SFDP and initializes the
+ * flash parameters in spi_nor_no_sfdp_init_params(), which can later be
+ * overwritten when parsing SFDP, if supported.
+ */
+static void spi_nor_init_params_deprecated(struct spi_nor *nor)
+{
+ spi_nor_no_sfdp_init_params(nor);
+
+ spi_nor_manufacturer_init_params(nor);
+
+ if (nor->info->no_sfdp_flags & (SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ |
+ SPI_NOR_OCTAL_READ |
+ SPI_NOR_OCTAL_DTR_READ))
+ spi_nor_sfdp_init_params_deprecated(nor);
+}
+
+/**
+ * spi_nor_init_default_params() - Default initialization of flash parameters
+ * and settings. Done for all flashes, regardless of whether they define SFDP
+ * tables or not.
+ * @nor: pointer to a 'struct spi_nor'.
+ */
+static void spi_nor_init_default_params(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+ const struct flash_info *info = nor->info;
+ struct device_node *np = spi_nor_get_flash_node(nor);
+
+ params->quad_enable = spi_nor_sr2_bit1_quad_enable;
+ params->set_4byte_addr_mode = spansion_set_4byte_addr_mode;
+ params->otp.org = &info->otp_org;
+
+ /* Default to 16-bit Write Status (01h) Command */
+ nor->flags |= SNOR_F_HAS_16BIT_SR;
+
+ /* Set SPI NOR sizes. */
+ params->writesize = 1;
+ params->size = (u64)info->sector_size * info->n_sectors;
+ params->page_size = info->page_size;
+
+ if (!(info->flags & SPI_NOR_NO_FR)) {
+ /* Default to Fast Read for DT and non-DT platform devices. */
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
+
+ /* Mask out Fast Read if not requested at DT instantiation. */
+ if (np && !of_property_read_bool(np, "m25p,fast-read"))
+ params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
+ }
+
+ /* (Fast) Read settings. */
+ params->hwcaps.mask |= SNOR_HWCAPS_READ;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
+ 0, 0, SPINOR_OP_READ,
+ SNOR_PROTO_1_1_1);
+
+ if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
+ 0, 8, SPINOR_OP_READ_FAST,
+ SNOR_PROTO_1_1_1);
+ /* Page Program settings. */
+ params->hwcaps.mask |= SNOR_HWCAPS_PP;
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
+ SPINOR_OP_PP, SNOR_PROTO_1_1_1);
+}
+
+/**
* spi_nor_init_params() - Initialize the flash's parameters and settings.
* @nor: pointer to a 'struct spi_nor'.
*
@@ -2699,39 +2609,44 @@ static void spi_nor_late_init_params(struct spi_nor *nor)
* which can be overwritten by:
* 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and
 *    should be more accurate than the above.
- * spi_nor_sfdp_init_params()
+ * spi_nor_parse_sfdp() or spi_nor_no_sfdp_init_params()
*
* Please note that there is a ->post_bfpt() fixup hook that can overwrite
* the flash parameters and settings immediately after parsing the Basic
* Flash Parameter Table.
+ * spi_nor_post_sfdp_fixups() is called after the SFDP tables are parsed.
+ *    It is used to tweak various flash parameters when the information
+ *    provided by the SFDP tables is wrong.
*
* which can be overwritten by:
- * 4/ Post SFDP flash parameters initialization. Used to tweak various
- * parameters that could not be extracted by other means (i.e. when
- * information provided by the SFDP/flash_info tables are incomplete or
- * wrong).
- * spi_nor_post_sfdp_fixups()
- *
- * 5/ Late default flash parameters initialization, used when the
- * ->default_init() hook or the SFDP parser do not set specific params.
+ * 4/ Late flash parameters initialization, used to initialize flash
+ * parameters that are not declared in the JESD216 SFDP standard, or where SFDP
+ * tables are not defined at all.
* spi_nor_late_init_params()
+ *
+ * Return: 0 on success, -errno otherwise.
*/
static int spi_nor_init_params(struct spi_nor *nor)
{
+ int ret;
+
nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params), GFP_KERNEL);
if (!nor->params)
return -ENOMEM;
- spi_nor_info_init_params(nor);
+ spi_nor_init_default_params(nor);
- spi_nor_manufacturer_init_params(nor);
-
- if ((nor->info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_OCTAL_READ | SPI_NOR_OCTAL_DTR_READ)) &&
- !(nor->info->flags & SPI_NOR_SKIP_SFDP))
- spi_nor_sfdp_init_params(nor);
-
- spi_nor_post_sfdp_fixups(nor);
+ if (nor->info->parse_sfdp) {
+ ret = spi_nor_parse_sfdp(nor);
+ if (ret) {
+ dev_err(nor->dev, "BFPT parsing failed. Please consider using SPI_NOR_SKIP_SFDP when declaring the flash\n");
+ return ret;
+ }
+ } else if (nor->info->no_sfdp_flags & SPI_NOR_SKIP_SFDP) {
+ spi_nor_no_sfdp_init_params(nor);
+ } else {
+ spi_nor_init_params_deprecated(nor);
+ }
spi_nor_late_init_params(nor);
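
To make the three paths selected above concrete, hypothetical flash_info entries using the new initializers from core.h (the part names and JEDEC IDs below are invented):

/* SFDP is trusted and mandatory: a parsing failure aborts the scan. */
{ "ex25sfdp", INFO(0x123456, 0, 64 * 1024, 256) PARSE_SFDP },

/* SFDP must never be queried: static parameters only. */
{ "ex25static", INFO(0x123457, 0, 64 * 1024, 128)
	NO_SFDP_FLAGS(SPI_NOR_SKIP_SFDP | SECT_4K) },

/*
 * Deprecated path: SFDP is still tried (with rollback) only because a
 * Dual/Quad/Octal read flag is present.
 */
{ "ex25legacy", INFO(0x123458, 0, 64 * 1024, 64)
	NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
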
@@ -2978,59 +2893,6 @@ static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
return NULL;
}
-static int spi_nor_set_addr_width(struct spi_nor *nor)
-{
- if (nor->addr_width) {
- /* already configured from SFDP */
- } else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) {
- /*
- * In 8D-8D-8D mode, one byte takes half a cycle to transfer. So
- * in this protocol an odd address width cannot be used because
- * then the address phase would only span a cycle and a half.
- * Half a cycle would be left over. We would then have to start
- * the dummy phase in the middle of a cycle and so too the data
- * phase, and we will end the transaction with half a cycle left
- * over.
- *
- * Force all 8D-8D-8D flashes to use an address width of 4 to
- * avoid this situation.
- */
- nor->addr_width = 4;
- } else if (nor->info->addr_width) {
- nor->addr_width = nor->info->addr_width;
- } else {
- nor->addr_width = 3;
- }
-
- if (nor->addr_width == 3 && nor->mtd.size > 0x1000000) {
- /* enable 4-byte addressing if the device exceeds 16MiB */
- nor->addr_width = 4;
- }
-
- if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
- dev_dbg(nor->dev, "address width is too large: %u\n",
- nor->addr_width);
- return -EINVAL;
- }
-
- /* Set 4byte opcodes when possible. */
- if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
- !(nor->flags & SNOR_F_HAS_4BAIT))
- spi_nor_set_4byte_opcodes(nor);
-
- return 0;
-}
-
-static void spi_nor_debugfs_init(struct spi_nor *nor,
- const struct flash_info *info)
-{
- struct mtd_info *mtd = &nor->mtd;
-
- mtd->dbg.partname = info->name;
- mtd->dbg.partid = devm_kasprintf(nor->dev, GFP_KERNEL, "spi-nor:%*phN",
- info->id_len, info->id);
-}
-
static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
const char *name)
{
@@ -3071,13 +2933,42 @@ static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
return info;
}
+static void spi_nor_set_mtd_info(struct spi_nor *nor)
+{
+ struct mtd_info *mtd = &nor->mtd;
+ struct device *dev = nor->dev;
+
+ spi_nor_set_mtd_locking_ops(nor);
+ spi_nor_set_mtd_otp_ops(nor);
+
+ mtd->dev.parent = dev;
+ if (!mtd->name)
+ mtd->name = dev_name(dev);
+ mtd->type = MTD_NORFLASH;
+ mtd->flags = MTD_CAP_NORFLASH;
+ if (nor->info->flags & SPI_NOR_NO_ERASE)
+ mtd->flags |= MTD_NO_ERASE;
+ else
+ mtd->_erase = spi_nor_erase;
+ mtd->writesize = nor->params->writesize;
+ mtd->writebufsize = nor->params->page_size;
+ mtd->size = nor->params->size;
+ mtd->_read = spi_nor_read;
+ /* Might be already set by some SST flashes. */
+ if (!mtd->_write)
+ mtd->_write = spi_nor_write;
+ mtd->_suspend = spi_nor_suspend;
+ mtd->_resume = spi_nor_resume;
+ mtd->_get_device = spi_nor_get_device;
+ mtd->_put_device = spi_nor_put_device;
+}
+
int spi_nor_scan(struct spi_nor *nor, const char *name,
const struct spi_nor_hwcaps *hwcaps)
{
const struct flash_info *info;
struct device *dev = nor->dev;
struct mtd_info *mtd = &nor->mtd;
- struct device_node *np = spi_nor_get_flash_node(nor);
int ret;
int i;
@@ -3094,7 +2985,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
* We need the bounce buffer early to read/write registers when going
* through the spi-mem layer (buffers have to be DMA-able).
* For spi-mem drivers, we'll reallocate a new buffer if
- * nor->page_size turns out to be greater than PAGE_SIZE (which
+ * nor->params->page_size turns out to be greater than PAGE_SIZE (which
* shouldn't happen before long since NOR pages are usually less
* than 1KB) after spi_nor_scan() returns.
*/
@@ -3110,102 +3001,31 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
nor->info = info;
- spi_nor_debugfs_init(nor, info);
-
mutex_init(&nor->lock);
- /*
- * Make sure the XSR_RDY flag is set before calling
- * spi_nor_wait_till_ready(). Xilinx S3AN share MFR
- * with Atmel SPI NOR.
- */
- if (info->flags & SPI_NOR_XSR_RDY)
- nor->flags |= SNOR_F_READY_XSR_RDY;
-
- if (info->flags & SPI_NOR_HAS_LOCK)
- nor->flags |= SNOR_F_HAS_LOCK;
-
- mtd->_write = spi_nor_write;
-
/* Init flash parameters based on flash_info struct and SFDP */
ret = spi_nor_init_params(nor);
if (ret)
return ret;
- if (!mtd->name)
- mtd->name = dev_name(dev);
- mtd->priv = nor;
- mtd->type = MTD_NORFLASH;
- mtd->writesize = nor->params->writesize;
- mtd->flags = MTD_CAP_NORFLASH;
- mtd->size = nor->params->size;
- mtd->_erase = spi_nor_erase;
- mtd->_read = spi_nor_read;
- mtd->_suspend = spi_nor_suspend;
- mtd->_resume = spi_nor_resume;
- mtd->_get_device = spi_nor_get_device;
- mtd->_put_device = spi_nor_put_device;
-
- if (info->flags & USE_FSR)
- nor->flags |= SNOR_F_USE_FSR;
- if (info->flags & SPI_NOR_HAS_TB) {
- nor->flags |= SNOR_F_HAS_SR_TB;
- if (info->flags & SPI_NOR_TB_SR_BIT6)
- nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
- }
-
- if (info->flags & NO_CHIP_ERASE)
- nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
- if (info->flags & USE_CLSR)
- nor->flags |= SNOR_F_USE_CLSR;
- if (info->flags & SPI_NOR_SWP_IS_VOLATILE)
- nor->flags |= SNOR_F_SWP_IS_VOLATILE;
-
- if (info->flags & SPI_NOR_4BIT_BP) {
- nor->flags |= SNOR_F_HAS_4BIT_BP;
- if (info->flags & SPI_NOR_BP3_SR_BIT6)
- nor->flags |= SNOR_F_HAS_SR_BP3_BIT6;
- }
-
- if (info->flags & SPI_NOR_NO_ERASE)
- mtd->flags |= MTD_NO_ERASE;
-
- mtd->dev.parent = dev;
- nor->page_size = nor->params->page_size;
- mtd->writebufsize = nor->page_size;
-
- if (of_property_read_bool(np, "broken-flash-reset"))
- nor->flags |= SNOR_F_BROKEN_RESET;
-
/*
* Configure the SPI memory:
* - select op codes for (Fast) Read, Page Program and Sector Erase.
* - set the number of dummy cycles (mode cycles + wait states).
* - set the SPI protocols for register and memory accesses.
+ * - set the address width.
*/
ret = spi_nor_setup(nor, hwcaps);
if (ret)
return ret;
- if (info->flags & SPI_NOR_4B_OPCODES)
- nor->flags |= SNOR_F_4B_OPCODES;
-
- if (info->flags & SPI_NOR_IO_MODE_EN_VOLATILE)
- nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;
-
- ret = spi_nor_set_addr_width(nor);
- if (ret)
- return ret;
-
- spi_nor_register_locking_ops(nor);
-
/* Send all the required SPI flash commands to initialize device */
ret = spi_nor_init(nor);
if (ret)
return ret;
- /* Configure OTP parameters and ops */
- spi_nor_otp_init(nor);
+ /* No mtd_info fields should be used up to this point. */
+ spi_nor_set_mtd_info(nor);
dev_info(dev, "%s (%lld Kbytes)\n", info->name,
(long long)mtd->size >> 10);
@@ -3238,7 +3058,7 @@ static int spi_nor_create_read_dirmap(struct spi_nor *nor)
SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
SPI_MEM_OP_DATA_IN(0, NULL, 0)),
.offset = 0,
- .length = nor->mtd.size,
+ .length = nor->params->size,
};
struct spi_mem_op *op = &info.op_tmpl;
@@ -3269,7 +3089,7 @@ static int spi_nor_create_write_dirmap(struct spi_nor *nor)
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_OUT(0, NULL, 0)),
.offset = 0,
- .length = nor->mtd.size,
+ .length = nor->params->size,
};
struct spi_mem_op *op = &info.op_tmpl;
@@ -3341,8 +3161,8 @@ static int spi_nor_probe(struct spi_mem *spimem)
* and add this logic so that if anyone ever adds support for such
* a NOR we don't end up with buffer overflows.
*/
- if (nor->page_size > PAGE_SIZE) {
- nor->bouncebuf_size = nor->page_size;
+ if (nor->params->page_size > PAGE_SIZE) {
+ nor->bouncebuf_size = nor->params->page_size;
devm_kfree(nor->dev, nor->bouncebuf);
nor->bouncebuf = devm_kmalloc(nor->dev,
nor->bouncebuf_size,
@@ -3389,8 +3209,8 @@ static void spi_nor_shutdown(struct spi_mem *spimem)
* encourage new users to add support to the spi-nor library, and simply bind
* against a generic string here (e.g., "jedec,spi-nor").
*
- * Many flash names are kept here in this list (as well as in spi-nor.c) to
- * keep them available as module aliases for existing platforms.
+ * Many flash names are kept here in this list to keep them available
+ * as module aliases for existing platforms.
*/
static const struct spi_device_id spi_nor_dev_ids[] = {
/*
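
Several hunks above replace nor->mtd.size with nor->params->size; the rule made explicit by the "No mtd_info fields should be used up to this point" comment is that anything running during the scan must take geometry from nor->params. A hypothetical helper showing the pattern:

static bool example_needs_4byte_addressing(struct spi_nor *nor)
{
	/* nor->mtd.size is not populated until spi_nor_set_mtd_info(). */
	return nor->params->size > SZ_16M;
}
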
diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
index 3348e1dd1445..b7fd760e3b47 100644
--- a/drivers/mtd/spi-nor/core.h
+++ b/drivers/mtd/spi-nor/core.h
@@ -12,23 +12,20 @@
#define SPI_NOR_MAX_ID_LEN 6
enum spi_nor_option_flags {
- SNOR_F_USE_FSR = BIT(0),
- SNOR_F_HAS_SR_TB = BIT(1),
- SNOR_F_NO_OP_CHIP_ERASE = BIT(2),
- SNOR_F_READY_XSR_RDY = BIT(3),
- SNOR_F_USE_CLSR = BIT(4),
- SNOR_F_BROKEN_RESET = BIT(5),
- SNOR_F_4B_OPCODES = BIT(6),
- SNOR_F_HAS_4BAIT = BIT(7),
- SNOR_F_HAS_LOCK = BIT(8),
- SNOR_F_HAS_16BIT_SR = BIT(9),
- SNOR_F_NO_READ_CR = BIT(10),
- SNOR_F_HAS_SR_TB_BIT6 = BIT(11),
- SNOR_F_HAS_4BIT_BP = BIT(12),
- SNOR_F_HAS_SR_BP3_BIT6 = BIT(13),
- SNOR_F_IO_MODE_EN_VOLATILE = BIT(14),
- SNOR_F_SOFT_RESET = BIT(15),
- SNOR_F_SWP_IS_VOLATILE = BIT(16),
+ SNOR_F_HAS_SR_TB = BIT(0),
+ SNOR_F_NO_OP_CHIP_ERASE = BIT(1),
+ SNOR_F_BROKEN_RESET = BIT(2),
+ SNOR_F_4B_OPCODES = BIT(3),
+ SNOR_F_HAS_4BAIT = BIT(4),
+ SNOR_F_HAS_LOCK = BIT(5),
+ SNOR_F_HAS_16BIT_SR = BIT(6),
+ SNOR_F_NO_READ_CR = BIT(7),
+ SNOR_F_HAS_SR_TB_BIT6 = BIT(8),
+ SNOR_F_HAS_4BIT_BP = BIT(9),
+ SNOR_F_HAS_SR_BP3_BIT6 = BIT(10),
+ SNOR_F_IO_MODE_EN_VOLATILE = BIT(11),
+ SNOR_F_SOFT_RESET = BIT(12),
+ SNOR_F_SWP_IS_VOLATILE = BIT(13),
};
struct spi_nor_read_command {
@@ -250,19 +247,21 @@ struct spi_nor_otp {
* higher index in the array, the higher priority.
* @erase_map: the erase map parsed from the SFDP Sector Map Parameter
* Table.
- * @otp_info: describes the OTP regions.
+ * @otp: SPI NOR OTP info.
* @octal_dtr_enable: enables SPI NOR octal DTR mode.
* @quad_enable: enables SPI NOR quad mode.
* @set_4byte_addr_mode: puts the SPI NOR in 4 byte addressing mode.
* @convert_addr: converts an absolute address into something the flash
* will understand. Particularly useful when pagesize is
* not a power-of-2.
- * @setup: configures the SPI NOR memory. Useful for SPI NOR
- * flashes that have peculiarities to the SPI NOR standard
- * e.g. different opcodes, specific address calculation,
- * page size, etc.
+ * @setup: (optional) configures the SPI NOR memory. Useful for
+ * SPI NOR flashes that have peculiarities to the SPI NOR
+ * standard e.g. different opcodes, specific address
+ * calculation, page size, etc.
+ * @ready: (optional) flashes might use a different mechanism
+ * than reading the status register to indicate they
+ * are ready for a new command
* @locking_ops: SPI NOR locking methods.
- * @otp: SPI NOR OTP methods.
*/
struct spi_nor_flash_parameter {
u64 size;
@@ -283,6 +282,7 @@ struct spi_nor_flash_parameter {
int (*set_4byte_addr_mode)(struct spi_nor *nor, bool enable);
u32 (*convert_addr)(struct spi_nor *nor, u32 addr);
int (*setup)(struct spi_nor *nor, const struct spi_nor_hwcaps *hwcaps);
+ int (*ready)(struct spi_nor *nor);
const struct spi_nor_locking_ops *locking_ops;
};
@@ -298,6 +298,9 @@ struct spi_nor_flash_parameter {
* parameters that could not be extracted by other means (i.e.
* when information provided by the SFDP/flash_info tables are
* incomplete or wrong).
+ * @late_init: used to initialize flash parameters that are not declared in the
+ *             JESD216 SFDP standard, or where SFDP tables are not defined at
+ *             all. Will replace the default_init() hook.
*
* Those hooks can be used to tweak the SPI NOR configuration when the SFDP
* table is broken or not available.
@@ -308,89 +311,114 @@ struct spi_nor_fixups {
const struct sfdp_parameter_header *bfpt_header,
const struct sfdp_bfpt *bfpt);
void (*post_sfdp)(struct spi_nor *nor);
+ void (*late_init)(struct spi_nor *nor);
};
+/**
+ * struct flash_info - SPI NOR flash_info entry.
+ * @name: the name of the flash.
+ * @id: the flash's ID bytes. The first three bytes are the
+ *                  JEDEC ID. JEDEC ID zero means "no ID" (mostly older chips).
+ * @id_len: the number of bytes of ID.
+ * @sector_size: the size listed here is what works with SPINOR_OP_SE, which
+ * isn't necessarily called a "sector" by the vendor.
+ * @n_sectors: the number of sectors.
+ * @page_size: the flash's page size.
+ * @addr_width: the flash's address width.
+ *
+ * @parse_sfdp: true when flash supports SFDP tables. The false value has no
+ * meaning. If one wants to skip the SFDP tables, one should
+ *                   instead use the SPI_NOR_SKIP_SFDP flag in no_sfdp_flags.
+ * @flags: flags that indicate support that is not defined by the
+ * JESD216 standard in its SFDP tables. Flag meanings:
+ * SPI_NOR_HAS_LOCK: flash supports lock/unlock via SR
+ * SPI_NOR_HAS_TB: flash SR has Top/Bottom (TB) protect bit. Must be
+ * used with SPI_NOR_HAS_LOCK.
+ * SPI_NOR_TB_SR_BIT6: Top/Bottom (TB) is bit 6 of status register.
+ * Must be used with SPI_NOR_HAS_TB.
+ * SPI_NOR_4BIT_BP: flash SR has 4 bit fields (BP0-3) for block
+ * protection.
+ * SPI_NOR_BP3_SR_BIT6: BP3 is bit 6 of status register. Must be used with
+ * SPI_NOR_4BIT_BP.
+ * SPI_NOR_SWP_IS_VOLATILE: flash has volatile software write protection bits.
+ * Usually these will power-up in a write-protected
+ * state.
+ * SPI_NOR_NO_ERASE: no erase command needed.
+ * NO_CHIP_ERASE: chip does not support chip erase.
+ * SPI_NOR_NO_FR: can't do fastread.
+ *
+ * @no_sfdp_flags: flags that indicate support that can be discovered via SFDP.
+ * Used when SFDP tables are not defined in the flash. These
+ * flags are used together with the SPI_NOR_SKIP_SFDP flag.
+ * SPI_NOR_SKIP_SFDP: skip parsing of SFDP tables.
+ * SECT_4K: SPINOR_OP_BE_4K works uniformly.
+ * SECT_4K_PMC: SPINOR_OP_BE_4K_PMC works uniformly.
+ * SPI_NOR_DUAL_READ: flash supports Dual Read.
+ * SPI_NOR_QUAD_READ: flash supports Quad Read.
+ * SPI_NOR_OCTAL_READ: flash supports Octal Read.
+ * SPI_NOR_OCTAL_DTR_READ: flash supports octal DTR Read.
+ * SPI_NOR_OCTAL_DTR_PP: flash supports Octal DTR Page Program.
+ *
+ * @fixup_flags: flags that indicate support that can be discovered via SFDP
+ *                 ideally, but cannot be discovered for this particular flash
+ * because the SFDP table that indicates this support is not
+ * defined by the flash. In case the table for this support is
+ * defined but has wrong values, one should instead use a
+ * post_sfdp() hook to set the SNOR_F equivalent flag.
+ *
+ * SPI_NOR_4B_OPCODES: use dedicated 4byte address op codes to support
+ * memory size above 128Mib.
+ * SPI_NOR_IO_MODE_EN_VOLATILE: flash enables the best available I/O mode
+ * via a volatile bit.
+ * @mfr_flags: manufacturer private flags. Used in the manufacturer fixup
+ * hooks to differentiate support between flashes of the same
+ * manufacturer.
+ * @otp_org: flash's OTP organization.
+ * @fixups: part specific fixup hooks.
+ */
struct flash_info {
- char *name;
-
- /*
- * This array stores the ID bytes.
- * The first three bytes are the JEDIC ID.
- * JEDEC ID zero means "no ID" (mostly older chips).
- */
- u8 id[SPI_NOR_MAX_ID_LEN];
- u8 id_len;
-
- /* The size listed here is what works with SPINOR_OP_SE, which isn't
- * necessarily called a "sector" by the vendor.
- */
- unsigned sector_size;
- u16 n_sectors;
-
- u16 page_size;
- u16 addr_width;
-
- u32 flags;
-#define SECT_4K BIT(0) /* SPINOR_OP_BE_4K works uniformly */
-#define SPI_NOR_NO_ERASE BIT(1) /* No erase command needed */
-#define SST_WRITE BIT(2) /* use SST byte programming */
-#define SPI_NOR_NO_FR BIT(3) /* Can't do fastread */
-#define SECT_4K_PMC BIT(4) /* SPINOR_OP_BE_4K_PMC works uniformly */
-#define SPI_NOR_DUAL_READ BIT(5) /* Flash supports Dual Read */
-#define SPI_NOR_QUAD_READ BIT(6) /* Flash supports Quad Read */
-#define USE_FSR BIT(7) /* use flag status register */
-#define SPI_NOR_HAS_LOCK BIT(8) /* Flash supports lock/unlock via SR */
-#define SPI_NOR_HAS_TB BIT(9) /*
- * Flash SR has Top/Bottom (TB) protect
- * bit. Must be used with
- * SPI_NOR_HAS_LOCK.
- */
-#define SPI_NOR_XSR_RDY BIT(10) /*
- * S3AN flashes have specific opcode to
- * read the status register.
- */
-#define SPI_NOR_4B_OPCODES BIT(11) /*
- * Use dedicated 4byte address op codes
- * to support memory size above 128Mib.
- */
-#define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */
-#define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */
-#define USE_CLSR BIT(14) /* use CLSR command */
-#define SPI_NOR_OCTAL_READ BIT(15) /* Flash supports Octal Read */
-#define SPI_NOR_TB_SR_BIT6 BIT(16) /*
- * Top/Bottom (TB) is bit 6 of
- * status register. Must be used with
- * SPI_NOR_HAS_TB.
- */
-#define SPI_NOR_4BIT_BP BIT(17) /*
- * Flash SR has 4 bit fields (BP0-3)
- * for block protection.
- */
-#define SPI_NOR_BP3_SR_BIT6 BIT(18) /*
- * BP3 is bit 6 of status register.
- * Must be used with SPI_NOR_4BIT_BP.
- */
-#define SPI_NOR_OCTAL_DTR_READ BIT(19) /* Flash supports octal DTR Read. */
-#define SPI_NOR_OCTAL_DTR_PP BIT(20) /* Flash supports Octal DTR Page Program */
-#define SPI_NOR_IO_MODE_EN_VOLATILE BIT(21) /*
- * Flash enables the best
- * available I/O mode via a
- * volatile bit.
- */
-#define SPI_NOR_SWP_IS_VOLATILE BIT(22) /*
- * Flash has volatile software write
- * protection bits. Usually these will
- * power-up in a write-protected state.
- */
+ char *name;
+ u8 id[SPI_NOR_MAX_ID_LEN];
+ u8 id_len;
+ unsigned sector_size;
+ u16 n_sectors;
+ u16 page_size;
+ u16 addr_width;
+
+ bool parse_sfdp;
+ u16 flags;
+#define SPI_NOR_HAS_LOCK BIT(0)
+#define SPI_NOR_HAS_TB BIT(1)
+#define SPI_NOR_TB_SR_BIT6 BIT(2)
+#define SPI_NOR_4BIT_BP BIT(3)
+#define SPI_NOR_BP3_SR_BIT6 BIT(4)
+#define SPI_NOR_SWP_IS_VOLATILE BIT(5)
+#define SPI_NOR_NO_ERASE BIT(6)
+#define NO_CHIP_ERASE BIT(7)
+#define SPI_NOR_NO_FR BIT(8)
+
+ u8 no_sfdp_flags;
+#define SPI_NOR_SKIP_SFDP BIT(0)
+#define SECT_4K BIT(1)
+#define SECT_4K_PMC BIT(2)
+#define SPI_NOR_DUAL_READ BIT(3)
+#define SPI_NOR_QUAD_READ BIT(4)
+#define SPI_NOR_OCTAL_READ BIT(5)
+#define SPI_NOR_OCTAL_DTR_READ BIT(6)
+#define SPI_NOR_OCTAL_DTR_PP BIT(7)
+
+ u8 fixup_flags;
+#define SPI_NOR_4B_OPCODES BIT(0)
+#define SPI_NOR_IO_MODE_EN_VOLATILE BIT(1)
+
+ u8 mfr_flags;
const struct spi_nor_otp_organization otp_org;
-
- /* Part specific fixup hooks. */
const struct spi_nor_fixups *fixups;
};
/* Used when the "_ext_id" is two bytes at most */
-#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
+#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors) \
.id = { \
((_jedec_id) >> 16) & 0xff, \
((_jedec_id) >> 8) & 0xff, \
@@ -402,9 +430,8 @@ struct flash_info {
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = 256, \
- .flags = (_flags),
-#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
+#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors) \
.id = { \
((_jedec_id) >> 16) & 0xff, \
((_jedec_id) >> 8) & 0xff, \
@@ -417,27 +444,13 @@ struct flash_info {
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = 256, \
- .flags = (_flags),
-#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags) \
+#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width) \
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = (_page_size), \
.addr_width = (_addr_width), \
- .flags = (_flags),
-
-#define S3AN_INFO(_jedec_id, _n_sectors, _page_size) \
- .id = { \
- ((_jedec_id) >> 16) & 0xff, \
- ((_jedec_id) >> 8) & 0xff, \
- (_jedec_id) & 0xff \
- }, \
- .id_len = 3, \
- .sector_size = (8*_page_size), \
- .n_sectors = (_n_sectors), \
- .page_size = _page_size, \
- .addr_width = 3, \
- .flags = SPI_NOR_NO_FR | SPI_NOR_XSR_RDY,
+ .flags = SPI_NOR_NO_ERASE | SPI_NOR_NO_FR, \
#define OTP_INFO(_len, _n_regions, _base, _offset) \
.otp_org = { \
@@ -447,6 +460,21 @@ struct flash_info {
.n_regions = (_n_regions), \
},
+#define PARSE_SFDP \
+ .parse_sfdp = true, \
+
+#define FLAGS(_flags) \
+ .flags = (_flags), \
+
+#define NO_SFDP_FLAGS(_no_sfdp_flags) \
+ .no_sfdp_flags = (_no_sfdp_flags), \
+
+#define FIXUP_FLAGS(_fixup_flags) \
+ .fixup_flags = (_fixup_flags), \
+
+#define MFR_FLAGS(_mfr_flags) \
+ .mfr_flags = (_mfr_flags), \
+
/**
* struct spi_nor_manufacturer - SPI NOR manufacturer object
* @name: manufacturer name
@@ -507,12 +535,12 @@ int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor);
int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor);
int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor);
int spi_nor_read_sr(struct spi_nor *nor, u8 *sr);
+int spi_nor_sr_ready(struct spi_nor *nor);
int spi_nor_read_cr(struct spi_nor *nor, u8 *cr);
int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len);
int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1);
int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr);
-int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr);
ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len,
u8 *buf);
ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
@@ -549,12 +577,17 @@ int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
void spi_nor_init_default_locking_ops(struct spi_nor *nor);
void spi_nor_try_unlock_all(struct spi_nor *nor);
-void spi_nor_register_locking_ops(struct spi_nor *nor);
-void spi_nor_otp_init(struct spi_nor *nor);
+void spi_nor_set_mtd_locking_ops(struct spi_nor *nor);
+void spi_nor_set_mtd_otp_ops(struct spi_nor *nor);
+
+int spi_nor_controller_ops_read_reg(struct spi_nor *nor, u8 opcode,
+ u8 *buf, size_t len);
+int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode,
+ const u8 *buf, size_t len);
-static struct spi_nor __maybe_unused *mtd_to_spi_nor(struct mtd_info *mtd)
+static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
{
- return mtd->priv;
+ return container_of(mtd, struct spi_nor, mtd);
}
#endif /* __LINUX_MTD_SPI_NOR_INTERNAL_H */
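
Because mtd->priv is no longer assigned, mtd_to_spi_nor() above relies on the mtd_info being embedded in struct spi_nor and recovers the owner with container_of(). A minimal hypothetical mtd callback illustrating the idiom (the lock logic itself is omitted):

static int example_mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);

	/* Reject ranges past the end of the flash. */
	if (ofs + len > nor->params->size)
		return -EINVAL;

	/* ... vendor-specific lock sequence would go here ... */
	return 0;
}
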
diff --git a/drivers/mtd/spi-nor/eon.c b/drivers/mtd/spi-nor/eon.c
index ddb8e3650835..8c1c57530281 100644
--- a/drivers/mtd/spi-nor/eon.c
+++ b/drivers/mtd/spi-nor/eon.c
@@ -8,27 +8,30 @@
#include "core.h"
-static const struct flash_info eon_parts[] = {
+static const struct flash_info eon_nor_parts[] = {
/* EON -- en25xxx */
- { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
- { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
- { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
- { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
- { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
- { "en25q80a", INFO(0x1c3014, 0, 64 * 1024, 16,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "en25qh16", INFO(0x1c7015, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64, 0) },
- { "en25qh64", INFO(0x1c7017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
- { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
- { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) },
+ { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64) },
+ { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64) },
+ { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128) },
+ { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "en25q80a", INFO(0x1c3014, 0, 64 * 1024, 16)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
+ { "en25qh16", INFO(0x1c7015, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
+ { "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64) },
+ { "en25qh64", INFO(0x1c7017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
+ { "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256) },
+ { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512) },
+ { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K) },
};
const struct spi_nor_manufacturer spi_nor_eon = {
.name = "eon",
- .parts = eon_parts,
- .nparts = ARRAY_SIZE(eon_parts),
+ .parts = eon_nor_parts,
+ .nparts = ARRAY_SIZE(eon_nor_parts),
};
diff --git a/drivers/mtd/spi-nor/esmt.c b/drivers/mtd/spi-nor/esmt.c
index cfc9218c1053..79e2408f4998 100644
--- a/drivers/mtd/spi-nor/esmt.c
+++ b/drivers/mtd/spi-nor/esmt.c
@@ -8,18 +8,21 @@
#include "core.h"
-static const struct flash_info esmt_parts[] = {
+static const struct flash_info esmt_nor_parts[] = {
/* ESMT */
- { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
- { "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_HAS_LOCK) },
- { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_HAS_LOCK) },
+ { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128)
+ FLAGS(SPI_NOR_HAS_LOCK)
+ NO_SFDP_FLAGS(SECT_4K) },
};
const struct spi_nor_manufacturer spi_nor_esmt = {
.name = "esmt",
- .parts = esmt_parts,
- .nparts = ARRAY_SIZE(esmt_parts),
+ .parts = esmt_nor_parts,
+ .nparts = ARRAY_SIZE(esmt_nor_parts),
};
diff --git a/drivers/mtd/spi-nor/everspin.c b/drivers/mtd/spi-nor/everspin.c
index 04a177a32283..84a07c2e0536 100644
--- a/drivers/mtd/spi-nor/everspin.c
+++ b/drivers/mtd/spi-nor/everspin.c
@@ -8,20 +8,16 @@
#include "core.h"
-static const struct flash_info everspin_parts[] = {
+static const struct flash_info everspin_nor_parts[] = {
/* Everspin */
- { "mr25h128", CAT25_INFO(16 * 1024, 1, 256, 2,
- SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "mr25h256", CAT25_INFO(32 * 1024, 1, 256, 2,
- SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3,
- SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3,
- SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "mr25h128", CAT25_INFO(16 * 1024, 1, 256, 2) },
+ { "mr25h256", CAT25_INFO(32 * 1024, 1, 256, 2) },
+ { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3) },
+ { "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3) },
};
const struct spi_nor_manufacturer spi_nor_everspin = {
.name = "everspin",
- .parts = everspin_parts,
- .nparts = ARRAY_SIZE(everspin_parts),
+ .parts = everspin_nor_parts,
+ .nparts = ARRAY_SIZE(everspin_nor_parts),
};
diff --git a/drivers/mtd/spi-nor/fujitsu.c b/drivers/mtd/spi-nor/fujitsu.c
index e385d93e756c..69cffc5c73ef 100644
--- a/drivers/mtd/spi-nor/fujitsu.c
+++ b/drivers/mtd/spi-nor/fujitsu.c
@@ -8,13 +8,14 @@
#include "core.h"
-static const struct flash_info fujitsu_parts[] = {
+static const struct flash_info fujitsu_nor_parts[] = {
/* Fujitsu */
- { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },
+ { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1)
+ FLAGS(SPI_NOR_NO_ERASE) },
};
const struct spi_nor_manufacturer spi_nor_fujitsu = {
.name = "fujitsu",
- .parts = fujitsu_parts,
- .nparts = ARRAY_SIZE(fujitsu_parts),
+ .parts = fujitsu_nor_parts,
+ .nparts = ARRAY_SIZE(fujitsu_nor_parts),
};
diff --git a/drivers/mtd/spi-nor/gigadevice.c b/drivers/mtd/spi-nor/gigadevice.c
index 447d84bb2128..119b38e6fc2a 100644
--- a/drivers/mtd/spi-nor/gigadevice.c
+++ b/drivers/mtd/spi-nor/gigadevice.c
@@ -19,41 +19,48 @@ static void gd25q256_default_init(struct spi_nor *nor)
nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
}
-static struct spi_nor_fixups gd25q256_fixups = {
+static const struct spi_nor_fixups gd25q256_fixups = {
.default_init = gd25q256_default_init,
};
-static const struct flash_info gigadevice_parts[] = {
- { "gd25q16", INFO(0xc84015, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
- { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
- { "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
- { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
- { "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
- { "gd25lq128d", INFO(0xc86018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
- { "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
- { "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK |
- SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6)
+static const struct flash_info gigadevice_nor_parts[] = {
+ { "gd25q16", INFO(0xc84015, 0, 64 * 1024, 32)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "gd25lq128d", INFO(0xc86018, 0, 64 * 1024, 256)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512)
+ PARSE_SFDP
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
.fixups = &gd25q256_fixups },
};
const struct spi_nor_manufacturer spi_nor_gigadevice = {
.name = "gigadevice",
- .parts = gigadevice_parts,
- .nparts = ARRAY_SIZE(gigadevice_parts),
+ .parts = gigadevice_nor_parts,
+ .nparts = ARRAY_SIZE(gigadevice_nor_parts),
};
diff --git a/drivers/mtd/spi-nor/intel.c b/drivers/mtd/spi-nor/intel.c
index 8ece9cceb3cf..9179f2d09cba 100644
--- a/drivers/mtd/spi-nor/intel.c
+++ b/drivers/mtd/spi-nor/intel.c
@@ -8,18 +8,18 @@
#include "core.h"
-static const struct flash_info intel_parts[] = {
+static const struct flash_info intel_nor_parts[] = {
/* Intel/Numonyx -- xxxs33b */
- { "160s33b", INFO(0x898911, 0, 64 * 1024, 32,
- SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
- { "320s33b", INFO(0x898912, 0, 64 * 1024, 64,
- SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
- { "640s33b", INFO(0x898913, 0, 64 * 1024, 128,
- SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
+ { "160s33b", INFO(0x898911, 0, 64 * 1024, 32)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
+ { "320s33b", INFO(0x898912, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
+ { "640s33b", INFO(0x898913, 0, 64 * 1024, 128)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
};
const struct spi_nor_manufacturer spi_nor_intel = {
.name = "intel",
- .parts = intel_parts,
- .nparts = ARRAY_SIZE(intel_parts),
+ .parts = intel_nor_parts,
+ .nparts = ARRAY_SIZE(intel_nor_parts),
};
diff --git a/drivers/mtd/spi-nor/issi.c b/drivers/mtd/spi-nor/issi.c
index 1e5bb5408b68..c012bc2486e1 100644
--- a/drivers/mtd/spi-nor/issi.c
+++ b/drivers/mtd/spi-nor/issi.c
@@ -25,58 +25,62 @@ is25lp256_post_bfpt_fixups(struct spi_nor *nor,
return 0;
}
-static struct spi_nor_fixups is25lp256_fixups = {
+static const struct spi_nor_fixups is25lp256_fixups = {
.post_bfpt = is25lp256_post_bfpt_fixups,
};
-static const struct flash_info issi_parts[] = {
+static const struct flash_info issi_nor_parts[] = {
/* ISSI */
- { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
- { "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25lp016d", INFO(0x9d6015, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 16,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25lp032", INFO(0x9d6016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "is25lp064", INFO(0x9d6017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "is25lp256", INFO(0x9d6019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES)
+ { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp016d", INFO(0x9d6015, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 16)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp032", INFO(0x9d6016, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
+ { "is25lp064", INFO(0x9d6017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
+ { "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
+ { "is25lp256", INFO(0x9d6019, 0, 64 * 1024, 512)
+ PARSE_SFDP
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
.fixups = &is25lp256_fixups },
- { "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25wp256", INFO(0x9d7019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES)
+ { "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25wp256", INFO(0x9d7019, 0, 64 * 1024, 512)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
.fixups = &is25lp256_fixups },
/* PMC */
- { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
- { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
- { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
+ { "pm25lv512", INFO(0, 0, 32 * 1024, 2)
+ NO_SFDP_FLAGS(SECT_4K_PMC) },
+ { "pm25lv010", INFO(0, 0, 32 * 1024, 4)
+ NO_SFDP_FLAGS(SECT_4K_PMC) },
+ { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K) },
};
-static void issi_default_init(struct spi_nor *nor)
+static void issi_nor_default_init(struct spi_nor *nor)
{
nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
}
static const struct spi_nor_fixups issi_fixups = {
- .default_init = issi_default_init,
+ .default_init = issi_nor_default_init,
};
const struct spi_nor_manufacturer spi_nor_issi = {
.name = "issi",
- .parts = issi_parts,
- .nparts = ARRAY_SIZE(issi_parts),
+ .parts = issi_nor_parts,
+ .nparts = ARRAY_SIZE(issi_nor_parts),
.fixups = &issi_fixups,
};
diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
index 27498ed0cc0d..d81a4cb2812b 100644
--- a/drivers/mtd/spi-nor/macronix.c
+++ b/drivers/mtd/spi-nor/macronix.c
@@ -28,80 +28,93 @@ mx25l25635_post_bfpt_fixups(struct spi_nor *nor,
return 0;
}
-static struct spi_nor_fixups mx25l25635_fixups = {
+static const struct spi_nor_fixups mx25l25635_fixups = {
.post_bfpt = mx25l25635_post_bfpt_fixups,
};
-static const struct flash_info macronix_parts[] = {
+static const struct flash_info macronix_nor_parts[] = {
/* Macronix */
- { "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
- { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
- { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
- { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
- { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
- { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) },
- { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
- { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
- { "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4, SECT_4K) },
- { "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ |
+ { "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16) },
+ { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
- { "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8, SECT_4K) },
- { "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16, SECT_4K) },
- { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
- { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, SECT_4K |
- SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP) },
- { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
- { "mx25r1635f", INFO(0xc22815, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ |
+ { "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256) },
+ { "mx25r1635f", INFO(0xc22815, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
- { "mx25r3235f", INFO(0xc22816, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ |
+ { "mx25r3235f", INFO(0xc22816, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
- { "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ |
+ { "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
- { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
.fixups = &mx25l25635_fixups },
- { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_4B_OPCODES) },
- { "mx25u51245g", INFO(0xc2253a, 0, 64 * 1024, 1024,
- SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "mx25v8035f", INFO(0xc22314, 0, 64 * 1024, 16,
- SECT_4K | SPI_NOR_DUAL_READ |
+ { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512)
+ NO_SFDP_FLAGS(SECT_4K)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ { "mx25u51245g", INFO(0xc2253a, 0, 64 * 1024, 1024)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ { "mx25v8035f", INFO(0xc22314, 0, 64 * 1024, 16)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
- { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
- { "mx66l51235f", INFO(0xc2201a, 0, 64 * 1024, 1024,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES) },
- { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024,
- SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048,
- SECT_4K | SPI_NOR_DUAL_READ |
+ { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512) },
+ { "mx66l51235f", INFO(0xc2201a, 0, 64 * 1024, 1024)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ { "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
- { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048,
- SPI_NOR_QUAD_READ) },
- { "mx66u2g45g", INFO(0xc2253c, 0, 64 * 1024, 4096,
- SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048)
+ NO_SFDP_FLAGS(SPI_NOR_QUAD_READ) },
+ { "mx66u2g45g", INFO(0xc2253c, 0, 64 * 1024, 4096)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
};
-static void macronix_default_init(struct spi_nor *nor)
+static void macronix_nor_default_init(struct spi_nor *nor)
{
nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
nor->params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode;
}
-static const struct spi_nor_fixups macronix_fixups = {
- .default_init = macronix_default_init,
+static const struct spi_nor_fixups macronix_nor_fixups = {
+ .default_init = macronix_nor_default_init,
};
const struct spi_nor_manufacturer spi_nor_macronix = {
.name = "macronix",
- .parts = macronix_parts,
- .nparts = ARRAY_SIZE(macronix_parts),
- .fixups = &macronix_fixups,
+ .parts = macronix_nor_parts,
+ .nparts = ARRAY_SIZE(macronix_nor_parts),
+ .fixups = &macronix_nor_fixups,
};
diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c
index f3d19b716b7b..8a20475ce77a 100644
--- a/drivers/mtd/spi-nor/micron-st.c
+++ b/drivers/mtd/spi-nor/micron-st.c
@@ -8,15 +8,27 @@
#include "core.h"
+/* flash_info mfr_flag. Used to read proprietary FSR register. */
+#define USE_FSR BIT(0)
+
+#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */
+#define SPINOR_OP_CLFSR 0x50 /* Clear flag status register */
#define SPINOR_OP_MT_DTR_RD 0xfd /* Fast Read opcode in DTR mode */
#define SPINOR_OP_MT_RD_ANY_REG 0x85 /* Read volatile register */
#define SPINOR_OP_MT_WR_ANY_REG 0x81 /* Write volatile register */
#define SPINOR_REG_MT_CFR0V 0x00 /* For setting octal DTR mode */
#define SPINOR_REG_MT_CFR1V 0x01 /* For setting dummy cycles */
+#define SPINOR_REG_MT_CFR1V_DEF 0x1f /* Default dummy cycles */
#define SPINOR_MT_OCT_DTR 0xe7 /* Enable Octal DTR. */
#define SPINOR_MT_EXSPI 0xff /* Enable Extended SPI (default) */
-static int spi_nor_micron_octal_dtr_enable(struct spi_nor *nor, bool enable)
+/* Flag Status Register bits */
+#define FSR_READY BIT(7) /* Device status, 0 = Busy, 1 = Ready */
+#define FSR_E_ERR BIT(5) /* Erase operation status */
+#define FSR_P_ERR BIT(4) /* Program operation status */
+#define FSR_PT_ERR BIT(1) /* Protection error bit */
+
+static int micron_st_nor_octal_dtr_enable(struct spi_nor *nor, bool enable)
{
struct spi_mem_op op;
u8 *buf = nor->bouncebuf;
@@ -48,17 +60,28 @@ static int spi_nor_micron_octal_dtr_enable(struct spi_nor *nor, bool enable)
if (ret)
return ret;
- if (enable)
- *buf = SPINOR_MT_OCT_DTR;
- else
- *buf = SPINOR_MT_EXSPI;
+ if (enable) {
+ buf[0] = SPINOR_MT_OCT_DTR;
+ } else {
+ /*
+ * The register is 1-byte wide, but 1-byte transactions are not
+ * allowed in 8D-8D-8D mode. The next register is the dummy
+ * cycle configuration register. Since the transaction needs to
+ * be at least 2 bytes wide, set the next register to its
+ * default value. This also makes sense because the value was
+ * changed when enabling 8D-8D-8D mode, so it should be reset when
+ * disabling.
+ */
+ buf[0] = SPINOR_MT_EXSPI;
+ buf[1] = SPINOR_REG_MT_CFR1V_DEF;
+ }
op = (struct spi_mem_op)
SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_MT_WR_ANY_REG, 1),
SPI_MEM_OP_ADDR(enable ? 3 : 4,
SPINOR_REG_MT_CFR0V, 1),
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(1, buf, 1));
+ SPI_MEM_OP_DATA_OUT(enable ? 1 : 2, buf, 1));
if (!enable)
spi_nor_spimem_setup_op(nor, &op, SNOR_PROTO_8_8_8_DTR);
@@ -90,7 +113,7 @@ static int spi_nor_micron_octal_dtr_enable(struct spi_nor *nor, bool enable)
static void mt35xu512aba_default_init(struct spi_nor *nor)
{
- nor->params->octal_dtr_enable = spi_nor_micron_octal_dtr_enable;
+ nor->params->octal_dtr_enable = micron_st_nor_octal_dtr_enable;
}
static void mt35xu512aba_post_sfdp_fixup(struct spi_nor *nor)
@@ -113,128 +136,164 @@ static void mt35xu512aba_post_sfdp_fixup(struct spi_nor *nor)
nor->params->quad_enable = NULL;
}
-static struct spi_nor_fixups mt35xu512aba_fixups = {
+static const struct spi_nor_fixups mt35xu512aba_fixups = {
.default_init = mt35xu512aba_default_init,
.post_sfdp = mt35xu512aba_post_sfdp_fixup,
};
-static const struct flash_info micron_parts[] = {
- { "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512,
- SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
- SPI_NOR_4B_OPCODES | SPI_NOR_OCTAL_DTR_READ |
- SPI_NOR_OCTAL_DTR_PP |
- SPI_NOR_IO_MODE_EN_VOLATILE)
- .fixups = &mt35xu512aba_fixups},
- { "mt35xu02g", INFO(0x2c5b1c, 0, 128 * 1024, 2048,
- SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
- SPI_NOR_4B_OPCODES) },
+static const struct flash_info micron_nor_parts[] = {
+ { "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_OCTAL_READ |
+ SPI_NOR_OCTAL_DTR_READ | SPI_NOR_OCTAL_DTR_PP)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES | SPI_NOR_IO_MODE_EN_VOLATILE)
+ MFR_FLAGS(USE_FSR)
+ .fixups = &mt35xu512aba_fixups
+ },
+ { "mt35xu02g", INFO(0x2c5b1c, 0, 128 * 1024, 2048)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_OCTAL_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
+ MFR_FLAGS(USE_FSR)
+ },
};
-static const struct flash_info st_parts[] = {
- { "n25q016a", INFO(0x20bb15, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64,
- SPI_NOR_QUAD_READ) },
- { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64,
- SPI_NOR_QUAD_READ) },
- { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256,
- SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
- SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
- { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256,
- SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
- SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
- { "mt25ql256a", INFO6(0x20ba19, 0x104400, 64 * 1024, 512,
- SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K |
- USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "mt25qu256a", INFO6(0x20bb19, 0x104400, 64 * 1024, 512,
- SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512,
- SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
- { "mt25ql512a", INFO6(0x20ba20, 0x104400, 64 * 1024, 1024,
- SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024,
- SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
- SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
- { "mt25qu512a", INFO6(0x20bb20, 0x104400, 64 * 1024, 1024,
- SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024,
- SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
- SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
- { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048,
- SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
- SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6 |
- NO_CHIP_ERASE) },
- { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048,
- SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
- NO_CHIP_ERASE) },
- { "mt25ql02g", INFO(0x20ba22, 0, 64 * 1024, 4096,
- SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
- NO_CHIP_ERASE) },
- { "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096,
- SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
-
- { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
- { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
- { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
- { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
- { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
- { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
- { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
- { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
- { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
-
- { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
- { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
- { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
- { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
- { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
- { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
- { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
- { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
- { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },
-
- { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
- { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
- { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
-
- { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
- { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
- { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
-
- { "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) },
- { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
- { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
- { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
- { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
- { "m25px80", INFO(0x207114, 0, 64 * 1024, 16, 0) },
+static const struct flash_info st_nor_parts[] = {
+ { "n25q016a", INFO(0x20bb15, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SPI_NOR_QUAD_READ) },
+ { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SPI_NOR_QUAD_READ) },
+ { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_FSR)
+ },
+ { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_FSR)
+ },
+ { "mt25ql256a", INFO6(0x20ba19, 0x104400, 64 * 1024, 512)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
+ MFR_FLAGS(USE_FSR)
+ },
+ { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_FSR)
+ },
+ { "mt25qu256a", INFO6(0x20bb19, 0x104400, 64 * 1024, 512)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
+ MFR_FLAGS(USE_FSR)
+ },
+ { "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_FSR)
+ },
+ { "mt25ql512a", INFO6(0x20ba20, 0x104400, 64 * 1024, 1024)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
+ MFR_FLAGS(USE_FSR)
+ },
+ { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_FSR)
+ },
+ { "mt25qu512a", INFO6(0x20bb20, 0x104400, 64 * 1024, 1024)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
+ MFR_FLAGS(USE_FSR)
+ },
+ { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_FSR)
+ },
+ { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6 | NO_CHIP_ERASE)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_FSR)
+ },
+ { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048)
+ FLAGS(NO_CHIP_ERASE)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_FSR)
+ },
+ { "mt25ql02g", INFO(0x20ba22, 0, 64 * 1024, 4096)
+ FLAGS(NO_CHIP_ERASE)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_FSR)
+ },
+ { "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096)
+ FLAGS(NO_CHIP_ERASE)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_FSR)
+ },
+
+ { "m25p05", INFO(0x202010, 0, 32 * 1024, 2) },
+ { "m25p10", INFO(0x202011, 0, 32 * 1024, 4) },
+ { "m25p20", INFO(0x202012, 0, 64 * 1024, 4) },
+ { "m25p40", INFO(0x202013, 0, 64 * 1024, 8) },
+ { "m25p80", INFO(0x202014, 0, 64 * 1024, 16) },
+ { "m25p16", INFO(0x202015, 0, 64 * 1024, 32) },
+ { "m25p32", INFO(0x202016, 0, 64 * 1024, 64) },
+ { "m25p64", INFO(0x202017, 0, 64 * 1024, 128) },
+ { "m25p128", INFO(0x202018, 0, 256 * 1024, 64) },
+
+ { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2) },
+ { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4) },
+ { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4) },
+ { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8) },
+ { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16) },
+ { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32) },
+ { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64) },
+ { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128) },
+ { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64) },
+
+ { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2) },
+ { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16) },
+ { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32) },
+
+ { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4) },
+ { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16) },
+ { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K) },
+
+ { "m25px16", INFO(0x207115, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "m25px32", INFO(0x207116, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "m25px64", INFO(0x207117, 0, 64 * 1024, 128) },
+ { "m25px80", INFO(0x207114, 0, 64 * 1024, 16) },
};
/**
- * st_micron_set_4byte_addr_mode() - Set 4-byte address mode for ST and Micron
- * flashes.
+ * micron_st_nor_set_4byte_addr_mode() - Set 4-byte address mode for ST and
+ * Micron flashes.
* @nor: pointer to 'struct spi_nor'.
* @enable: true to enter the 4-byte address mode, false to exit the 4-byte
* address mode.
*
* Return: 0 on success, -errno otherwise.
*/
-static int st_micron_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
+static int micron_st_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
{
int ret;
@@ -249,28 +308,154 @@ static int st_micron_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
return spi_nor_write_disable(nor);
}
-static void micron_st_default_init(struct spi_nor *nor)
+/**
+ * micron_st_nor_read_fsr() - Read the Flag Status Register.
+ * @nor: pointer to 'struct spi_nor'
+ * @fsr: pointer to a DMA-able buffer where the value of the
+ * Flag Status Register will be written. Should be at least 2
+ * bytes.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int micron_st_nor_read_fsr(struct spi_nor *nor, u8 *fsr)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 0),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(1, fsr, 0));
+
+ if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
+ op.addr.nbytes = nor->params->rdsr_addr_nbytes;
+ op.dummy.nbytes = nor->params->rdsr_dummy;
+ /*
+ * We don't want to read only one byte in DTR mode. So,
+ * read 2 and then discard the second byte.
+ */
+ op.data.nbytes = 2;
+ }
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDFSR, fsr,
+ 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading FSR\n", ret);
+
+ return ret;
+}
+
+/**
+ * micron_st_nor_clear_fsr() - Clear the Flag Status Register.
+ * @nor: pointer to 'struct spi_nor'.
+ */
+static void micron_st_nor_clear_fsr(struct spi_nor *nor)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 0),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_DATA);
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLFSR,
+ NULL, 0);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d clearing FSR\n", ret);
+}
+
+/**
+ * micron_st_nor_ready() - Query the Status Register as well as the Flag Status
+ * Register to see if the flash is ready for new commands. If there are any
+ * errors in the FSR, clear them.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
+ */
+static int micron_st_nor_ready(struct spi_nor *nor)
+{
+ int sr_ready, ret;
+
+ sr_ready = spi_nor_sr_ready(nor);
+ if (sr_ready < 0)
+ return sr_ready;
+
+ ret = micron_st_nor_read_fsr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) {
+ if (nor->bouncebuf[0] & FSR_E_ERR)
+ dev_err(nor->dev, "Erase operation failed.\n");
+ else
+ dev_err(nor->dev, "Program operation failed.\n");
+
+ if (nor->bouncebuf[0] & FSR_PT_ERR)
+ dev_err(nor->dev,
+ "Attempted to modify a protected sector.\n");
+
+ micron_st_nor_clear_fsr(nor);
+
+ /*
+ * WEL bit remains set to one when an erase or page program
+ * error occurs. Issue a Write Disable command to protect
+ * against inadvertent writes that can possibly corrupt the
+ * contents of the memory.
+ */
+ ret = spi_nor_write_disable(nor);
+ if (ret)
+ return ret;
+
+ return -EIO;
+ }
+
+ return sr_ready && !!(nor->bouncebuf[0] & FSR_READY);
+}
+
+static void micron_st_nor_default_init(struct spi_nor *nor)
{
nor->flags |= SNOR_F_HAS_LOCK;
nor->flags &= ~SNOR_F_HAS_16BIT_SR;
nor->params->quad_enable = NULL;
- nor->params->set_4byte_addr_mode = st_micron_set_4byte_addr_mode;
+ nor->params->set_4byte_addr_mode = micron_st_nor_set_4byte_addr_mode;
+}
+
+static void micron_st_nor_late_init(struct spi_nor *nor)
+{
+ if (nor->info->mfr_flags & USE_FSR)
+ nor->params->ready = micron_st_nor_ready;
}
-static const struct spi_nor_fixups micron_st_fixups = {
- .default_init = micron_st_default_init,
+static const struct spi_nor_fixups micron_st_nor_fixups = {
+ .default_init = micron_st_nor_default_init,
+ .late_init = micron_st_nor_late_init,
};
const struct spi_nor_manufacturer spi_nor_micron = {
.name = "micron",
- .parts = micron_parts,
- .nparts = ARRAY_SIZE(micron_parts),
- .fixups = &micron_st_fixups,
+ .parts = micron_nor_parts,
+ .nparts = ARRAY_SIZE(micron_nor_parts),
+ .fixups = &micron_st_nor_fixups,
};
const struct spi_nor_manufacturer spi_nor_st = {
.name = "st",
- .parts = st_parts,
- .nparts = ARRAY_SIZE(st_parts),
- .fixups = &micron_st_fixups,
+ .parts = st_nor_parts,
+ .nparts = ARRAY_SIZE(st_nor_parts),
+ .fixups = &micron_st_nor_fixups,
};
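
USE_FSR moves from the generic flash flags into the new per-manufacturer mfr_flags field, and micron_st_nor_late_init() installs the Flag-Status-Register-aware ready() helper only for parts that declare it. The core-side consumer of params->ready is in core.c and is not part of this excerpt; a minimal sketch of how the override would presumably be picked up on the wait-till-ready path:

/* Sketch only -- the real selection logic is in core.c, not shown here. */
static int spi_nor_ready(struct spi_nor *nor)
{
	/* Flashes may override the plain Status Register poll ... */
	if (nor->params->ready)
		return nor->params->ready(nor);	/* e.g. micron_st_nor_ready() */

	/* ... otherwise fall back to polling SR[WIP]. */
	return spi_nor_sr_ready(nor);
}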
diff --git a/drivers/mtd/spi-nor/otp.c b/drivers/mtd/spi-nor/otp.c
index 983e40b19134..fa63d8571218 100644
--- a/drivers/mtd/spi-nor/otp.c
+++ b/drivers/mtd/spi-nor/otp.c
@@ -480,7 +480,7 @@ out:
return ret;
}
-void spi_nor_otp_init(struct spi_nor *nor)
+void spi_nor_set_mtd_otp_ops(struct spi_nor *nor)
{
struct mtd_info *mtd = &nor->mtd;
diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c
index c500c2118a5d..a5211543d30d 100644
--- a/drivers/mtd/spi-nor/sfdp.c
+++ b/drivers/mtd/spi-nor/sfdp.c
@@ -1229,6 +1229,25 @@ out:
}
/**
+ * spi_nor_post_sfdp_fixups() - Updates the flash's parameters and settings
+ * after SFDP has been parsed. Called only for flashes that define JESD216 SFDP
+ * tables.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Used to tweak various flash parameters when information provided by the SFDP
+ * tables is wrong.
+ */
+static void spi_nor_post_sfdp_fixups(struct spi_nor *nor)
+{
+ if (nor->manufacturer && nor->manufacturer->fixups &&
+ nor->manufacturer->fixups->post_sfdp)
+ nor->manufacturer->fixups->post_sfdp(nor);
+
+ if (nor->info->fixups && nor->info->fixups->post_sfdp)
+ nor->info->fixups->post_sfdp(nor);
+}
+
+/**
* spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
* @nor: pointer to a 'struct spi_nor'
*
@@ -1408,6 +1427,7 @@ int spi_nor_parse_sfdp(struct spi_nor *nor)
}
}
+ spi_nor_post_sfdp_fixups(nor);
exit:
kfree(param_headers);
return err;
diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
index ee82dcd75310..f24e546e04a5 100644
--- a/drivers/mtd/spi-nor/spansion.c
+++ b/drivers/mtd/spi-nor/spansion.c
@@ -8,6 +8,10 @@
#include "core.h"
+/* flash_info mfr_flag. Used to clear sticky proprietary SR bits. */
+#define USE_CLSR BIT(0)
+
+#define SPINOR_OP_CLSR 0x30 /* Clear status register 1 */
#define SPINOR_OP_RD_ANY_REG 0x65 /* Read any register */
#define SPINOR_OP_WR_ANY_REG 0x71 /* Write any register */
#define SPINOR_REG_CYPRESS_CFR2V 0x00800003
@@ -20,7 +24,7 @@
#define SPINOR_OP_CYPRESS_RD_FAST 0xee
/**
- * spi_nor_cypress_octal_dtr_enable() - Enable octal DTR on Cypress flashes.
+ * cypress_nor_octal_dtr_enable() - Enable octal DTR on Cypress flashes.
* @nor: pointer to a 'struct spi_nor'
* @enable: whether to enable or disable Octal DTR
*
@@ -29,7 +33,7 @@
*
* Return: 0 on success, -errno otherwise.
*/
-static int spi_nor_cypress_octal_dtr_enable(struct spi_nor *nor, bool enable)
+static int cypress_nor_octal_dtr_enable(struct spi_nor *nor, bool enable)
{
struct spi_mem_op op;
u8 *buf = nor->bouncebuf;
@@ -65,10 +69,18 @@ static int spi_nor_cypress_octal_dtr_enable(struct spi_nor *nor, bool enable)
if (ret)
return ret;
- if (enable)
- *buf = SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_EN;
- else
- *buf = SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_DS;
+ if (enable) {
+ buf[0] = SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_EN;
+ } else {
+ /*
+ * The register is 1-byte wide, but 1-byte transactions are not
+ * allowed in 8D-8D-8D mode. Since there is no register at the
+ * next location, just initialize the value to 0 and let the
+ * transaction go on.
+ */
+ buf[0] = SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_DS;
+ buf[1] = 0;
+ }
op = (struct spi_mem_op)
SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WR_ANY_REG, 1),
@@ -76,7 +88,7 @@ static int spi_nor_cypress_octal_dtr_enable(struct spi_nor *nor, bool enable)
SPINOR_REG_CYPRESS_CFR5V,
1),
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(1, buf, 1));
+ SPI_MEM_OP_DATA_OUT(enable ? 1 : 2, buf, 1));
if (!enable)
spi_nor_spimem_setup_op(nor, &op, SNOR_PROTO_8_8_8_DTR);
@@ -108,7 +120,7 @@ static int spi_nor_cypress_octal_dtr_enable(struct spi_nor *nor, bool enable)
static void s28hs512t_default_init(struct spi_nor *nor)
{
- nor->params->octal_dtr_enable = spi_nor_cypress_octal_dtr_enable;
+ nor->params->octal_dtr_enable = cypress_nor_octal_dtr_enable;
nor->params->writesize = 16;
}
@@ -168,16 +180,16 @@ static int s28hs512t_post_bfpt_fixup(struct spi_nor *nor,
return 0;
}
-static struct spi_nor_fixups s28hs512t_fixups = {
+static const struct spi_nor_fixups s28hs512t_fixups = {
.default_init = s28hs512t_default_init,
.post_sfdp = s28hs512t_post_sfdp_fixup,
.post_bfpt = s28hs512t_post_bfpt_fixup,
};
static int
-s25fs_s_post_bfpt_fixups(struct spi_nor *nor,
- const struct sfdp_parameter_header *bfpt_header,
- const struct sfdp_bfpt *bfpt)
+s25fs_s_nor_post_bfpt_fixups(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt)
{
/*
* The S25FS-S chip family reports 512-byte pages in BFPT but
@@ -190,110 +202,198 @@ s25fs_s_post_bfpt_fixups(struct spi_nor *nor,
return 0;
}
-static struct spi_nor_fixups s25fs_s_fixups = {
- .post_bfpt = s25fs_s_post_bfpt_fixups,
+static const struct spi_nor_fixups s25fs_s_nor_fixups = {
+ .post_bfpt = s25fs_s_nor_post_bfpt_fixups,
};
-static const struct flash_info spansion_parts[] = {
+static const struct flash_info spansion_nor_parts[] = {
/* Spansion/Cypress -- single (large) sector size only, at least
* for the chips listed here (without boot sectors).
*/
- { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- USE_CLSR) },
- { "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- USE_CLSR) },
- { "s25fl256s0", INFO6(0x010219, 0x4d0080, 256 * 1024, 128,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- USE_CLSR) },
- { "s25fl256s1", INFO6(0x010219, 0x4d0180, 64 * 1024, 512,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- USE_CLSR) },
- { "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | USE_CLSR) },
- { "s25fs128s1", INFO6(0x012018, 0x4d0181, 64 * 1024, 256,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR)
- .fixups = &s25fs_s_fixups, },
- { "s25fs256s0", INFO6(0x010219, 0x4d0081, 256 * 1024, 128,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- USE_CLSR) },
- { "s25fs256s1", INFO6(0x010219, 0x4d0181, 64 * 1024, 512,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- USE_CLSR) },
- { "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR)
- .fixups = &s25fs_s_fixups, },
- { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
- { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
- { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- USE_CLSR) },
- { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- USE_CLSR) },
- { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
- { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
- { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
- { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
- { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
- { "s25fl004k", INFO(0xef4013, 0, 64 * 1024, 8,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) },
- { "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) },
- { "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES) },
- { "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES) },
- { "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES) },
- { "cy15x104q", INFO6(0x042cc2, 0x7f7f7f, 512 * 1024, 1,
- SPI_NOR_NO_ERASE) },
- { "s28hs512t", INFO(0x345b1a, 0, 256 * 1024, 256,
- SECT_4K | SPI_NOR_OCTAL_DTR_READ |
+ { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_CLSR)
+ },
+ { "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_CLSR)
+ },
+ { "s25fl256s0", INFO6(0x010219, 0x4d0080, 256 * 1024, 128)
+ NO_SFDP_FLAGS(SPI_NOR_SKIP_SFDP | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_CLSR)
+ },
+ { "s25fl256s1", INFO6(0x010219, 0x4d0180, 64 * 1024, 512)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_CLSR)
+ },
+ { "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256)
+ FLAGS(SPI_NOR_HAS_LOCK)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_CLSR)
+ },
+ { "s25fs128s1", INFO6(0x012018, 0x4d0181, 64 * 1024, 256)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_CLSR)
+ .fixups = &s25fs_s_nor_fixups, },
+ { "s25fs256s0", INFO6(0x010219, 0x4d0081, 256 * 1024, 128)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_CLSR)
+ },
+ { "s25fs256s1", INFO6(0x010219, 0x4d0181, 64 * 1024, 512)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_CLSR)
+ },
+ { "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_CLSR)
+ .fixups = &s25fs_s_nor_fixups, },
+ { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64) },
+ { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256) },
+ { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_CLSR)
+ },
+ { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ MFR_FLAGS(USE_CLSR)
+ },
+ { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8) },
+ { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16) },
+ { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32) },
+ { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64) },
+ { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128) },
+ { "s25fl004k", INFO(0xef4013, 0, 64 * 1024, 8)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
+ { "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
+ { "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ { "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ { "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ { "cy15x104q", INFO6(0x042cc2, 0x7f7f7f, 512 * 1024, 1)
+ FLAGS(SPI_NOR_NO_ERASE) },
+ { "s28hs512t", INFO(0x345b1a, 0, 256 * 1024, 256)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_OCTAL_DTR_READ |
SPI_NOR_OCTAL_DTR_PP)
- .fixups = &s28hs512t_fixups,
+ .fixups = &s28hs512t_fixups,
},
};
-static void spansion_post_sfdp_fixups(struct spi_nor *nor)
+/**
+ * spansion_nor_clear_sr() - Clear the Status Register.
+ * @nor: pointer to 'struct spi_nor'.
+ */
+static void spansion_nor_clear_sr(struct spi_nor *nor)
{
- if (nor->params->size <= SZ_16M)
- return;
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 0),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_DATA);
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLSR,
+ NULL, 0);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d clearing SR\n", ret);
+}
+
+/**
+ * spansion_nor_sr_ready_and_clear() - Query the Status Register to see if the
+ * flash is ready for new commands and clear it if there are any errors.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
+ */
+static int spansion_nor_sr_ready_and_clear(struct spi_nor *nor)
+{
+ int ret;
+
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) {
+ if (nor->bouncebuf[0] & SR_E_ERR)
+ dev_err(nor->dev, "Erase Error occurred\n");
+ else
+ dev_err(nor->dev, "Programming Error occurred\n");
+
+ spansion_nor_clear_sr(nor);
+
+ /*
+ * WEL bit remains set to one when an erase or page program
+ * error occurs. Issue a Write Disable command to protect
+ * against inadvertent writes that can possibly corrupt the
+ * contents of the memory.
+ */
+ ret = spi_nor_write_disable(nor);
+ if (ret)
+ return ret;
+
+ return -EIO;
+ }
+
+ return !(nor->bouncebuf[0] & SR_WIP);
+}
+
+static void spansion_nor_late_init(struct spi_nor *nor)
+{
+ if (nor->params->size > SZ_16M) {
+ nor->flags |= SNOR_F_4B_OPCODES;
+ /* No small sector erase for 4-byte command set */
+ nor->erase_opcode = SPINOR_OP_SE;
+ nor->mtd.erasesize = nor->info->sector_size;
+ }
- nor->flags |= SNOR_F_4B_OPCODES;
- /* No small sector erase for 4-byte command set */
- nor->erase_opcode = SPINOR_OP_SE;
- nor->mtd.erasesize = nor->info->sector_size;
+ if (nor->info->mfr_flags & USE_CLSR)
+ nor->params->ready = spansion_nor_sr_ready_and_clear;
}
-static const struct spi_nor_fixups spansion_fixups = {
- .post_sfdp = spansion_post_sfdp_fixups,
+static const struct spi_nor_fixups spansion_nor_fixups = {
+ .late_init = spansion_nor_late_init,
};
const struct spi_nor_manufacturer spi_nor_spansion = {
.name = "spansion",
- .parts = spansion_parts,
- .nparts = ARRAY_SIZE(spansion_parts),
- .fixups = &spansion_fixups,
+ .parts = spansion_nor_parts,
+ .nparts = ARRAY_SIZE(spansion_nor_parts),
+ .fixups = &spansion_nor_fixups,
};
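
Replacing the Spansion post_sfdp() hook with late_init() means the >16 MiB 4-byte-opcode setup now runs for every probed part rather than only for flashes that expose SFDP tables, and the same hook gates the CLSR-based ready() handler on the USE_CLSR mfr_flag. The sequencing below is a sketch of the assumed probe-time call order; the actual hook dispatch lives in core.c, outside this excerpt.

/*
 * Assumed fixup ordering, inferred from the hook names used above:
 *   default_init()          - before SFDP parsing, manufacturer defaults
 *   post_bfpt()/post_sfdp() - only when SFDP tables were parsed
 *   late_init()             - always, once all parameters are settled
 */
static void spi_nor_late_init_params(struct spi_nor *nor)
{
	if (nor->manufacturer && nor->manufacturer->fixups &&
	    nor->manufacturer->fixups->late_init)
		nor->manufacturer->fixups->late_init(nor);

	if (nor->info->fixups && nor->info->fixups->late_init)
		nor->info->fixups->late_init(nor);
}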
diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c
index 980f4c09c91d..63bcc97bf978 100644
--- a/drivers/mtd/spi-nor/sst.c
+++ b/drivers/mtd/spi-nor/sst.c
@@ -8,14 +8,17 @@
#include "core.h"
+/* SST flash_info mfr_flag. Used to specify SST byte programming. */
+#define SST_WRITE BIT(0)
+
#define SST26VF_CR_BPNV BIT(3)
-static int sst26vf_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int sst26vf_nor_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
return -EOPNOTSUPP;
}
-static int sst26vf_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int sst26vf_nor_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
int ret;
@@ -35,64 +38,87 @@ static int sst26vf_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
return spi_nor_global_block_unlock(nor);
}
-static int sst26vf_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int sst26vf_nor_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
return -EOPNOTSUPP;
}
-static const struct spi_nor_locking_ops sst26vf_locking_ops = {
- .lock = sst26vf_lock,
- .unlock = sst26vf_unlock,
- .is_locked = sst26vf_is_locked,
+static const struct spi_nor_locking_ops sst26vf_nor_locking_ops = {
+ .lock = sst26vf_nor_lock,
+ .unlock = sst26vf_nor_unlock,
+ .is_locked = sst26vf_nor_is_locked,
};
-static void sst26vf_default_init(struct spi_nor *nor)
+static void sst26vf_nor_late_init(struct spi_nor *nor)
{
- nor->params->locking_ops = &sst26vf_locking_ops;
+ nor->params->locking_ops = &sst26vf_nor_locking_ops;
}
-static const struct spi_nor_fixups sst26vf_fixups = {
- .default_init = sst26vf_default_init,
+static const struct spi_nor_fixups sst26vf_nor_fixups = {
+ .late_init = sst26vf_nor_late_init,
};
-static const struct flash_info sst_parts[] = {
+static const struct flash_info sst_nor_parts[] = {
/* SST -- large erase sizes are "overlays", "sectors" are 4K */
- { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8,
- SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
- { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16,
- SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
- { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32,
- SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
- { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64,
- SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
- { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_4BIT_BP | SPI_NOR_HAS_LOCK |
- SPI_NOR_SWP_IS_VOLATILE) },
- { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1,
- SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
- { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2,
- SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
- { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4,
- SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
- { "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4, SECT_4K | SPI_NOR_HAS_LOCK) },
- { "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_HAS_LOCK) },
- { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8,
- SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
- { "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16,
- SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
- { "sst26wf016b", INFO(0xbf2651, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ |
+ { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ MFR_FLAGS(SST_WRITE) },
+ { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ MFR_FLAGS(SST_WRITE) },
+ { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ MFR_FLAGS(SST_WRITE) },
+ { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ MFR_FLAGS(SST_WRITE) },
+ { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP |
+ SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ MFR_FLAGS(SST_WRITE) },
+ { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ MFR_FLAGS(SST_WRITE) },
+ { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ MFR_FLAGS(SST_WRITE) },
+ { "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4)
+ FLAGS(SPI_NOR_HAS_LOCK)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8)
+ FLAGS(SPI_NOR_HAS_LOCK)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ MFR_FLAGS(SST_WRITE) },
+ { "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ MFR_FLAGS(SST_WRITE) },
+ { "sst26wf016b", INFO(0xbf2651, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
- { "sst26vf016b", INFO(0xbf2641, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- .fixups = &sst26vf_fixups },
+ { "sst26vf016b", INFO(0xbf2641, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
+ { "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ .fixups = &sst26vf_nor_fixups },
};
-static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
+static int sst_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
size_t actual = 0;
@@ -177,19 +203,19 @@ out:
return ret;
}
-static void sst_post_sfdp_fixups(struct spi_nor *nor)
+static void sst_nor_late_init(struct spi_nor *nor)
{
- if (nor->info->flags & SST_WRITE)
- nor->mtd._write = sst_write;
+ if (nor->info->mfr_flags & SST_WRITE)
+ nor->mtd._write = sst_nor_write;
}
-static const struct spi_nor_fixups sst_fixups = {
- .post_sfdp = sst_post_sfdp_fixups,
+static const struct spi_nor_fixups sst_nor_fixups = {
+ .late_init = sst_nor_late_init,
};
const struct spi_nor_manufacturer spi_nor_sst = {
.name = "sst",
- .parts = sst_parts,
- .nparts = ARRAY_SIZE(sst_parts),
- .fixups = &sst_fixups,
+ .parts = sst_nor_parts,
+ .nparts = ARRAY_SIZE(sst_nor_parts),
+ .fixups = &sst_nor_fixups,
};
diff --git a/drivers/mtd/spi-nor/swp.c b/drivers/mtd/spi-nor/swp.c
index 8594bcbb7dbe..1f178313ba8f 100644
--- a/drivers/mtd/spi-nor/swp.c
+++ b/drivers/mtd/spi-nor/swp.c
@@ -414,7 +414,7 @@ void spi_nor_try_unlock_all(struct spi_nor *nor)
dev_dbg(nor->dev, "Failed to unlock the entire flash memory array\n");
}
-void spi_nor_register_locking_ops(struct spi_nor *nor)
+void spi_nor_set_mtd_locking_ops(struct spi_nor *nor)
{
struct mtd_info *mtd = &nor->mtd;
diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c
index 96573f61caf5..fe80dffc2e70 100644
--- a/drivers/mtd/spi-nor/winbond.c
+++ b/drivers/mtd/spi-nor/winbond.c
@@ -28,91 +28,117 @@ w25q256_post_bfpt_fixups(struct spi_nor *nor,
return 0;
}
-static struct spi_nor_fixups w25q256_fixups = {
+static const struct spi_nor_fixups w25q256_fixups = {
.post_bfpt = w25q256_post_bfpt_fixups,
};
-static const struct flash_info winbond_parts[] = {
+static const struct flash_info winbond_nor_parts[] = {
/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
- { "w25x05", INFO(0xef3010, 0, 64 * 1024, 1, SECT_4K) },
- { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
- { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
- { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
- { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
- { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
- { "w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
- { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
- { "w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK |
- SPI_NOR_HAS_TB) },
- { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) },
- { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) },
- { "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4, SECT_4K) },
- { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
- { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- OTP_INFO(256, 3, 0x1000, 0x1000)
- },
-
- { "w25q32jv", INFO(0xef7016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- { "w25q32jwm", INFO(0xef8016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- OTP_INFO(256, 3, 0x1000, 0x1000) },
- { "w25q64jwm", INFO(0xef8017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
- { "w25q128jwm", INFO(0xef8018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
- { "w25q256jwm", INFO(0xef8019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
- { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
- { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
- { "w25q64jvm", INFO(0xef7017, 0, 64 * 1024, 128, SECT_4K) },
- { "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
- { "w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
- { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
- { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
- { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
- { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- .fixups = &w25q256_fixups },
- { "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024,
- SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) },
- { "w25q512jvq", INFO(0xef4020, 0, 64 * 1024, 1024,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "w25x05", INFO(0xef3010, 0, 64 * 1024, 1)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024, 32)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ OTP_INFO(256, 3, 0x1000, 0x1000) },
+ { "w25q32jv", INFO(0xef7016, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25q32jwm", INFO(0xef8016, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ OTP_INFO(256, 3, 0x1000, 0x1000) },
+ { "w25q64jwm", INFO(0xef8017, 0, 64 * 1024, 128)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25q128jwm", INFO(0xef8018, 0, 64 * 1024, 256)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25q256jwm", INFO(0xef8019, 0, 64 * 1024, 512)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25q64jvm", INFO(0xef7017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ .fixups = &w25q256_fixups },
+ { "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512)
+ PARSE_SFDP },
+ { "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ |
+ SPI_NOR_DUAL_READ) },
+ { "w25q512jvq", INFO(0xef4020, 0, 64 * 1024, 1024)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
};
/**
- * winbond_set_4byte_addr_mode() - Set 4-byte address mode for Winbond flashes.
+ * winbond_nor_set_4byte_addr_mode() - Set 4-byte address mode for Winbond
+ * flashes.
* @nor: pointer to 'struct spi_nor'.
* @enable: true to enter the 4-byte address mode, false to exit the 4-byte
* address mode.
*
* Return: 0 on success, -errno otherwise.
*/
-static int winbond_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
+static int winbond_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
{
int ret;
@@ -136,7 +162,7 @@ static int winbond_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
return spi_nor_write_disable(nor);
}
-static const struct spi_nor_otp_ops winbond_otp_ops = {
+static const struct spi_nor_otp_ops winbond_nor_otp_ops = {
.read = spi_nor_otp_read_secr,
.write = spi_nor_otp_write_secr,
.erase = spi_nor_otp_erase_secr,
@@ -144,20 +170,25 @@ static const struct spi_nor_otp_ops winbond_otp_ops = {
.is_locked = spi_nor_otp_is_locked_sr2,
};
-static void winbond_default_init(struct spi_nor *nor)
+static void winbond_nor_default_init(struct spi_nor *nor)
+{
+ nor->params->set_4byte_addr_mode = winbond_nor_set_4byte_addr_mode;
+}
+
+static void winbond_nor_late_init(struct spi_nor *nor)
{
- nor->params->set_4byte_addr_mode = winbond_set_4byte_addr_mode;
if (nor->params->otp.org->n_regions)
- nor->params->otp.ops = &winbond_otp_ops;
+ nor->params->otp.ops = &winbond_nor_otp_ops;
}
-static const struct spi_nor_fixups winbond_fixups = {
- .default_init = winbond_default_init,
+static const struct spi_nor_fixups winbond_nor_fixups = {
+ .default_init = winbond_nor_default_init,
+ .late_init = winbond_nor_late_init,
};
const struct spi_nor_manufacturer spi_nor_winbond = {
.name = "winbond",
- .parts = winbond_parts,
- .nparts = ARRAY_SIZE(winbond_parts),
- .fixups = &winbond_fixups,
+ .parts = winbond_nor_parts,
+ .nparts = ARRAY_SIZE(winbond_nor_parts),
+ .fixups = &winbond_nor_fixups,
};
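
The converted table above builds each flash_info entry from split macro fragments instead of a single INFO(..., flags) argument: FLAGS() carries capabilities that SFDP cannot report (block locking, top/bottom protection), NO_SFDP_FLAGS() carries only the fallback settings used when no SFDP tables are found, OTP_INFO() describes the security-register OTP layout, and PARSE_SFDP (w25q256jvm above) marks a part whose SFDP tables are expected to be present and parsed, so no fallback flags are listed. A minimal sketch of a new-style entry, using a made-up part name rather than a real Winbond device:

	/* Illustrative only: "w25qNNexample" is not a real part. */
	{ "w25qNNexample", INFO(0xef40ff, 0, 64 * 1024, 128)	/* JEDEC ID, 128 x 64 KiB sectors */
		FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)	/* not discoverable via SFDP */
		NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |	/* used only when SFDP is absent */
			      SPI_NOR_QUAD_READ)
		OTP_INFO(256, 3, 0x1000, 0x1000) },		/* 3 x 256-byte OTP regions */
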
diff --git a/drivers/mtd/spi-nor/xilinx.c b/drivers/mtd/spi-nor/xilinx.c
index 1138bdbf4199..9459ac2609dc 100644
--- a/drivers/mtd/spi-nor/xilinx.c
+++ b/drivers/mtd/spi-nor/xilinx.c
@@ -8,7 +8,28 @@
#include "core.h"
-static const struct flash_info xilinx_parts[] = {
+#define XILINX_OP_SE 0x50 /* Sector erase */
+#define XILINX_OP_PP 0x82 /* Page program */
+#define XILINX_OP_RDSR 0xd7 /* Read status register */
+
+#define XSR_PAGESIZE BIT(0) /* Page size in Po2 or Linear */
+#define XSR_RDY BIT(7) /* Ready */
+
+#define S3AN_INFO(_jedec_id, _n_sectors, _page_size) \
+ .id = { \
+ ((_jedec_id) >> 16) & 0xff, \
+ ((_jedec_id) >> 8) & 0xff, \
+ (_jedec_id) & 0xff \
+ }, \
+ .id_len = 3, \
+ .sector_size = (8 * (_page_size)), \
+ .n_sectors = (_n_sectors), \
+ .page_size = (_page_size), \
+ .addr_width = 3, \
+ .flags = SPI_NOR_NO_FR
+
+/* Xilinx S3AN shares MFR with Atmel SPI NOR */
+static const struct flash_info xilinx_nor_parts[] = {
/* Xilinx S3AN Internal Flash */
{ "3S50AN", S3AN_INFO(0x1f2200, 64, 264) },
{ "3S200AN", S3AN_INFO(0x1f2400, 256, 264) },
@@ -26,28 +47,81 @@ static const struct flash_info xilinx_parts[] = {
* Addr can safely be unsigned int, the biggest S3AN device is smaller than
* 4 MiB.
*/
-static u32 s3an_convert_addr(struct spi_nor *nor, u32 addr)
+static u32 s3an_nor_convert_addr(struct spi_nor *nor, u32 addr)
{
+ u32 page_size = nor->params->page_size;
u32 offset, page;
- offset = addr % nor->page_size;
- page = addr / nor->page_size;
- page <<= (nor->page_size > 512) ? 10 : 9;
+ offset = addr % page_size;
+ page = addr / page_size;
+ page <<= (page_size > 512) ? 10 : 9;
return page | offset;
}
+/**
+ * xilinx_nor_read_sr() - Read the Status Register on S3AN flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr: pointer to a DMA-able buffer where the value of the
+ * Status Register will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int xilinx_nor_read_sr(struct spi_nor *nor, u8 *sr)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(XILINX_OP_RDSR, 0),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(1, sr, 0));
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = spi_nor_controller_ops_read_reg(nor, XILINX_OP_RDSR, sr,
+ 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading SR\n", ret);
+
+ return ret;
+}
+
+/**
+ * xilinx_nor_sr_ready() - Query the Status Register of the S3AN flash to see
+ * if the flash is ready for new commands.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
+ */
+static int xilinx_nor_sr_ready(struct spi_nor *nor)
+{
+ int ret;
+
+ ret = xilinx_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ return !!(nor->bouncebuf[0] & XSR_RDY);
+}
+
static int xilinx_nor_setup(struct spi_nor *nor,
const struct spi_nor_hwcaps *hwcaps)
{
+ u32 page_size;
int ret;
- ret = spi_nor_xread_sr(nor, nor->bouncebuf);
+ ret = xilinx_nor_read_sr(nor, nor->bouncebuf);
if (ret)
return ret;
- nor->erase_opcode = SPINOR_OP_XSE;
- nor->program_opcode = SPINOR_OP_XPP;
+ nor->erase_opcode = XILINX_OP_SE;
+ nor->program_opcode = XILINX_OP_PP;
nor->read_opcode = SPINOR_OP_READ;
nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
@@ -64,31 +138,33 @@ static int xilinx_nor_setup(struct spi_nor *nor,
*/
if (nor->bouncebuf[0] & XSR_PAGESIZE) {
/* Flash in Power of 2 mode */
- nor->page_size = (nor->page_size == 264) ? 256 : 512;
- nor->mtd.writebufsize = nor->page_size;
- nor->mtd.size = 8 * nor->page_size * nor->info->n_sectors;
- nor->mtd.erasesize = 8 * nor->page_size;
+ page_size = (nor->params->page_size == 264) ? 256 : 512;
+ nor->params->page_size = page_size;
+ nor->mtd.writebufsize = page_size;
+ nor->params->size = 8 * page_size * nor->info->n_sectors;
+ nor->mtd.erasesize = 8 * page_size;
} else {
/* Flash in Default addressing mode */
- nor->params->convert_addr = s3an_convert_addr;
+ nor->params->convert_addr = s3an_nor_convert_addr;
nor->mtd.erasesize = nor->info->sector_size;
}
return 0;
}
-static void xilinx_post_sfdp_fixups(struct spi_nor *nor)
+static void xilinx_nor_late_init(struct spi_nor *nor)
{
nor->params->setup = xilinx_nor_setup;
+ nor->params->ready = xilinx_nor_sr_ready;
}
-static const struct spi_nor_fixups xilinx_fixups = {
- .post_sfdp = xilinx_post_sfdp_fixups,
+static const struct spi_nor_fixups xilinx_nor_fixups = {
+ .late_init = xilinx_nor_late_init,
};
const struct spi_nor_manufacturer spi_nor_xilinx = {
.name = "xilinx",
- .parts = xilinx_parts,
- .nparts = ARRAY_SIZE(xilinx_parts),
- .fixups = &xilinx_fixups,
+ .parts = xilinx_nor_parts,
+ .nparts = ARRAY_SIZE(xilinx_nor_parts),
+ .fixups = &xilinx_nor_fixups,
};
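
As a concrete illustration of s3an_nor_convert_addr() above: in the S3AN default (non power-of-2) addressing mode, pages are 264 or 528 bytes, so a linear MTD offset is split into a page index placed at bit 9 and above (264-byte pages) or bit 10 and above (528-byte pages), with the byte offset kept in the low bits. A standalone sketch of the same arithmetic with a worked value:

	/* Same arithmetic as s3an_nor_convert_addr(), shown in isolation. */
	static u32 s3an_addr_example(u32 addr, u32 page_size)
	{
		u32 offset = addr % page_size;	/* byte within the page */
		u32 page = addr / page_size;	/* page index */

		/* 264-byte pages use a 9-bit offset field, 528-byte pages 10 bits */
		return (page << ((page_size > 512) ? 10 : 9)) | offset;
	}

	/* e.g. page_size = 264, addr = 1000: page = 3, offset = 208,
	 * result = (3 << 9) | 208 = 1744
	 */
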
diff --git a/drivers/mtd/spi-nor/xmc.c b/drivers/mtd/spi-nor/xmc.c
index 2c7773b68993..051411e86339 100644
--- a/drivers/mtd/spi-nor/xmc.c
+++ b/drivers/mtd/spi-nor/xmc.c
@@ -8,16 +8,18 @@
#include "core.h"
-static const struct flash_info xmc_parts[] = {
+static const struct flash_info xmc_nor_parts[] = {
/* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
- { "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
};
const struct spi_nor_manufacturer spi_nor_xmc = {
.name = "xmc",
- .parts = xmc_parts,
- .nparts = ARRAY_SIZE(xmc_parts),
+ .parts = xmc_nor_parts,
+ .nparts = ARRAY_SIZE(xmc_nor_parts),
};
diff --git a/drivers/mtd/tests/speedtest.c b/drivers/mtd/tests/speedtest.c
index 93e76648f676..c9ec7086bfa1 100644
--- a/drivers/mtd/tests/speedtest.c
+++ b/drivers/mtd/tests/speedtest.c
@@ -160,14 +160,13 @@ static inline void stop_timing(void)
static long calc_speed(void)
{
- uint64_t k;
- long ms;
+ uint64_t k, us;
- ms = ktime_ms_delta(finish, start);
- if (ms == 0)
+ us = ktime_us_delta(finish, start);
+ if (us == 0)
return 0;
- k = (uint64_t)goodebcnt * (mtd->erasesize / 1024) * 1000;
- do_div(k, ms);
+ k = (uint64_t)goodebcnt * (mtd->erasesize / 1024) * 1000000;
+ do_div(k, us);
return k;
}
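
The calc_speed() change above switches the elapsed-time base from milliseconds to microseconds and scales the numerator by 1000000 instead of 1000, so sub-millisecond runs no longer collapse to a zero-length interval and report a speed of 0. A minimal sketch of the resulting arithmetic, with hypothetical input values:

	/* Sketch of the updated formula; goodebcnt, erasesize and the elapsed
	 * microseconds come from the test, as in the driver.
	 */
	static u64 speed_kib_per_s(u64 goodebcnt, u32 erasesize, u64 us)
	{
		u64 k;

		if (!us)
			return 0;

		k = goodebcnt * (erasesize / 1024) * 1000000;
		do_div(k, us);			/* k is now KiB per second */
		return k;
	}

	/* e.g. 1024 blocks of 128 KiB erased in 350000 us:
	 *   131072000000 / 350000 = 374491 KiB/s (~365 MiB/s)
	 */
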
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 062e6c2c45f5..a78fdf3b30f7 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -294,6 +294,8 @@ static void ubiblock_do_work(struct work_struct *work)
int ret;
struct ubiblock_pdu *pdu = container_of(work, struct ubiblock_pdu, work);
struct request *req = blk_mq_rq_from_pdu(pdu);
+ struct req_iterator iter;
+ struct bio_vec bvec;
blk_mq_start_request(req);
@@ -305,7 +307,9 @@ static void ubiblock_do_work(struct work_struct *work)
blk_rq_map_sg(req->q, req, pdu->usgl.sg);
ret = ubiblock_read(pdu);
- rq_flush_dcache_pages(req);
+
+ rq_for_each_segment(bvec, req, iter)
+ flush_dcache_page(bvec.bv_page);
blk_mq_end_request(req, errno_to_blk_status(ret));
}
@@ -426,6 +430,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
ret = -ENODEV;
goto out_cleanup_disk;
}
+ gd->flags |= GENHD_FL_NO_PART;
gd->private_data = dev;
sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
set_capacity(gd, disk_capacity);
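
The segment loop added in ubiblock_do_work() above open-codes what the removed block-layer helper rq_flush_dcache_pages() used to do: flush the D-cache for every page the request just read into. Factored into a standalone helper purely for illustration (header placement of these declarations varies between kernel versions):

	#include <linux/blk-mq.h>	/* struct req_iterator, rq_for_each_segment() */
	#include <linux/highmem.h>	/* flush_dcache_page() */

	/* Flush the D-cache for every page backing a request. */
	static void flush_request_dcache(struct request *req)
	{
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter)
			flush_dcache_page(bvec.bv_page);
	}

The GENHD_FL_NO_PART flag set in ubiblock_create() marks the disk as not supporting partitions, so the block core skips partition scanning for ubiblock devices.
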