mediatek: add a new spi-nand driver for kernel 5.10

This patch adds a new spi-nand driver which implements the SNFI of mt7622
and mt7629.

Unlike the existing snfi driver which makes use of the spi-mem framework
and the spi-nand framework with modified ecc support, this driver is
implemented directly on the MTD framework with other components untouched.
It provides better performance and behaves exactly the same as the NAND
framework.

Signed-off-by: Weijie Gao <hackpascal@gmail.com>
This commit is contained in:
Weijie Gao 2021-05-25 21:25:14 +08:00 committed by Chuanhong Guo
parent 7119fd32d3
commit 050621aa01
11 changed files with 3992 additions and 0 deletions

View File

@ -0,0 +1,13 @@
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (C) 2020 MediaTek Inc. All rights reserved.
# Author: Weijie Gao <weijie.gao@mediatek.com>
#
config MTK_SPI_NAND
tristate "MediaTek SPI NAND flash controller driver"
depends on MTD
default n
help
This option enables access to SPI-NAND flashes through the
MTD interface of MediaTek SPI NAND Flash Controller

View File

@ -0,0 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (C) 2020 MediaTek Inc. All rights reserved.
# Author: Weijie Gao <weijie.gao@mediatek.com>
#
obj-y += mtk-snand.o mtk-snand-ecc.o mtk-snand-ids.o mtk-snand-os.o \
mtk-snand-mtd.o
ccflags-y += -DPRIVATE_MTK_SNAND_HEADER

View File

@ -0,0 +1,268 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
*
* Author: Weijie Gao <weijie.gao@mediatek.com>
*/
#ifndef _MTK_SNAND_DEF_H_
#define _MTK_SNAND_DEF_H_
#include "mtk-snand-os.h"
#ifdef PRIVATE_MTK_SNAND_HEADER
#include "mtk-snand.h"
#else
#include <mtk-snand.h>
#endif
struct mtk_snand_plat_dev;
enum snand_flash_io {
SNAND_IO_1_1_1,
SNAND_IO_1_1_2,
SNAND_IO_1_2_2,
SNAND_IO_1_1_4,
SNAND_IO_1_4_4,
__SNAND_IO_MAX
};
#define SPI_IO_1_1_1 BIT(SNAND_IO_1_1_1)
#define SPI_IO_1_1_2 BIT(SNAND_IO_1_1_2)
#define SPI_IO_1_2_2 BIT(SNAND_IO_1_2_2)
#define SPI_IO_1_1_4 BIT(SNAND_IO_1_1_4)
#define SPI_IO_1_4_4 BIT(SNAND_IO_1_4_4)
struct snand_opcode {
uint8_t opcode;
uint8_t dummy;
};
struct snand_io_cap {
uint8_t caps;
struct snand_opcode opcodes[__SNAND_IO_MAX];
};
#define SNAND_OP(_io, _opcode, _dummy) [_io] = { .opcode = (_opcode), \
.dummy = (_dummy) }
#define SNAND_IO_CAP(_name, _caps, ...) \
struct snand_io_cap _name = { .caps = (_caps), \
.opcodes = { __VA_ARGS__ } }
#define SNAND_MAX_ID_LEN 4
enum snand_id_type {
SNAND_ID_DYMMY,
SNAND_ID_ADDR = SNAND_ID_DYMMY,
SNAND_ID_DIRECT,
__SNAND_ID_TYPE_MAX
};
struct snand_id {
uint8_t type; /* enum snand_id_type */
uint8_t len;
uint8_t id[SNAND_MAX_ID_LEN];
};
#define SNAND_ID(_type, ...) \
{ .type = (_type), .id = { __VA_ARGS__ }, \
.len = sizeof((uint8_t[]) { __VA_ARGS__ }) }
struct snand_mem_org {
uint16_t pagesize;
uint16_t sparesize;
uint16_t pages_per_block;
uint16_t blocks_per_die;
uint16_t planes_per_die;
uint16_t ndies;
};
#define SNAND_MEMORG(_ps, _ss, _ppb, _bpd, _ppd, _nd) \
{ .pagesize = (_ps), .sparesize = (_ss), .pages_per_block = (_ppb), \
.blocks_per_die = (_bpd), .planes_per_die = (_ppd), .ndies = (_nd) }
typedef int (*snand_select_die_t)(struct mtk_snand *snf, uint32_t dieidx);
struct snand_flash_info {
const char *model;
struct snand_id id;
const struct snand_mem_org memorg;
const struct snand_io_cap *cap_rd;
const struct snand_io_cap *cap_pl;
snand_select_die_t select_die;
};
#define SNAND_INFO(_model, _id, _memorg, _cap_rd, _cap_pl, ...) \
{ .model = (_model), .id = _id, .memorg = _memorg, \
.cap_rd = (_cap_rd), .cap_pl = (_cap_pl), __VA_ARGS__ }
const struct snand_flash_info *snand_flash_id_lookup(enum snand_id_type type,
const uint8_t *id);
struct mtk_snand_soc_data {
uint16_t sector_size;
uint16_t max_sectors;
uint16_t fdm_size;
uint16_t fdm_ecc_size;
uint16_t fifo_size;
bool bbm_swap;
bool empty_page_check;
uint32_t mastersta_mask;
const uint8_t *spare_sizes;
uint32_t num_spare_size;
};
enum mtk_ecc_regs {
ECC_DECDONE,
};
struct mtk_ecc_soc_data {
const uint8_t *ecc_caps;
uint32_t num_ecc_cap;
const uint32_t *regs;
uint16_t mode_shift;
uint8_t errnum_bits;
uint8_t errnum_shift;
};
/* Per-controller/per-chip state shared by the NFI, SNFI and ECC code. */
struct mtk_snand {
	struct mtk_snand_plat_dev *pdev;

	/* Register bases of the NFI and ECC engines */
	void __iomem *nfi_base;
	void __iomem *ecc_base;

	/* SoC identification and matching per-SoC parameter tables */
	enum mtk_snand_soc soc;
	const struct mtk_snand_soc_data *nfi_soc;
	const struct mtk_ecc_soc_data *ecc_soc;
	bool snfi_quad_spi;
	bool quad_spi_op;

	/* Identified chip model and geometry */
	const char *model;
	uint64_t size;
	uint64_t die_size;
	uint32_t erasesize;
	uint32_t writesize;
	uint32_t oobsize;
	uint32_t num_dies;
	snand_select_die_t select_die;

	/* Opcodes/dummy cycles chosen from the chip's I/O capability table */
	uint8_t opcode_rfc;	/* read-from-cache */
	uint8_t opcode_pl;	/* program-load */
	uint8_t dummy_rfc;
	uint8_t mode_rfc;
	uint8_t mode_pl;

	/* Precomputed masks/shifts for page/block/die address arithmetic */
	uint32_t writesize_mask;
	uint32_t writesize_shift;
	uint32_t erasesize_mask;
	uint32_t erasesize_shift;
	uint64_t die_mask;
	uint32_t die_shift;

	/* Sector/ECC layout; strength and parity fields are filled in by
	 * mtk_ecc_setup() */
	uint32_t spare_per_sector;
	uint32_t raw_sector_size;
	uint32_t ecc_strength;
	uint32_t ecc_steps;
	uint32_t ecc_bytes;
	uint32_t ecc_parity_bits;

	uint8_t *page_cache;	/* Used by read/write page */
	uint8_t *buf_cache;	/* Used by block bad/markbad & auto_oob */
	int *sect_bf;		/* Used by ECC correction */
};
enum mtk_snand_log_category {
SNAND_LOG_NFI,
SNAND_LOG_SNFI,
SNAND_LOG_ECC,
SNAND_LOG_CHIP,
__SNAND_LOG_CAT_MAX
};
int mtk_ecc_setup(struct mtk_snand *snf, void *fmdaddr, uint32_t max_ecc_bytes,
uint32_t msg_size);
int mtk_snand_ecc_encoder_start(struct mtk_snand *snf);
void mtk_snand_ecc_encoder_stop(struct mtk_snand *snf);
int mtk_snand_ecc_decoder_start(struct mtk_snand *snf);
void mtk_snand_ecc_decoder_stop(struct mtk_snand *snf);
int mtk_ecc_wait_decoder_done(struct mtk_snand *snf);
int mtk_ecc_check_decode_error(struct mtk_snand *snf);
int mtk_ecc_fixup_empty_sector(struct mtk_snand *snf, uint32_t sect);
int mtk_snand_mac_io(struct mtk_snand *snf, const uint8_t *out, uint32_t outlen,
uint8_t *in, uint32_t inlen);
int mtk_snand_set_feature(struct mtk_snand *snf, uint32_t addr, uint32_t val);
int mtk_snand_log(struct mtk_snand_plat_dev *pdev,
enum mtk_snand_log_category cat, const char *fmt, ...);
#define snand_log_nfi(pdev, fmt, ...) \
mtk_snand_log(pdev, SNAND_LOG_NFI, fmt, ##__VA_ARGS__)
#define snand_log_snfi(pdev, fmt, ...) \
mtk_snand_log(pdev, SNAND_LOG_SNFI, fmt, ##__VA_ARGS__)
#define snand_log_ecc(pdev, fmt, ...) \
mtk_snand_log(pdev, SNAND_LOG_ECC, fmt, ##__VA_ARGS__)
#define snand_log_chip(pdev, fmt, ...) \
mtk_snand_log(pdev, SNAND_LOG_CHIP, fmt, ##__VA_ARGS__)
/* ffs64 */
/*
 * 64-bit analogue of ffs(): return the 1-based index of the least
 * significant set bit of @x, or 0 when @x is zero.
 */
static inline int mtk_snand_ffs64(uint64_t x)
{
	uint32_t lo = (uint32_t)(x & 0xffffffff);

	if (lo)
		return ffs(lo);
	if (x)
		return ffs((uint32_t)(x >> 32)) + 32;
	return 0;
}
/* NFI dummy commands */
#define NFI_CMD_DUMMY_READ 0x00
#define NFI_CMD_DUMMY_WRITE 0x80
/* SPI-NAND opcodes */
#define SNAND_CMD_RESET 0xff
#define SNAND_CMD_BLOCK_ERASE 0xd8
#define SNAND_CMD_READ_FROM_CACHE_QUAD 0xeb
#define SNAND_CMD_WINBOND_SELECT_DIE 0xc2
#define SNAND_CMD_READ_FROM_CACHE_DUAL 0xbb
#define SNAND_CMD_READID 0x9f
#define SNAND_CMD_READ_FROM_CACHE_X4 0x6b
#define SNAND_CMD_READ_FROM_CACHE_X2 0x3b
#define SNAND_CMD_PROGRAM_LOAD_X4 0x32
#define SNAND_CMD_SET_FEATURE 0x1f
#define SNAND_CMD_READ_TO_CACHE 0x13
#define SNAND_CMD_PROGRAM_EXECUTE 0x10
#define SNAND_CMD_GET_FEATURE 0x0f
#define SNAND_CMD_READ_FROM_CACHE 0x0b
#define SNAND_CMD_WRITE_ENABLE 0x06
#define SNAND_CMD_PROGRAM_LOAD 0x02
/* SPI-NAND feature addresses */
#define SNAND_FEATURE_MICRON_DIE_ADDR 0xd0
#define SNAND_MICRON_DIE_SEL_1 BIT(6)
#define SNAND_FEATURE_STATUS_ADDR 0xc0
#define SNAND_STATUS_OIP BIT(0)
#define SNAND_STATUS_WEL BIT(1)
#define SNAND_STATUS_ERASE_FAIL BIT(2)
#define SNAND_STATUS_PROGRAM_FAIL BIT(3)
#define SNAND_FEATURE_CONFIG_ADDR 0xb0
#define SNAND_FEATURE_QUAD_ENABLE BIT(0)
#define SNAND_FEATURE_ECC_EN BIT(4)
#define SNAND_FEATURE_PROTECT_ADDR 0xa0
#endif /* _MTK_SNAND_DEF_H_ */

View File

@ -0,0 +1,379 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
*
* Author: Weijie Gao <weijie.gao@mediatek.com>
*/
#include "mtk-snand-def.h"
/* ECC registers */
#define ECC_ENCCON 0x000
#define ENC_EN BIT(0)
#define ECC_ENCCNFG 0x004
#define ENC_MS_S 16
#define ENC_BURST_EN BIT(8)
#define ENC_TNUM_S 0
#define ECC_ENCIDLE 0x00c
#define ENC_IDLE BIT(0)
#define ECC_DECCON 0x100
#define DEC_EN BIT(0)
#define ECC_DECCNFG 0x104
#define DEC_EMPTY_EN BIT(31)
#define DEC_CS_S 16
#define DEC_CON_S 12
#define DEC_CON_CORRECT 3
#define DEC_BURST_EN BIT(8)
#define DEC_TNUM_S 0
#define ECC_DECIDLE 0x10c
#define DEC_IDLE BIT(0)
#define ECC_DECENUM0 0x114
#define ECC_DECENUM(n) (ECC_DECENUM0 + (n) * 4)
/* ECC_ENCIDLE & ECC_DECIDLE */
#define ECC_IDLE BIT(0)
/* ENC_MODE & DEC_MODE */
#define ECC_MODE_NFI 1
#define ECC_TIMEOUT 500000
static const uint8_t mt7622_ecc_caps[] = { 4, 6, 8, 10, 12 };
static const uint32_t mt7622_ecc_regs[] = {
[ECC_DECDONE] = 0x11c,
};
static const struct mtk_ecc_soc_data mtk_ecc_socs[__SNAND_SOC_MAX] = {
[SNAND_SOC_MT7622] = {
.ecc_caps = mt7622_ecc_caps,
.num_ecc_cap = ARRAY_SIZE(mt7622_ecc_caps),
.regs = mt7622_ecc_regs,
.mode_shift = 4,
.errnum_bits = 5,
.errnum_shift = 5,
},
[SNAND_SOC_MT7629] = {
.ecc_caps = mt7622_ecc_caps,
.num_ecc_cap = ARRAY_SIZE(mt7622_ecc_caps),
.regs = mt7622_ecc_regs,
.mode_shift = 4,
.errnum_bits = 5,
.errnum_shift = 5,
},
};
/* Read a 32-bit ECC engine register at offset @reg. */
static inline uint32_t ecc_read32(struct mtk_snand *snf, uint32_t reg)
{
	return readl(snf->ecc_base + reg);
}
/* Write a 32-bit value to the ECC engine register at offset @reg. */
static inline void ecc_write32(struct mtk_snand *snf, uint32_t reg,
			       uint32_t val)
{
	writel(val, snf->ecc_base + reg);
}
/* Write a 16-bit value to the ECC engine register at offset @reg. */
static inline void ecc_write16(struct mtk_snand *snf, uint32_t reg,
			       uint16_t val)
{
	writew(val, snf->ecc_base + reg);
}
/*
 * Poll ECC register @reg until any bit in @bits is set, or until
 * ECC_TIMEOUT elapses. Returns 0 on success, a timeout error otherwise.
 */
static int mtk_ecc_poll(struct mtk_snand *snf, uint32_t reg, uint32_t bits)
{
	uint32_t val;

	return read16_poll_timeout(snf->ecc_base + reg, val, (val & bits), 0,
				   ECC_TIMEOUT);
}
/*
 * Wait for the idle bit in @reg (ECC_ENCIDLE or ECC_DECIDLE).
 * Returns 0 when the engine is idle, -EBUSY (logged) on timeout.
 */
static int mtk_ecc_wait_idle(struct mtk_snand *snf, uint32_t reg)
{
	if (!mtk_ecc_poll(snf, reg, ECC_IDLE))
		return 0;

	snand_log_ecc(snf->pdev, "ECC engine is busy\n");
	return -EBUSY;
}
/*
 * mtk_ecc_setup() - configure the ECC encoder/decoder for the current
 * page layout.
 * @snf: controller instance; snf->soc selects the per-SoC ECC parameters
 * @fmdaddr: unused here, kept for interface compatibility
 * @max_ecc_bytes: spare bytes per sector available for ECC parity
 * @msg_size: number of bytes protected per ECC step
 *
 * Picks the largest supported correction strength whose parity fits in
 * @max_ecc_bytes, then programs ECC_ENCCNFG and ECC_DECCNFG.
 *
 * Returns 0 on success, -ENOTSUPP when no strength fits, or -EBUSY when
 * the engine does not become idle.
 */
int mtk_ecc_setup(struct mtk_snand *snf, void *fmdaddr, uint32_t max_ecc_bytes,
		  uint32_t msg_size)
{
	uint32_t val, ecc_msg_bits, ecc_strength;
	int i, ret;

	snf->ecc_soc = &mtk_ecc_socs[snf->soc];

	/* Parity bits needed per codeword to protect msg_size bytes */
	snf->ecc_parity_bits = fls(1 + 8 * msg_size);
	ecc_strength = max_ecc_bytes * 8 / snf->ecc_parity_bits;

	/*
	 * Find the largest supported strength not exceeding what the spare
	 * area can hold. The index must be signed: with an unsigned index
	 * 'i >= 0' is always true, so the loop underflows past element 0
	 * (out-of-bounds read) and the 'i < 0' error check below can never
	 * fire.
	 */
	for (i = snf->ecc_soc->num_ecc_cap - 1; i >= 0; i--) {
		if (snf->ecc_soc->ecc_caps[i] <= ecc_strength)
			break;
	}

	if (unlikely(i < 0)) {
		snand_log_ecc(snf->pdev, "Page size %u+%u is not supported\n",
			      snf->writesize, snf->oobsize);
		return -ENOTSUPP;
	}

	snf->ecc_strength = snf->ecc_soc->ecc_caps[i];
	snf->ecc_bytes = DIV_ROUND_UP(snf->ecc_strength * snf->ecc_parity_bits,
				      8);

	/* Encoder config */
	ecc_write16(snf, ECC_ENCCON, 0);
	ret = mtk_ecc_wait_idle(snf, ECC_ENCIDLE);
	if (ret)
		return ret;

	ecc_msg_bits = msg_size * 8;
	val = (ecc_msg_bits << ENC_MS_S) |
	      (ECC_MODE_NFI << snf->ecc_soc->mode_shift) | i;
	ecc_write32(snf, ECC_ENCCNFG, val);

	/* Decoder config */
	ecc_write16(snf, ECC_DECCON, 0);
	ret = mtk_ecc_wait_idle(snf, ECC_DECIDLE);
	if (ret)
		return ret;

	/* The decoder consumes data plus parity bits */
	ecc_msg_bits += snf->ecc_strength * snf->ecc_parity_bits;
	val = DEC_EMPTY_EN | (ecc_msg_bits << DEC_CS_S) |
	      (DEC_CON_CORRECT << DEC_CON_S) |
	      (ECC_MODE_NFI << snf->ecc_soc->mode_shift) | i;
	ecc_write32(snf, ECC_DECCNFG, val);

	return 0;
}
/*
 * Enable the ECC encoder. If the engine is stuck busy, force it off and
 * wait for idle once more before re-enabling. Always returns 0.
 */
int mtk_snand_ecc_encoder_start(struct mtk_snand *snf)
{
	int ret;

	ret = mtk_ecc_wait_idle(snf, ECC_ENCIDLE);
	if (ret) {
		/* Engine stuck: disable it and retry the idle wait */
		ecc_write16(snf, ECC_ENCCON, 0);
		mtk_ecc_wait_idle(snf, ECC_ENCIDLE);
	}

	ecc_write16(snf, ECC_ENCCON, ENC_EN);

	return 0;
}
/* Wait for the encoder to go idle, then disable it. */
void mtk_snand_ecc_encoder_stop(struct mtk_snand *snf)
{
	mtk_ecc_wait_idle(snf, ECC_ENCIDLE);
	ecc_write16(snf, ECC_ENCCON, 0);
}
/*
 * Enable the ECC decoder. If the engine is stuck busy, force it off and
 * wait for idle once more before re-enabling. Always returns 0.
 */
int mtk_snand_ecc_decoder_start(struct mtk_snand *snf)
{
	int ret;

	ret = mtk_ecc_wait_idle(snf, ECC_DECIDLE);
	if (ret) {
		/* Engine stuck: disable it and retry the idle wait */
		ecc_write16(snf, ECC_DECCON, 0);
		mtk_ecc_wait_idle(snf, ECC_DECIDLE);
	}

	ecc_write16(snf, ECC_DECCON, DEC_EN);

	return 0;
}
/* Wait for the decoder to go idle, then disable it. */
void mtk_snand_ecc_decoder_stop(struct mtk_snand *snf)
{
	mtk_ecc_wait_idle(snf, ECC_DECIDLE);
	ecc_write16(snf, ECC_DECCON, 0);
}
/*
 * Wait until the decoder reports DECDONE for every ECC step of the
 * current page (one bit per step). Returns 0 on success or the timeout
 * error from the poll (logged).
 */
int mtk_ecc_wait_decoder_done(struct mtk_snand *snf)
{
	uint16_t val, step_mask = (1 << snf->ecc_steps) - 1;
	uint32_t reg = snf->ecc_soc->regs[ECC_DECDONE];
	int ret;

	ret = read16_poll_timeout(snf->ecc_base + reg, val,
				  (val & step_mask) == step_mask, 0,
				  ECC_TIMEOUT);
	if (ret)
		snand_log_ecc(snf->pdev, "ECC decoder is busy\n");

	return ret;
}
/*
 * Read per-sector error counts from the DECENUM registers and record
 * them in snf->sect_bf[]. A count above the configured strength marks
 * the sector uncorrectable (-1).
 *
 * Returns 0 if every sector was correctable, -EBADMSG otherwise.
 */
int mtk_ecc_check_decode_error(struct mtk_snand *snf)
{
	uint32_t shift = snf->ecc_soc->errnum_shift;
	uint32_t mask = (1 << snf->ecc_soc->errnum_bits) - 1;
	uint32_t sect, reg_val, errs;
	int ret = 0;

	for (sect = 0; sect < snf->ecc_steps; sect++) {
		/* Four sector counts are packed per DECENUM register */
		reg_val = ecc_read32(snf, ECC_DECENUM(sect / 4));
		errs = (reg_val >> ((sect % 4) * shift)) & mask;

		if (errs > snf->ecc_strength) {
			snf->sect_bf[sect] = -1;
			ret = -EBADMSG;
		} else {
			snf->sect_bf[sect] = errs;
		}
	}

	return ret;
}
/*
 * Count zero bits ("bitflips" from the all-0xff erased state) in @buf,
 * accumulating on top of the @bitflips already counted. The scan runs
 * byte-wise to a 32-bit boundary, then word-wise, then byte-wise for
 * the tail.
 *
 * Returns the running total, or -EBADMSG as soon as it exceeds the
 * correctable ECC strength.
 */
static int mtk_ecc_check_buf_bitflips(struct mtk_snand *snf, const void *buf,
				      size_t len, uint32_t bitflips)
{
	const uint8_t *p8 = buf;
	const uint32_t *p32;
	uint32_t word;

	/* Unaligned head, one byte at a time */
	for (; len && ((uintptr_t)p8 % sizeof(uint32_t)); p8++, len--) {
		bitflips += BITS_PER_BYTE - hweight8(*p8);
		if (bitflips > snf->ecc_strength)
			return -EBADMSG;
	}

	/* Aligned middle, one 32-bit word at a time */
	for (p32 = (const uint32_t *)p8; len >= sizeof(uint32_t);
	     p32++, len -= sizeof(uint32_t)) {
		word = *p32;
		if (word != ~0)
			bitflips += sizeof(uint32_t) * BITS_PER_BYTE -
				    hweight32(word);
		if (bitflips > snf->ecc_strength)
			return -EBADMSG;
	}

	/* Remaining tail bytes */
	for (p8 = (const uint8_t *)p32; len; p8++, len--) {
		bitflips += BITS_PER_BYTE - hweight8(*p8);
		if (bitflips > snf->ecc_strength)
			return -EBADMSG;
	}

	return bitflips;
}
/*
 * Like mtk_ecc_check_buf_bitflips(), but for a region measured in bits:
 * whole bytes are checked first, then the low bits of the trailing
 * partial byte. Returns the running bitflip total or -EBADMSG when it
 * exceeds the correctable ECC strength.
 */
static int mtk_ecc_check_parity_bitflips(struct mtk_snand *snf, const void *buf,
					 uint32_t bits, uint32_t bitflips)
{
	uint32_t nbytes = bits >> 3;
	uint32_t rem = bits & 7;
	uint32_t bit;
	uint8_t last;
	int total;

	total = mtk_ecc_check_buf_bitflips(snf, buf, nbytes, bitflips);
	if (!rem || total < 0)
		return total;

	/* We want a precise count of the low 'rem' bits of the last byte */
	bitflips = total;
	last = ((const uint8_t *)buf)[nbytes];
	for (bit = 0; bit < rem; bit++) {
		if (!(last & BIT(bit)))
			bitflips++;
	}

	return bitflips > snf->ecc_strength ? -EBADMSG : (int)bitflips;
}
/* Set @bits bits starting at @buf back to 1 (the erased state). */
static void mtk_ecc_reset_parity(void *buf, uint32_t bits)
{
	uint32_t nbytes = bits >> 3;
	uint32_t rem = bits & 7;

	memset(buf, 0xff, nbytes);

	/* Only the low 'rem' ECC-protected bits of the last byte are set */
	if (rem)
		((uint8_t *)buf)[nbytes] |= GENMASK(rem - 1, 0);
}
/*
 * mtk_ecc_fixup_empty_sector() - treat an "uncorrectable" sector as
 * erased if its raw content is close enough to all-ones.
 * @snf: controller instance; page_cache holds the freshly read raw page
 * @sect: sector index within the page
 *
 * Counts zero bits across the sector's DATA, ECC-protected FDM bytes and
 * ECC parity bits. If the total stays within the correctable strength,
 * the sector is rewritten as fully erased (0xff) and the bitflip count
 * is returned; returns 0 if the sector is already clean, or -EBADMSG if
 * it has too many zero bits to be an empty sector.
 */
int mtk_ecc_fixup_empty_sector(struct mtk_snand *snf, uint32_t sect)
{
	uint32_t ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
	uint8_t *oob = snf->page_cache + snf->writesize;
	uint8_t *data_ptr, *fdm_ptr, *ecc_ptr;
	int bitflips = 0, ecc_bits, parity_bits;

	/* Parity bits per codeword for this sector size */
	parity_bits = fls(snf->nfi_soc->sector_size * 8);
	ecc_bits = snf->ecc_strength * parity_bits;

	/* Cache layout: [data sectors][FDM of all sectors][ECC parity] */
	data_ptr = snf->page_cache + sect * snf->nfi_soc->sector_size;
	fdm_ptr = oob + sect * snf->nfi_soc->fdm_size;
	ecc_ptr = oob + snf->ecc_steps * snf->nfi_soc->fdm_size +
		  sect * ecc_bytes;

	/*
	 * Check whether DATA + FDM + ECC of a sector contains correctable
	 * bitflips
	 */
	bitflips = mtk_ecc_check_buf_bitflips(snf, data_ptr,
					      snf->nfi_soc->sector_size,
					      bitflips);
	if (bitflips < 0)
		return -EBADMSG;

	bitflips = mtk_ecc_check_buf_bitflips(snf, fdm_ptr,
					      snf->nfi_soc->fdm_ecc_size,
					      bitflips);
	if (bitflips < 0)
		return -EBADMSG;

	bitflips = mtk_ecc_check_parity_bitflips(snf, ecc_ptr, ecc_bits,
						 bitflips);
	if (bitflips < 0)
		return -EBADMSG;

	if (!bitflips)
		return 0;

	/* Reset the data of this sector to 0xff */
	memset(data_ptr, 0xff, snf->nfi_soc->sector_size);
	memset(fdm_ptr, 0xff, snf->nfi_soc->fdm_ecc_size);
	mtk_ecc_reset_parity(ecc_ptr, ecc_bits);

	return bitflips;
}

View File

@ -0,0 +1,511 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
*
* Author: Weijie Gao <weijie.gao@mediatek.com>
*/
#include "mtk-snand-def.h"
static int mtk_snand_winbond_select_die(struct mtk_snand *snf, uint32_t dieidx);
static int mtk_snand_micron_select_die(struct mtk_snand *snf, uint32_t dieidx);
#define SNAND_MEMORG_512M_2K_64 SNAND_MEMORG(2048, 64, 64, 512, 1, 1)
#define SNAND_MEMORG_1G_2K_64 SNAND_MEMORG(2048, 64, 64, 1024, 1, 1)
#define SNAND_MEMORG_2G_2K_64 SNAND_MEMORG(2048, 64, 64, 2048, 1, 1)
#define SNAND_MEMORG_2G_2K_120 SNAND_MEMORG(2048, 120, 64, 2048, 1, 1)
#define SNAND_MEMORG_4G_2K_64 SNAND_MEMORG(2048, 64, 64, 4096, 1, 1)
#define SNAND_MEMORG_1G_2K_120 SNAND_MEMORG(2048, 120, 64, 1024, 1, 1)
#define SNAND_MEMORG_1G_2K_128 SNAND_MEMORG(2048, 128, 64, 1024, 1, 1)
#define SNAND_MEMORG_2G_2K_128 SNAND_MEMORG(2048, 128, 64, 2048, 1, 1)
#define SNAND_MEMORG_4G_2K_128 SNAND_MEMORG(2048, 128, 64, 4096, 1, 1)
#define SNAND_MEMORG_4G_4K_240 SNAND_MEMORG(4096, 240, 64, 2048, 1, 1)
#define SNAND_MEMORG_4G_4K_256 SNAND_MEMORG(4096, 256, 64, 2048, 1, 1)
#define SNAND_MEMORG_8G_4K_256 SNAND_MEMORG(4096, 256, 64, 4096, 1, 1)
#define SNAND_MEMORG_2G_2K_64_2P SNAND_MEMORG(2048, 64, 64, 2048, 2, 1)
#define SNAND_MEMORG_2G_2K_64_2D SNAND_MEMORG(2048, 64, 64, 1024, 1, 2)
#define SNAND_MEMORG_2G_2K_128_2P SNAND_MEMORG(2048, 128, 64, 2048, 2, 1)
#define SNAND_MEMORG_4G_2K_64_2P SNAND_MEMORG(2048, 64, 64, 4096, 2, 1)
#define SNAND_MEMORG_4G_2K_128_2P_2D SNAND_MEMORG(2048, 128, 64, 2048, 2, 2)
#define SNAND_MEMORG_8G_4K_256_2D SNAND_MEMORG(4096, 256, 64, 2048, 1, 2)
static const SNAND_IO_CAP(snand_cap_read_from_cache_quad,
SPI_IO_1_1_1 | SPI_IO_1_1_2 | SPI_IO_1_2_2 | SPI_IO_1_1_4 |
SPI_IO_1_4_4,
SNAND_OP(SNAND_IO_1_1_1, SNAND_CMD_READ_FROM_CACHE, 8),
SNAND_OP(SNAND_IO_1_1_2, SNAND_CMD_READ_FROM_CACHE_X2, 8),
SNAND_OP(SNAND_IO_1_2_2, SNAND_CMD_READ_FROM_CACHE_DUAL, 4),
SNAND_OP(SNAND_IO_1_1_4, SNAND_CMD_READ_FROM_CACHE_X4, 8),
SNAND_OP(SNAND_IO_1_4_4, SNAND_CMD_READ_FROM_CACHE_QUAD, 4));
static const SNAND_IO_CAP(snand_cap_read_from_cache_quad_q2d,
SPI_IO_1_1_1 | SPI_IO_1_1_2 | SPI_IO_1_2_2 | SPI_IO_1_1_4 |
SPI_IO_1_4_4,
SNAND_OP(SNAND_IO_1_1_1, SNAND_CMD_READ_FROM_CACHE, 8),
SNAND_OP(SNAND_IO_1_1_2, SNAND_CMD_READ_FROM_CACHE_X2, 8),
SNAND_OP(SNAND_IO_1_2_2, SNAND_CMD_READ_FROM_CACHE_DUAL, 4),
SNAND_OP(SNAND_IO_1_1_4, SNAND_CMD_READ_FROM_CACHE_X4, 8),
SNAND_OP(SNAND_IO_1_4_4, SNAND_CMD_READ_FROM_CACHE_QUAD, 2));
static const SNAND_IO_CAP(snand_cap_read_from_cache_quad_a8d,
SPI_IO_1_1_1 | SPI_IO_1_1_2 | SPI_IO_1_2_2 | SPI_IO_1_1_4 |
SPI_IO_1_4_4,
SNAND_OP(SNAND_IO_1_1_1, SNAND_CMD_READ_FROM_CACHE, 8),
SNAND_OP(SNAND_IO_1_1_2, SNAND_CMD_READ_FROM_CACHE_X2, 8),
SNAND_OP(SNAND_IO_1_2_2, SNAND_CMD_READ_FROM_CACHE_DUAL, 8),
SNAND_OP(SNAND_IO_1_1_4, SNAND_CMD_READ_FROM_CACHE_X4, 8),
SNAND_OP(SNAND_IO_1_4_4, SNAND_CMD_READ_FROM_CACHE_QUAD, 8));
static const SNAND_IO_CAP(snand_cap_read_from_cache_x4,
SPI_IO_1_1_1 | SPI_IO_1_1_2 | SPI_IO_1_1_4,
SNAND_OP(SNAND_IO_1_1_1, SNAND_CMD_READ_FROM_CACHE, 8),
SNAND_OP(SNAND_IO_1_1_2, SNAND_CMD_READ_FROM_CACHE_X2, 8),
SNAND_OP(SNAND_IO_1_1_4, SNAND_CMD_READ_FROM_CACHE_X4, 8));
static const SNAND_IO_CAP(snand_cap_read_from_cache_x4_only,
SPI_IO_1_1_1 | SPI_IO_1_1_4,
SNAND_OP(SNAND_IO_1_1_1, SNAND_CMD_READ_FROM_CACHE, 8),
SNAND_OP(SNAND_IO_1_1_4, SNAND_CMD_READ_FROM_CACHE_X4, 8));
static const SNAND_IO_CAP(snand_cap_program_load_x1,
SPI_IO_1_1_1,
SNAND_OP(SNAND_IO_1_1_1, SNAND_CMD_PROGRAM_LOAD, 0));
static const SNAND_IO_CAP(snand_cap_program_load_x4,
SPI_IO_1_1_1 | SPI_IO_1_1_4,
SNAND_OP(SNAND_IO_1_1_1, SNAND_CMD_PROGRAM_LOAD, 0),
SNAND_OP(SNAND_IO_1_1_4, SNAND_CMD_PROGRAM_LOAD_X4, 0));
static const struct snand_flash_info snand_flash_ids[] = {
SNAND_INFO("W25N512GV", SNAND_ID(SNAND_ID_DYMMY, 0xef, 0xaa, 0x20),
SNAND_MEMORG_512M_2K_64,
&snand_cap_read_from_cache_quad,
&snand_cap_program_load_x4),
SNAND_INFO("W25N01GV", SNAND_ID(SNAND_ID_DYMMY, 0xef, 0xaa, 0x21),
SNAND_MEMORG_1G_2K_64,
&snand_cap_read_from_cache_quad,
&snand_cap_program_load_x4),
SNAND_INFO("W25M02GV", SNAND_ID(SNAND_ID_DYMMY, 0xef, 0xab, 0x21),
SNAND_MEMORG_2G_2K_64_2D,
&snand_cap_read_from_cache_quad,
&snand_cap_program_load_x4,
mtk_snand_winbond_select_die),
SNAND_INFO("W25N02KV", SNAND_ID(SNAND_ID_DYMMY, 0xef, 0xaa, 0x22),
SNAND_MEMORG_2G_2K_128,
&snand_cap_read_from_cache_quad,
&snand_cap_program_load_x4),
SNAND_INFO("GD5F1GQ4UAWxx", SNAND_ID(SNAND_ID_ADDR, 0xc8, 0x10),
SNAND_MEMORG_1G_2K_64,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("GD5F1GQ4UExIG", SNAND_ID(SNAND_ID_ADDR, 0xc8, 0xd1),
SNAND_MEMORG_1G_2K_128,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("GD5F1GQ4UExxH", SNAND_ID(SNAND_ID_ADDR, 0xc8, 0xd9),
SNAND_MEMORG_1G_2K_64,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("GD5F1GQ4xAYIG", SNAND_ID(SNAND_ID_ADDR, 0xc8, 0xf1),
SNAND_MEMORG_1G_2K_64,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("GD5F2GQ4UExIG", SNAND_ID(SNAND_ID_ADDR, 0xc8, 0xd2),
SNAND_MEMORG_2G_2K_128,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("GD5F2GQ5UExxH", SNAND_ID(SNAND_ID_ADDR, 0xc8, 0x32),
SNAND_MEMORG_2G_2K_64,
&snand_cap_read_from_cache_quad_a8d,
&snand_cap_program_load_x4),
SNAND_INFO("GD5F2GQ4xAYIG", SNAND_ID(SNAND_ID_ADDR, 0xc8, 0xf2),
SNAND_MEMORG_2G_2K_64,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("GD5F4GQ4UBxIG", SNAND_ID(SNAND_ID_ADDR, 0xc8, 0xd4),
SNAND_MEMORG_4G_4K_256,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("GD5F4GQ4xAYIG", SNAND_ID(SNAND_ID_ADDR, 0xc8, 0xf4),
SNAND_MEMORG_4G_2K_64,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("GD5F2GQ5UExxG", SNAND_ID(SNAND_ID_DYMMY, 0xc8, 0x52),
SNAND_MEMORG_2G_2K_128,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("GD5F4GQ4UCxIG", SNAND_ID(SNAND_ID_DYMMY, 0xc8, 0xb4),
SNAND_MEMORG_4G_4K_256,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("MX35LF1GE4AB", SNAND_ID(SNAND_ID_DYMMY, 0xc2, 0x12),
SNAND_MEMORG_1G_2K_64,
&snand_cap_read_from_cache_x4,
&snand_cap_program_load_x4),
SNAND_INFO("MX35LF1G24AD", SNAND_ID(SNAND_ID_DYMMY, 0xc2, 0x14),
SNAND_MEMORG_1G_2K_128,
&snand_cap_read_from_cache_quad,
&snand_cap_program_load_x4),
SNAND_INFO("MX31LF1GE4BC", SNAND_ID(SNAND_ID_DYMMY, 0xc2, 0x1e),
SNAND_MEMORG_1G_2K_64,
&snand_cap_read_from_cache_x4,
&snand_cap_program_load_x4),
SNAND_INFO("MX35LF2GE4AB", SNAND_ID(SNAND_ID_DYMMY, 0xc2, 0x22),
SNAND_MEMORG_2G_2K_64,
&snand_cap_read_from_cache_x4,
&snand_cap_program_load_x4),
SNAND_INFO("MX35LF2G24AD", SNAND_ID(SNAND_ID_DYMMY, 0xc2, 0x24),
SNAND_MEMORG_2G_2K_128,
&snand_cap_read_from_cache_quad,
&snand_cap_program_load_x4),
SNAND_INFO("MX35LF2GE4AD", SNAND_ID(SNAND_ID_DYMMY, 0xc2, 0x26),
SNAND_MEMORG_2G_2K_128,
&snand_cap_read_from_cache_x4,
&snand_cap_program_load_x4),
SNAND_INFO("MX35LF2G14AC", SNAND_ID(SNAND_ID_DYMMY, 0xc2, 0x20),
SNAND_MEMORG_2G_2K_64,
&snand_cap_read_from_cache_x4,
&snand_cap_program_load_x4),
SNAND_INFO("MX35LF4G24AD", SNAND_ID(SNAND_ID_DYMMY, 0xc2, 0x35),
SNAND_MEMORG_4G_4K_256,
&snand_cap_read_from_cache_quad,
&snand_cap_program_load_x4),
SNAND_INFO("MX35LF4GE4AD", SNAND_ID(SNAND_ID_DYMMY, 0xc2, 0x37),
SNAND_MEMORG_4G_4K_256,
&snand_cap_read_from_cache_x4,
&snand_cap_program_load_x4),
SNAND_INFO("MT29F1G01AAADD", SNAND_ID(SNAND_ID_DYMMY, 0x2c, 0x12),
SNAND_MEMORG_1G_2K_64,
&snand_cap_read_from_cache_x4,
&snand_cap_program_load_x1),
SNAND_INFO("MT29F1G01ABAFD", SNAND_ID(SNAND_ID_DYMMY, 0x2c, 0x14),
SNAND_MEMORG_1G_2K_128,
&snand_cap_read_from_cache_quad,
&snand_cap_program_load_x4),
SNAND_INFO("MT29F2G01AAAED", SNAND_ID(SNAND_ID_DYMMY, 0x2c, 0x9f),
SNAND_MEMORG_2G_2K_64_2P,
&snand_cap_read_from_cache_x4,
&snand_cap_program_load_x1),
SNAND_INFO("MT29F2G01ABAGD", SNAND_ID(SNAND_ID_DYMMY, 0x2c, 0x24),
SNAND_MEMORG_2G_2K_128_2P,
&snand_cap_read_from_cache_quad,
&snand_cap_program_load_x4),
SNAND_INFO("MT29F4G01AAADD", SNAND_ID(SNAND_ID_DYMMY, 0x2c, 0x32),
SNAND_MEMORG_4G_2K_64_2P,
&snand_cap_read_from_cache_x4,
&snand_cap_program_load_x1),
SNAND_INFO("MT29F4G01ABAFD", SNAND_ID(SNAND_ID_DYMMY, 0x2c, 0x34),
SNAND_MEMORG_4G_4K_256,
&snand_cap_read_from_cache_quad,
&snand_cap_program_load_x4),
SNAND_INFO("MT29F4G01ADAGD", SNAND_ID(SNAND_ID_DYMMY, 0x2c, 0x36),
SNAND_MEMORG_4G_2K_128_2P_2D,
&snand_cap_read_from_cache_quad,
&snand_cap_program_load_x4,
mtk_snand_micron_select_die),
SNAND_INFO("MT29F8G01ADAFD", SNAND_ID(SNAND_ID_DYMMY, 0x2c, 0x46),
SNAND_MEMORG_8G_4K_256_2D,
&snand_cap_read_from_cache_quad,
&snand_cap_program_load_x4,
mtk_snand_micron_select_die),
SNAND_INFO("TC58CVG0S3HRAIG", SNAND_ID(SNAND_ID_DYMMY, 0x98, 0xc2),
SNAND_MEMORG_1G_2K_128,
&snand_cap_read_from_cache_x4,
&snand_cap_program_load_x1),
SNAND_INFO("TC58CVG1S3HRAIG", SNAND_ID(SNAND_ID_DYMMY, 0x98, 0xcb),
SNAND_MEMORG_2G_2K_128,
&snand_cap_read_from_cache_x4,
&snand_cap_program_load_x1),
SNAND_INFO("TC58CVG2S0HRAIG", SNAND_ID(SNAND_ID_DYMMY, 0x98, 0xcd),
SNAND_MEMORG_4G_4K_256,
&snand_cap_read_from_cache_x4,
&snand_cap_program_load_x1),
SNAND_INFO("TC58CVG0S3HRAIJ", SNAND_ID(SNAND_ID_DYMMY, 0x98, 0xe2),
SNAND_MEMORG_1G_2K_128,
&snand_cap_read_from_cache_x4,
&snand_cap_program_load_x4),
SNAND_INFO("TC58CVG1S3HRAIJ", SNAND_ID(SNAND_ID_DYMMY, 0x98, 0xeb),
SNAND_MEMORG_2G_2K_128,
&snand_cap_read_from_cache_x4,
&snand_cap_program_load_x4),
SNAND_INFO("TC58CVG2S0HRAIJ", SNAND_ID(SNAND_ID_DYMMY, 0x98, 0xed),
SNAND_MEMORG_4G_4K_256,
&snand_cap_read_from_cache_x4,
&snand_cap_program_load_x4),
SNAND_INFO("TH58CVG3S0HRAIJ", SNAND_ID(SNAND_ID_DYMMY, 0x98, 0xe4),
SNAND_MEMORG_8G_4K_256,
&snand_cap_read_from_cache_x4,
&snand_cap_program_load_x4),
SNAND_INFO("F50L512M41A", SNAND_ID(SNAND_ID_DYMMY, 0xc8, 0x20),
SNAND_MEMORG_512M_2K_64,
&snand_cap_read_from_cache_x4,
&snand_cap_program_load_x4),
SNAND_INFO("F50L1G41A", SNAND_ID(SNAND_ID_DYMMY, 0xc8, 0x21),
SNAND_MEMORG_1G_2K_64,
&snand_cap_read_from_cache_x4,
&snand_cap_program_load_x4),
SNAND_INFO("F50L1G41LB", SNAND_ID(SNAND_ID_DYMMY, 0xc8, 0x01),
SNAND_MEMORG_1G_2K_64,
&snand_cap_read_from_cache_quad,
&snand_cap_program_load_x4),
SNAND_INFO("F50L2G41LB", SNAND_ID(SNAND_ID_DYMMY, 0xc8, 0x0a),
SNAND_MEMORG_2G_2K_64_2D,
&snand_cap_read_from_cache_quad,
&snand_cap_program_load_x4,
mtk_snand_winbond_select_die),
SNAND_INFO("CS11G0T0A0AA", SNAND_ID(SNAND_ID_DYMMY, 0x6b, 0x00),
SNAND_MEMORG_1G_2K_128,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("CS11G0G0A0AA", SNAND_ID(SNAND_ID_DYMMY, 0x6b, 0x10),
SNAND_MEMORG_1G_2K_128,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("CS11G0S0A0AA", SNAND_ID(SNAND_ID_DYMMY, 0x6b, 0x20),
SNAND_MEMORG_1G_2K_64,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("CS11G1T0A0AA", SNAND_ID(SNAND_ID_DYMMY, 0x6b, 0x01),
SNAND_MEMORG_2G_2K_128,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("CS11G1S0A0AA", SNAND_ID(SNAND_ID_DYMMY, 0x6b, 0x21),
SNAND_MEMORG_2G_2K_64,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("CS11G2T0A0AA", SNAND_ID(SNAND_ID_DYMMY, 0x6b, 0x02),
SNAND_MEMORG_4G_2K_128,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("CS11G2S0A0AA", SNAND_ID(SNAND_ID_DYMMY, 0x6b, 0x22),
SNAND_MEMORG_4G_2K_64,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73B044VCA", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x01),
SNAND_MEMORG_512M_2K_64,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73C044SNB", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x11),
SNAND_MEMORG_1G_2K_120,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73C044SNF", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x09),
SNAND_MEMORG_1G_2K_128,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73C044VCA", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x18),
SNAND_MEMORG_1G_2K_64,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73C044SNA", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x19),
SNAND_MEMORG(2048, 64, 128, 512, 1, 1),
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73C044VCD", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x1c),
SNAND_MEMORG_1G_2K_64,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73C044SND", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x1d),
SNAND_MEMORG_1G_2K_64,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73D044SND", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x1e),
SNAND_MEMORG_2G_2K_64,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73C044VCC", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x22),
SNAND_MEMORG_1G_2K_64,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73C044VCF", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x25),
SNAND_MEMORG_1G_2K_64,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73C044SNC", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x31),
SNAND_MEMORG_1G_2K_128,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73D044SNC", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x0a),
SNAND_MEMORG_2G_2K_120,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73D044SNA", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x12),
SNAND_MEMORG_2G_2K_128,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73D044SNF", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x10),
SNAND_MEMORG_2G_2K_128,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73D044VCA", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x13),
SNAND_MEMORG_2G_2K_128,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73D044VCB", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x14),
SNAND_MEMORG_2G_2K_64,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73D044VCD", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x17),
SNAND_MEMORG_2G_2K_128,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73D044VCH", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x1b),
SNAND_MEMORG_2G_2K_64,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73D044SND", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x1d),
SNAND_MEMORG_2G_2K_64,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73D044VCG", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x1f),
SNAND_MEMORG_2G_2K_64,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73D044VCE", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x20),
SNAND_MEMORG_2G_2K_64,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73D044VCL", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x2e),
SNAND_MEMORG_2G_2K_128,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73D044SNB", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x32),
SNAND_MEMORG_2G_2K_128,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73E044SNA", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x03),
SNAND_MEMORG_4G_4K_256,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73E044SND", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x0b),
SNAND_MEMORG_4G_4K_240,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73E044SNB", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x23),
SNAND_MEMORG_4G_4K_256,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73E044VCA", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x2c),
SNAND_MEMORG_4G_4K_256,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73E044VCB", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x2f),
SNAND_MEMORG_4G_2K_128,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73F044SNA", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x24),
SNAND_MEMORG_8G_4K_256,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73F044VCA", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x2d),
SNAND_MEMORG_8G_4K_256,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73E044SNE", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x0e),
SNAND_MEMORG_8G_4K_256,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73C044SNG", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x0c),
SNAND_MEMORG_1G_2K_120,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("EM73D044VCN", SNAND_ID(SNAND_ID_DYMMY, 0xd5, 0x0f),
SNAND_MEMORG_2G_2K_64,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("FM35Q1GA", SNAND_ID(SNAND_ID_DYMMY, 0xe5, 0x71),
SNAND_MEMORG_1G_2K_64,
&snand_cap_read_from_cache_x4,
&snand_cap_program_load_x4),
SNAND_INFO("PN26G01A", SNAND_ID(SNAND_ID_DYMMY, 0xa1, 0xe1),
SNAND_MEMORG_1G_2K_128,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("PN26G02A", SNAND_ID(SNAND_ID_DYMMY, 0xa1, 0xe2),
SNAND_MEMORG_2G_2K_128,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("IS37SML01G1", SNAND_ID(SNAND_ID_DYMMY, 0xc8, 0x21),
SNAND_MEMORG_1G_2K_64,
&snand_cap_read_from_cache_x4,
&snand_cap_program_load_x4),
SNAND_INFO("ATO25D1GA", SNAND_ID(SNAND_ID_DYMMY, 0x9b, 0x12),
SNAND_MEMORG_1G_2K_64,
&snand_cap_read_from_cache_x4_only,
&snand_cap_program_load_x4),
SNAND_INFO("HYF1GQ4U", SNAND_ID(SNAND_ID_DYMMY, 0xc9, 0x51),
SNAND_MEMORG_1G_2K_128,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
SNAND_INFO("HYF2GQ4U", SNAND_ID(SNAND_ID_DYMMY, 0xc9, 0x52),
SNAND_MEMORG_2G_2K_128,
&snand_cap_read_from_cache_quad_q2d,
&snand_cap_program_load_x4),
};
/* Select one die of a dual-die Winbond chip via the vendor die-select command */
static int mtk_snand_winbond_select_die(struct mtk_snand *snf, uint32_t dieidx)
{
	/* Command byte followed by the target die index */
	uint8_t cmd[2] = { SNAND_CMD_WINBOND_SELECT_DIE, 0 };

	if (dieidx > 1) {
		snand_log_chip(snf->pdev, "Invalid die index %u\n", dieidx);
		return -EINVAL;
	}

	cmd[1] = (uint8_t)dieidx;

	return mtk_snand_mac_io(snf, cmd, sizeof(cmd), NULL, 0);
}
/* Select one die of a dual-die Micron chip via the die-address feature register */
static int mtk_snand_micron_select_die(struct mtk_snand *snf, uint32_t dieidx)
{
	int ret;

	/* Only dual-die parts are supported (die 0 or 1) */
	if (dieidx > 1) {
		snand_log_chip(snf->pdev, "Invalid die index %u\n", dieidx);
		return -EINVAL;
	}

	/*
	 * NOTE(review): dieidx is validated above but not used below — the
	 * feature register is always written with SNAND_MICRON_DIE_SEL_1.
	 * Confirm whether the selection value should depend on dieidx.
	 */
	ret = mtk_snand_set_feature(snf, SNAND_FEATURE_MICRON_DIE_ADDR,
				    SNAND_MICRON_DIE_SEL_1);
	if (ret) {
		snand_log_chip(snf->pdev,
			       "Failed to set die selection feature\n");
		return ret;
	}

	return 0;
}
const struct snand_flash_info *snand_flash_id_lookup(enum snand_id_type type,
const uint8_t *id)
{
const struct snand_id *fid;
uint32_t i;
for (i = 0; i < ARRAY_SIZE(snand_flash_ids); i++) {
if (snand_flash_ids[i].id.type != type)
continue;
fid = &snand_flash_ids[i].id;
if (memcmp(fid->id, id, fid->len))
continue;
return &snand_flash_ids[i];
}
return NULL;
}

View File

@ -0,0 +1,677 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
*
* Author: Weijie Gao <weijie.gao@mediatek.com>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/of_platform.h>
#include "mtk-snand.h"
#include "mtk-snand-os.h"
/* Match-table private data: identifies the SoC variant for the probed node */
struct mtk_snand_of_id {
	enum mtk_snand_soc soc;
};
/* Per-device driver state, embedding the mtd_info exposed to the MTD core */
struct mtk_snand_mtd {
	struct mtk_snand_plat_dev pdev;	/* OS abstraction (device + irq completion) */
	struct clk *nfi_clk;		/* NFI controller clock */
	struct clk *pad_clk;		/* SPI pad clock */
	struct clk *ecc_clk;		/* ECC engine clock */
	void __iomem *nfi_regs;		/* "nfi" register window */
	void __iomem *ecc_regs;		/* "ecc" register window */
	int irq;			/* SNFI irq number, negative if unused */
	bool quad_spi;			/* "mediatek,quad-spi" DT property */
	enum mtk_snand_soc soc;		/* SoC variant from the match table */
	struct mtd_info mtd;		/* MTD device registered with the core */
	struct mtk_snand *snf;		/* lower-level controller handle */
	struct mtk_snand_chip_info cinfo;	/* probed chip geometry */
	uint8_t *page_cache;		/* bounce buffer: one page + spare */
	struct mutex lock;		/* serializes all flash operations */
};
#define mtd_to_msm(mtd) container_of(mtd, struct mtk_snand_mtd, mtd)
/*
 * MTD erase callback: erase every block overlapping [instr->addr,
 * instr->addr + instr->len), skipping nothing — a bad block inside the
 * range aborts with -EIO and records fail_addr.
 */
static int mtk_snand_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
	u64 start_addr, end_addr;
	/*
	 * Initialize to 0: if the aligned range is empty (e.g. len == 0)
	 * the loop below never runs and ret would otherwise be returned
	 * uninitialized.
	 */
	int ret = 0;

	/* Do not allow erase past end of device */
	if ((instr->addr + instr->len) > mtd->size) {
		dev_err(msm->pdev.dev,
			"attempt to erase beyond end of device\n");
		return -EINVAL;
	}

	/* Expand the region to whole eraseblocks */
	start_addr = instr->addr & (~mtd->erasesize_mask);
	end_addr = instr->addr + instr->len;
	if (end_addr & mtd->erasesize_mask) {
		end_addr = (end_addr + mtd->erasesize_mask) &
			   (~mtd->erasesize_mask);
	}

	mutex_lock(&msm->lock);

	while (start_addr < end_addr) {
		/* Refuse to erase a known-bad block */
		if (mtk_snand_block_isbad(msm->snf, start_addr)) {
			instr->fail_addr = start_addr;
			ret = -EIO;
			break;
		}

		ret = mtk_snand_erase_block(msm->snf, start_addr);
		if (ret) {
			instr->fail_addr = start_addr;
			break;
		}

		start_addr += mtd->erasesize;
	}

	mutex_unlock(&msm->lock);

	return ret;
}
/*
 * Core page-wise read loop shared by all OOB modes.
 *
 * Reads whole pages into msm->page_cache (data followed by OOB) and copies
 * the requested spans into ops->datbuf / ops->oobbuf. Returns the maximum
 * number of corrected bitflips seen, or -EBADMSG if any page had an
 * uncorrectable ECC error. Caller must hold msm->lock.
 */
static int mtk_snand_mtd_read_data(struct mtk_snand_mtd *msm, uint64_t addr,
				   struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = &msm->mtd;
	size_t len, ooblen, maxooblen, chklen;
	uint32_t col, ooboffs;
	uint8_t *datcache, *oobcache;
	bool ecc_failed = false, raw = ops->mode == MTD_OPS_RAW ? true : false;
	int ret, max_bitflips = 0;

	/* Split the start address into page address + in-page column */
	col = addr & mtd->writesize_mask;
	addr &= ~mtd->writesize_mask;
	maxooblen = mtd_oobavail(mtd, ops);
	ooboffs = ops->ooboffs;
	ooblen = ops->ooblen;
	len = ops->len;

	/* page_cache holds one page of data followed by its OOB area */
	datcache = len ? msm->page_cache : NULL;
	oobcache = ooblen ? msm->page_cache + mtd->writesize : NULL;

	ops->oobretlen = 0;
	ops->retlen = 0;

	while (len || ooblen) {
		if (ops->mode == MTD_OPS_AUTO_OOB)
			ret = mtk_snand_read_page_auto_oob(msm->snf, addr,
				datcache, oobcache, maxooblen, NULL, raw);
		else
			ret = mtk_snand_read_page(msm->snf, addr, datcache,
				oobcache, raw);

		if (ret < 0 && ret != -EBADMSG)
			return ret;

		if (ret == -EBADMSG) {
			/* Uncorrectable page: record it but keep reading */
			mtd->ecc_stats.failed++;
			ecc_failed = true;
		} else {
			/* ret is the bitflip count for this page */
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max_t(int, ret, max_bitflips);
		}

		if (len) {
			/* Move data */
			chklen = mtd->writesize - col;
			if (chklen > len)
				chklen = len;

			memcpy(ops->datbuf + ops->retlen, datcache + col,
			       chklen);
			len -= chklen;
			col = 0; /* (col + chklen) % */
			ops->retlen += chklen;
		}

		if (ooblen) {
			/* Move oob */
			chklen = maxooblen - ooboffs;
			if (chklen > ooblen)
				chklen = ooblen;

			memcpy(ops->oobbuf + ops->oobretlen, oobcache + ooboffs,
			       chklen);
			ooblen -= chklen;
			ooboffs = 0; /* (ooboffs + chklen) % maxooblen; */
			ops->oobretlen += chklen;
		}

		addr += mtd->writesize;
	}

	return ecc_failed ? -EBADMSG : max_bitflips;
}
/*
 * MTD read_oob callback: validate the request, then hand it to the shared
 * page-wise read loop under the device lock.
 */
static int mtk_snand_mtd_read_oob(struct mtd_info *mtd, loff_t from,
				  struct mtd_oob_ops *ops)
{
	struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
	uint32_t oobavail;
	int ret;

	/* A request with no buffers must also carry no lengths */
	if (!ops->datbuf && !ops->oobbuf) {
		if (ops->len || ops->ooblen)
			return -EINVAL;
		return 0;
	}

	if (ops->mode != MTD_OPS_PLACE_OOB &&
	    ops->mode != MTD_OPS_AUTO_OOB &&
	    ops->mode != MTD_OPS_RAW) {
		dev_err(msm->pdev.dev, "unsupported oob mode: %u\n", ops->mode);
		return -EINVAL;
	}

	oobavail = mtd_oobavail(mtd, ops);

	/* Do not allow read past end of device */
	if (ops->datbuf && (from + ops->len) > mtd->size) {
		dev_err(msm->pdev.dev,
			"attempt to read beyond end of device\n");
		return -EINVAL;
	}

	if (unlikely(ops->ooboffs >= oobavail)) {
		dev_err(msm->pdev.dev, "attempt to start read outside oob\n");
		return -EINVAL;
	}

	/* Reject OOB reads exceeding the remaining pages' OOB capacity */
	if (unlikely(from >= mtd->size ||
	    ops->ooboffs + ops->ooblen > ((mtd->size >> mtd->writesize_shift) -
	    (from >> mtd->writesize_shift)) * oobavail)) {
		dev_err(msm->pdev.dev,
			"attempt to read beyond end of device\n");
		return -EINVAL;
	}

	mutex_lock(&msm->lock);
	ret = mtk_snand_mtd_read_data(msm, from, ops);
	mutex_unlock(&msm->lock);

	return ret;
}
/*
 * Core page-wise write loop shared by all OOB modes.
 *
 * Assembles each page in msm->page_cache (data followed by OOB), padding
 * unwritten bytes with 0xff so they are left unprogrammed on flash, then
 * writes the page. Caller must hold msm->lock.
 */
static int mtk_snand_mtd_write_data(struct mtk_snand_mtd *msm, uint64_t addr,
				    struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = &msm->mtd;
	size_t len, ooblen, maxooblen, chklen, oobwrlen;
	uint32_t col, ooboffs;
	uint8_t *datcache, *oobcache;
	bool raw = ops->mode == MTD_OPS_RAW ? true : false;
	int ret;

	/* Split the start address into page address + in-page column */
	col = addr & mtd->writesize_mask;
	addr &= ~mtd->writesize_mask;
	maxooblen = mtd_oobavail(mtd, ops);
	ooboffs = ops->ooboffs;
	ooblen = ops->ooblen;
	len = ops->len;

	/* page_cache holds one page of data followed by its OOB area */
	datcache = len ? msm->page_cache : NULL;
	oobcache = ooblen ? msm->page_cache + mtd->writesize : NULL;

	ops->oobretlen = 0;
	ops->retlen = 0;

	while (len || ooblen) {
		if (len) {
			/* Move data */
			chklen = mtd->writesize - col;
			if (chklen > len)
				chklen = len;

			/* 0xff-pad around the user data (no-op on NAND) */
			memset(datcache, 0xff, col);
			memcpy(datcache + col, ops->datbuf + ops->retlen,
			       chklen);
			memset(datcache + col + chklen, 0xff,
			       mtd->writesize - col - chklen);
			len -= chklen;
			col = 0; /* (col + chklen) % */
			ops->retlen += chklen;
		}

		oobwrlen = 0;
		if (ooblen) {
			/* Move oob */
			chklen = maxooblen - ooboffs;
			if (chklen > ooblen)
				chklen = ooblen;

			/* 0xff-pad the OOB area around the user bytes */
			memset(oobcache, 0xff, ooboffs);
			memcpy(oobcache + ooboffs,
			       ops->oobbuf + ops->oobretlen, chklen);
			memset(oobcache + ooboffs + chklen, 0xff,
			       mtd->oobsize - ooboffs - chklen);
			oobwrlen = chklen + ooboffs;
			ooblen -= chklen;
			ooboffs = 0; /* (ooboffs + chklen) % maxooblen; */
			ops->oobretlen += chklen;
		}

		if (ops->mode == MTD_OPS_AUTO_OOB)
			ret = mtk_snand_write_page_auto_oob(msm->snf, addr,
				datcache, oobcache, oobwrlen, NULL, raw);
		else
			ret = mtk_snand_write_page(msm->snf, addr, datcache,
				oobcache, raw);

		if (ret)
			return ret;

		addr += mtd->writesize;
	}

	return 0;
}
/*
 * MTD write_oob callback: validate the request, then hand it to the shared
 * page-wise write loop under the device lock.
 */
static int mtk_snand_mtd_write_oob(struct mtd_info *mtd, loff_t to,
				   struct mtd_oob_ops *ops)
{
	struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
	uint32_t oobavail;
	int ret;

	/* A request with no buffers must also carry no lengths */
	if (!ops->datbuf && !ops->oobbuf) {
		if (ops->len || ops->ooblen)
			return -EINVAL;
		return 0;
	}

	if (ops->mode != MTD_OPS_PLACE_OOB &&
	    ops->mode != MTD_OPS_AUTO_OOB &&
	    ops->mode != MTD_OPS_RAW) {
		dev_err(msm->pdev.dev, "unsupported oob mode: %u\n", ops->mode);
		return -EINVAL;
	}

	oobavail = mtd_oobavail(mtd, ops);

	/* Do not allow write past end of device */
	if (ops->datbuf && (to + ops->len) > mtd->size) {
		dev_err(msm->pdev.dev,
			"attempt to write beyond end of device\n");
		return -EINVAL;
	}

	if (unlikely(ops->ooboffs >= oobavail)) {
		dev_err(msm->pdev.dev,
			"attempt to start write outside oob\n");
		return -EINVAL;
	}

	/* Reject OOB writes exceeding the remaining pages' OOB capacity */
	if (unlikely(to >= mtd->size ||
	    ops->ooboffs + ops->ooblen > ((mtd->size >> mtd->writesize_shift) -
	    (to >> mtd->writesize_shift)) * oobavail)) {
		dev_err(msm->pdev.dev,
			"attempt to write beyond end of device\n");
		return -EINVAL;
	}

	mutex_lock(&msm->lock);
	ret = mtk_snand_mtd_write_data(msm, to, ops);
	mutex_unlock(&msm->lock);

	return ret;
}
/* MTD block_isbad callback: query the bad-block mark under the device lock */
static int mtk_snand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
	int bad;

	mutex_lock(&msm->lock);
	bad = mtk_snand_block_isbad(msm->snf, offs);
	mutex_unlock(&msm->lock);

	return bad;
}
/* MTD block_markbad callback: write the bad-block mark under the device lock */
static int mtk_snand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
	int rc;

	mutex_lock(&msm->lock);
	rc = mtk_snand_block_markbad(msm->snf, offs);
	mutex_unlock(&msm->lock);

	return rc;
}
/*
 * OOB layout: the ECC region is everything after the per-sector FDM
 * (free data) bytes. There is a single ECC region (section 0 only).
 */
static int mtk_snand_ooblayout_ecc(struct mtd_info *mtd, int section,
				   struct mtd_oob_region *oobecc)
{
	struct mtk_snand_mtd *msm = mtd_to_msm(mtd);

	if (section)
		return -ERANGE;

	oobecc->offset = msm->cinfo.fdm_size * msm->cinfo.num_sectors;
	oobecc->length = mtd->oobsize - oobecc->offset;

	return 0;
}
/*
 * OOB layout: one free region per ECC sector, covering that sector's FDM
 * bytes except byte 0 — presumably reserved for the bad-block marker;
 * TODO confirm against the SNFI documentation.
 */
static int mtk_snand_ooblayout_free(struct mtd_info *mtd, int section,
				    struct mtd_oob_region *oobfree)
{
	struct mtk_snand_mtd *msm = mtd_to_msm(mtd);

	if (section >= msm->cinfo.num_sectors)
		return -ERANGE;

	oobfree->length = msm->cinfo.fdm_size - 1;
	oobfree->offset = section * msm->cinfo.fdm_size + 1;

	return 0;
}
/* Interrupt handler: claim the irq only if the controller had work pending */
static irqreturn_t mtk_snand_irq(int irq, void *id)
{
	struct mtk_snand_mtd *msm = id;

	return mtk_snand_irq_process(msm->snf) > 0 ? IRQ_HANDLED : IRQ_NONE;
}
/*
 * Enable the nfi, pad and ecc clocks in order, unwinding already-enabled
 * clocks on failure. Uses goto-based cleanup to avoid the duplicated
 * clk_disable_unprepare() calls of the original version (behavior is
 * unchanged).
 */
static int mtk_snand_enable_clk(struct mtk_snand_mtd *msm)
{
	int ret;

	ret = clk_prepare_enable(msm->nfi_clk);
	if (ret) {
		dev_err(msm->pdev.dev, "unable to enable nfi clk\n");
		return ret;
	}

	ret = clk_prepare_enable(msm->pad_clk);
	if (ret) {
		dev_err(msm->pdev.dev, "unable to enable pad clk\n");
		goto err_disable_nfi;
	}

	ret = clk_prepare_enable(msm->ecc_clk);
	if (ret) {
		dev_err(msm->pdev.dev, "unable to enable ecc clk\n");
		goto err_disable_pad;
	}

	return 0;

err_disable_pad:
	clk_disable_unprepare(msm->pad_clk);
err_disable_nfi:
	clk_disable_unprepare(msm->nfi_clk);
	return ret;
}
/* Disable the controller clocks in reverse order of mtk_snand_enable_clk() */
static void mtk_snand_disable_clk(struct mtk_snand_mtd *msm)
{
	clk_disable_unprepare(msm->ecc_clk);
	clk_disable_unprepare(msm->pad_clk);
	clk_disable_unprepare(msm->nfi_clk);
}
static const struct mtd_ooblayout_ops mtk_snand_ooblayout = {
.ecc = mtk_snand_ooblayout_ecc,
.free = mtk_snand_ooblayout_free,
};
static struct mtk_snand_of_id mt7622_soc_id = { .soc = SNAND_SOC_MT7622 };
static struct mtk_snand_of_id mt7629_soc_id = { .soc = SNAND_SOC_MT7629 };
static const struct of_device_id mtk_snand_ids[] = {
{ .compatible = "mediatek,mt7622-snand", .data = &mt7622_soc_id },
{ .compatible = "mediatek,mt7629-snand", .data = &mt7629_soc_id },
{ },
};
MODULE_DEVICE_TABLE(of, mtk_snand_ids);
/*
 * Probe: map registers, acquire clocks, initialize the SNFI controller,
 * detect the attached SPI-NAND chip and register it as an MTD device.
 *
 * Error labels unwind in reverse order of acquisition: errout4 frees the
 * page cache, errout3 the irq, errout2 the controller, errout1 the
 * device-managed allocation.
 */
static int mtk_snand_probe(struct platform_device *pdev)
{
	struct mtk_snand_platdata mtk_snand_pdata = {};
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_soc_id;
	const struct mtk_snand_of_id *soc_id;
	struct mtk_snand_mtd *msm;
	struct mtd_info *mtd;
	struct resource *r;
	uint32_t size;
	int ret;

	of_soc_id = of_match_node(mtk_snand_ids, np);
	if (!of_soc_id)
		return -EINVAL;

	soc_id = of_soc_id->data;

	msm = devm_kzalloc(&pdev->dev, sizeof(*msm), GFP_KERNEL);
	if (!msm)
		return -ENOMEM;

	/* Map the "nfi" and "ecc" register windows named in the DT node */
	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nfi");
	msm->nfi_regs = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(msm->nfi_regs)) {
		ret = PTR_ERR(msm->nfi_regs);
		goto errout1;
	}

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ecc");
	msm->ecc_regs = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(msm->ecc_regs)) {
		ret = PTR_ERR(msm->ecc_regs);
		goto errout1;
	}

	msm->pdev.dev = &pdev->dev;
	msm->quad_spi = of_property_read_bool(np, "mediatek,quad-spi");
	msm->soc = soc_id->soc;

	/* Acquire (but do not yet enable) the three controller clocks */
	msm->nfi_clk = devm_clk_get(msm->pdev.dev, "nfi_clk");
	if (IS_ERR(msm->nfi_clk)) {
		ret = PTR_ERR(msm->nfi_clk);
		dev_err(msm->pdev.dev, "unable to get nfi_clk, err = %d\n",
			ret);
		goto errout1;
	}

	msm->ecc_clk = devm_clk_get(msm->pdev.dev, "ecc_clk");
	if (IS_ERR(msm->ecc_clk)) {
		ret = PTR_ERR(msm->ecc_clk);
		dev_err(msm->pdev.dev, "unable to get ecc_clk, err = %d\n",
			ret);
		goto errout1;
	}

	msm->pad_clk = devm_clk_get(msm->pdev.dev, "pad_clk");
	if (IS_ERR(msm->pad_clk)) {
		ret = PTR_ERR(msm->pad_clk);
		dev_err(msm->pdev.dev, "unable to get pad_clk, err = %d\n",
			ret);
		goto errout1;
	}

	ret = mtk_snand_enable_clk(msm);
	if (ret)
		goto errout1;

	/* Probe SPI-NAND Flash */
	mtk_snand_pdata.soc = msm->soc;
	mtk_snand_pdata.quad_spi = msm->quad_spi;
	mtk_snand_pdata.nfi_base = msm->nfi_regs;
	mtk_snand_pdata.ecc_base = msm->ecc_regs;

	ret = mtk_snand_init(&msm->pdev, &mtk_snand_pdata, &msm->snf);
	if (ret)
		goto errout1;

	/* Interrupt-driven operation is optional; polling is the fallback */
	msm->irq = platform_get_irq(pdev, 0);
	if (msm->irq >= 0) {
		ret = devm_request_irq(msm->pdev.dev, msm->irq, mtk_snand_irq,
				       0x0, "mtk-snand", msm);
		if (ret) {
			dev_err(msm->pdev.dev, "failed to request snfi irq\n");
			goto errout2;
		}

		ret = dma_set_mask(msm->pdev.dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(msm->pdev.dev, "failed to set dma mask\n");
			goto errout3;
		}
	}

	/* Bounce buffer sized for one full page plus its spare area */
	mtk_snand_get_chip_info(msm->snf, &msm->cinfo);

	size = msm->cinfo.pagesize + msm->cinfo.sparesize;
	msm->page_cache = devm_kmalloc(msm->pdev.dev, size, GFP_KERNEL);
	if (!msm->page_cache) {
		dev_err(msm->pdev.dev, "failed to allocate page cache\n");
		ret = -ENOMEM;
		goto errout3;
	}

	mutex_init(&msm->lock);

	dev_info(msm->pdev.dev,
		 "chip is %s, size %lluMB, page size %u, oob size %u\n",
		 msm->cinfo.model, msm->cinfo.chipsize >> 20,
		 msm->cinfo.pagesize, msm->cinfo.sparesize);

	/* Initialize mtd for SPI-NAND */
	mtd = &msm->mtd;
	mtd->owner = THIS_MODULE;
	mtd->dev.parent = &pdev->dev;
	mtd->type = MTD_NANDFLASH;
	mtd->flags = MTD_CAP_NANDFLASH;

	mtd_set_of_node(mtd, np);

	/* Geometry taken from the probed chip info */
	mtd->size = msm->cinfo.chipsize;
	mtd->erasesize = msm->cinfo.blocksize;
	mtd->writesize = msm->cinfo.pagesize;
	mtd->writebufsize = mtd->writesize;
	mtd->oobsize = msm->cinfo.sparesize;
	mtd->oobavail = msm->cinfo.num_sectors * (msm->cinfo.fdm_size - 1);

	mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	mtd->writesize_shift = ffs(mtd->writesize) - 1;
	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	mtd->ooblayout = &mtk_snand_ooblayout;

	mtd->ecc_strength = msm->cinfo.ecc_strength;
	mtd->bitflip_threshold = (mtd->ecc_strength * 3) / 4;
	mtd->ecc_step_size = msm->cinfo.sector_size;

	mtd->_erase = mtk_snand_mtd_erase;
	mtd->_read_oob = mtk_snand_mtd_read_oob;
	mtd->_write_oob = mtk_snand_mtd_write_oob;
	mtd->_block_isbad = mtk_snand_mtd_block_isbad;
	mtd->_block_markbad = mtk_snand_mtd_block_markbad;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(msm->pdev.dev, "failed to register mtd partition\n");
		goto errout4;
	}

	platform_set_drvdata(pdev, msm);

	return 0;

errout4:
	devm_kfree(msm->pdev.dev, msm->page_cache);

errout3:
	if (msm->irq >= 0)
		devm_free_irq(msm->pdev.dev, msm->irq, msm);

errout2:
	mtk_snand_cleanup(msm->snf);

errout1:
	devm_kfree(msm->pdev.dev, msm);

	platform_set_drvdata(pdev, NULL);

	return ret;
}
/* Unregister the MTD device and tear down controller resources */
static int mtk_snand_remove(struct platform_device *pdev)
{
	struct mtk_snand_mtd *msm = platform_get_drvdata(pdev);
	struct mtd_info *mtd = &msm->mtd;
	int ret;

	/* May fail if the device is still in use; keep resources then */
	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	mtk_snand_cleanup(msm->snf);

	if (msm->irq >= 0)
		devm_free_irq(msm->pdev.dev, msm->irq, msm);

	mtk_snand_disable_clk(msm);

	devm_kfree(msm->pdev.dev, msm->page_cache);
	devm_kfree(msm->pdev.dev, msm);

	platform_set_drvdata(pdev, NULL);

	return 0;
}
static struct platform_driver mtk_snand_driver = {
	.probe = mtk_snand_probe,
	.remove = mtk_snand_remove,
	.driver = {
		.name = "mtk-snand",
		.of_match_table = mtk_snand_ids,
	},
};

module_platform_driver(mtk_snand_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Weijie Gao <weijie.gao@mediatek.com>");
/* Fixed typo: "MeidaTek" -> "MediaTek" */
MODULE_DESCRIPTION("MediaTek SPI-NAND Flash Controller Driver");

View File

@ -0,0 +1,48 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
*
* Author: Weijie Gao <weijie.gao@mediatek.com>
*/
#include "mtk-snand-def.h"
/*
 * Log a formatted message through the device, prefixed with the name of
 * the originating subsystem category. Returns 0 on success, -1 if the
 * message could not be formatted.
 */
int mtk_snand_log(struct mtk_snand_plat_dev *pdev,
		  enum mtk_snand_log_category cat, const char *fmt, ...)
{
	const char *prefix;
	va_list args;
	char *text;

	switch (cat) {
	case SNAND_LOG_NFI:
		prefix = "NFI";
		break;
	case SNAND_LOG_SNFI:
		prefix = "SNFI";
		break;
	case SNAND_LOG_ECC:
		prefix = "ECC";
		break;
	default:
		prefix = "";
		break;
	}

	va_start(args, fmt);
	text = kvasprintf(GFP_KERNEL, fmt, args);
	va_end(args);

	if (!text) {
		dev_warn(pdev->dev, "unable to print log\n");
		return -1;
	}

	if (prefix[0])
		dev_warn(pdev->dev, "%s: %s", prefix, text);
	else
		dev_warn(pdev->dev, "%s", text);

	kfree(text);

	return 0;
}

View File

@ -0,0 +1,127 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
*
* Author: Weijie Gao <weijie.gao@mediatek.com>
*/
#ifndef _MTK_SNAND_OS_H_
#define _MTK_SNAND_OS_H_
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/sizes.h>
#include <linux/iopoll.h>
#include <linux/hrtimer.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <asm/div64.h>
/* Per-device handle passed to the OS abstraction helpers below */
struct mtk_snand_plat_dev {
	struct device *dev;	/* backing platform device */
	struct completion done;	/* signaled from the SNFI interrupt handler */
};
/* Polling helpers */
#define read16_poll_timeout(addr, val, cond, sleep_us, timeout_us) \
readw_poll_timeout((addr), (val), (cond), (sleep_us), (timeout_us))
#define read32_poll_timeout(addr, val, cond, sleep_us, timeout_us) \
readl_poll_timeout((addr), (val), (cond), (sleep_us), (timeout_us))
/* Timer helpers */
#define mtk_snand_time_t ktime_t
/* Current timestamp for timeout bookkeeping (monotonic clock) */
static inline mtk_snand_time_t timer_get_ticks(void)
{
	return ktime_get();
}
/* Convert a timeout in microseconds into a ktime duration */
static inline mtk_snand_time_t timer_time_to_tick(uint32_t timeout_us)
{
	return ktime_add_us(ktime_set(0, 0), timeout_us);
}
/* True once "now" is strictly later than start_tick + timeout_tick */
static inline bool timer_is_timeout(mtk_snand_time_t start_tick,
				    mtk_snand_time_t timeout_tick)
{
	return ktime_after(ktime_get(), ktime_add(start_tick, timeout_tick));
}
/* Memory helpers */
/* Zeroed, device-managed allocation (auto-freed when the device unbinds) */
static inline void *generic_mem_alloc(struct mtk_snand_plat_dev *pdev,
				      size_t size)
{
	return devm_kzalloc(pdev->dev, size, GFP_KERNEL);
}
/* Early release of a generic_mem_alloc() buffer */
static inline void generic_mem_free(struct mtk_snand_plat_dev *pdev, void *ptr)
{
	devm_kfree(pdev->dev, ptr);
}
/* Zeroed buffer suitable for streaming DMA mapping (plain kmalloc memory) */
static inline void *dma_mem_alloc(struct mtk_snand_plat_dev *pdev, size_t size)
{
	return kzalloc(size, GFP_KERNEL);
}
/* Release a dma_mem_alloc() buffer */
static inline void dma_mem_free(struct mtk_snand_plat_dev *pdev, void *ptr)
{
	kfree(ptr);
}
/*
 * Map a buffer for streaming DMA. On success, stores the bus address in
 * *dma_addr and returns 0; on mapping failure, returns the nonzero value
 * from dma_mapping_error().
 */
static inline int dma_mem_map(struct mtk_snand_plat_dev *pdev, void *vaddr,
			      uintptr_t *dma_addr, size_t size, bool to_device)
{
	enum dma_data_direction dir =
		to_device ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	dma_addr_t mapped;
	int err;

	mapped = dma_map_single(pdev->dev, vaddr, size, dir);
	err = dma_mapping_error(pdev->dev, mapped);
	if (err)
		return err;

	*dma_addr = (uintptr_t)mapped;

	return 0;
}
/* Undo dma_mem_map(); to_device must match the value used when mapping */
static inline void dma_mem_unmap(struct mtk_snand_plat_dev *pdev,
				 uintptr_t dma_addr, size_t size,
				 bool to_device)
{
	dma_unmap_single(pdev->dev, dma_addr, size,
			 to_device ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
}
/* Interrupt helpers */
/* Called from the ISR path to wake a waiter in irq_completion_wait() */
static inline void irq_completion_done(struct mtk_snand_plat_dev *pdev)
{
	complete(&pdev->done);
}
/* One-time initialization of the irq completion object */
static inline void irq_completion_init(struct mtk_snand_plat_dev *pdev)
{
	init_completion(&pdev->done);
}
/*
 * Sleep until irq_completion_done() fires or timeout_us elapses.
 * reg/bit are unused in the Linux implementation (the ISR signals the
 * completion). Returns 0 on completion, -ETIMEDOUT on timeout.
 */
static inline int irq_completion_wait(struct mtk_snand_plat_dev *pdev,
				      void __iomem *reg, uint32_t bit,
				      uint32_t timeout_us)
{
	unsigned long remain;

	remain = wait_for_completion_timeout(&pdev->done,
					     usecs_to_jiffies(timeout_us));

	return remain ? 0 : -ETIMEDOUT;
}
#endif /* _MTK_SNAND_OS_H_ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,76 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
*
* Author: Weijie Gao <weijie.gao@mediatek.com>
*/
#ifndef _MTK_SNAND_H_
#define _MTK_SNAND_H_
#ifndef PRIVATE_MTK_SNAND_HEADER
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#endif
enum mtk_snand_soc {
SNAND_SOC_MT7622,
SNAND_SOC_MT7629,
__SNAND_SOC_MAX
};
struct mtk_snand_platdata {
void *nfi_base;
void *ecc_base;
enum mtk_snand_soc soc;
bool quad_spi;
};
/* Geometry and ECC layout of the probed SPI-NAND chip */
struct mtk_snand_chip_info {
	const char *model;		/* flash model name string */
	uint64_t chipsize;		/* total size in bytes (mtd->size) */
	uint32_t blocksize;		/* eraseblock size (mtd->erasesize) */
	uint32_t pagesize;		/* page size (mtd->writesize) */
	uint32_t sparesize;		/* OOB bytes per page (mtd->oobsize) */
	uint32_t spare_per_sector;	/* presumably spare bytes per ECC sector — confirm in mtk-snand.c */
	uint32_t fdm_size;		/* FDM (free data) bytes per sector */
	uint32_t fdm_ecc_size;		/* presumably FDM bytes covered by ECC — confirm in mtk-snand.c */
	uint32_t num_sectors;		/* ECC sectors per page */
	uint32_t sector_size;		/* ECC step size (mtd->ecc_step_size) */
	uint32_t ecc_strength;		/* correctable bits (mtd->ecc_strength) */
	uint32_t ecc_bytes;		/* presumably ECC parity bytes per sector — confirm in mtk-snand.c */
};
struct mtk_snand;
struct snand_flash_info;
int mtk_snand_init(void *dev, const struct mtk_snand_platdata *pdata,
struct mtk_snand **psnf);
int mtk_snand_cleanup(struct mtk_snand *snf);
int mtk_snand_chip_reset(struct mtk_snand *snf);
int mtk_snand_read_page(struct mtk_snand *snf, uint64_t addr, void *buf,
void *oob, bool raw);
int mtk_snand_write_page(struct mtk_snand *snf, uint64_t addr, const void *buf,
const void *oob, bool raw);
int mtk_snand_erase_block(struct mtk_snand *snf, uint64_t addr);
int mtk_snand_block_isbad(struct mtk_snand *snf, uint64_t addr);
int mtk_snand_block_markbad(struct mtk_snand *snf, uint64_t addr);
int mtk_snand_fill_oob(struct mtk_snand *snf, uint8_t *oobraw,
const uint8_t *oobbuf, size_t ooblen);
int mtk_snand_transfer_oob(struct mtk_snand *snf, uint8_t *oobbuf,
size_t ooblen, const uint8_t *oobraw);
int mtk_snand_read_page_auto_oob(struct mtk_snand *snf, uint64_t addr,
void *buf, void *oob, size_t ooblen,
size_t *actualooblen, bool raw);
int mtk_snand_write_page_auto_oob(struct mtk_snand *snf, uint64_t addr,
const void *buf, const void *oob,
size_t ooblen, size_t *actualooblen,
bool raw);
int mtk_snand_get_chip_info(struct mtk_snand *snf,
struct mtk_snand_chip_info *info);
int mtk_snand_irq_process(struct mtk_snand *snf);
#endif /* _MTK_SNAND_H_ */

View File

@ -0,0 +1,21 @@
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -238,6 +238,8 @@ source "drivers/mtd/ubi/Kconfig"
source "drivers/mtd/hyperbus/Kconfig"
+source "drivers/mtd/mtk-snand/Kconfig"
+
source "drivers/mtd/composite/Kconfig"
endif # MTD
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -34,5 +34,7 @@ obj-$(CONFIG_MTD_SPI_NOR) += spi-nor/
obj-$(CONFIG_MTD_UBI) += ubi/
obj-$(CONFIG_MTD_HYPERBUS) += hyperbus/
+obj-$(CONFIG_MTK_SPI_NAND) += mtk-snand/
+
# Composite drivers must be loaded last
obj-y += composite/