--- zzzz-none-000/linux-4.4.60/drivers/mtd/devices/m25p80.c	2017-04-08 07:53:53.000000000 +0000
+++ wasp-540e-714/linux-4.4.60/drivers/mtd/devices/m25p80.c	2019-07-03 09:21:34.000000000 +0000
@@ -34,6 +34,391 @@
 	u8			command[MAX_CMD_SIZE];
 };
 
+#if defined(CONFIG_TFFS_DEV_LEGACY)
+
+#include
+#include
+#include
+#include
+#include
+#include "../../spi/spi_qup.h"
+#include "../../char/tffs/linux_tffs.h"
+
+#define OPCODE_RDSR	0x05	/* Read status register */
+#define OPCODE_RDID	0x9f	/* Read JEDEC ID */
+#define MAX_READY_WAIT_JIFFIES	(40 * HZ)	/* M25P16 specs 40s max chip erase */
+
+bool use_spi_sync = false;
+
+static inline struct m25p *mtd_to_m25p(struct mtd_info *mtd)
+{
+	return container_of(mtd, struct m25p, spi_nor.mtd);
+}
+
+/*
+ * Functions for directly accessing SPI
+ */
+static inline bool spi_qup_is_valid_state(struct spi_qup *dd)
+{
+	u32 spi_op = readl(dd->base + SPI_STATE);
+
+	return spi_op & SPI_OP_STATE_VALID;
+}
+
+static inline int spi_qup_wait_valid(struct spi_qup *dd)
+{
+	unsigned long delay;
+
+	/*
+	 * For small delay values, the default timeout would
+	 * be one jiffy
+	 */
+	delay = SPI_DELAY_THRESHOLD;
+	/* Use some random high number here that just works. */
+	if (delay < 16) {
+		delay = 166;
+	}
+
+	while (!spi_qup_is_valid_state(dd) && --delay) {
+		mdelay(1);
+	}
+
+	if (!spi_qup_is_valid_state(dd)) {
+		dev_err(dd->dev, "%s: SPI operational state"
+			" not valid\n", __func__);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static inline int spi_qup_get_state(struct spi_qup *dd) {
+	if (spi_qup_wait_valid(dd)) {
+		pr_err("[%s] spi_qup_wait_valid failed\n", __func__);
+		return -EIO;
+	}
+
+	return (readl(dd->base + SPI_STATE) & SPI_OP_STATE);
+}
+
+static inline int spi_qup_set_state(struct spi_qup *dd,
+				    enum spi_qup_state state)
+{
+	enum spi_qup_state cur_state;
+	int tmp;
+
+	if (spi_qup_wait_valid(dd)) {
+		return -EIO;
+	}
+
+	tmp = readl(dd->base + SPI_STATE);
+	cur_state = tmp & SPI_OP_STATE;
+
+	/* Per spec:
+	   For PAUSE_STATE to RESET_STATE, two writes of (10) are required */
+	if ((cur_state == SPI_OP_STATE_PAUSE) && (state == SPI_OP_STATE_RESET)) {
+		writel(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
+		writel(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
+	} else {
+		writel((tmp & ~SPI_OP_STATE) | state,
+		       dd->base + SPI_STATE);
+	}
+
+	if (spi_qup_wait_valid(dd)) {
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static inline int spi_qup_stop(struct spi_device *spi) {
+	struct spi_qup *dd = spi_master_get_devdata(spi->master);
+	int retval;
+	// Wait until done sending (should be done here anyways...)
+	retval = readl(dd->base + SPI_OPERATIONAL);
+	while (retval & SPI_OP_OP_FIFO_NOT_EMPTY) {
+		retval = readl(dd->base + SPI_OPERATIONAL);
+	}
+
+	// Force CS inactive
+	retval = readl(dd->base + SPI_IO_CONTROL);
+	writel(retval & ~SPI_IO_C_FORCE_CS, dd->base + SPI_IO_CONTROL);
+	if (gpio_is_valid(spi->cs_gpio)) {
+		gpio_set_value(spi->cs_gpio, true);
+	}
+
+	retval = spi_qup_set_state(dd, SPI_OP_STATE_RESET);
+	if (retval) {
+		dev_err(dd->dev,
+			"[%s] Error setting QUP to reset-state\n",
+			__func__);
+	}
+
+	return retval;
+}
+
+static inline int spi_qup_start(struct spi_device *spi) {
+	struct spi_qup *dd = spi_master_get_devdata(spi->master);
+	int retval;
+	int tmp;
+
+	spi_qup_stop(spi);
+
+	retval = spi_qup_prepare_for_write(dd);
+	if (retval) {
+		dev_err(dd->dev, "[%s] Prepare_for_write failed\n", __func__);
+	}
+
+	// Force CS active
+	tmp = readl(dd->base + SPI_IO_CONTROL);
+	writel(tmp | SPI_IO_C_FORCE_CS, dd->base + SPI_IO_CONTROL);
+
+	if (gpio_is_valid(spi->cs_gpio)) {
+		gpio_set_value(spi->cs_gpio, false);
+	}
+
+	return retval;
+}
+
+static int spi_rw_sync(struct spi_device *spi, const void *txbuf,
+		       void *rxbuf, unsigned buf_size) {
+	struct spi_qup *dd = spi_master_get_devdata(spi->master);
+
+	if (!buf_size) {
+		return 0;
+	}
+
+	dd->tx_buf = txbuf;
+	dd->tx_bytes = buf_size;
+	dd->rx_buf = rxbuf;
+	dd->rx_bytes = buf_size;
+
+	if (spi_qup_get_state(dd) != SPI_OP_STATE_RUN) {
+		spi_qup_start(spi);
+	}
+
+	writel(dd->tx_buf?((unsigned int)*(dd->tx_buf++) << 0x18):0, dd->base + SPI_OUTPUT_FIFO);
+	dd->tx_bytes--;
+
+	spi_qup_set_state(dd, SPI_OP_STATE_RUN);
+	while (dd->rx_bytes) {
+		uint32_t op = readl(dd->base + SPI_OPERATIONAL);
+
+		if ((!(op & SPI_OP_OUTPUT_FIFO_FULL)) && dd->tx_bytes) {
+			writel(dd->tx_buf?((unsigned int)*(dd->tx_buf++) << 0x18):0, dd->base + SPI_OUTPUT_FIFO);
+			dd->tx_bytes--;
+		}
+
+		if (op & SPI_OP_IP_FIFO_NOT_EMPTY) {
+			volatile u32 tmp_r = readl(dd->base + SPI_INPUT_FIFO);
+			if (dd->rx_buf) *(dd->rx_buf++) = tmp_r;
+			dd->rx_bytes--;
+		}
+	}
+
+	return 0;
+}
+
+static int spi_write_then_read_sync(struct spi_device *spi,
+		const void *txbuf, unsigned n_tx, void *rxbuf,
+		unsigned n_rx) {
+	int retval;
+
+	retval = spi_rw_sync(spi, txbuf, NULL, n_tx);
+	if (retval) return retval;
+	return spi_rw_sync(spi, NULL, rxbuf, n_rx);
+}
+
+static int spi_write_sync(struct spi_device *spi,
+		const void *buf, size_t len) {
+	return spi_rw_sync(spi, buf, NULL, len);
+}
+
+static int wait_till_ready(struct m25p *flash);
+/*--- TFFS stuff ---*/
+
+static DEFINE_SPINLOCK(panic_locking);
+
+static inline void panic_lock(unsigned long *flags)
+{
+	spin_lock_irqsave(&panic_locking, *flags);
+}
+
+static inline void panic_unlock(unsigned long flags)
+{
+	spin_unlock_irqrestore(&panic_locking, flags);
+}
+
+static int read_sr(struct m25p *flash)
+{
+	ssize_t retval;
+	u8 code = OPCODE_RDSR;
+	u8 val;
+
+	retval = spi_write_then_read_sync(flash->spi,
+					  &code, 1, &val, 1);
+	spi_qup_stop(flash->spi);
+
+	return val;
+}
+
+static int wait_till_ready(struct m25p *flash)
+{
+	unsigned long deadline;
+	int sr;
+	unsigned long delay_cnt = MAX_READY_WAIT_JIFFIES << 2;
+
+	deadline = jiffies + MAX_READY_WAIT_JIFFIES;
+	// Just wait some time...
+	do {
+		if ((sr = read_sr(flash)) < 0)
+			break;
+		else if (!(sr & SR_WIP))
+			return 0;
+	} while(--delay_cnt);
+	pr_err("SPI: Flash not ready!\n");
+	return 1;
+}
+
+static struct mtd_info *panic_reinit(struct mtd_info *mtd) {
+	unsigned long flags;
+	struct m25p *flash = mtd_to_m25p(mtd);
+	struct spi_device *spi = flash->spi;
+	struct spi_qup *dd = spi_master_get_devdata(spi->master);
+	int retval;
+	u8 code = OPCODE_RDID;
+	u8 id[3];
+
+	pm_runtime_irq_safe(dd->dev);
+	pm_runtime_get_sync(dd->dev);
+	clk_set_rate(dd->cclk, spi->max_speed_hz);
+
+// TODO: Expand this driver to remember incomplete write operations and complete them here to increase data consistency.
+	pr_err("[%s] called for mtd %s\n", __func__, mtd->name);
+	panic_lock(&flags);
+	// Disable IRQs
+	writel(QUP_OP_MASK_OUTPUT_SERVICE_FLAG | QUP_OP_MASK_INPUT_SERVICE_FLAG,
+	       dd->base + QUP_OPERATIONAL_MASK);
+	disable_irq(dd->irq);
+	use_spi_sync = 1;
+	// Reset QUP core
+	writel(1, dd->base + SPI_SW_RESET);
+	// Wait for valid state
+	spi_qup_wait_valid(dd);
+	// Disable IRQs
+	writel(QUP_OP_MASK_OUTPUT_SERVICE_FLAG | QUP_OP_MASK_INPUT_SERVICE_FLAG,
+	       dd->base + QUP_OPERATIONAL_MASK);
+	// Setup SPI mini core:
+	// N=7 (8 bit); SPI core;
+	writel(7 | (1 << 8), dd->base + QUP_CONFIG);
+	// Setup I/O modes register: FIFO for input and output (0) - a5 actually writes into the RO area here...
+	writel(0, dd->base + SPI_IO_MODES);
+	// Disable error flags. Don't need them when working synchronously.
+	writel(0, dd->base + SPI_ERROR_FLAGS_EN);
+	// Leave all MX_COUNT registers in reset state (0)
+	// SPI configuration: Master, not HighSpeed ...
+	writel((spi->mode & SPI_CPHA)?0:SPI_CFG_INPUT_FIRST, dd->base + SPI_CONFIG);
+	// SPI I/O control: set CPOL, select cs line, (force chip select)
+	// set CS polarity (active LOW = 0), clock polarity...
+	writel(SPI_IO_C_NO_TRI_STATE | ((spi->mode & SPI_CPOL)?SPI_IO_C_CLK_IDLE_HIGH:0) |
+	       (spi->chip_select << 2) | /* SPI_IO_C_FORCE_CS | */ SPI_IO_C_MX_CS_MODE
+	       , dd->base + SPI_IO_CONTROL);
+	writel(0, dd->base + QUP_ERROR_FLAGS_EN);
+
+	retval = wait_till_ready(flash);
+	if (retval) {
+		pr_err("[%s] Failed to read flash status (2) -- giving up.\n", __func__);
+		return NULL;
+	}
+
+	// read ID
+	if (spi_write_then_read_sync(spi, &code, 1, id, 3)) {
+		spi_qup_stop(spi);
+		pr_err("[%s] Failed to read flash jedec ID\n", __func__);
+		panic_unlock(flags);
+		return NULL;
+	}
+	spi_qup_stop(spi);
+	retval = 0;
+	retval |= id[0];
+	retval <<= 8;
+	retval |= id[1];
+	retval <<= 8;
+	retval |= id[2];
+	pr_err("%x\n", retval);
+#if 0
+	pr_err("%x >< %x\n", re)
+	if (info->jedec_id != retval)
+		pr_warn("[%s] Flash jedec ID mismatch (read: 0x%08x, expected: 0x%08x)\n",
+			__func__, retval, info->jedec_id);
+#endif
+	panic_unlock(flags);
+	pr_info("[%s] ok; sync_spi = %x\n", __func__, use_spi_sync);
+	return mtd;
+
+}
+
+static int avm_spi_write_then_read(struct spi_device *spi, const void *txbuf, unsigned n_tx, void *rxbuf, unsigned n_rx)
+{
+	int ret;
+
+	if (unlikely(use_spi_sync)) {
+		ret = spi_write_then_read_sync(spi, txbuf, n_tx, rxbuf, n_rx);
+		spi_qup_stop(spi);
+	}
+	else {
+		ret = spi_write_then_read(spi, txbuf, n_tx, rxbuf, n_rx);
+	}
+
+	return ret;
+}
+
+static int avm_spi_write(struct spi_device *spi, const void *txbuf, unsigned n_tx) {
+	int ret;
+
+	if (unlikely(use_spi_sync)) {
+		ret = spi_write_sync(spi, txbuf, n_tx);
+		spi_qup_stop(spi);
+	}
+	else {
+		ret = spi_write(spi, txbuf, n_tx);
+	}
+
+	return ret;
+}
+
+static int avm_spi_sync(struct spi_device *spi, struct spi_message *msg) {
+	int ret;
+	struct spi_transfer *t;
+	unsigned len = 0;
+
+	if (unlikely(use_spi_sync)) {
+		list_for_each_entry(t, &msg->transfers, transfer_list) {
+			ret = spi_rw_sync(spi, t->tx_buf, t->rx_buf, t->len);
+
+			if (ret < 0) {
+				break;
+			}
+
+			len += t->len;
+		}
+
+		msg->actual_length = len;
+
+		spi_qup_stop(spi);
+	}
+	else {
+		ret = spi_sync(spi, msg);
+	}
+
+	return ret;
+}
+
+#define spi_write_then_read avm_spi_write_then_read
+#define spi_write avm_spi_write
+#define spi_sync avm_spi_sync
+#endif
+
 static int m25p80_read_reg(struct spi_nor *nor, u8 code, u8 *val, int len)
 {
 	struct m25p *flash = nor->priv;
@@ -137,10 +522,15 @@
 	flash->command[0] = nor->read_opcode;
 	m25p_addr2cmd(nor, from, flash->command);
 
+	if (dummy == 1)
+		t[0].dummy = true;
+
+	t[0].type = SPI_TRANSFER_FLASH_READ_CMD;
 	t[0].tx_buf = flash->command;
 	t[0].len = m25p_cmdsz(nor) + dummy;
 	spi_message_add_tail(&t[0], &m);
 
+	t[1].type = SPI_TRANSFER_FLASH_READ_DATA;
 	t[1].rx_buf = buf;
 	t[1].rx_nbits = m25p80_rx_nbits(nor);
 	t[1].len = len;
@@ -227,9 +617,17 @@
 	if (ret)
 		return ret;
 
+	memset(&ppdata, '\0', sizeof(ppdata));
 	ppdata.of_node = spi->dev.of_node;
 
-	return mtd_device_parse_register(&nor->mtd, NULL, &ppdata,
+// AVM
+#if defined(CONFIG_TFFS_DEV_LEGACY)
+	TFFS3_Register_Panic_CB(&flash->spi_nor.mtd, panic_reinit);
+#endif
+
+	return mtd_device_parse_register(&nor->mtd,
+					 data ? data->part_probes : NULL,
+					 &ppdata,
 					 data ? data->parts : NULL,
 					 data ? data->nr_parts : 0);
 }
@@ -243,6 +641,14 @@
 	return mtd_device_unregister(&flash->spi_nor.mtd);
 }
 
+static void m25p_shutdown(struct spi_device *spi)
+{
+	struct m25p *flash = spi_get_drvdata(spi);
+
+	/* Ensure no pending flash operation in progress */
+	spi_nor_wait_till_ready(&flash->spi_nor);
+}
+
 /*
  * Do NOT add to this array without reading the following:
  *
@@ -306,6 +712,7 @@
 	.id_table	= m25p_ids,
 	.probe	= m25p_probe,
 	.remove	= m25p_remove,
+	.shutdown	= m25p_shutdown,
 
 	/* REVISIT: many of these chips have deep power-down modes, which
 	 * should clearly be entered on suspend() to minimize power use.