/*
 * ath_avmnand.c
 *
 * Created on: 06.08.2014
 * Author: hschillert
 */
/*------------------------------------------------------------------------------------------*\
 *
 * Copyright (C) 2004-2014 AVM GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
\*------------------------------------------------------------------------------------------*/

/*
 * NOTE(review): the header names of all #include directives below were lost
 * when this file was extracted (each line is a bare "#include").  They must
 * be restored from the original source before this file can compile.
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#define DRV_NAME	"ath-avmnand"
#define DRV_VERSION	"1.0"
#define DRV_AUTHOR	"AVM/Atheros"
#define DRV_DESC	"Atheros on-chip NAND FLash Controller Driver"

/* Controller timing / status constants */
#define ATH_NF_TIMING_ASYN	0x11
#define ATH_NF_STATUS_OK	0x40	//0xc0
#define ATH_NF_RD_STATUS_MASK	0x47	//0xc7
#define ATH_NF_STATUS_RETRY	1000

/* 2-bit per-block states kept in the in-RAM bad-block table (sc->bbt) */
#define ATH_NAND_BLK_DONT_KNOW	0x0
#define ATH_NAND_BLK_GOOD	0x1
#define ATH_NAND_BLK_BAD	0x2
#define ATH_NAND_BLK_ERASED	0x3

/*
 * Byte offsets into the ONFI parameter page.
 * Note: The byte positions might not match the spec.
 * It is to handle the endianness issues.
 */
#define ONFI_NUM_ADDR_CYCLES	102	/* see note */
#define ONFI_DEV_DESC		32
#define ONFI_DEV_DESC_SZ	32
#define ONFI_PAGE_SIZE		80
#define ONFI_SPARE_SIZE		86	/* see note */
#define ONFI_PAGES_PER_BLOCK	92
#define ONFI_BLOCKS_PER_LUN	96
#define ONFI_NUM_LUNS		103	/* see note */
#define ONFI_RD_PARAM_PAGE_SZ	128

#define READ_PARAM_STATUS_OK	0x40
#define READ_PARAM_STATUS_MASK	0x41

/* Use interrupt-driven completion instead of polling the status register */
#define ATH_USE_IRQ
/*--- #define ATH_NAND_IO_DBG ---*/
/*--- #define ATH_NAND_OOB_DBG ---*/

#if defined(ATH_NAND_IO_DBG)
#	define iodbg(fmt, arg...)	printk(KERN_ERR fmt, ##arg);
#else
#	define iodbg(...)
#endif

#if defined(ATH_NAND_OOB_DBG)
#	define oobdbg	printk
#else
#	define oobdbg(...)
#endif

/*
 * Data structures for ath nand flash controller driver
 */
/* Decoded legacy JEDEC READID bytes; byte_id[] aliases the bitfield view. */
typedef union {
	uint8_t byte_id[8];
	struct {
		uint8_t	sa1	: 1,	// Serial access time (bit 1)
			org	: 1,	// Organisation
			bs	: 2,	// Block size
			sa0	: 1,	// Serial access time (bit 0)
			ss	: 1,	// Spare size per 512 bytes
			ps	: 2,	// Page Size
			wc	: 1,	// Write Cache
			ilp	: 1,	// Interleaved Programming
			nsp	: 2,	// No. of simult prog pages
			ct	: 2,	// Cell type
			dp	: 2,	// Die/Package
			did,		// Device id
			vid,		// Vendor id
			res1	: 2,	// Reserved
			pls	: 2,	// Plane size
			pn	: 2,	// Plane number
			res2	: 2;	// Reserved
	} __details;
} ath_nand_id_t;

/*
 * Plane size in bytes, indexed by the 2-bit "pls" id field.
 * NOTE(review): "2 << 30", "4 << 30" and "8 << 30" are evaluated as 32-bit
 * int shifts BEFORE widening to uint64_t, so the 4 GiB and 8 GiB entries
 * overflow (undefined behavior / wrong values).  Should be e.g. 4ULL << 30.
 */
uint64_t ath_plane_size[] = {
	64 << 20, 1 << 30, 2 << 30, 4 << 30, 8 << 30
};

/* Per-vendor geometry; small/spare/pgsz/blk are only filled for small-block parts */
typedef struct {
	uint8_t		vid, did, b3, addrcyc, small, spare;	// for small block;
	uint16_t	pgsz;	// for small block
	uint32_t	blk;	// for small block
} ath_nand_vend_data_t;

ath_nand_vend_data_t ath_nand_arr[] = {
	{ 0x20, 0xda, 0x10, 5, },			// NU2g3B2D
	{ 0x20, 0xf1, 0x00, 4, },			// NU1g3B2C
	{ 0x20, 0xdc, 0x10, 5, },			// NU4g3B2D
	{ 0x20, 0xd3, 0x10, 5, },			// NU8g3F2A
	{ 0x20, 0xd3, 0x14, 5, },			// NU8g3C2B
	{ 0xad, 0xf1, 0x00, 4, },			// HY1g2b
	{ 0xad, 0xda, 0x10, 5, },			// HY2g2b
	{ 0xec, 0xf1, 0x00, 4, },			// Samsung 3,3V 8-bit [128MB]
	{ 0x98, 0xd1, 0x90, 4, },			// Toshiba
	{ 0x98, 0xf1, 0x80, 4, },			// Toshiba TC58BVG0S3HTA
	{ 0xad, 0x76, 0xad, 5, 1, 16, 512, 16 << 10 },	// Hynix 64MB NAND Flash
	{ 0xad, 0x36, 0xad, 5, 1, 16, 512, 16 << 10 },	// Hynix 64MB NAND Flash
	{ 0x20, 0x76, 0x20, 5, 1, 16, 512, 16 << 10 },	// ST Micro 64MB NAND Flash
};

#define NUM_ARRAY_ENTRIES(a)	(sizeof((a)) / sizeof((a)[0]))

/* ath nand info: driver-private state hung off mtd->priv / chip->priv */
typedef struct {
	struct mtd_info		mtd;		/* mtd info */
	unsigned int		*bbt;		/* 2 bits per block, see ATH_NAND_BLK_* */
	ath_nand_vend_data_t	*entry;		/* matched row of ath_nand_arr[] */
	ath_nand_id_t		__id;		// for readid
	uint8_t			onfi[ONFI_RD_PARAM_PAGE_SZ];	/* raw ONFI parameter page */
	unsigned char		*tmpbuffer;	/* scratch page for verify-after-write */
	wait_queue_head_t	wait_q;		/* IRQ completion wait queue */
	volatile unsigned long	wait_flag;	/* bit NAND_WAIT_EVENT set by the ISR */
} ath_nand_sc_t;

#define NAND_WAIT_EVENT	1

/*------------------------------------------------------------------------------------------*\
\*------------------------------------------------------------------------------------------*/
static ath_nand_sc_t *ath_nand_sc;
static DEFINE_SEMAPHORE(ath_nand_sem);

/* Shorthands into the READID union of the global sc */
#define nid	__id.__details
#define bid	__id.byte_id

static int ath_nand_block_isbad(struct mtd_info *mtd, loff_t ofs);

#if defined(CONFIG_MTD_PARTITIONS) && defined(CONFIG_MTD_CMDLINE_PARTS)
static const char *part_probes[] __initdata = { "cmdlinepart", "RedBoot", NULL };
#endif /*--- #if defined(CONFIG_MTD_PARTITIONS) && defined(CONFIG_MTD_CMDLINE_PARTS); ---*/

/*------------------------------------------------------------------------------------------*\
\*------------------------------------------------------------------------------------------*/
static int ath_nand_probe(struct platform_device *dev);
static int ath_nand_remove(struct platform_device *dev);

static struct platform_driver ath_avmnand_driver = {
	.probe	= ath_nand_probe,
	.remove	= ath_nand_remove,
	.driver	= {
		.name = DRV_NAME,
	},
};

/*------------------------------------------------------------------------------------------*\
 * Bad-block-table accessors: the table packs one 2-bit state per erase block
 * into an array of unsigned int.
 * NOTE(review): the bbt_index macro silently relies on a variable named "sc"
 * being in scope at every expansion site — fragile; a static inline taking
 * sc would be safer.
\*------------------------------------------------------------------------------------------*/
#define bbt_index	(sizeof(*sc->bbt) * 8 / 2)

/* Return the recorded ATH_NAND_BLK_* state for the block containing offset b. */
inline unsigned ath_nand_get_blk_state(struct mtd_info *mtd, loff_t b)
{
	unsigned x, y;
	ath_nand_sc_t *sc = mtd->priv;
	if (!sc->bbt)
		return ATH_NAND_BLK_DONT_KNOW;
	b = b >> mtd->erasesize_shift;	/* byte offset -> block number */
	x = b / bbt_index;
	y = b % bbt_index;
	return (sc->bbt[x] >> (y * 2)) & 0x3;
}

/*------------------------------------------------------------------------------------------*\
\*------------------------------------------------------------------------------------------*/
/* Record ATH_NAND_BLK_* state for the block containing byte offset b. */
inline void ath_nand_set_blk_state(struct mtd_info *mtd, loff_t b, unsigned state)
{
	unsigned x, y;
	ath_nand_sc_t *sc = mtd->priv;
	if (!sc->bbt)
		return;
	b = b >> mtd->erasesize_shift;	/* byte offset -> block number */
	x = b / bbt_index;
	y = b % bbt_index;
	sc->bbt[x] = (sc->bbt[x] & ~(3 << (y * 2))) | (state << (y * 2));
}

/*------------------------------------------------------------------------------------------*\
 * Interrupt handler: acknowledges cmd-done / mem-ready interrupts, masks them
 * again and wakes the sleeper in ath_nand_command().  ECC interrupts are
 * currently ignored; protection and FIFO errors are only logged.
\*------------------------------------------------------------------------------------------*/
#if defined(ATH_USE_IRQ)
static irqreturn_t ath_nand_intr_handler(int _irq_ __attribute__((unused)), void *dev)
{
	struct ath_nand *const Nand = (struct ath_nand *)&(*(volatile unsigned int *)(KSEG1ADDR(ATH_NAND_FLASH_BASE)));
	ath_nand_sc_t *ath_nand = (ath_nand_sc_t *)dev;
	unsigned int isr, imr, irq;

	isr = Nand->irq_status.Register;
	imr = Nand->irq_mask.Register;
	irq = isr & imr;	/* only handle sources that are actually enabled */
	if (irq & ATH_NF_PROT_INT) {
		printk(KERN_ERR "{%s} ATH_NF_PROT_INT irq 0x%x imr 0x%x\n", __func__, isr, imr);
	} else if (irq & ATH_NF_CMD_END_INT) {
		/* command finished: mask + ack, then wake the waiter */
		Nand->irq_mask.Bits.cmd_end = 0;
		Nand->irq_status.Bits.cmd_end = 0;
		set_bit(NAND_WAIT_EVENT, &ath_nand->wait_flag);
		wake_up(&ath_nand->wait_q);
	} else if (irq & ATH_NF_ECC_FATAL_ERR) {
	} else if (irq & ATH_NF_ECC_TRSH_ERR) {
	} else if (irq & ATH_NF_MEM0_RDY) {	/*--- handle all MEM_RDY - irqs ---*/
		Nand->irq_mask.Bits.mem0_rdy = 0;
		Nand->irq_status.Bits.mem0_rdy = 0;
		set_bit(NAND_WAIT_EVENT, &ath_nand->wait_flag);
		wake_up(&ath_nand->wait_q);
	} else if (irq & ATH_NF_FIFO_ERR) {
		printk(KERN_ERR "{%s} ATH_NF_FIFO_ERR irq 0x%x imr 0x%x\n", __func__, isr, imr);
	}
	return IRQ_HANDLED;
}
#endif

/*------------------------------------------------------------------------------------------*\
 * Claim the controller for this chip (same pattern as nand_base's
 * nand_get_device): sleeps until the controller is free, then moves the chip
 * into new_state.  Paired with ath_release_device().
\*------------------------------------------------------------------------------------------*/
static int ath_get_device(struct mtd_info *mtd, int new_state)
{
	struct nand_chip *chip = mtd->priv;
	spinlock_t *lock = &chip->controller->lock;
	wait_queue_head_t *wq = &chip->controller->wq;
	DECLARE_WAITQUEUE(wait, current);
retry:
	spin_lock(lock);
	/* Hardware controller shared among independent devices */
	if (!chip->controller->active)
		chip->controller->active = chip;
	if (chip->controller->active == chip && chip->state == FL_READY) {
		chip->state = new_state;
		spin_unlock(lock);
		return 0;
	}
	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(wq, &wait);
	spin_unlock(lock);
	schedule();
	remove_wait_queue(wq, &wait);
	goto retry;
}

/*------------------------------------------------------------------------------------------*\
 * Release the controller claimed by ath_get_device() and wake any waiters.
\*------------------------------------------------------------------------------------------*/
static void ath_release_device(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd->priv;
	/* De-select the NAND device */
	/*--- chip->select_chip(mtd, -1); ---*/
	/* Release the controller and the chip */
	spin_lock(&chip->controller->lock);
	chip->controller->active = NULL;
	chip->state = FL_READY;
	wake_up(&chip->controller->wq);
	spin_unlock(&chip->controller->lock);
}

/*------------------------------------------------------------------------------------------*\
 * Issue a NAND READ STATUS (0x70) through the controller and return the raw
 * status byte.  NOTE(review): both busy-wait loops have no timeout — a hung
 * controller stalls the CPU here forever.
\*------------------------------------------------------------------------------------------*/
static unsigned int ath_nand_status(struct nand_chip *chip)
{
	struct ath_nand *const Nand = (struct ath_nand *)&(*(volatile unsigned int *)(KSEG1ADDR(ATH_NAND_FLASH_BASE)));

	Nand->cmd.Register = (NAND_CMD_STATUS << 8) | ATH_NF_COMMAND_CMD_SEQ_4;	/*--- Read Status ---*/
	while (Nand->status.Bits.ctrl) ;	/*--- wait for cmd ---*/
	while ( ! Nand->status.Bits.mem0) ;	/*--- wait for busy on mem0 ---*/
	return Nand->read_status.Bits.status & 0xFF;
}

/*------------------------------------------------------------------------------------------*\
 * special for TOSHIBA BENAND
 * Issue the vendor ECC STATUS command and accumulate the number of corrected
 * bits.  NOTE(review): the loop re-reads the same status register four times;
 * presumably the hardware returns one per-district status byte per read —
 * confirm against the BENAND datasheet.
\*------------------------------------------------------------------------------------------*/
static unsigned int ath_nand_eccstatus(struct nand_chip *chip)
{
	struct ath_nand *const Nand = (struct ath_nand *)&(*(volatile unsigned int *)(KSEG1ADDR(ATH_NAND_FLASH_BASE)));
	unsigned int eccstatus, corrected_bits = 0;
	unsigned int i;

	Nand->cmd.Register = (NAND_CMD_ECCSTATUS << 8) | ATH_NF_COMMAND_CMD_SEQ_4;	/*--- Read Status ---*/
	while (Nand->status.Bits.ctrl) ;	/*--- wait for cmd ---*/
	while ( ! Nand->status.Bits.mem0) ;	/*--- wait for busy on mem0 ---*/
	for (i = 0; i < 4; i++) {
		eccstatus = Nand->read_status.Bits.status & 0xFF;
		if (eccstatus) {
			printk(KERN_ERR "[%s] %d Bit-Errors Block %d\n", __func__,
				(eccstatus >> 4) + 1, eccstatus & 0xF);
			corrected_bits += eccstatus & 0xF;
		}
	}
	return corrected_bits;
}

/*------------------------------------------------------------------------------------------*\
 * Central command dispatcher: programs address/length registers, selects the
 * controller command sequence + DMA direction for the requested operation,
 * kicks off the transfer and waits for completion (IRQ or polled).
 *
 * @cmd:    logical operation (ATH_NAND_CMD_*); NOTE(review): the enum variable
 *          is reused to hold the raw command-register value after the switch.
 * @page:   page number (or byte column for the *_COLUMN sequences).
 * @buffer: DMA-able physical address for data phases (from dma_map_single).
 * @len:    transfer length in bytes.
 * @sc:     driver state (wait queue for IRQ completion).
\*------------------------------------------------------------------------------------------*/
static void ath_nand_command(enum ath_nand_cmd cmd, int page, unsigned char *buffer,
			     unsigned int len, ath_nand_sc_t *sc)
{
	struct ath_nand *const Nand = (struct ath_nand *)&(*(volatile unsigned int *)(KSEG1ADDR(ATH_NAND_FLASH_BASE)));
	unsigned int dma_ctrl = 0;

	Nand->irq_status.Register = 0;	/*--- clear irq-Status ---*/
	Nand->addr0_0.Register = (page << 16);	/* row address into cycles 3..4 */
	if (page>>16)
		printk("[%s] ERROR: nur 4 Addresszyklen\n", __func__);
	Nand->addr1_0.Register = (page >> 16) & 0xF;
	Nand->pagesize.Register = Nand->dma_cnt.Register = len;
	wmb();
#if 0
	if ((cmd > 3) && (cmd != 9))
		printk(KERN_ERR "{%s} cmd %d page 0x%x len %d\n", __func__, cmd, page, len);
#endif
	switch (cmd) {
	case ATH_NAND_CMD_READID:
#if defined(ATH_USE_IRQ)
		Nand->irq_mask.Bits.mem0_rdy = 1;
#endif
		dma_ctrl = ATH_NF_DMA_CTRL_DMA_START | ATH_NF_DMA_CTRL_DMA_DIR_READ | ATH_NF_DMA_CTRL_DMA_BURST_3;
		cmd = (NAND_CMD_READID << 8) | ATH_NAND_INPUT_DMA | ATH_NF_COMMAND_CMD_SEQ_1;
		break;
	case ATH_NAND_CMD_READ:	/*--- read a complete page ---*/
#if defined(ATH_USE_IRQ)
		Nand->irq_mask.Bits.mem0_rdy = 1;
#endif
		dma_ctrl = ATH_NF_DMA_CTRL_DMA_START | ATH_NF_DMA_CTRL_DMA_DIR_READ | ATH_NF_DMA_CTRL_DMA_BURST_3;
		cmd = (NAND_CMD_READSTART << 16) | (NAND_CMD_READ0 << 8) | ATH_NAND_INPUT_DMA | ATH_NF_COMMAND_CMD_SEQ_10;
		break;
	case ATH_NAND_CMD_READ_INIT:	/*--- sequence to read the page without transferring the data ---*/
#if defined(ATH_USE_IRQ)
		Nand->irq_mask.Bits.mem0_rdy = 1;
#endif
		dma_ctrl = ATH_NF_DMA_CTRL_DMA_START | ATH_NF_DMA_CTRL_DMA_DIR_READ | ATH_NF_DMA_CTRL_DMA_BURST_3;
		cmd = (NAND_CMD_READSTART << 16) | (NAND_CMD_READ0 << 8) | ATH_NAND_INPUT_DMA | ATH_NF_COMMAND_CMD_SEQ_9;
		break;
	case ATH_NAND_CMD_READ_START:	/* data phase following READ_INIT */
#if defined(ATH_USE_IRQ)
		Nand->irq_mask.Bits.mem0_rdy = 1;
#endif
		dma_ctrl = ATH_NF_DMA_CTRL_DMA_START | ATH_NF_DMA_CTRL_DMA_DIR_READ | ATH_NF_DMA_CTRL_DMA_BURST_3;
		cmd = (NAND_CMD_READ0 << 8) | ATH_NAND_INPUT_DMA | ATH_NF_COMMAND_CMD_SEQ_11;
		break;
	case ATH_NAND_CMD_READ_COLUMN:	/* random output within current page; "page" is the column */
#if defined(ATH_USE_IRQ)
		Nand->irq_mask.Bits.mem0_rdy = 1;
#endif
		Nand->addr0_0.Register = page;
		dma_ctrl = ATH_NF_DMA_CTRL_DMA_START | ATH_NF_DMA_CTRL_DMA_DIR_READ | ATH_NF_DMA_CTRL_DMA_BURST_3;
		cmd = (NAND_CMD_RNDOUTSTART << 16) | (NAND_CMD_RNDOUT << 8) | ATH_NAND_INPUT_DMA | ATH_NF_COMMAND_CMD_SEQ_6;
		break;
	case ATH_NAND_CMD_PROGRAM:
		/* enable spare-area access when the caller writes beyond the data area */
		if (len > sc->mtd.writesize)
			Nand->ctrl.Bits.spare_en = 1;
#if defined(ATH_USE_IRQ)
		Nand->irq_mask.Bits.cmd_end = 1;
#endif
		dma_ctrl = ATH_NF_DMA_CTRL_DMA_START | ATH_NF_DMA_CTRL_DMA_DIR_WRITE | ATH_NF_DMA_CTRL_DMA_BURST_3;
		cmd = (NAND_CMD_PAGEPROG << 16) | (NAND_CMD_SEQIN << 8) | ATH_NAND_INPUT_DMA | ATH_NF_COMMAND_CMD_SEQ_12;
		break;
	case ATH_NAND_CMD_PROGRAM_INIT:	/* load page data without the final PAGEPROG */
#if defined(ATH_USE_IRQ)
		Nand->irq_mask.Bits.cmd_end = 1;
#endif
		dma_ctrl = ATH_NF_DMA_CTRL_DMA_START | ATH_NF_DMA_CTRL_DMA_DIR_WRITE | ATH_NF_DMA_CTRL_DMA_BURST_3;
		cmd = (NAND_CMD_SEQIN << 8) | ATH_NAND_INPUT_DMA | ATH_NF_COMMAND_CMD_SEQ_13;
		break;
	case ATH_NAND_CMD_PROGRAM_COLUMN:	/* random input; "page" is the column */
#if defined(ATH_USE_IRQ)
		Nand->irq_mask.Bits.cmd_end = 1;
#endif
		Nand->addr0_0.Register = page;
		dma_ctrl = ATH_NF_DMA_CTRL_DMA_START | ATH_NF_DMA_CTRL_DMA_DIR_WRITE | ATH_NF_DMA_CTRL_DMA_BURST_3;
		cmd = (NAND_CMD_RNDIN << 8) | ATH_NAND_INPUT_DMA | ATH_NF_COMMAND_CMD_SEQ_8;
		break;
	case ATH_NAND_CMD_PROGRAM_START:	/* commit previously loaded data */
#if defined(ATH_USE_IRQ)
		Nand->irq_mask.Bits.cmd_end = 1;
#endif
		cmd = (NAND_CMD_PAGEPROG << 8) | ATH_NAND_INPUT_DMA | ATH_NF_COMMAND_CMD_SEQ_0;
		break;
	case ATH_NAND_CMD_PROGRAMOOB:	/* program only the OOB area of the page */
		Nand->addr0_0.Register |= sc->mtd.writesize;
#if defined(ATH_USE_IRQ)
		Nand->irq_mask.Bits.mem0_rdy = 1;
#endif
		dma_ctrl = ATH_NF_DMA_CTRL_DMA_START | ATH_NF_DMA_CTRL_DMA_DIR_WRITE | ATH_NF_DMA_CTRL_DMA_BURST_3;
		cmd = (NAND_CMD_PAGEPROG << 16) | (NAND_CMD_SEQIN << 8) | ATH_NAND_INPUT_DMA | ATH_NF_COMMAND_CMD_SEQ_12;
		break;
	case ATH_NAND_CMD_ERASE:
#if defined(ATH_USE_IRQ)
		Nand->irq_mask.Bits.cmd_end = 1;
#endif
		cmd = (NAND_CMD_ERASE2 << 16) | (NAND_CMD_ERASE1 << 8) | ATH_NF_COMMAND_CMD_SEQ_14;
		break;
	case ATH_NAND_CMD_READOOB:	/* read only the OOB area of the page */
		Nand->addr0_0.Register |= sc->mtd.writesize;
#if defined(ATH_USE_IRQ)
		Nand->irq_mask.Bits.mem0_rdy = 1;
#endif
		dma_ctrl = ATH_NF_DMA_CTRL_DMA_START | ATH_NF_DMA_CTRL_DMA_DIR_READ | ATH_NF_DMA_CTRL_DMA_BURST_3;
		cmd = (NAND_CMD_READSTART << 16) | (NAND_CMD_READ0 << 8) | ATH_NAND_INPUT_DMA | ATH_NF_COMMAND_CMD_SEQ_10;
		break;
	case ATH_NAND_CMD_READONFI:	/* 0xEC = ONFI READ PARAMETER PAGE */
#if defined(ATH_USE_IRQ)
		Nand->irq_mask.Bits.mem0_rdy = 1;
#endif
		dma_ctrl = ATH_NF_DMA_CTRL_DMA_START | ATH_NF_DMA_CTRL_DMA_DIR_READ | ATH_NF_DMA_CTRL_DMA_BURST_3;
		cmd = (0xEC << 8) | ATH_NAND_INPUT_DMA | ATH_NF_COMMAND_CMD_SEQ_2;
		break;
	}
	if (dma_ctrl) {
		/* caller passes an already dma_map_single()'d address */
		Nand->dma_addr = (unsigned int)buffer;
		Nand->dma_ctrl.Register = dma_ctrl;
	}
	wmb();
	Nand->cmd.Register = cmd;	/* this write starts the sequence */
	wmb();
#if defined(ATH_USE_IRQ)
	/* sleep until the ISR sets NAND_WAIT_EVENT, then drain the DMA engine */
	wait_event(sc->wait_q, test_and_clear_bit(NAND_WAIT_EVENT, &sc->wait_flag));
	while ( ! Nand->dma_ctrl.Bits.ready) ;
	return;
#else
	while (Nand->status.Bits.ctrl) ;	/*--- wait for cmd ---*/
	while ( ! Nand->status.Bits.mem0) ;
	while ( ! Nand->dma_ctrl.Bits.ready) ;
	return;
#endif
}

/*------------------------------------------------------------------------------------------*\
 * we read a page from Toshiba BENAND
 * Reads the page in two phases (READ_INIT to latch it, then READ_START to DMA
 * the data) so the on-die ECC status can be checked in between; the OOB is
 * fetched afterwards with a column read into chip->oob_poi.
 * NOTE(review): the early -EIO return leaks the dma_map_single() mapping of
 * buf — it should dma_unmap_single() before returning.
\*------------------------------------------------------------------------------------------*/
static int ath_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			      uint8_t *buf, int oob_required, int page)
{
	ath_nand_sc_t *sc = (ath_nand_sc_t *)chip->priv;
	uint8_t *pa;
	unsigned int status, corrected_bits;

	pa = (uint8_t *)dma_map_single(NULL, buf, mtd->writesize, DMA_FROM_DEVICE);
	ath_nand_command(ATH_NAND_CMD_READ_INIT, page, pa, mtd->writesize, sc);
	status = ath_nand_status(chip);
	if (status & NAND_STATUS_FAIL) {
		mtd->ecc_stats.failed++;
		printk("[%s] ERROR: ECC failed page 0x%08x\n", __func__, page);
		return -EIO;
	}
	if (status & NAND_STATUS_CRITICAL_BLOCK) {
		/* correctable errors occurred: count them for the wear statistics */
		corrected_bits = ath_nand_eccstatus(chip);
		mtd->ecc_stats.corrected += corrected_bits;
	}
	ath_nand_command(ATH_NAND_CMD_READ_START, page, pa, mtd->writesize, sc);
	dma_unmap_single(NULL, (dma_addr_t)pa, mtd->writesize, DMA_FROM_DEVICE);
	pa = (uint8_t *)dma_map_single(NULL, chip->oob_poi, mtd->oobsize, DMA_FROM_DEVICE);
	ath_nand_command(ATH_NAND_CMD_READ_COLUMN, mtd->writesize, pa, mtd->oobsize, sc);
	dma_unmap_single(NULL, (dma_addr_t)pa, mtd->oobsize, DMA_FROM_DEVICE);
	return 0;
}

/*------------------------------------------------------------------------------------------*\
 * Read the OOB area of one page into chip->oob_poi.  Always returns 0; the
 * device status is not checked here.
\*------------------------------------------------------------------------------------------*/
static int ath_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
	ath_nand_sc_t *sc = (ath_nand_sc_t *)chip->priv;
	uint8_t *pa;

	pa = (uint8_t *)dma_map_single(NULL, chip->oob_poi, mtd->oobsize, DMA_FROM_DEVICE);
	ath_nand_command(ATH_NAND_CMD_READOOB, page, pa, mtd->oobsize, sc);
	dma_unmap_single(NULL, (dma_addr_t)pa, mtd->oobsize, DMA_FROM_DEVICE);
	return 0;
}
/*------------------------------------------------------------------------------------------*\
 * No-op: all operations complete synchronously, nothing to flush.
\*------------------------------------------------------------------------------------------*/
static void ath_nand_sync(struct mtd_info *mtd)
{
	return;
}

/*------------------------------------------------------------------------------------------*\
 * Copy OOB data out of chip->oob_poi into the caller's buffer according to
 * ops->mode (raw/placed: straight copy from ooboffs; auto: only the free
 * regions of the ECC layout).  Returns the advanced destination pointer.
\*------------------------------------------------------------------------------------------*/
static uint8_t *ath_nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
				      struct mtd_oob_ops *ops, size_t len)
{
	switch(ops->mode) {
	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_RAW:
		memcpy(oob, chip->oob_poi + ops->ooboffs, len);
		return oob + len;
	case MTD_OPS_AUTO_OOB: {
		struct nand_oobfree *free = chip->ecc.layout->oobfree;
		uint32_t boffs = 0, roffs = ops->ooboffs;
		size_t bytes = 0;
		for(; free->length && len; free++, len -= bytes) {
			/* Read request not from offset 0 ? */
			if (unlikely(roffs)) {
				if (roffs >= free->length) {
					/* skip whole free regions until roffs is consumed */
					roffs -= free->length;
					continue;
				}
				boffs = free->offset + roffs;
				bytes = min_t(size_t, len, (free->length - roffs));
				roffs = 0;
			} else {
				bytes = min_t(size_t, len, free->length);
				boffs = free->offset;
			}
			memcpy(oob, chip->oob_poi + boffs, bytes);
			oob += bytes;
		}
		return oob;
	}
	default:
		BUG();
	}
	return NULL;
}

/*------------------------------------------------------------------------------------------*\
 * Core data-read loop (modelled on nand_do_read_ops): reads page by page via
 * chip->ecc.read_page, bounces unaligned chunks through chip->buffers->databuf
 * and optionally copies OOB to ops->oobbuf.  Returns -EBADMSG on uncorrectable
 * ECC, -EUCLEAN if bits were corrected, 0 otherwise.
\*------------------------------------------------------------------------------------------*/
static int ath_nand_do_read_ops(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	int page, realpage, col, bytes, aligned;
	struct nand_chip *chip = mtd->priv;
	struct mtd_ecc_stats stats;
	int ret = 0;
	uint32_t readlen = ops->len;
	uint32_t oobreadlen = ops->ooblen;
	uint8_t *bufpoi, *oob, *buf;

	stats = mtd->ecc_stats;	/* snapshot to detect new ECC events below */
	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;
	col = (int)(from & (mtd->writesize - 1));
	buf = ops->datbuf;
	oob = ops->oobbuf;
	while(1) {
		bytes = min(mtd->writesize - col, readlen);
		aligned = (bytes == mtd->writesize);
		/* unaligned chunk: read into the bounce buffer, copy the slice out */
		bufpoi = aligned ? buf : chip->buffers->databuf;
		ret = chip->ecc.read_page(mtd, chip, bufpoi, 0, page);
		if (ret < 0)
			break;
		/* Transfer not aligned data */
		if (!aligned) {
			memcpy(buf, chip->buffers->databuf + col, bytes);
		}
		buf += bytes;
		if (unlikely(oob)) {
			/* Raw mode does data:oob:data:oob */
			if (ops->mode != MTD_OPS_RAW) {
				int toread = min(oobreadlen, chip->ecc.layout->oobavail);
				if (toread) {
					oob = ath_nand_transfer_oob(chip, oob, ops, toread);
					oobreadlen -= toread;
				}
			} else
				buf = ath_nand_transfer_oob(chip, buf, ops, mtd->oobsize);
		}
		readlen -= bytes;
		if (!readlen)
			break;
		/* For subsequent reads align to page boundary. */
		col = 0;
		/* Increment page address */
		realpage++;
		page = realpage & chip->pagemask;
	}
	ops->retlen = ops->len - (size_t)readlen;
	if (oob)
		ops->oobretlen = ops->ooblen - oobreadlen;
	if (ret)
		return ret;
	if (mtd->ecc_stats.failed - stats.failed)
		return -EBADMSG;
	return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
}

/*------------------------------------------------------------------------------------------*\
 * OOB-only read loop.
 * NOTE(review): inside the loop only realpage is incremented — "page" is never
 * recomputed, so multi-page OOB reads keep re-reading the first page.  Also
 * sndcmd is assigned but never used.
\*------------------------------------------------------------------------------------------*/
static int ath_nand_do_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	int page, realpage, sndcmd = 1;
	struct nand_chip *chip = mtd->priv;
	int readlen = ops->ooblen;
	int len;
	uint8_t *buf = ops->oobbuf;

	if (ops->mode == MTD_OPS_AUTO_OOB)
		len = chip->ecc.layout->oobavail;
	else
		len = mtd->oobsize;
	if (unlikely(ops->ooboffs >= len)) {
		printk("[%s] ERROR: Attempt to start read outside oob\n", __func__);
		return -EINVAL;
	}
	/* Do not allow reads past end of device */
	if (unlikely(from >= mtd->size ||
		     (ops->ooboffs + readlen) > ((mtd->size >> chip->page_shift) - (from >> chip->page_shift)) * len)) {
		printk("[%s] ERROR: Attempt read beyond end of device\n", __func__);
		return -EINVAL;
	}
	/* Shift to get page */
	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;
	while(1) {
		sndcmd = chip->ecc.read_oob(mtd, chip, page);
		len = min(len, readlen);
		buf = ath_nand_transfer_oob(chip, buf, ops, len);
		readlen -= len;
		if (!readlen)
			break;
		/* Increment page address */
		realpage++;
	}
	ops->oobretlen = ops->ooblen;
	return 0;
}

/*------------------------------------------------------------------------------------------*\
 * mtd->_read_oob entry point: validates the request, takes the controller and
 * dispatches to the data or OOB-only read path.
\*------------------------------------------------------------------------------------------*/
static int ath_nand_mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	int ret = -ENOTSUPP;
	/*--- printk("{%s} 0x%llx len %d\n", __func__, from, ops->len); ---*/

	ops->retlen = 0;
	if (ops->datbuf && (from + ops->len) > mtd->size) {
		printk("[%s] ERROR: Attempt read beyond end of device\n", __func__);
		return -EINVAL;
	}
	/*--- down(&ath_nand_sem); ---*/
	ath_get_device(mtd, FL_READING);
	switch(ops->mode) {
	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_AUTO_OOB:
	case MTD_OPS_RAW:
		break;
	default:
		goto out;
	}
	if (!ops->datbuf)
		ret = ath_nand_do_read_oob(mtd, from, ops);
	else
		ret = ath_nand_do_read_ops(mtd, from, ops);
out:
	/*--- up(&ath_nand_sem); ---*/
	ath_release_device(mtd);
	return ret;
}

/*------------------------------------------------------------------------------------------*\
 * Returns 1 if the device reports write-protect active, 0 otherwise.
\*------------------------------------------------------------------------------------------*/
static unsigned int ath_nand_check_wp(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd->priv;
	return (ath_nand_status(chip) & NAND_STATUS_WP) ? 0 : 1;	/* Check the WP bit */
}

/*------------------------------------------------------------------------------------------*\
 * Inverse of ath_nand_transfer_oob: scatter the caller's OOB bytes into
 * chip->oob_poi according to ops->mode.  Returns the advanced source pointer.
\*------------------------------------------------------------------------------------------*/
static uint8_t *ath_nand_fill_oob(struct nand_chip *chip, uint8_t *oob, struct mtd_oob_ops *ops)
{
	size_t len = ops->ooblen;

	switch(ops->mode) {
	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_RAW:
		memcpy(chip->oob_poi + ops->ooboffs, oob, len);
		return oob + len;
	case MTD_OPS_AUTO_OOB: {
		struct nand_oobfree *free = chip->ecc.layout->oobfree;
		uint32_t boffs = 0, woffs = ops->ooboffs;
		size_t bytes = 0;
		for(; free->length && len; free++, len -= bytes) {
			/* Write request not from offset 0 ? */
			if (unlikely(woffs)) {
				if (woffs >= free->length) {
					woffs -= free->length;
					continue;
				}
				boffs = free->offset + woffs;
				bytes = min_t(size_t, len, (free->length - woffs));
				woffs = 0;
			} else {
				bytes = min_t(size_t, len, free->length);
				boffs = free->offset;
			}
			memcpy(chip->oob_poi + boffs, oob, bytes);
			oob += bytes;
		}
		return oob;
	}
	default:
		BUG();
	}
	return NULL;
}

/*------------------------------------------------------------------------------------------*\
 * Program only the OOB area of a page from chip->oob_poi; returns -EIO when
 * the device reports a program failure.
\*------------------------------------------------------------------------------------------*/
static int ath_nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
	ath_nand_sc_t *sc = (ath_nand_sc_t *)chip->priv;
	unsigned int status;
	uint8_t *pa;

	pa = (uint8_t *)dma_map_single(NULL, chip->oob_poi, mtd->oobsize, DMA_TO_DEVICE);
	ath_nand_command(ATH_NAND_CMD_PROGRAMOOB, page, pa, mtd->oobsize, sc);
	dma_unmap_single(NULL, (dma_addr_t)pa, mtd->oobsize, DMA_TO_DEVICE);
	status = ath_nand_status(chip);
	return status & NAND_STATUS_FAIL ? -EIO : 0;
}

#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
/*------------------------------------------------------------------------------------------*\
 * Read the page back into sc->tmpbuffer and compare with what was written;
 * dumps a byte-by-byte diff on mismatch.  Returns 0 on match, -1 on mismatch.
\*------------------------------------------------------------------------------------------*/
static int ath_nand_verify_buf(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf, int page)
{
	ath_nand_sc_t *sc = (ath_nand_sc_t *)chip->priv;
	uint8_t *pa, *tmpbuffer = sc->tmpbuffer;

	pa = (uint8_t *)dma_map_single(NULL, tmpbuffer, mtd->writesize, DMA_FROM_DEVICE);
	ath_nand_command(ATH_NAND_CMD_READ, page, pa, mtd->writesize, sc);
	dma_unmap_single(NULL, (dma_addr_t)pa, mtd->writesize, DMA_FROM_DEVICE);
	if (memcmp(buf, tmpbuffer, mtd->writesize) ) {
		unsigned int i;
		printk(KERN_ERR "[%s] verify failed on page 0x%x\n", __func__, page);
		for (i = 0; i < mtd->writesize; i++)
			if (tmpbuffer[i] != buf[i])
				printk("<0x%x-0x%x> ", buf[i], tmpbuffer[i]);
			else
				printk("0x%x-0x%x ", buf[i], tmpbuffer[i]);
		return -1;
	}
	return 0;
}
#endif

/*------------------------------------------------------------------------------------------*\
 * Program one page: load data (PROGRAM_INIT), optionally append the OOB with
 * a random-input column write, then commit with PROGRAM_START and check the
 * device status.  Returns 0 or -EIO.
\*------------------------------------------------------------------------------------------*/
int ath_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf,
			int page, uint8_t *oob)
{
	ath_nand_sc_t *sc = (ath_nand_sc_t *)chip->priv;
	uint8_t *pa;
	unsigned char status;

	pa = (uint8_t *)dma_map_single(NULL, buf, mtd->writesize, DMA_TO_DEVICE);
	ath_nand_command(ATH_NAND_CMD_PROGRAM_INIT, page, pa, mtd->writesize, sc);
	dma_unmap_single(NULL, (dma_addr_t)pa, mtd->writesize, DMA_TO_DEVICE);
	if (oob) {
		/* append the OOB bytes at column mtd->writesize before committing */
		pa = (uint8_t *)dma_map_single(NULL, chip->oob_poi, mtd->oobsize, DMA_TO_DEVICE);
		ath_nand_command(ATH_NAND_CMD_PROGRAM_COLUMN, mtd->writesize, pa, mtd->oobsize, sc);
		dma_unmap_single(NULL, (dma_addr_t)pa, mtd->oobsize, DMA_TO_DEVICE);
	}
	ath_nand_command(ATH_NAND_CMD_PROGRAM_START, 0, NULL, 0, sc);
	status = ath_nand_status(chip);
	if (status & NAND_STATUS_FAIL)
		return -EIO;
#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
	if (ath_nand_verify_buf(mtd, chip, buf, page))
		return -EIO;
#endif
	return 0;
}

/*------------------------------------------------------------------------------------------*\
 * Core data-write loop (modelled on nand_do_write_ops).
 * NOTE(review): the partial-page test uses "writelen < (mtd->writesize - 1)";
 * upstream uses "< mtd->writesize" — a write of exactly writesize-1 bytes is
 * misclassified as a full page and reads one byte past the caller's buffer.
\*------------------------------------------------------------------------------------------*/
#define NOTALIGNED(x)	(x & (chip->subpagesize - 1)) != 0

static int ath_nand_do_write_ops(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
{
	int realpage, page, column;
	struct nand_chip *chip = mtd->priv;
	uint32_t writelen = ops->len;
	uint8_t *oob = ops->oobbuf;
	uint8_t *buf = ops->datbuf;
	int ret, subpage;

	ops->retlen = 0;
	if ( ! writelen)
		return 0;
	/* reject writes, which are not page aligned */
	if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
		printk("[%s] ERROR: Attempt to write not page aligned data ", __func__);
		iodbg("{%s} to 0x%llx ops->len %d\n", __func__, to, ops->len);
		return -EINVAL;
	}
	column = to & (mtd->writesize - 1);
	subpage = column || (writelen & (mtd->writesize - 1));
	iodbg("\n{%s} column 0x%x subpage 0x%x ", __func__, column, subpage);
	/* sub-page writes cannot carry explicit OOB data */
	if (subpage && oob)
		return -EINVAL;
	/* Check, if it is write protected */
	if (ath_nand_check_wp(mtd))
		return -EIO;
	realpage = (int)(to >> chip->page_shift);
	page = realpage & chip->pagemask;
	/* If we're not given explicit OOB data, let it be 0xFF */
	if (likely(!oob))
		memset(chip->oob_poi, 0xff, mtd->oobsize);
	while(1) {
		int bytes = mtd->writesize;
		uint8_t *wbuf = buf;
		/* Partial page write ? */
		if (unlikely(column || writelen < (mtd->writesize - 1))) {
			iodbg("Partial page write\n");
			bytes = min_t(int, bytes - column, (int) writelen);
			/* pad the rest of the bounce page with 0xff (NAND no-op) */
			memset(chip->buffers->databuf, 0xff, mtd->writesize);
			memcpy(&chip->buffers->databuf[column], buf, bytes);
			wbuf = chip->buffers->databuf;
		}
		if (unlikely(oob))
			oob = ath_nand_fill_oob(chip, oob, ops);
		ret = ath_nand_write_page(mtd, chip, wbuf, page, oob);
		if (ret)
			break;
		writelen -= bytes;
		if (!writelen)
			break;
		column = 0;
		buf += bytes;
		realpage++;
		page = realpage & chip->pagemask;
	}
	ops->retlen = ops->len - writelen;
	if (unlikely(oob))
		ops->oobretlen = ops->ooblen;
	return ret;
}

/*------------------------------------------------------------------------------------------*\
 * OOB-only write path: validates bounds, fills chip->oob_poi from the
 * caller's buffer and programs the spare area of one page.
\*------------------------------------------------------------------------------------------*/
static int ath_nand_do_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
{
	int page, status, len;
	struct nand_chip *chip = mtd->priv;

	iodbg("{%s}: to = 0x%08x, len = %i\n", __func__, (unsigned int)to, (int)ops->ooblen);
	if (ops->mode == MTD_OPS_AUTO_OOB)
		len = chip->ecc.layout->oobavail;
	else
		len = mtd->oobsize;
	/* Do not allow write past end of page */
	if ((ops->ooboffs + ops->ooblen) > len) {
		printk("[%s] ERROR: Attempt to write past end of page\n", __func__);
		return -EINVAL;
	}
	if (unlikely(ops->ooboffs >= len)) {
		printk("[%s] ERROR: Attempt to start write outside oob\n", __func__);
		return -EINVAL;
	}
	/* Do not allow reads past end of device */
	if (unlikely(to >= mtd->size ||
		     ops->ooboffs + ops->ooblen > ((mtd->size >> chip->page_shift) - (to >> chip->page_shift)) * len)) {
		printk("[%s] ERROR: Attempt write beyond end of device\n", __func__);
		return -EINVAL;
	}
	/* Shift to get page */
	page = (int)(to >> chip->page_shift);
	/* Check, if it is write protected */
	if (ath_nand_check_wp(mtd))
		return -EROFS;
	memset(chip->oob_poi, 0xff, mtd->oobsize);
	ath_nand_fill_oob(chip, ops->oobbuf, ops);
	status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
	memset(chip->oob_poi, 0xff, mtd->oobsize);
	if (status)
		return status;
	ops->oobretlen = ops->ooblen;
	return 0;
}

/*------------------------------------------------------------------------------------------*\
 * mtd->_write_oob entry point: validates, takes the controller and dispatches
 * to the data or OOB-only write path.
\*------------------------------------------------------------------------------------------*/
static int ath_nand_mtd_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
{
	int ret = -ENOTSUPP;

	ops->retlen = 0;
	/* Do not allow writes past end of device */
	if (ops->datbuf && (to + ops->len) > mtd->size) {
		printk("[%s] ERROR: Attempt write beyond end of device\n", __func__);
		return -EINVAL;
	}
	/*--- down(&ath_nand_sem); ---*/
	ath_get_device(mtd, FL_WRITING);
	switch(ops->mode) {
	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_AUTO_OOB:
	case MTD_OPS_RAW:
		break;
	default:
		goto out;
	}
	if ( ! ops->datbuf)
		ret = ath_nand_do_write_oob(mtd, to, ops);
	else
		ret = ath_nand_do_write_ops(mtd, to, ops);
out:
	/*--- up(&ath_nand_sem); ---*/
	ath_release_device(mtd);
	return ret;
}

/*------------------------------------------------------------------------------------------*\
 * mtd->_write entry point (data only, no OOB).
 * NOTE(review): the stack-allocated mtd_oob_ops is only partially initialized
 * (len/datbuf/oobbuf); ops.mode and ops.ooblen are stack garbage — harmless
 * only as long as do_write_ops never reads them when oobbuf is NULL; verify.
\*------------------------------------------------------------------------------------------*/
static int ath_nand_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
			  const u_char *buf)
{
	struct mtd_oob_ops ops;
	int ret;

	/* Do not allow reads past end of device */
	if ((to + len) > mtd->size)
		return -EINVAL;
	if (!len)
		return 0;
	/*--- down(&ath_nand_sem); ---*/
	ath_get_device(mtd, FL_WRITING);
	ops.len = len;
	ops.datbuf = (uint8_t *)buf;
	ops.oobbuf = NULL;
	ret = ath_nand_do_write_ops(mtd, to, &ops);
	*retlen = ops.retlen;
	/*--- up(&ath_nand_sem); ---*/
	ath_release_device(mtd);
	return ret;
}

/*------------------------------------------------------------------------------------------*\
 * mtd->_read entry point (data only, no OOB).  Same partial-initialization
 * caveat as ath_nand_write() above.
\*------------------------------------------------------------------------------------------*/
static int ath_nand_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
			 u_char *buf)
{
	struct mtd_oob_ops ops;
	int ret;

	/* Do not allow reads past end of device */
	if ((from + len) > mtd->size)
		return -EINVAL;
	if (!len)
		return 0;
	/*--- down(&ath_nand_sem); ---*/
	ath_get_device(mtd, FL_READING);
	ops.len = len;
	ops.datbuf = buf;
	ops.oobbuf = NULL;
	ret = ath_nand_do_read_ops(mtd, from, &ops);
	*retlen = ops.retlen;
	/*--- up(&ath_nand_sem); ---*/
	ath_release_device(mtd);
	return ret;
}

/*------------------------------------------------------------------------------------------*\
 * mtd->_erase: erase block by block over [instr->addr, instr->addr+len).
 * instr->priv == 1 forces the erase past the bad-block check.
 * NOTE(review) three suspected bugs in the loop:
 *  - ath_nand_block_isbad(mtd, instr->addr) checks the SAME first block every
 *    iteration (instr->addr is never advanced);
 *  - page is incremented BEFORE ath_nand_set_blk_state(), so the state of the
 *    NEXT block is marked erased, not the one just erased;
 *  - set_blk_state expects a byte offset (it shifts by erasesize_shift) but
 *    receives a page number here.
\*------------------------------------------------------------------------------------------*/
static int ath_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct nand_chip *chip = mtd->priv;
	ath_nand_sc_t *sc = (ath_nand_sc_t *)chip->priv;
	unsigned int page, pages_per_block, len;
	unsigned char status;
	int ret = 0;

	if (instr->addr & ((1 << chip->phys_erase_shift) - 1)) {
		printk("[%s] ERROR: Unaligned address\n", __func__);
		return -EINVAL;
	}
	/* Length must align on block boundary */
	if (instr->len & ((1 << chip->phys_erase_shift) - 1)) {
		printk("[%s] ERROR: Length not block aligned\n", __func__);
		return -EINVAL;
	}
	if (instr->addr + instr->len > mtd->size) {
		return -EINVAL;
	}
	/*--- down(&ath_nand_sem); ---*/
	ath_get_device(mtd, FL_ERASING);
	page = (int)(instr->addr >> chip->page_shift);
	len = instr->len;
	pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
	instr->state = MTD_ERASING;
	while (len) {
		if (instr->priv != 1) {	/*--- 1 signals force to mtd->erase ---*/
			if (ath_nand_block_isbad(mtd, instr->addr)) {
				instr->state = MTD_ERASE_FAILED;
				goto erase_exit;
			}
		}
		ath_nand_command(ATH_NAND_CMD_ERASE, page, NULL, 0, sc);
		status = ath_nand_status(chip);
		if (status & NAND_STATUS_FAIL) {
			printk("[%s] ERROR: Failed erase, page 0x%08x\n", __func__, page);
			instr->state = MTD_ERASE_FAILED;
			instr->fail_addr = ((loff_t)page << chip->page_shift);
			goto erase_exit;
		}
		/* Increment page address and decrement length */
		len -= (1 << chip->phys_erase_shift);
		page += pages_per_block;
		ath_nand_set_blk_state(mtd, page, ATH_NAND_BLK_ERASED);
	}
	instr->state = MTD_ERASE_DONE;
erase_exit:
	ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
	/*--- up(&ath_nand_sem); ---*/
	ath_release_device(mtd);
	if ( ! ret) {
		mtd_erase_callback(instr);
	}
	return ret;
}

/*------------------------------------------------------------------------------------------*\
 * Bad-block query: answer from the cached bbt when the state is known,
 * otherwise fall back to reading the factory bad-block marker from the OOB
 * (continued past this excerpt).
\*------------------------------------------------------------------------------------------*/
static int ath_nand_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	unsigned char oob[128];
	unsigned bs, i;
	struct mtd_oob_ops ops = {
		.mode = MTD_OPS_RAW,
		/*
		 * Read just 128 bytes. assuming Bad Block Marker
		 * is available in the initial few bytes.
		 */
		.ooblen = sizeof(oob),
		.oobbuf = oob,
	};

	bs = ath_nand_get_blk_state(mtd, ofs);
	if ((bs == ATH_NAND_BLK_ERASED) || (bs == ATH_NAND_BLK_GOOD)) {
		return 0;
	}
	if (bs == ATH_NAND_BLK_BAD) {
		return 1;
	}
	/*
	 * H27U1G8F2B Series [1 Gbit (128 M x 8 bit) NAND Flash]
	 *
	 * The Bad Block Information is written prior to shipping. Any
	 * block where the 1st Byte in the spare area of the 1st or
	 * 2nd th page (if the 1st page is Bad) does not contain FFh
	 * is a Bad Block. The Bad Block Information must be read
	 * before any erase is attempted as the Bad Block Information
	 * may be erased. For the system to be able to recognize the
	 * Bad Blocks based on the original information it is
	 * recommended to create a Bad Block table following the
	 * flowchart shown in Figure 24. The 1st block, which is
	 *                                  ^^^^^^^^^^^^^
	 * placed on 00h block address is guaranteed to be a valid
	 * block.
^^^^^^^^^^^^^^^^^^^^^^^^^^^ */ for (i = 0; i < 2; i++, ofs += mtd->writesize) { if (ath_nand_do_read_oob(mtd, ofs, &ops) || ops.oobretlen != ops.ooblen) { printk("[%s] ERROR: oob read failed at 0x%llx\n", __func__, ofs); return 1; } /* First two bytes of oob data are clean markers */ if (oob[0] != 0xff || oob[1] != 0xff) { printk("%s: \n", __func__, ofs); oobdbg( "%02x %02x %02x %02x %02x %02x %02x %02x " "%02x %02x %02x %02x %02x %02x %02x %02x " "%02x %02x %02x %02x %02x %02x %02x %02x " "%02x %02x %02x %02x %02x %02x %02x %02x " "%02x %02x %02x %02x %02x %02x %02x %02x " "%02x %02x %02x %02x %02x %02x %02x %02x " "%02x %02x %02x %02x %02x %02x %02x %02x " "%02x %02x %02x %02x %02x %02x %02x %02x\n", 0xff & oob[ 0], 0xff & oob[ 1], 0xff & oob[ 2], 0xff & oob[ 3], 0xff & oob[ 4], 0xff & oob[ 5], 0xff & oob[ 6], 0xff & oob[ 7], 0xff & oob[ 8], 0xff & oob[ 9], 0xff & oob[10], 0xff & oob[11], 0xff & oob[12], 0xff & oob[13], 0xff & oob[14], 0xff & oob[15], 0xff & oob[16], 0xff & oob[17], 0xff & oob[18], 0xff & oob[19], 0xff & oob[20], 0xff & oob[21], 0xff & oob[22], 0xff & oob[23], 0xff & oob[24], 0xff & oob[25], 0xff & oob[26], 0xff & oob[27], 0xff & oob[28], 0xff & oob[29], 0xff & oob[30], 0xff & oob[31], 0xff & oob[32], 0xff & oob[33], 0xff & oob[34], 0xff & oob[35], 0xff & oob[36], 0xff & oob[37], 0xff & oob[38], 0xff & oob[39], 0xff & oob[40], 0xff & oob[41], 0xff & oob[42], 0xff & oob[43], 0xff & oob[44], 0xff & oob[45], 0xff & oob[46], 0xff & oob[47], 0xff & oob[48], 0xff & oob[49], 0xff & oob[50], 0xff & oob[51], 0xff & oob[52], 0xff & oob[53], 0xff & oob[54], 0xff & oob[55], 0xff & oob[56], 0xff & oob[57], 0xff & oob[58], 0xff & oob[59], 0xff & oob[60], 0xff & oob[61], 0xff & oob[62], 0xff & oob[63]); ath_nand_set_blk_state(mtd, ofs, ATH_NAND_BLK_BAD); return 1; } } for (i = 0; (i < mtd->oobsize) && (oob[i] == 0xff); i++); if (i == mtd->oobsize) { ath_nand_set_blk_state(mtd, ofs, ATH_NAND_BLK_ERASED); } else { ath_nand_set_blk_state(mtd, ofs, 
ATH_NAND_BLK_GOOD); } return 0; } /*------------------------------------------------------------------------------------------*\ \*------------------------------------------------------------------------------------------*/ static int ath_nand_block_markbad(struct mtd_info *mtd, loff_t ofs) { unsigned char oob[128]; struct mtd_oob_ops ops = { .mode = MTD_OPS_RAW, .ooblen = mtd->oobsize, .oobbuf = oob, }; pr_debug("{%s} 0x%llx\n", __func__, ofs); memset(oob, 0xff, sizeof(oob)); oob[0] = 0x03; oob[1] = 0x03; if (ath_nand_mtd_write_oob(mtd, ofs, &ops) || ops.oobretlen != ops.ooblen) { printk("[%s] ERROR: oob write failed at 0x%llx\n", __func__, ofs); return 1; } return 0; } /*------------------------------------------------------------------------------------------*\ \*------------------------------------------------------------------------------------------*/ static unsigned long ath_parse_read_id(ath_nand_sc_t *sc) { int i; #if 0 iodbg( "____ %s _____\n" " vid did wc ilp nsp ct dp sa1 org bs sa0 ss " "ps res1 pls pn res2\n" "0x%3x %3x %3x %3x %3x %3x %3x %3x %3x %3x %3x %3x " "%3x %3x %3x %3x %3x\n-------------\n", __func__, sc->nid.vid, sc->nid.did, sc->nid.wc, sc->nid.ilp, sc->nid.nsp, sc->nid.ct, sc->nid.dp, sc->nid.sa1, sc->nid.org, sc->nid.bs, sc->nid.sa0, sc->nid.ss, sc->nid.ps, sc->nid.res1, sc->nid.pls, sc->nid.pn, sc->nid.res2); #endif for (i = 0; nand_manuf_ids[i].id != 0x0; i++) { if (nand_manuf_ids[i].id == sc->nid.vid) { printk(nand_manuf_ids[i].name); break; } } for (i = 0; nand_flash_ids[i].dev_id != 0x0; i++) { if (nand_flash_ids[i].dev_id == sc->nid.did) { printk(" %s [%uMB]\n", nand_flash_ids[i].name, nand_flash_ids[i].chipsize); return nand_flash_ids[i].chipsize; } } return 0; } /*------------------------------------------------------------------------------------------*\ \*------------------------------------------------------------------------------------------*/ ath_nand_vend_data_t * ath_nand_get_entry(ath_nand_id_t *nand_id, 
ath_nand_vend_data_t *tbl, int count) { int i; /*--- printk("{%s} vid 0x%x did 0x%x\n", __func__, nand_id->__details.vid, nand_id->__details.did); ---*/ for (i = 0; i < count; i++, tbl++) { /*--- printk("tbl vid 0x%x did 0x%x\n", tbl->vid, tbl->did); ---*/ if ((nand_id->__details.vid == tbl->vid) && (nand_id->__details.did == tbl->did) && (nand_id->byte_id[1] == tbl->b3)) { /*--- printk("{%s} found entry\n", __func__); ---*/ return tbl; } } return NULL; } /*------------------------------------------------------------------------------------------*\ \*------------------------------------------------------------------------------------------*/ static inline void ath_nand_onfi_endian_convert(uint8_t *buf) { uint32_t i, *u = (uint32_t *)(buf + ONFI_DEV_DESC); for (i = 0; i < (ONFI_DEV_DESC_SZ / sizeof(*u)); i++) { u[i] = __le32_to_cpu(u[i]); } // Hope nobody has a 20 character device description buf[ONFI_DEV_DESC + ONFI_DEV_DESC_SZ - 1] = 0; } /*------------------------------------------------------------------------------------------*\ \*------------------------------------------------------------------------------------------*/ int ath_nand_get_onfi(ath_nand_sc_t *sc, uint8_t *buf, unsigned count) { uint8_t *pa; pa = (uint8_t *)dma_map_single(NULL, buf, count, DMA_FROM_DEVICE); ath_nand_command(ATH_NAND_CMD_READONFI, 0, pa, count, sc); dma_unmap_single(NULL, (dma_addr_t)pa, count, DMA_FROM_DEVICE); if (buf[3] == 'O' && buf[2] == 'N' && buf[1] == 'F' && buf[0] == 'I') { ath_nand_onfi_endian_convert(buf); return 0; } return 1; } /*------------------------------------------------------------------------------------------*\ * System initialization functions \*------------------------------------------------------------------------------------------*/ static loff_t ath_nand_hw_init(ath_nand_sc_t *sc, void *p) { struct ath_nand *const Nand = (struct ath_nand *)&(*(volatile unsigned int *)(KSEG1ADDR(ATH_NAND_FLASH_BASE))); struct mtd_info *mtd = &sc->mtd; unsigned int 
rddata, i; loff_t flashsize; // Put into reset ath_reg_rmw_set(ATH_RESET, RST_RESET_NANDF_RESET_MASK); udelay(250); ath_reg_rmw_clear(ATH_RESET, RST_RESET_NANDF_RESET_MASK); udelay(100); Nand->irq_mask.Register = 0; Nand->time_asyn.Register = ATH_NF_TIMING_ASYN; /*--- TIMINGS_ASYN Reg Settings ---*/ Nand->mem_ctrl.Register = 0xff00; /*--- remove WriteProtect ---*/ Nand->cmd.Register = (NAND_CMD_RESET << 8) | ATH_NF_COMMAND_CMD_SEQ_0; /*--- Reset Flash ---*/ while (Nand->status.Bits.ctrl) ; /*--- wait for cmd ---*/ i = ATH_NF_STATUS_RETRY; while ( ! Nand->status.Bits.mem0 ) { udelay(25); if ( i-- == 0 ) panic("NAND Device Reset failed\n"); } #if defined(ATH_USE_IRQ) Nand->ctrl.Bits.int_en = 1; #endif if (p) { unsigned char *pa; ath_nand_vend_data_t *entry; pa = (unsigned char *)dma_map_single(NULL, p, 8, DMA_FROM_DEVICE); ath_nand_command(ATH_NAND_CMD_READID, 0, pa, 8, sc); dma_unmap_single(NULL, (dma_addr_t)pa, 8, DMA_FROM_DEVICE); pa = p; printk("ath Nand ID: %02x:%02x:%02x:%02x:%02x\n", pa[3], pa[2], pa[1], pa[0], pa[7]); sc->onfi[0] = 0; entry = ath_nand_get_entry((ath_nand_id_t *)p, ath_nand_arr, NUM_ARRAY_ENTRIES(ath_nand_arr)); if (entry) { sc->entry = entry; Nand->ctrl.Bits.addr_cycle0 = entry->addrcyc; /*--- Nand->ctrl.Bits.addr_cycle1 = entry->addrcyc; ---*/ } else if (ath_nand_get_onfi(sc, sc->onfi, sizeof(sc->onfi)) == 0) { rddata = sc->onfi[ONFI_NUM_ADDR_CYCLES]; rddata = ((rddata >> 4) & 0xf) + (rddata & 0xf); Nand->ctrl.Bits.addr_cycle0 = rddata; /*--- Nand->ctrl.Bits.addr_cycle1 = rddata; ---*/ } else { printk("[%s] \n", __func__); Nand->ctrl.Bits.addr_cycle0 = ATH_NF_CTRL_ADDR_CYCLE_5; /*--- Nand->ctrl.Bits.addr_cycle1 = ATH_NF_CTRL_ADDR_CYCLE_5; ---*/ } } if ( ! 
ath_nand_sc->onfi[0]) { mtd->writesize_shift = 10 + ath_nand_sc->nid.ps; mtd->writesize = (1 << mtd->writesize_shift); mtd->writesize_mask = (mtd->writesize - 1); mtd->erasesize_shift = 16 + ath_nand_sc->nid.bs; mtd->erasesize = (1 << mtd->erasesize_shift); mtd->erasesize_mask = (mtd->erasesize - 1); mtd->oobsize = (mtd->writesize / 512) * (8 << ath_nand_sc->nid.ss); mtd->oobavail = mtd->oobsize; flashsize = ath_parse_read_id(ath_nand_sc) << 20; if ( ! flashsize ) { flashsize = ath_plane_size[ath_nand_sc->nid.pls] << ath_nand_sc->nid.pn; } mtd->size = flashsize; } else { mtd->writesize = *(uint32_t *)(&ath_nand_sc->onfi[ONFI_PAGE_SIZE]); mtd->writesize_shift = ffs(mtd->writesize) - 1; mtd->writesize_mask = (mtd->writesize - 1); mtd->erasesize = *(uint32_t *)(&ath_nand_sc->onfi[ONFI_PAGES_PER_BLOCK]) * mtd->writesize; mtd->erasesize_shift = ffs(mtd->erasesize) - 1; mtd->erasesize_mask = (mtd->erasesize - 1); mtd->oobsize = *(uint16_t *)(&ath_nand_sc->onfi[ONFI_SPARE_SIZE]); mtd->oobavail = mtd->oobsize; mtd->size = mtd->erasesize * (*(uint32_t *)(&ath_nand_sc->onfi[ONFI_BLOCKS_PER_LUN])) * ath_nand_sc->onfi[ONFI_NUM_LUNS]; } for (i = 0; i < 7; i++) { if (mtd->writesize == (256 << i)) { /*--- ath_nand_sc->nf_ctrl |= (i << 8); ---*/ Nand->ctrl.Bits.pagesize = i; } } for (i = 0; i < 4; i++) { if ((mtd->erasesize / mtd->writesize) == (32 << i)) { /*--- ath_nand_sc->nf_ctrl |= (i << 6); ---*/ Nand->ctrl.Bits.blocksize = i; } } Nand->spare_size.Register = mtd->oobsize; return mtd->size; } static struct nand_ecclayout oobinfo_ath_4bit = { #if 0 /*--- HW-ECC ---*/ .eccbytes = 28, .eccpos = { 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47 }, #endif .oobavail = 32, .oobfree = { { .offset = 4, .length = 16 }, { .offset = 48, .length = 16 } } }; /*------------------------------------------------------------------------------------------*\ * ath_nand_probe * * called by device layer when it finds a device matching * 
one our driver can handled. This code checks to see if * it can allocate all necessary resources then calls the * nand layer to look for devices \*------------------------------------------------------------------------------------------*/ static int ath_nand_probe(struct platform_device *pdev) { struct physmap_flash_data *ath_nand_data = pdev->dev.platform_data; struct mtd_info *mtd; struct nand_chip *chip; int err = 0, bbt_size; #if defined(ATH_USE_IRQ) int result; #endif if ( ! ath_nand_data) { printk(KERN_ERR "[%s] Platform data invalid", __func__); return -EINVAL; } if (ath_nand_sc) { printk(KERN_ERR "[%s] ERROR: always initialized\n", __func__); return -EPERM; } ath_nand_sc = kzalloc(sizeof(ath_nand_sc_t) + sizeof(struct nand_chip), GFP_KERNEL); if (ath_nand_sc == NULL) { printk(KERN_ERR "[%s] ERROR: no memory for flash\n", __func__); return -ENOMEM; } chip = (struct nand_chip *)(&ath_nand_sc[1]); sema_init(&ath_nand_sem, 1); #if defined(ATH_USE_IRQ) result = request_irq(ATH_MISC_IRQ_NANDF, ath_nand_intr_handler, 0, "ath_nand", ath_nand_sc); if(result < 0) { printk("[%s] ERROR: request irq failed\n", __func__); err = -EINTR; goto out_err_hw_init; } init_waitqueue_head(&ath_nand_sc->wait_q); #endif /* initialise the hardware */ if ( ! 
ath_nand_hw_init(ath_nand_sc, &ath_nand_sc->nid)) { printk(KERN_ERR "[%s] ERROR: nand_hw_init\n", __func__); goto out_err_hw_init; } /* initialise mtd ath_nand_sc data struct */ mtd = &ath_nand_sc->mtd; mtd->priv = chip; chip->priv = ath_nand_sc; mtd->name = DRV_NAME; mtd->owner = THIS_MODULE; mtd->type = MTD_NANDFLASH; mtd->flags = MTD_CAP_NANDFLASH; chip->chipsize = mtd->size; chip->page_shift = mtd->writesize_shift; chip->pagemask = (chip->chipsize >> chip->page_shift) - 1; chip->subpagesize = mtd->writesize >> mtd->subpage_sft; /*--- now we use no subpage ---*/ chip->bbt_erase_shift = chip->phys_erase_shift = mtd->erasesize_shift; chip->buffers = kmalloc(sizeof(*chip->buffers) + mtd->writesize + mtd->oobsize * 3, GFP_KERNEL); if ( ! chip->buffers) { printk(KERN_ERR "[%s] ERROR: no memory for flash\n", __func__); err = -ENOMEM; goto out_err_hw_init; } chip->buffers->ecccalc = (uint8_t *)(chip->buffers + 1); chip->buffers->ecccode = chip->buffers->ecccalc + mtd->oobsize; chip->buffers->databuf = chip->buffers->ecccode + mtd->oobsize; chip->oob_poi = chip->buffers->databuf + mtd->writesize; ath_nand_sc->tmpbuffer = kmalloc(mtd->writesize, GFP_KERNEL); if ( ! 
ath_nand_sc->tmpbuffer) { printk(KERN_ERR "[%s] ERROR: no memory for tmpbuffer\n", __func__); err = -ENOMEM; goto out_err_hw_init; } chip->ecc.layout = &oobinfo_ath_4bit; chip->ecc.read_page = ath_nand_read_page; chip->ecc.read_oob = ath_nand_read_oob; chip->ecc.write_oob = ath_nand_write_oob; mtd->_read = ath_nand_read; mtd->_write = ath_nand_write; mtd->_erase = ath_nand_erase; mtd->_sync = ath_nand_sync; mtd->_read_oob = ath_nand_mtd_read_oob; mtd->_write_oob = ath_nand_mtd_write_oob; mtd->_block_isbad = ath_nand_block_isbad; mtd->_block_markbad = ath_nand_block_markbad; chip->controller = &chip->hwcontrol; spin_lock_init(&chip->controller->lock); init_waitqueue_head(&chip->controller->wq); /*--- ath_nand_ecc_init(mtd); ---*/ // bbt has 2 bits per block bbt_size = ((mtd->size >> mtd->erasesize_shift) * 2) / 8; ath_nand_sc->bbt = kzalloc(bbt_size, GFP_KERNEL); if ( ! bbt_size) { printk(KERN_ERR "[%s] ERROR: no memory for bbt\n", __func__); err = -ENOMEM; goto out_err_hw_init; } mtd_device_parse_register(mtd, ath_nand_data->part_probe_types, NULL, ath_nand_data->parts, ath_nand_data->nr_parts); printk( "====== NAND Parameters ======\n" "ath_nand_sc = 0x%p bbt = 0x%p bbt_size = 0x%x page = 0x%x block = 0x%x oob = 0x%x\n", ath_nand_sc, ath_nand_sc->bbt, bbt_size, mtd->writesize, mtd->erasesize, mtd->oobsize); return 0; out_err_hw_init: if (chip->buffers) kfree(chip->buffers); if (ath_nand_sc->tmpbuffer) kfree(ath_nand_sc->tmpbuffer); kfree(ath_nand_sc); ath_nand_sc = 0; return err; } /*------------------------------------------------------------------------------------------*\ \*------------------------------------------------------------------------------------------*/ static int __init ath_nand_init(void) { printk(DRV_DESC ", Version " DRV_VERSION " (c) 2014 AVM GmbH, 2010 Atheros Communications, Ltd.\n"); return platform_driver_register(&ath_avmnand_driver); } /*------------------------------------------------------------------------------------------*\ 
\*------------------------------------------------------------------------------------------*/ static int ath_nand_remove(struct platform_device *dev) { struct nand_chip *chip; struct mtd_info *mtd; ath_nand_sc_t *tmp = ath_nand_sc; mtd = &tmp->mtd; chip = mtd->priv; if (chip->buffers) kfree(chip->buffers); if (ath_nand_sc->tmpbuffer) kfree(ath_nand_sc->tmpbuffer); #ifdef CONFIG_MTD_PARTITIONS del_mtd_partitions(&ath_nand_sc->mtd); /* Deregister partitions */ #endif ath_nand_sc = NULL; kfree(tmp); return 0; } /*------------------------------------------------------------------------------------------*\ \*------------------------------------------------------------------------------------------*/ static void __exit ath_nand_exit(void) { platform_driver_unregister(&ath_avmnand_driver); } module_init(ath_nand_init); module_exit(ath_nand_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR(DRV_AUTHOR); MODULE_DESCRIPTION(DRV_DESC); MODULE_ALIAS("platform:" DRV_NAME);