OpenWrt – Blame information for rev 4

Rev Author Line No. Line
4 office 1 --- a/drivers/mtd/nand/Kconfig
2 +++ b/drivers/mtd/nand/Kconfig
3 @@ -589,4 +589,12 @@ config MTD_NAND_AR934X_HW_ECC
4 bool "Hardware ECC support for the AR934X NAND Controller (EXPERIMENTAL)"
5 depends on MTD_NAND_AR934X
6  
7 +config MTD_NAND_SPI_NAND
8 +	tristate "SPI NAND flash support"
9 + default n
10 + depends on MTD_NAND
11 + help
12 +	  Enables the driver for the SPI NAND flash controller on
13 +	  Qualcomm Atheros AR71xx and AR9xxx SoCs.
14 +
15 endif # MTD_NAND
16 --- a/drivers/mtd/nand/Makefile
17 +++ b/drivers/mtd/nand/Makefile
18 @@ -62,5 +62,6 @@ obj-$(CONFIG_MTD_NAND_HISI504) +
19 obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
20 obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
21 obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o
22 +obj-$(CONFIG_MTD_NAND_SPI_NAND) += spi_nand/
23  
24 nand-objs := nand_base.o nand_bbt.o nand_timings.o
25 --- /dev/null
26 +++ b/drivers/mtd/nand/spi_nand/Makefile
27 @@ -0,0 +1 @@
28 +obj-$(CONFIG_MTD_NAND_SPI_NAND) += generic-spinand-controller.o core.o bbt.o nand_core.o micron.o etron.o gigadevice.o paragon.o
29 \ No newline at end of file
30 --- /dev/null
31 +++ b/drivers/mtd/nand/spi_nand/bbt.c
32 @@ -0,0 +1,79 @@
33 +/*
34 + * Copyright (c) 2017 Free Electrons
35 + *
36 + * This program is free software; you can redistribute it and/or
37 + * modify it under the terms of the GNU General Public License
38 + * as published by the Free Software Foundation; either version 2
39 + * of the License, or (at your option) any later version.
40 + *
41 + * This program is distributed in the hope that it will be useful,
42 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
43 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
44 + * GNU General Public License for more details.
45 + *
46 + * Authors:
47 + * Boris Brezillon <boris.brezillon@free-electrons.com>
48 + * Peter Pan <peterpandong@micron.com>
49 + */
50 +
51 +#define pr_fmt(fmt) "nand-bbt: " fmt
52 +
53 +#include <linux/mtd/nand.h>
54 +#include <linux/slab.h>
55 +#include <linux/mtd/spinand.h>
56 +
57 +int nanddev_bbt_init(struct nand_device *nand)
58 +{
59 +	/* one status byte per eraseblock; kzalloc() already zeroes it */
60 +	unsigned int nblocks = nanddev_neraseblocks(nand);
61 +
62 +	nand->bbt.cache = kzalloc(nblocks, GFP_KERNEL);
63 +	if (!nand->bbt.cache)
64 +		return -ENOMEM;
65 +	return 0;
66 +}
67 +EXPORT_SYMBOL_GPL(nanddev_bbt_init);
68 +
69 +void nanddev_bbt_cleanup(struct nand_device *nand)
70 +{
71 + kfree(nand->bbt.cache);
72 +}
73 +EXPORT_SYMBOL_GPL(nanddev_bbt_cleanup);
74 +
75 +int nanddev_bbt_update(struct nand_device *nand)
76 +{
77 + return 0;
78 +}
79 +EXPORT_SYMBOL_GPL(nanddev_bbt_update);
80 +
81 +int nanddev_bbt_get_block_status(const struct nand_device *nand,
82 + unsigned int entry)
83 +{
84 +	unsigned char *pos = nand->bbt.cache + entry;
85 +
86 +	if (entry >= nanddev_neraseblocks(nand))
87 +		return -ERANGE;
88 +
89 +	return pos[0];
95 +}
96 +EXPORT_SYMBOL_GPL(nanddev_bbt_get_block_status);
97 +
98 +int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
99 + enum nand_bbt_block_status status)
100 +{
101 +	unsigned char *pos = nand->bbt.cache + entry;
102 +
103 +	if (entry >= nanddev_neraseblocks(nand))
104 +		return -ERANGE;
105 +
106 +	pos[0] = status & 0xff;
108 +
109 + return 0;
110 +}
111 +EXPORT_SYMBOL_GPL(nanddev_bbt_set_block_status);
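
For context, the helpers above are designed for lazy consumption: query the cached status first and only fall back to the on-flash bad-block marker when an entry is still unknown. A minimal sketch of that pattern (illustrative only; the function name is hypothetical, and nanddev_isbad() in nand_core.c below implements exactly this logic):

	static bool example_isbad_cached(struct nand_device *nand,
					 const struct nand_pos *pos)
	{
		unsigned int entry = nanddev_bbt_pos_to_entry(nand, pos);
		int status = nanddev_bbt_get_block_status(nand, entry);

		if (status == NAND_BBT_BLOCK_STATUS_UNKNOWN) {
			/* first lookup: read the OOB marker, then cache it */
			status = nand->ops->isbad(nand, pos) ?
				 NAND_BBT_BLOCK_FACTORY_BAD : NAND_BBT_BLOCK_GOOD;
			nanddev_bbt_set_block_status(nand, entry, status);
		}

		return status == NAND_BBT_BLOCK_WORN ||
		       status == NAND_BBT_BLOCK_FACTORY_BAD;
	}
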
112 --- /dev/null
113 +++ b/drivers/mtd/nand/spi_nand/core.c
114 @@ -0,0 +1,902 @@
115 +/*
116 + *
117 + * Copyright (c) 2016-2017 Micron Technology, Inc.
118 + *
119 + * This program is free software; you can redistribute it and/or
120 + * modify it under the terms of the GNU General Public License
121 + * as published by the Free Software Foundation; either version 2
122 + * of the License, or (at your option) any later version.
123 + *
124 + * This program is distributed in the hope that it will be useful,
125 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
126 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
127 + * GNU General Public License for more details.
128 + */
129 +
130 +#define pr_fmt(fmt) "spi-nand: " fmt
131 +
132 +#include <linux/kernel.h>
133 +#include <linux/device.h>
134 +#include <linux/module.h>
135 +#include <linux/jiffies.h>
136 +#include <linux/mtd/spinand.h>
137 +#include <linux/slab.h>
138 +#include <linux/of.h>
139 +
140 +static inline void spinand_adjust_cache_op(struct spinand_device *spinand,
140 + const struct nand_page_io_req *req,
141 + struct spinand_op *op)
142 +{
143 + if (!spinand->manufacturer.manu->ops->adjust_cache_op)
144 + return;
145 +
146 + spinand->manufacturer.manu->ops->adjust_cache_op(spinand, req, op);
147 +}
148 +
149 +static inline int spinand_exec_op(struct spinand_device *spinand,
150 + struct spinand_op *op)
151 +{
152 + return spinand->controller.controller->ops->exec_op(spinand, op);
153 +}
154 +
155 +static inline void spinand_op_init(struct spinand_op *op)
156 +{
157 + memset(op, 0, sizeof(struct spinand_op));
158 + op->addr_nbits = 1;
159 + op->data_nbits = 1;
160 +}
161 +
162 +static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
163 +{
164 + struct spinand_op op;
165 + int ret;
166 +
167 + spinand_op_init(&op);
168 + op.cmd = SPINAND_CMD_GET_FEATURE;
169 + op.n_addr = 1;
170 + op.addr[0] = reg;
171 + op.n_rx = 1;
172 + op.rx_buf = val;
173 +
174 + ret = spinand_exec_op(spinand, &op);
175 + if (ret < 0)
176 + pr_err("failed to read register %d (err = %d)\n", reg, ret);
177 +
178 + return ret;
179 +}
180 +
181 +static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
182 +{
183 + struct spinand_op op;
184 + int ret;
185 +
186 + spinand_op_init(&op);
187 + op.cmd = SPINAND_CMD_SET_FEATURE;
188 + op.n_addr = 1;
189 + op.addr[0] = reg;
190 + op.n_tx = 1;
191 + op.tx_buf = &val;
192 +
193 + ret = spinand_exec_op(spinand, &op);
194 + if (ret < 0)
195 + pr_err("failed to write register %d (err = %d)\n", reg, ret);
196 +
197 + return ret;
198 +}
199 +
200 +static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
201 +{
202 + return spinand_read_reg_op(spinand, REG_CFG, cfg);
203 +}
204 +
205 +static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
206 +{
207 + return spinand_write_reg_op(spinand, REG_CFG, cfg);
208 +}
209 +
210 +static int spinand_read_status(struct spinand_device *spinand, u8 *status)
211 +{
212 + return spinand_read_reg_op(spinand, REG_STATUS, status);
213 +}
214 +
215 +static void spinand_disable_ecc(struct spinand_device *spinand)
216 +{
217 + u8 cfg = 0;
218 +
219 + spinand_get_cfg(spinand, &cfg);
220 +
221 + if ((cfg & CFG_ECC_MASK) == CFG_ECC_ENABLE) {
222 + cfg &= ~CFG_ECC_ENABLE;
223 + spinand_set_cfg(spinand, cfg);
224 + }
225 +}
226 +
227 +static void spinand_enable_ecc(struct spinand_device *spinand)
228 +{
229 + u8 cfg = 0;
230 +
231 + spinand_get_cfg(spinand, &cfg);
232 +
233 + if ((cfg & CFG_ECC_MASK) != CFG_ECC_ENABLE) {
234 + cfg |= CFG_ECC_ENABLE;
235 + spinand_set_cfg(spinand, cfg);
236 + }
237 +}
238 +
239 +static int spinand_write_enable_op(struct spinand_device *spinand)
239 +{
240 + struct spinand_op op;
241 +
242 + spinand_op_init(&op);
243 + op.cmd = SPINAND_CMD_WR_ENABLE;
244 +
245 + return spinand_exec_op(spinand, &op);
246 +}
247 +
248 +static int spinand_load_page_op(struct spinand_device *spinand,
249 + const struct nand_page_io_req *req)
250 +{
251 +	struct nand_device *nand = &spinand->base;
252 +	unsigned int row = nanddev_pos_to_offs(nand, &req->pos);
253 +	unsigned int page = row / nand->memorg.pagesize;
254 +	unsigned int block = page / nand->memorg.pages_per_eraseblock;
255 +	struct spinand_op op;
256 +
257 +	spinand_op_init(&op);
258 +	op.cmd = SPINAND_CMD_PAGE_READ;
259 +	op.n_addr = 3;
260 +	op.addr[0] = block >> 10;
261 +	op.addr[1] = block >> 2;
262 +	op.addr[2] = ((block & 0x3) << 6) | (page & 0x3f);
263 +
264 + return spinand_exec_op(spinand, &op);
265 +}
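
The three address bytes built above are simply the linear row number, block * 64 + page for the 64-pages-per-block parts this driver targets, split big-endian. A quick worked check (illustrative values only):

	/*
	 * block = 5, page = 3:
	 *   row     = 5 * 64 + 3           = 0x143
	 *   addr[0] = block >> 10          = 0x00  (row >> 16)
	 *   addr[1] = block >> 2           = 0x01  (row >> 8)
	 *   addr[2] = ((5 & 0x3) << 6) | 3 = 0x43  (row & 0xff)
	 */
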
266 +
267 +static int spinand_get_address_bits(u8 opcode)
268 +{
269 + switch (opcode) {
270 + case SPINAND_CMD_READ_FROM_CACHE_QUAD_IO:
271 + return 4;
272 + case SPINAND_CMD_READ_FROM_CACHE_DUAL_IO:
273 + return 2;
274 + default:
275 + return 1;
276 + }
277 +}
278 +
279 +static int spinand_get_data_bits(u8 opcode)
280 +{
281 + switch (opcode) {
282 + case SPINAND_CMD_READ_FROM_CACHE_QUAD_IO:
283 + case SPINAND_CMD_READ_FROM_CACHE_X4:
284 + case SPINAND_CMD_PROG_LOAD_X4:
285 + case SPINAND_CMD_PROG_LOAD_RDM_DATA_X4:
286 + return 4;
287 + case SPINAND_CMD_READ_FROM_CACHE_DUAL_IO:
288 + case SPINAND_CMD_READ_FROM_CACHE_X2:
289 + return 2;
290 + default:
291 + return 1;
292 + }
293 +}
294 +
295 +static int spinand_read_from_cache_op(struct spinand_device *spinand,
296 + const struct nand_page_io_req *req)
297 +{
298 + struct nand_device *nand = &spinand->base;
299 + struct nand_page_io_req adjreq = *req;
300 + struct spinand_op op;
301 + u16 column = 0;
302 + int ret;
303 +
304 + spinand_op_init(&op);
305 + op.cmd = spinand->read_cache_op;
306 + op.n_addr = 3;
307 + op.addr_nbits = spinand_get_address_bits(spinand->read_cache_op);
308 + if (req->datalen) {
309 + adjreq.datalen = nanddev_page_size(nand);
310 + adjreq.dataoffs = 0;
311 + adjreq.databuf.in = spinand->buf;
312 + op.rx_buf = spinand->buf;
313 + op.n_rx = adjreq.datalen;
314 + }
315 +
316 +	if (req->ooblen) {
317 +		adjreq.ooblen = nanddev_per_page_oobsize(nand);
318 +		adjreq.ooboffs = 0;
319 +		adjreq.oobbuf.in = spinand->oobbuf;
320 +		op.n_rx += adjreq.ooblen;
321 +		if (!op.rx_buf) {
322 +			op.rx_buf = spinand->oobbuf;
323 +			column = nanddev_page_size(nand);
324 +		}
325 +	}
326 + op.addr[0] = 0;
327 + op.addr[1] = column >> 8;
328 + op.addr[2] = column;
329 + op.data_nbits = spinand_get_data_bits(spinand->read_cache_op);
330 + spinand_adjust_cache_op(spinand, &adjreq, &op);
331 +
332 + ret = spinand_exec_op(spinand, &op);
333 + if (ret)
334 + return ret;
335 +
336 + if (req->datalen)
337 + memcpy(req->databuf.in, spinand->buf + req->dataoffs,
338 + req->datalen);
339 +
340 + if (req->ooblen)
341 + memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
342 + req->ooblen);
343 +
344 + return 0;
345 +}
346 +
347 +static int spinand_write_to_cache_op(struct spinand_device *spinand,
348 + const struct nand_page_io_req *req)
349 +{
350 + struct nand_device *nand = &spinand->base;
351 + struct nand_page_io_req adjreq = *req;
352 + struct spinand_op op;
353 + u16 column = 0;
354 +
355 + spinand_op_init(&op);
356 + op.cmd = spinand->write_cache_op;
357 + op.n_addr = 2;
358 +
359 + memset(spinand->buf, 0xff,
360 + nanddev_page_size(nand) +
361 + nanddev_per_page_oobsize(nand));
362 +
363 + if (req->datalen) {
364 + memcpy(spinand->buf + req->dataoffs, req->databuf.out,
365 + req->datalen);
366 + adjreq.dataoffs = 0;
367 + adjreq.datalen = nanddev_page_size(nand);
368 + adjreq.databuf.out = spinand->buf;
369 + op.tx_buf = spinand->buf;
370 + op.n_tx = adjreq.datalen;
371 + }
372 +
373 +	if (req->ooblen) {
374 +		memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
375 +		       req->ooblen);
376 +		adjreq.ooblen = nanddev_per_page_oobsize(nand);
377 +		adjreq.ooboffs = 0;
378 +		/* the whole page + OOB is streamed from the bounce buffer */
379 +		op.n_tx = nanddev_page_size(nand) + adjreq.ooblen;
380 +		if (!op.tx_buf)
381 +			op.tx_buf = spinand->buf;
382 +	}
387 +
388 + op.addr[0] = column >> 8;
389 + op.addr[1] = column;
390 +
391 + op.addr_nbits = spinand_get_address_bits(spinand->write_cache_op);
392 + op.data_nbits = spinand_get_data_bits(spinand->write_cache_op);
393 + spinand_adjust_cache_op(spinand, &adjreq, &op);
394 +
395 + return spinand_exec_op(spinand, &op);
396 +}
397 +
398 +static int spinand_program_op(struct spinand_device *spinand,
399 + const struct nand_page_io_req *req)
400 +{
401 + struct nand_device *nand = spinand_to_nand(spinand);
402 + unsigned int row = nanddev_pos_to_offs(nand, &req->pos);
403 +	unsigned int page = row / nand->memorg.pagesize;
404 +	unsigned int block = page / nand->memorg.pages_per_eraseblock;
405 +	struct spinand_op op;
406 +	spinand_op_init(&op);
407 +	op.cmd = SPINAND_CMD_PROG_EXC;
408 +	op.n_addr = 3;
409 +	op.addr[0] = block >> 10;
410 +	op.addr[1] = block >> 2;
411 +	op.addr[2] = ((block & 0x3) << 6) | (page & 0x3f);
412 +
413 + return spinand_exec_op(spinand, &op);
414 +}
415 +
416 +static int spinand_erase_op(struct spinand_device *spinand,
417 + const struct nand_pos *pos)
418 +{
419 + struct nand_device *nand = &spinand->base;
420 + unsigned int row = nanddev_pos_to_offs(nand, pos);
421 +	unsigned int page = row / nand->memorg.pagesize;
422 +	unsigned int block = page / nand->memorg.pages_per_eraseblock;
423 +	struct spinand_op op;
424 +
425 +	spinand_op_init(&op);
426 +	op.cmd = SPINAND_CMD_BLK_ERASE;
427 +	op.n_addr = 3;
428 +	op.addr[0] = block >> 10;
429 +	op.addr[1] = block >> 2;
430 +	op.addr[2] = ((block & 0x3) << 6) | (page & 0x3f);
431 +
432 + return spinand_exec_op(spinand, &op);
433 +}
434 +
435 +static int spinand_wait(struct spinand_device *spinand, u8 *s)
436 +{
437 + unsigned long timeo = jiffies + msecs_to_jiffies(400);
438 + u8 status;
439 +
440 + do {
441 + spinand_read_status(spinand, &status);
442 + if ((status & STATUS_OIP_MASK) == STATUS_READY)
443 + goto out;
444 + } while (time_before(jiffies, timeo));
445 +
446 + /*
447 + * Extra read, just in case the STATUS_READY bit has changed
448 + * since our last check
449 + */
450 + spinand_read_status(spinand, &status);
451 +out:
452 + if (s)
453 + *s = status;
454 +
455 + return (status & STATUS_OIP_MASK) == STATUS_READY ? 0 : -ETIMEDOUT;
456 +}
457 +
458 +static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf)
459 +{
460 + struct spinand_op op;
461 +
462 + spinand_op_init(&op);
463 + op.cmd = SPINAND_CMD_READ_ID;
464 + op.n_rx = SPINAND_MAX_ID_LEN;
465 + op.rx_buf = buf;
466 +
467 + return spinand_exec_op(spinand, &op);
468 +}
469 +
470 +static int spinand_reset_op(struct spinand_device *spinand)
471 +{
472 + struct spinand_op op;
473 + int ret;
474 +
475 + spinand_op_init(&op);
476 + op.cmd = SPINAND_CMD_RESET;
477 +
478 + ret = spinand_exec_op(spinand, &op);
479 + if (ret < 0) {
480 + pr_err("failed to reset the NAND (err = %d)\n", ret);
481 + goto out;
482 + }
483 +
484 + ret = spinand_wait(spinand, NULL);
485 +
486 +out:
487 + return ret;
488 +}
489 +
490 +static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
491 +{
492 + return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
493 +}
494 +
495 +static int spinand_read_page(struct spinand_device *spinand,
496 + const struct nand_page_io_req *req)
497 +{
498 + struct nand_device *nand = spinand_to_nand(spinand);
499 + int ret;
500 +
501 + spinand_load_page_op(spinand, req);
502 +
503 + ret = spinand_wait(spinand, NULL);
504 + if (ret < 0) {
505 + pr_err("failed to load page @%llx (err = %d)\n",
506 + nanddev_pos_to_offs(nand, &req->pos), ret);
507 + return ret;
508 + }
509 +
510 + spinand_read_from_cache_op(spinand, req);
511 +
512 + return 0;
513 +}
514 +
515 +static int spinand_write_page(struct spinand_device *spinand,
516 + const struct nand_page_io_req *req)
517 +{
518 + struct nand_device *nand = spinand_to_nand(spinand);
519 + u8 status;
520 + int ret = 0;
521 +
522 + spinand_write_enable_op(spinand);
523 + spinand_write_to_cache_op(spinand, req);
524 + spinand_program_op(spinand, req);
525 +
526 + ret = spinand_wait(spinand, &status);
527 + if (!ret && (status & STATUS_P_FAIL_MASK) == STATUS_P_FAIL)
528 + ret = -EIO;
529 +
530 + if (ret < 0)
531 + pr_err("failed to program page @%llx (err = %d)\n",
532 + nanddev_pos_to_offs(nand, &req->pos), ret);
533 +
534 + return ret;
535 +}
536 +
537 +static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
538 + struct mtd_oob_ops *ops)
539 +{
540 + struct spinand_device *spinand = mtd_to_spinand(mtd);
541 + struct nand_device *nand = mtd_to_nanddev(mtd);
542 + struct nand_io_iter iter;
543 +	int ret = 0;
544 +
545 + mutex_lock(&spinand->lock);
546 + nanddev_io_for_each_page(nand, from, ops, &iter) {
547 + ret = spinand_read_page(spinand, &iter.req);
548 + if (ret)
549 + break;
550 +
551 + ops->retlen += iter.req.datalen;
552 +		ops->oobretlen += iter.req.ooblen;
553 + }
554 + mutex_unlock(&spinand->lock);
555 +
556 + return ret;
557 +}
558 +
559 +static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
560 + struct mtd_oob_ops *ops)
561 +{
562 + struct spinand_device *spinand = mtd_to_spinand(mtd);
563 + struct nand_device *nand = mtd_to_nanddev(mtd);
564 + struct nand_io_iter iter;
565 + int ret = 0;
566 + mutex_lock(&spinand->lock);
567 + nanddev_io_for_each_page(nand, to, ops, &iter) {
568 + ret = spinand_write_page(spinand, &iter.req);
569 +		if (ret)
570 +			break;
571 +
572 + ops->retlen += iter.req.datalen;
573 + ops->oobretlen += iter.req.ooblen;
574 + }
575 + mutex_unlock(&spinand->lock);
576 +
577 + return ret;
578 +}
579 +
580 +static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
581 +{
582 + struct spinand_device *spinand = nand_to_spinand(nand);
583 + struct nand_page_io_req req = {
584 + .pos = *pos,
585 + .ooblen = 2,
586 + .ooboffs = 0,
587 + .oobbuf.in = spinand->oobbuf,
588 + };
589 +
590 + memset(spinand->oobbuf, 0, 2);
591 + spinand_read_page(spinand, &req);
592 + if (spinand->oobbuf[0] != 0xff || spinand->oobbuf[1] != 0xff)
593 + return true;
594 +
595 + return false;
596 +}
597 +
598 +static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
599 +{
600 + struct nand_device *nand = mtd_to_nanddev(mtd);
601 + struct spinand_device *spinand = nand_to_spinand(nand);
602 + struct nand_pos pos;
603 + int ret;
604 + nanddev_offs_to_pos(nand, offs, &pos);
605 + mutex_lock(&spinand->lock);
606 + ret = spinand_isbad(nand, &pos);
607 + mutex_unlock(&spinand->lock);
608 +
609 + return ret;
610 +}
611 +
612 +static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
613 +{
614 + struct spinand_device *spinand = nand_to_spinand(nand);
615 + struct nand_page_io_req req = {
616 + .pos = *pos,
617 + .ooboffs = 0,
618 + .ooblen = 2,
619 + .oobbuf.out = spinand->oobbuf,
620 + };
621 +
622 +	u8 status;
623 +	/* Erase the block before marking it bad. */
624 +	spinand_write_enable_op(spinand);
625 +	spinand_erase_op(spinand, pos);
626 +	spinand_wait(spinand, &status);
627 +
628 + memset(spinand->oobbuf, 0x00, 2);
629 + return spinand_write_page(spinand, &req);
630 +}
631 +
633 +static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
634 +{
635 + struct nand_device *nand = mtd_to_nanddev(mtd);
636 + struct spinand_device *spinand = nand_to_spinand(nand);
637 + struct nand_pos pos;
638 + int ret;
639 + nanddev_offs_to_pos(nand, offs, &pos);
640 +	/* the bad block marker lives in the first page of the block */
641 +	pos.page = 0;
642 +
643 + mutex_lock(&spinand->lock);
644 + ret = nanddev_markbad(nand, &pos);
645 + mutex_unlock(&spinand->lock);
646 +
647 + return ret;
648 +}
649 +
650 +static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
651 +{
652 + struct spinand_device *spinand = nand_to_spinand(nand);
653 + u8 status;
654 + int ret;
655 +
656 + spinand_write_enable_op(spinand);
657 + spinand_erase_op(spinand, pos);
658 +
659 + ret = spinand_wait(spinand, &status);
660 +
661 + if (!ret && (status & STATUS_E_FAIL_MASK) == STATUS_E_FAIL)
662 + ret = -EIO;
663 +
664 + if (ret)
665 + pr_err("failed to erase block %d (err = %d)\n",
666 + pos->eraseblock, ret);
667 +
668 + return ret;
669 +}
670 +
671 +static int spinand_mtd_erase(struct mtd_info *mtd,
672 + struct erase_info *einfo)
673 +{
674 + struct spinand_device *spinand = mtd_to_spinand(mtd);
675 + int ret;
677 + mutex_lock(&spinand->lock);
678 + ret = nanddev_mtd_erase(mtd, einfo);
679 + mutex_unlock(&spinand->lock);
680 +
681 +	if (!ret)
682 +		mtd_erase_callback(einfo);
683 +
684 + return ret;
685 +}
686 +
687 +static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
688 +{
689 + struct spinand_device *spinand = mtd_to_spinand(mtd);
690 + struct nand_device *nand = mtd_to_nanddev(mtd);
691 + struct nand_pos pos;
692 + int ret;
693 +
694 + nanddev_offs_to_pos(nand, offs, &pos);
695 + mutex_lock(&spinand->lock);
696 + ret = nanddev_isreserved(nand, &pos);
697 + mutex_unlock(&spinand->lock);
698 +
699 + return ret;
700 +}
701 +
702 +static void spinand_set_rd_wr_op(struct spinand_device *spinand)
703 +{
704 + u32 controller_cap = spinand->controller.controller->caps;
705 + u32 rw_mode = spinand->rw_mode;
706 +
707 + if ((controller_cap & SPINAND_CAP_RD_QUAD) &&
708 + (rw_mode & SPINAND_RD_QUAD))
709 + spinand->read_cache_op = SPINAND_CMD_READ_FROM_CACHE_QUAD_IO;
710 + else if ((controller_cap & SPINAND_CAP_RD_X4) &&
711 + (rw_mode & SPINAND_RD_X4))
712 + spinand->read_cache_op = SPINAND_CMD_READ_FROM_CACHE_X4;
713 + else if ((controller_cap & SPINAND_CAP_RD_DUAL) &&
714 + (rw_mode & SPINAND_RD_DUAL))
715 + spinand->read_cache_op = SPINAND_CMD_READ_FROM_CACHE_DUAL_IO;
716 + else if ((controller_cap & SPINAND_CAP_RD_X2) &&
717 + (rw_mode & SPINAND_RD_X2))
718 + spinand->read_cache_op = SPINAND_CMD_READ_FROM_CACHE_X2;
719 + else
720 + spinand->read_cache_op = SPINAND_CMD_READ_FROM_CACHE_FAST;
721 +
722 + if ((controller_cap & SPINAND_CAP_WR_X4) &&
723 + (rw_mode & SPINAND_WR_X4))
724 + spinand->write_cache_op = SPINAND_CMD_PROG_LOAD_X4;
725 + else
726 + spinand->write_cache_op = SPINAND_CMD_PROG_LOAD;
727 +}
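
The effect is a simple capability negotiation: the fastest opcode supported by both the controller (caps) and the device (rw_mode) wins, with single-bit transfers as the fallback. For example (hypothetical values):

	/*
	 * caps    = SPINAND_CAP_RD_X1 | SPINAND_CAP_RD_X4 | SPINAND_CAP_WR_X1
	 * rw_mode = SPINAND_RD_QUAD | SPINAND_RD_X4 | SPINAND_WR_X4
	 *
	 * Quad I/O is rejected (no SPINAND_CAP_RD_QUAD on the controller), so:
	 *   read_cache_op  = SPINAND_CMD_READ_FROM_CACHE_X4 (0x6b)
	 *   write_cache_op = SPINAND_CMD_PROG_LOAD (0x02, controller lacks WR_X4)
	 */
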
728 +
729 +static const struct nand_ops spinand_ops = {
730 + .erase = spinand_erase,
731 + .markbad = spinand_markbad,
732 + .isbad = spinand_isbad,
733 +};
734 +
735 +static const struct spinand_manufacturer *spinand_manufacturers[] = {
736 + &micron_spinand_manufacturer,
737 + &etron_spinand_manufacturer,
738 + &giga_spinand_manufacturer,
739 + &paragon_spinand_manufacturer,
740 +};
741 +
743 +static int spinand_manufacturer_detect(struct spinand_device *spinand)
744 +{
745 + unsigned int i;
746 +
747 + for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
748 + if (spinand_manufacturers[i]->ops->detect(spinand)) {
749 + spinand->manufacturer.manu = spinand_manufacturers[i];
750 +
751 + return 0;
752 + }
753 + }
754 +
755 + return -ENODEV;
756 +}
757 +
758 +static int spinand_manufacturer_init(struct spinand_device *spinand)
759 +{
760 + if (spinand->manufacturer.manu->ops->init)
761 + return spinand->manufacturer.manu->ops->init(spinand);
762 +
763 + return 0;
764 +}
765 +
766 +static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
767 +{
768 + /* Release manufacturer private data */
769 +	if (spinand->manufacturer.manu->ops->cleanup)
770 +		spinand->manufacturer.manu->ops->cleanup(spinand);
771 +}
772 +
773 +static int spinand_detect(struct spinand_device *spinand)
773 +{
774 + struct nand_device *nand = &spinand->base;
775 + int ret;
776 +
777 + spinand_reset_op(spinand);
778 + spinand_read_id_op(spinand, spinand->id.data);
779 + spinand->id.len = SPINAND_MAX_ID_LEN;
780 +
781 + ret = spinand_manufacturer_detect(spinand);
782 + if (ret) {
783 + pr_err("unknown raw ID %*phN\n",
784 + SPINAND_MAX_ID_LEN, spinand->id.data);
785 + return ret;
786 + }
787 +
788 + pr_info("%s SPI NAND was found.\n", spinand->manufacturer.manu->name);
789 + pr_info("%d MiB, block size: %d KiB, page size: %d, OOB size: %d\n",
790 + (int)(nanddev_size(nand) >> 20),
791 + nanddev_eraseblock_size(nand) >> 10,
792 + nanddev_page_size(nand), nanddev_per_page_oobsize(nand));
793 + return 0;
794 +}
795 +/**
796 + * devm_spinand_alloc - [SPI NAND Interface] allocate SPI NAND device instance
797 + * @dev: pointer to device model structure
798 + */
799 +struct spinand_device *devm_spinand_alloc(struct device *dev)
800 +{
801 + struct spinand_device *spinand;
802 + struct mtd_info *mtd;
803 +
804 + spinand = devm_kzalloc(dev, sizeof(*spinand), GFP_KERNEL);
805 + if (!spinand)
806 + return ERR_PTR(-ENOMEM);
807 +
808 + spinand_set_of_node(spinand, dev->of_node);
809 + mutex_init(&spinand->lock);
810 + mtd = spinand_to_mtd(spinand);
811 + mtd->dev.parent = dev;
812 +
813 + return spinand;
814 +}
815 +EXPORT_SYMBOL_GPL(devm_spinand_alloc);
816 +static int spinand_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
817 +{
818 + int ret;
819 + struct mtd_oob_ops ops = {
820 + .len = len,
821 + .datbuf = buf,
822 + };
823 + ret = mtd->_read_oob(mtd, from, &ops);
824 + *retlen = ops.retlen;
825 +
826 + if (unlikely(ret < 0))
827 + return ret;
828 + if (mtd->ecc_strength == 0)
829 + return 0; /* device lacks ecc */
830 + return ret >= mtd->bitflip_threshold ? -EUCLEAN : 0;
831 +}
832 +
833 +static int spinand_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
834 +{
835 + struct mtd_oob_ops ops = {
836 + .len = len,
837 + .datbuf = (u8 *)buf,
838 + };
839 + int ret;
840 +
841 + ret = mtd->_write_oob(mtd, to, &ops);
842 + *retlen = ops.retlen;
843 + return ret;
844 +}
846 +
847 +int spinand_bbt_create(struct nand_device *nand)
848 +{
849 +	struct mtd_info *mtd = nanddev_to_mtd(nand);
850 +	struct nand_pos pos = {};
851 +	unsigned int block;
852 +	unsigned int entry;
853 +
854 +	if (!nanddev_bbt_is_initialized(nand))
855 +		return 0;
856 +
857 +	/* Scan every block of the (single) LUN and seed the BBT cache. */
858 +	for (block = 0; block < nand->memorg.eraseblocks_per_lun; block++) {
859 +		pos.eraseblock = block;
860 +		pos.page = 0;
861 +		entry = nanddev_bbt_pos_to_entry(nand, &pos);
862 +		if (nand->ops->isbad(nand, &pos)) {
863 +			pr_info("found bad block %llx\n",
864 +				nanddev_pos_to_offs(nand, &pos));
865 +			nanddev_bbt_set_block_status(nand, entry,
866 +						     NAND_BBT_BLOCK_FACTORY_BAD);
867 +			nanddev_bbt_update(nand);
868 +			mtd->ecc_stats.badblocks++;
869 +		} else {
870 +			nanddev_bbt_set_block_status(nand, entry,
871 +						     NAND_BBT_BLOCK_GOOD);
872 +		}
873 +	}
874 +
875 +	return 0;
876 +}
878 +int write_test(struct mtd_info *mtd, loff_t to, size_t len)
879 +{
880 +	struct nand_device *nand = mtd_to_nanddev(mtd);
881 +	size_t retlen;
882 +	unsigned char *buf;
883 +	size_t i;
884 +
885 +	buf = kzalloc(nanddev_page_size(nand) +
886 +		      nanddev_per_page_oobsize(nand),
887 +		      GFP_KERNEL);
888 +	if (!buf)
889 +		return -ENOMEM;
890 +
891 +	/* fill with a repeating 0x00..0x0f test pattern */
892 +	for (i = 0; i < len; i++)
893 +		buf[i] = i % 16;
894 +	spinand_write(mtd, to, len, &retlen, buf);
895 +	kfree(buf);
896 +	return 0;
897 +}
895 +int erase_test(struct mtd_info *mtd, uint64_t from, uint64_t len)
896 +{
897 +	struct erase_info einfo = {
898 +		.mtd = mtd,
899 +		.addr = from,
900 +		.len = len,
901 +		.callback = NULL,
902 +	};
903 +
904 +	spinand_mtd_erase(mtd, &einfo);
905 +	return 0;
906 +}
907 +int read_test(struct mtd_info *mtd, loff_t from, size_t len)
908 +{
909 +	struct nand_device *nand = mtd_to_nanddev(mtd);
910 +	size_t retlen;
911 +	unsigned char *buf;
912 +	size_t i;
913 +	int en = 16;	/* bytes per dump line; drops to 8 in the OOB area */
914 +
915 +	buf = kzalloc(nanddev_page_size(nand) +
916 +		      nanddev_per_page_oobsize(nand),
917 +		      GFP_KERNEL);
918 +	if (!buf)
919 +		return -ENOMEM;
920 +
921 +	spinand_read(mtd, from, len, &retlen, buf);
922 +	for (i = 0; i < len; i++) {
923 +		if (i % en == 0)
924 +			printk("\n");
925 +		printk("%02X ", buf[i]);
926 +		if (i == 2047)
927 +			en = 8;
928 +	}
929 +	kfree(buf);
930 +	return 0;
931 +}
928 +
929 +int mark_bad_test(struct mtd_info *mtd, loff_t offs)
930 +{
931 +	return spinand_mtd_block_markbad(mtd, offs);
932 +}
933 +/**
934 + * spinand_init - [SPI NAND Interface] initialize the SPI NAND device
935 + * @spinand: SPI NAND device structure
936 + */
937 +int spinand_init(struct spinand_device *spinand, struct module *owner)
938 +{
939 + struct mtd_info *mtd = spinand_to_mtd(spinand);
940 + struct nand_device *nand = mtd_to_nanddev(mtd);
941 + int ret;
942 +
943 + ret = spinand_detect(spinand);
944 + if (ret) {
945 + pr_err("Failed to detect a SPI NAND (err = %d).\n", ret);
946 + return ret;
947 + }
948 +
949 + ret = nanddev_init(nand, &spinand_ops, owner);
950 + if (ret)
951 + return ret;
952 +
953 + spinand_set_rd_wr_op(spinand);
954 +
955 + /*
956 + * Use kzalloc() instead of devm_kzalloc() here, because some drivers
957 + * may use this buffer for DMA access.
958 + * Memory allocated by devm_ does not guarantee DMA-safe alignment.
959 + */
960 + spinand->buf = kzalloc(nanddev_page_size(nand) +
961 + nanddev_per_page_oobsize(nand),
962 + GFP_KERNEL);
963 + if (!spinand->buf)
964 + return -ENOMEM;
965 +
966 + spinand->oobbuf = spinand->buf + nanddev_page_size(nand);
967 +
968 + ret = spinand_manufacturer_init(spinand);
969 + if (ret) {
970 + pr_err("Init of SPI NAND failed (err = %d).\n", ret);
971 + goto err_free_buf;
972 + }
973 +
974 + /*
975 +	 * Right now, we don't do ECC in software, so the whole OOB
976 +	 * area is available to the user.
977 + */
978 + mtd->_read_oob = spinand_mtd_read;
979 + mtd->_write_oob = spinand_mtd_write;
980 + mtd->_block_isbad = spinand_mtd_block_isbad;
981 + mtd->_block_markbad = spinand_mtd_block_markbad;
982 + mtd->_block_isreserved = spinand_mtd_block_isreserved;
983 + mtd->_erase = spinand_mtd_erase;
984 + mtd->_read = spinand_read;
985 + mtd->_write = spinand_write;
986 +
987 + /* After power up, all blocks are locked, so unlock it here. */
988 + spinand_lock_block(spinand, BL_ALL_UNLOCKED);
989 +	/* Rely on the chip's on-die ECC engine, so enable it here. */
990 +	spinand_enable_ecc(spinand);
992 +
993 + return 0;
994 +
995 +err_free_buf:
996 + kfree(spinand->buf);
997 + return ret;
998 +}
999 +EXPORT_SYMBOL_GPL(spinand_init);
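
A controller driver is expected to glue these pieces together in its probe path, roughly as follows (a hedged sketch, not part of the patch; example_probe and example_controller are placeholders for whatever the controller driver, e.g. generic-spinand-controller.c, actually provides):

	static int example_probe(struct device *dev)
	{
		struct spinand_device *spinand;
		int ret;

		spinand = devm_spinand_alloc(dev);
		if (IS_ERR(spinand))
			return PTR_ERR(spinand);

		/* hook up the controller before any SPI traffic happens */
		spinand->controller.controller = &example_controller;

		ret = spinand_init(spinand, THIS_MODULE);
		if (ret)
			return ret;

		return mtd_device_register(spinand_to_mtd(spinand), NULL, 0);
	}
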
1000 +/**
1001 + * spinand_cleanup - clean SPI NAND device
1002 + * @spinand: SPI NAND device structure
1003 + */
1004 +void spinand_cleanup(struct spinand_device *spinand)
1005 +{
1006 + struct nand_device *nand = &spinand->base;
1007 +
1008 + spinand_manufacturer_cleanup(spinand);
1009 + kfree(spinand->buf);
1010 + nanddev_cleanup(nand);
1011 +}
1012 +EXPORT_SYMBOL_GPL(spinand_cleanup);
1013 +
1014 +MODULE_DESCRIPTION("SPI NAND framework");
1015 +MODULE_AUTHOR("Peter Pan <peterpandong@micron.com>");
1016 +MODULE_LICENSE("GPL v2");
1017 --- /dev/null
1018 +++ b/drivers/mtd/nand/spi_nand/etron.c
1019 @@ -0,0 +1,147 @@
1020 +/*
1021 + *
1022 + * Copyright (c) 2016-2017 Micron Technology, Inc.
1023 + *
1024 + * This program is free software; you can redistribute it and/or
1025 + * modify it under the terms of the GNU General Public License
1026 + * as published by the Free Software Foundation; either version 2
1027 + * of the License, or (at your option) any later version.
1028 + *
1029 + * This program is distributed in the hope that it will be useful,
1030 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1031 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1032 + * GNU General Public License for more details.
1033 + */
1034 +
1035 +#include <linux/device.h>
1036 +#include <linux/kernel.h>
1037 +#include <linux/mtd/spinand.h>
1038 +
1039 +#define SPINAND_MFR_ETRON 0xD5
1040 +
1041 +struct etron_spinand_info {
1042 + char *name;
1043 + u8 dev_id;
1044 + struct nand_memory_organization memorg;
1045 + struct nand_ecc_req eccreq;
1046 + unsigned int rw_mode;
1047 +};
1048 +
1049 +#define ETRON_SPI_NAND_INFO(nm, did, mo, er, rwm) \
1050 + { \
1051 + .name = (nm), \
1052 + .dev_id = (did), \
1053 + .memorg = mo, \
1054 + .eccreq = er, \
1055 + .rw_mode = (rwm) \
1056 + }
1057 +
1058 +static const struct etron_spinand_info etron_spinand_table[] = {
1059 + ETRON_SPI_NAND_INFO("ETNORxxxx", 0x11,
1060 + NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
1061 + NAND_ECCREQ(8, 512),
1062 + SPINAND_RW_COMMON),
1063 +};
1064 +
1065 +static int etron_spinand_get_dummy(struct spinand_device *spinand,
1066 + struct spinand_op *op)
1067 +{
1068 + u8 opcode = op->cmd;
1069 +
1070 + switch (opcode) {
1071 + case SPINAND_CMD_READ_FROM_CACHE:
1072 + case SPINAND_CMD_READ_FROM_CACHE_FAST:
1073 + case SPINAND_CMD_READ_FROM_CACHE_X2:
1074 + case SPINAND_CMD_READ_FROM_CACHE_DUAL_IO:
1075 + case SPINAND_CMD_READ_FROM_CACHE_X4:
1076 + case SPINAND_CMD_READ_ID:
1077 + return 1;
1078 +
1079 + case SPINAND_CMD_READ_FROM_CACHE_QUAD_IO:
1080 + return 2;
1081 +
1082 + default:
1083 + return 0;
1084 + }
1085 +}
1086 +
1087 +/**
1088 + * etron_spinand_scan_id_table - scan SPI NAND info in id table
1089 + * @spinand: SPI NAND device structure
1090 + * @dev_id: device ID byte read after the manufacturer ID
1091 + * Description:
1092 + *   If the ID is found in the table, configure the device with the
1093 + *   table information.
1093 + */
1094 +static bool etron_spinand_scan_id_table(struct spinand_device *spinand,
1095 + u8 dev_id)
1096 +{
1097 + struct mtd_info *mtd = spinand_to_mtd(spinand);
1098 + struct nand_device *nand = mtd_to_nanddev(mtd);
1099 + struct etron_spinand_info *item;
1100 + unsigned int i;
1101 +
1102 + for (i = 0; i < ARRAY_SIZE(etron_spinand_table); i++) {
1103 + item = (struct etron_spinand_info *)etron_spinand_table + i;
1104 + if (dev_id != item->dev_id)
1105 + continue;
1106 +
1107 + nand->memorg = item->memorg;
1108 + nand->eccreq = item->eccreq;
1109 + spinand->rw_mode = item->rw_mode;
1110 +
1111 + return true;
1112 + }
1113 +
1114 + return false;
1115 +}
1116 +
1117 +/**
1118 + * etron_spinand_detect - initialize the device specific parts of the
1119 + * spinand_device struct if this is an Etron device.
1120 + * @spinand: SPI NAND device structure
1121 + */
1122 +static bool etron_spinand_detect(struct spinand_device *spinand)
1123 +{
1124 + u8 *id = spinand->id.data;
1125 +
1126 + /*
1127 + * Etron SPI NAND read ID needs a dummy byte,
1128 + * so the first byte in raw_id is a dummy.
1129 + */
1130 + if (id[1] != SPINAND_MFR_ETRON)
1131 + return false;
1132 +
1133 + return etron_spinand_scan_id_table(spinand, id[2]);
1134 +}
1135 +
1136 +/**
1137 + * etron_spinand_adjust_cache_op - fix up the address of a cache operation
1138 + * @spinand: SPI NAND device structure
1139 + * @req: pointer to the page I/O request being served
1140 + * @op: pointer to the spinand_op struct to adjust
1142 + */
1143 +static void etron_spinand_adjust_cache_op(struct spinand_device *spinand,
1144 + const struct nand_page_io_req *req,
1145 + struct spinand_op *op)
1146 +{
1147 +	op->n_addr = 2;
1151 + op->addr[0] = op->addr[1];
1152 + op->addr[1] = op->addr[2];
1153 + op->addr[2] = 0;
1154 + op->dummy_bytes = etron_spinand_get_dummy(spinand, op);
1155 +}
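
In other words, Etron parts expect a plain two-byte column address followed by the dummy cycles, so the leading zero byte that core.c places in addr[0] is shifted out:

	/*
	 * core.c builds {0x00, column >> 8, column} for a 3-byte address;
	 * after this fixup the device sees {column >> 8, column} + dummy.
	 */
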
1156 +
1157 +static const struct spinand_manufacturer_ops etron_spinand_manuf_ops = {
1158 + .detect = etron_spinand_detect,
1159 + .adjust_cache_op = etron_spinand_adjust_cache_op,
1160 +};
1161 +
1162 +const struct spinand_manufacturer etron_spinand_manufacturer = {
1163 + .id = SPINAND_MFR_ETRON,
1164 + .name = "Etron",
1165 + .ops = &etron_spinand_manuf_ops,
1166 +};
1167 --- /dev/null
1168 +++ b/drivers/mtd/nand/spi_nand/micron.c
1169 @@ -0,0 +1,153 @@
1170 +/*
1171 + *
1172 + * Copyright (c) 2016-2017 Micron Technology, Inc.
1173 + *
1174 + * This program is free software; you can redistribute it and/or
1175 + * modify it under the terms of the GNU General Public License
1176 + * as published by the Free Software Foundation; either version 2
1177 + * of the License, or (at your option) any later version.
1178 + *
1179 + * This program is distributed in the hope that it will be useful,
1180 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1181 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1182 + * GNU General Public License for more details.
1183 + */
1184 +
1185 +#include <linux/device.h>
1186 +#include <linux/kernel.h>
1187 +#include <linux/mtd/spinand.h>
1188 +
1189 +#define SPINAND_MFR_MICRON 0x2C
1190 +
1191 +struct micron_spinand_info {
1192 + char *name;
1193 + u8 dev_id;
1194 + struct nand_memory_organization memorg;
1195 + struct nand_ecc_req eccreq;
1196 + unsigned int rw_mode;
1197 +};
1198 +
1199 +#define MICRON_SPI_NAND_INFO(nm, did, mo, er, rwm) \
1200 + { \
1201 + .name = (nm), \
1202 + .dev_id = (did), \
1203 + .memorg = mo, \
1204 + .eccreq = er, \
1205 + .rw_mode = (rwm) \
1206 + }
1207 +
1208 +static const struct micron_spinand_info micron_spinand_table[] = {
1209 + MICRON_SPI_NAND_INFO("MT29F2G01ABAGD", 0x24,
1210 + NAND_MEMORG(1, 2048, 128, 64, 2048, 2, 1, 1),
1211 + NAND_ECCREQ(8, 512),
1212 + SPINAND_RW_COMMON),
1213 +};
1214 +
1215 +static int micron_spinand_get_dummy(struct spinand_device *spinand,
1216 + struct spinand_op *op)
1217 +{
1218 + u8 opcode = op->cmd;
1219 +
1220 + switch (opcode) {
1221 + case SPINAND_CMD_READ_FROM_CACHE:
1222 + case SPINAND_CMD_READ_FROM_CACHE_FAST:
1223 + case SPINAND_CMD_READ_FROM_CACHE_X2:
1224 + case SPINAND_CMD_READ_FROM_CACHE_DUAL_IO:
1225 + case SPINAND_CMD_READ_FROM_CACHE_X4:
1226 + case SPINAND_CMD_READ_ID:
1227 + return 1;
1228 +
1229 + case SPINAND_CMD_READ_FROM_CACHE_QUAD_IO:
1230 + return 2;
1231 +
1232 + default:
1233 + return 0;
1234 + }
1235 +}
1236 +
1237 +/**
1238 + * micron_spinand_scan_id_table - scan SPI NAND info in id table
1239 + * @spinand: SPI NAND device structure
1240 + * @dev_id: device ID byte read after the manufacturer ID
1241 + * Description:
1242 + *   If the ID is found in the table, configure the device with the
1243 + *   table information.
1243 + */
1244 +static bool micron_spinand_scan_id_table(struct spinand_device *spinand,
1245 + u8 dev_id)
1246 +{
1247 + struct mtd_info *mtd = spinand_to_mtd(spinand);
1248 + struct nand_device *nand = mtd_to_nanddev(mtd);
1249 + struct micron_spinand_info *item;
1250 + unsigned int i;
1251 +
1252 + for (i = 0; i < ARRAY_SIZE(micron_spinand_table); i++) {
1253 + item = (struct micron_spinand_info *)micron_spinand_table + i;
1254 + if (dev_id != item->dev_id)
1255 + continue;
1256 +
1257 + nand->memorg = item->memorg;
1258 + nand->eccreq = item->eccreq;
1259 + spinand->rw_mode = item->rw_mode;
1260 +
1261 + return true;
1262 + }
1263 +
1264 + return false;
1265 +}
1266 +
1267 +/**
1268 + * micron_spinand_detect - initialize the device specific parts of the
1269 + * spinand_device struct if this is a Micron device.
1270 + * @spinand: SPI NAND device structure
1271 + */
1272 +static bool micron_spinand_detect(struct spinand_device *spinand)
1273 +{
1274 + u8 *id = spinand->id.data;
1275 +
1276 + /*
1277 + * Micron SPI NAND read ID needs a dummy byte,
1278 + * so the first byte in raw_id is dummy.
1279 + */
1280 + if (id[1] != SPINAND_MFR_MICRON)
1281 + return false;
1282 +
1283 + return micron_spinand_scan_id_table(spinand, id[2]);
1284 +}
1285 +
1286 +/**
1287 + * micron_spinand_adjust_cache_op - fix up the address of a cache operation
1288 + * @spinand: SPI NAND device structure
1289 + * @req: pointer to the page I/O request being served
1290 + * @op: pointer to the spinand_op struct to adjust
1292 + */
1293 +static void micron_spinand_adjust_cache_op(struct spinand_device *spinand,
1294 + const struct nand_page_io_req *req,
1295 + struct spinand_op *op)
1296 +{
1297 + struct nand_device *nand = spinand_to_nand(spinand);
1298 + unsigned int shift;
1299 +
1300 + /*
1301 + * No need to specify the plane number if there's only one plane per
1302 + * LUN.
1303 + */
1304 + if (nand->memorg.planes_per_lun < 2)
1305 + return;
1306 +
1307 + /* The plane number is passed in MSB just above the column address */
1308 + shift = fls(nand->memorg.pagesize);
1309 + op->addr[(16 - shift) / 8] |= req->pos.plane << (shift % 8);
1310 + op->dummy_bytes = micron_spinand_get_dummy(spinand, op);
1311 +}
1312 +
1313 +static const struct spinand_manufacturer_ops micron_spinand_manuf_ops = {
1314 + .detect = micron_spinand_detect,
1315 + .adjust_cache_op = micron_spinand_adjust_cache_op,
1316 +};
1317 +
1318 +const struct spinand_manufacturer micron_spinand_manufacturer = {
1319 + .id = SPINAND_MFR_MICRON,
1320 + .name = "Micron",
1321 + .ops = &micron_spinand_manuf_ops,
1322 +};
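
For the two-byte cache address layout this driver uses, the plane-select arithmetic works out as follows (worked example for the 2 KiB-page MT29F2G01 listed in the table above):

	/*
	 * pagesize = 2048  =>  shift = fls(2048) = 12
	 * byte index = (16 - 12) / 8 = 0, bit = 12 % 8 = 4
	 * => op->addr[0] |= plane << 4, i.e. column bit 12, just above
	 *    the 12 bits needed to address page + OOB (columns 0..2175).
	 */
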
1323 --- /dev/null
1324 +++ b/drivers/mtd/nand/spi_nand/nand_core.c
1325 @@ -0,0 +1,213 @@
1326 +/*
1327 + * Copyright (c) 2017 Free Electrons
1328 + *
1329 + * This program is free software; you can redistribute it and/or
1330 + * modify it under the terms of the GNU General Public License
1331 + * as published by the Free Software Foundation; either version 2
1332 + * of the License, or (at your option) any later version.
1333 + *
1334 + * This program is distributed in the hope that it will be useful,
1335 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1336 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1337 + * GNU General Public License for more details.
1338 + *
1339 + * Authors:
1340 + * Boris Brezillon <boris.brezillon@free-electrons.com>
1341 + * Peter Pan <peterpandong@micron.com>
1342 + */
1343 +
1344 +#define pr_fmt(fmt) "nand: " fmt
1345 +
1346 +#include <linux/mtd/nand.h>
1347 +#include <linux/mtd/spinand.h>
1348 +
1349 +bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos)
1350 +{
1351 +	if (nanddev_bbt_is_initialized(nand)) {
1352 +		unsigned int entry;
1353 +		int status;
1354 +
1355 +		entry = nanddev_bbt_pos_to_entry(nand, pos);
1356 +		status = nanddev_bbt_get_block_status(nand, entry);
1357 +		/* Lazy block status retrieval */
1358 +		if (status == NAND_BBT_BLOCK_STATUS_UNKNOWN) {
1359 +			if (nand->ops->isbad(nand, pos))
1360 +				status = NAND_BBT_BLOCK_FACTORY_BAD;
1361 +			else
1362 +				status = NAND_BBT_BLOCK_GOOD;
1363 +
1364 +			nanddev_bbt_set_block_status(nand, entry, status);
1365 +		}
1366 +
1367 +		if (status == NAND_BBT_BLOCK_WORN ||
1368 +		    status == NAND_BBT_BLOCK_FACTORY_BAD)
1369 +			return true;
1370 +
1371 +		return false;
1372 +	}
1373 +
1375 + return nand->ops->isbad(nand, pos);
1376 +}
1377 +EXPORT_SYMBOL_GPL(nanddev_isbad);
1378 +
1379 +/**
1380 + * nanddev_markbad - Write a bad block marker to a block
1381 + * @nand: NAND device
1382 + * @pos: position of the block to mark bad
1383 + *
1384 + * Mark a block bad. This function is updating the BBT if available and
1385 + * calls the low-level markbad hook (nand->ops->markbad()) if
1386 + * NAND_BBT_NO_OOB_BBM is not set.
1387 + *
1388 + * Return: 0 in case of success, a negative error code otherwise.
1389 + */
1390 +int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos)
1391 +{
1392 + struct mtd_info *mtd = nanddev_to_mtd(nand);
1393 + unsigned int entry;
1394 + int ret = 0;
1395 + if (nanddev_isbad(nand, pos))
1396 + return 0;
1397 +
1398 + ret = nand->ops->markbad(nand, pos);
1399 + if (ret)
1400 + pr_warn("failed to write BBM to block @%llx (err = %d)\n",
1401 + nanddev_pos_to_offs(nand, pos), ret);
1402 +
1403 + if (!nanddev_bbt_is_initialized(nand))
1404 + goto out;
1405 +
1406 + entry = nanddev_bbt_pos_to_entry(nand, pos);
1407 + ret = nanddev_bbt_set_block_status(nand, entry, NAND_BBT_BLOCK_WORN);
1408 + if (ret)
1409 + goto out;
1410 +
1411 + ret = nanddev_bbt_update(nand);
1412 +
1413 +out:
1414 + if (!ret)
1415 + mtd->ecc_stats.badblocks++;
1416 +
1417 + return ret;
1418 +}
1419 +EXPORT_SYMBOL_GPL(nanddev_markbad);
1420 +
1421 +bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos)
1422 +{
1423 + unsigned int entry;
1424 + int status;
1425 +
1426 + if (!nanddev_bbt_is_initialized(nand))
1427 + return false;
1428 +
1429 + /* Return info from the table */
1430 + entry = nanddev_bbt_pos_to_entry(nand, pos);
1431 + status = nanddev_bbt_get_block_status(nand, entry);
1432 + return status == NAND_BBT_BLOCK_RESERVED;
1433 +}
1434 +EXPORT_SYMBOL_GPL(nanddev_isreserved);
1435 +
1436 +/**
1437 + * nanddev_erase - Erase a NAND portion
1438 + * @nand: NAND device
1439 + * @pos: position of the eraseblock to erase
1440 + *
1441 + * Erase the eraseblock at @pos if it is not bad or reserved.
1442 + *
1443 + * Return: 0 in case of success, a negative error code otherwise.
1444 + */
1445 +int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos)
1447 +{
1448 + if (nanddev_isbad(nand, pos) || nanddev_isreserved(nand, pos)) {
1449 +		pr_warn("attempt to erase a bad/reserved block @%llx\n",
1450 +			nanddev_pos_to_offs(nand, pos));
1451 + return -EIO;
1452 + }
1453 +
1454 + return nand->ops->erase(nand, pos);
1455 +}
1456 +EXPORT_SYMBOL_GPL(nanddev_erase);
1457 +
1458 +int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
1459 +{
1460 + struct nand_device *nand = mtd_to_nanddev(mtd);
1461 + struct nand_pos pos, last;
1462 + int ret;
1463 +
1464 + nanddev_offs_to_pos(nand, einfo->addr, &pos);
1465 + nanddev_offs_to_pos(nand, einfo->addr + einfo->len - 1, &last);
1466 + while (nanddev_pos_cmp(&pos, &last) <= 0) {
1467 + ret = nanddev_erase(nand, &pos);
1468 + if (ret) {
1469 + einfo->fail_addr = nanddev_pos_to_offs(nand, &pos);
1470 + einfo->state = MTD_ERASE_FAILED;
1472 + return ret;
1473 + }
1474 +
1475 + nanddev_pos_next_eraseblock(nand, &pos);
1476 + }
1477 +
1478 + einfo->state = MTD_ERASE_DONE;
1479 +
1480 + return 0;
1481 +}
1482 +EXPORT_SYMBOL_GPL(nanddev_mtd_erase);
1483 +
1484 +/**
1485 + * nanddev_init - Initialize a NAND device
1486 + * @nand: NAND device
1487 + * @ops: NAND device operations
1488 + * @owner: module owning this NAND device
1489 + *
1490 + * Initialize a NAND device object. Consistency checks are done on the
1491 + * memory organization and on @ops.
1492 + *
1493 + * Return: 0 in case of success, a negative error code otherwise.
1494 + */
1495 +int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
1496 + struct module *owner)
1497 +{
1498 +	struct mtd_info *mtd;
1499 +	struct nand_memory_organization *memorg;
1500 +
1501 +	if (!nand || !ops || !ops->erase || !ops->markbad || !ops->isbad)
1502 +		return -EINVAL;
1503 +
1504 +	mtd = nanddev_to_mtd(nand);
1505 +	memorg = nanddev_get_memorg(nand);
1506 +
1507 + if (!memorg->bits_per_cell || !memorg->pagesize ||
1508 + !memorg->pages_per_eraseblock || !memorg->eraseblocks_per_lun ||
1509 + !memorg->planes_per_lun || !memorg->luns_per_target ||
1510 + !memorg->ntargets)
1511 + return -EINVAL;
1512 +
1513 + nand->rowconv.eraseblock_addr_shift = fls(memorg->pagesize);
1514 + nand->rowconv.lun_addr_shift = fls(memorg->eraseblocks_per_lun) +
1515 + nand->rowconv.eraseblock_addr_shift;
1516 +
1517 + nand->ops = ops;
1518 +
1519 + mtd->type = memorg->bits_per_cell == 1 ?
1520 + MTD_NANDFLASH : MTD_MLCNANDFLASH;
1521 + mtd->flags = MTD_CAP_NANDFLASH;
1522 + mtd->erasesize = memorg->pagesize * memorg->pages_per_eraseblock;
1523 + mtd->writesize = memorg->pagesize;
1524 + mtd->writebufsize = memorg->pagesize;
1525 + mtd->oobsize = memorg->oobsize;
1526 + mtd->size = nanddev_size(nand);
1527 + mtd->owner = owner;
1528 +
1529 + return nanddev_bbt_init(nand);
1530 +}
1531 +EXPORT_SYMBOL_GPL(nanddev_init);
1532 +
1533 +void nanddev_cleanup(struct nand_device *nand)
1534 +{
1535 + if (nanddev_bbt_is_initialized(nand))
1536 + nanddev_bbt_cleanup(nand);
1537 +}
1538 +EXPORT_SYMBOL_GPL(nanddev_cleanup);
1539 --- /dev/null
1540 +++ b/include/linux/mtd/spinand.h
1541 @@ -0,0 +1,764 @@
1542 +/*
1543 + *
1544 + * Copyright (c) 2016-2017 Micron Technology, Inc.
1545 + *
1546 + * This program is free software; you can redistribute it and/or
1547 + * modify it under the terms of the GNU General Public License
1548 + * as published by the Free Software Foundation; either version 2
1549 + * of the License, or (at your option) any later version.
1550 + *
1551 + * This program is distributed in the hope that it will be useful,
1552 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1553 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1554 + * GNU General Public License for more details.
1555 + */
1556 +#ifndef __LINUX_MTD_SPINAND_H
1557 +#define __LINUX_MTD_SPINAND_H
1558 +
1559 +#include <linux/mutex.h>
1560 +#include <linux/bitops.h>
1561 +#include <linux/device.h>
1562 +#include <linux/mtd/mtd.h>
1563 +#include <linux/mtd/nand.h>
1564 +#include <linux/of.h>
1565 +
1566 +/**
1567 + * Standard SPI NAND flash commands
1568 + */
1569 +#define SPINAND_CMD_RESET 0xff
1570 +#define SPINAND_CMD_GET_FEATURE 0x0f
1571 +#define SPINAND_CMD_SET_FEATURE 0x1f
1572 +#define SPINAND_CMD_PAGE_READ 0x13
1573 +#define SPINAND_CMD_READ_FROM_CACHE 0x03
1574 +#define SPINAND_CMD_READ_FROM_CACHE_FAST 0x0b
1575 +#define SPINAND_CMD_READ_FROM_CACHE_X2 0x3b
1576 +#define SPINAND_CMD_READ_FROM_CACHE_DUAL_IO 0xbb
1577 +#define SPINAND_CMD_READ_FROM_CACHE_X4 0x6b
1578 +#define SPINAND_CMD_READ_FROM_CACHE_QUAD_IO 0xeb
1579 +#define SPINAND_CMD_BLK_ERASE 0xd8
1580 +#define SPINAND_CMD_PROG_EXC 0x10
1581 +#define SPINAND_CMD_PROG_LOAD 0x02
1582 +#define SPINAND_CMD_PROG_LOAD_RDM_DATA 0x84
1583 +#define SPINAND_CMD_PROG_LOAD_X4 0x32
1584 +#define SPINAND_CMD_PROG_LOAD_RDM_DATA_X4 0x34
1585 +#define SPINAND_CMD_READ_ID 0x9f
1586 +#define SPINAND_CMD_WR_DISABLE 0x04
1587 +#define SPINAND_CMD_WR_ENABLE 0x06
1588 +
1589 +/* feature register */
1590 +#define REG_BLOCK_LOCK 0xa0
1591 +#define REG_CFG 0xb0
1592 +#define REG_STATUS 0xc0
1593 +
1594 +/* status register */
1595 +#define STATUS_OIP_MASK BIT(0)
1596 +#define STATUS_CRBSY_MASK BIT(7)
1597 +#define STATUS_READY 0
1598 +#define STATUS_BUSY BIT(0)
1599 +
1600 +#define STATUS_E_FAIL_MASK BIT(2)
1601 +#define STATUS_E_FAIL BIT(2)
1602 +
1603 +#define STATUS_P_FAIL_MASK BIT(3)
1604 +#define STATUS_P_FAIL BIT(3)
1605 +
1606 +/* configuration register */
1607 +#define CFG_ECC_MASK BIT(4)
1608 +#define CFG_ECC_ENABLE BIT(4)
1609 +
1610 +/* block lock register */
1611 +#define BL_ALL_UNLOCKED 0x00
1612 +
1613 +struct spinand_op;
1614 +struct spinand_device;
1615 +struct nand_device;
1616 +
1617 +/**
1618 + * struct nand_memory_organization - memory organization structure
1619 + * @bits_per_cell: number of bits per NAND cell
1620 + * @pagesize: page size
1621 + * @oobsize: OOB area size
1622 + * @pages_per_eraseblock: number of pages per eraseblock
1623 + * @eraseblocks_per_lun: number of eraseblocks per LUN
1624 + * @planes_per_lun: number of planes per LUN
1625 + * @luns_per_target: number of LUNs per target
1626 + * @ntargets: total number of targets
1625 + */
1626 +struct nand_memory_organization {
1627 + unsigned int bits_per_cell;
1628 + unsigned int pagesize;
1629 + unsigned int oobsize;
1630 + unsigned int pages_per_eraseblock;
1631 + unsigned int eraseblocks_per_lun;
1632 + unsigned int planes_per_lun;
1633 + unsigned int luns_per_target;
1634 + unsigned int ntargets;
1635 +};
1636 +
1637 +#define NAND_MEMORG(bpc, ps, os, ppe, epl, ppl, lpt, nt) \
1638 + { \
1639 + .bits_per_cell = (bpc), \
1640 + .pagesize = (ps), \
1641 + .oobsize = (os), \
1642 + .pages_per_eraseblock = (ppe), \
1643 + .eraseblocks_per_lun = (epl), \
1644 + .planes_per_lun = (ppl), \
1645 + .luns_per_target = (lpt), \
1646 + .ntargets = (nt), \
1647 + }
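
Reading a table entry against this macro makes the device geometry explicit; for instance, the Micron entry from micron.c decodes as:

	/*
	 * NAND_MEMORG(1, 2048, 128, 64, 2048, 2, 1, 1):
	 *   1 bit/cell (SLC), 2 KiB pages + 128 B OOB, 64 pages per block,
	 *   2048 blocks per LUN, 2 planes, 1 LUN, 1 target
	 *   => 2048 B x 64 x 2048 = 256 MiB (2 Gbit), i.e. the MT29F2G01ABAGD.
	 */
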
1648 +
1649 +/**
1650 + * struct nand_bbt - bad block table structure
1651 + * @cache: in memory BBT cache
1652 + */
1653 +struct nand_bbt {
1654 + unsigned char *cache;
1655 +};
1656 +
1657 +struct nand_row_converter {
1658 + unsigned int lun_addr_shift;
1659 + unsigned int eraseblock_addr_shift;
1660 +};
1661 +
1662 +struct nand_pos {
1663 + unsigned int target;
1664 + unsigned int lun;
1665 + unsigned int plane;
1666 + unsigned int eraseblock;
1667 + unsigned int page;
1668 +};
1669 +
1670 +struct nand_page_io_req {
1671 + struct nand_pos pos;
1672 + unsigned int dataoffs;
1673 + unsigned int datalen;
1674 + union {
1675 + const void *out;
1676 + void *in;
1677 + } databuf;
1678 + unsigned int ooboffs;
1679 + unsigned int ooblen;
1680 + union {
1681 + const void *out;
1682 + void *in;
1683 + } oobbuf;
1684 +};
1685 +/**
1686 + * struct nand_ops - NAND operations
1687 + * @erase: erase a specific block
1688 + * @markbad: mark a specific block bad
1689 + */
1690 +struct nand_ops {
1691 + int (*erase)(struct nand_device *nand, const struct nand_pos *pos);
1692 + int (*markbad)(struct nand_device *nand, const struct nand_pos *pos);
1693 + bool (*isbad)(struct nand_device *nand, const struct nand_pos *pos);
1694 +};
1695 +
1696 +struct nand_ecc_req {
1697 + unsigned int strength;
1698 + unsigned int step_size;
1699 +};
1700 +
1701 +#define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) }
1702 +
1703 +struct nand_device {
1704 + struct mtd_info mtd;
1705 + struct nand_memory_organization memorg;
1706 + struct nand_ecc_req eccreq;
1707 + struct nand_row_converter rowconv;
1708 + struct nand_bbt bbt;
1709 + const struct nand_ops *ops;
1710 +};
1711 +
1712 +#define SPINAND_MAX_ID_LEN 4
1713 +
1714 +/**
1715 + * struct spinand_id - SPI NAND id structure
1716 + * @data: buffer containing the id bytes. Currently 4 bytes large, but can
1717 + * be extended if required.
1718 + * @len: ID length
1719 + */
1720 +struct spinand_id {
1721 + u8 data[SPINAND_MAX_ID_LEN];
1722 + int len;
1723 +};
1724 +
1725 +/**
1726 + * struct spinand_controller_ops - SPI NAND controller operations
1727 + * @exec_op: execute a SPI NAND operation
1728 + */
1729 +struct spinand_controller_ops {
1730 + int (*exec_op)(struct spinand_device *spinand,
1731 + struct spinand_op *op);
1732 +};
1733 +
1734 +
1735 +/**
1736 + * struct spinand_manufacturer_ops - SPI NAND manufacturer specific operations
1737 + * @detect: detect a SPI NAND device, must not be NULL.
1738 + *          A ->detect() implementation for manufacturer A must never send
1739 + *          any manufacturer-specific SPI command to a SPI NAND from
1740 + *          manufacturer B, so the proper way is to decode the raw ID
1741 + *          bytes in spinand->id.data first; if the manufacturer ID does
1742 + *          not match, return directly and let the other drivers try.
1743 + * @init: initialize the SPI NAND device.
1744 + * @cleanup: clean the SPI NAND device footprint.
1745 + * @adjust_cache_op: adjust a cache read/write operation for this device.
1746 + */
1747 +struct spinand_manufacturer_ops {
1748 + bool (*detect)(struct spinand_device *spinand);
1749 + int (*init)(struct spinand_device *spinand);
1750 + void (*cleanup)(struct spinand_device *spinand);
1751 + void (*adjust_cache_op)(struct spinand_device *spinand,
1752 + const struct nand_page_io_req *req,
1753 + struct spinand_op *op);
1754 +};
1755 +
1756 +/**
1757 + * struct spinand_manufacturer - SPI NAND manufacturer instance
1758 + * @id: manufacturer ID
1759 + * @name: manufacturer name
1760 + * @ops: pointer to the manufacturer operations
1761 + */
1762 +struct spinand_manufacturer {
1763 + u8 id;
1764 + char *name;
1765 + const struct spinand_manufacturer_ops *ops;
1766 +};
1767 +
1768 +extern const struct spinand_manufacturer micron_spinand_manufacturer;
1769 +extern const struct spinand_manufacturer etron_spinand_manufacturer;
1770 +extern const struct spinand_manufacturer paragon_spinand_manufacturer;
1771 +extern const struct spinand_manufacturer giga_spinand_manufacturer;
1772 +
1773 +#define SPINAND_CAP_RD_X1 BIT(0)
1774 +#define SPINAND_CAP_RD_X2 BIT(1)
1775 +#define SPINAND_CAP_RD_X4 BIT(2)
1776 +#define SPINAND_CAP_RD_DUAL BIT(3)
1777 +#define SPINAND_CAP_RD_QUAD BIT(4)
1778 +#define SPINAND_CAP_WR_X1 BIT(5)
1779 +#define SPINAND_CAP_WR_X2 BIT(6)
1780 +#define SPINAND_CAP_WR_X4 BIT(7)
1781 +#define SPINAND_CAP_WR_DUAL BIT(8)
1782 +#define SPINAND_CAP_WR_QUAD BIT(9)
1783 +
1784 +/**
1785 + * struct spinand_controller - SPI NAND controller instance
1786 + * @ops: point to controller operations
1787 + * @caps: controller capabilities
1788 + */
1789 +struct spinand_controller {
1790 + struct spinand_controller_ops *ops;
1791 + u32 caps;
1792 +};
1793 +
1794 +/**
1795 + * struct spinand_device - SPI NAND device instance
1796 + * @base: NAND device instance
1798 + * @lock: protection lock
1799 + * @id: ID structure
1800 + * @read_cache_op: Opcode of read from cache
1801 + * @write_cache_op: Opcode of program load
1802 + * @buf: buffer for read/write data
1803 + * @oobbuf: buffer for read/write oob
1804 + * @rw_mode: read/write mode of SPI NAND device
1805 + * @controller: SPI NAND controller instance
1806 + * @manufacturer: SPI NAND manufacturer instance, describe
1807 + * manufacturer related objects
1808 + */
1809 +struct spinand_device {
1810 + struct nand_device base;
1811 + struct mutex lock;
1812 + struct spinand_id id;
1813 + u8 read_cache_op;
1814 + u8 write_cache_op;
1815 + u8 *buf;
1816 + u8 *oobbuf;
1817 + u32 rw_mode;
1818 + struct {
1819 + struct spinand_controller *controller;
1820 + void *priv;
1821 + } controller;
1822 + struct {
1823 + const struct spinand_manufacturer *manu;
1824 + void *priv;
1825 + } manufacturer;
1826 +};
1827 +
1828 +/**
1829 + * struct nand_io_iter - NAND I/O iterator
1830 + * @req: current I/O request
1831 + * @oobbytes_per_page: maximum OOB bytes per page
1832 + * @dataleft: remaining number of data bytes to read/write
1833 + * @oobleft: remaining number of OOB bytes to read/write
1834 + */
1835 +struct nand_io_iter {
1836 + struct nand_page_io_req req;
1837 + unsigned int oobbytes_per_page;
1838 + unsigned int dataleft;
1839 + unsigned int oobleft;
1840 +};
1841 +
1842 +/**
1843 + * mtd_to_nanddev - Get the NAND device attached to the MTD instance
1844 + * @mtd: MTD instance
1845 + *
1846 + * Return: the NAND device embedding @mtd.
1847 + */
1848 +static inline struct nand_device *mtd_to_nanddev(struct mtd_info *mtd)
1849 +{
1850 + return container_of(mtd, struct nand_device, mtd);
1851 +}
+
1852 +/**
1853 + * nanddev_to_mtd - Get the MTD device attached to a NAND device
1854 + * @nand: NAND device
1855 + *
1856 + * Return: the MTD device embedded in @nand.
1857 + */
1858 +static inline struct mtd_info *nanddev_to_mtd(struct nand_device *nand)
1859 +{
1860 + return &nand->mtd;
1861 +}
1862 +
1863 +/**
1864 + * mtd_to_spinand - Get the SPI NAND device attached to the MTD instance
1865 + * @mtd: MTD instance
1866 + *
1867 + * Returns the SPI NAND device attached to @mtd.
1868 + */
1869 +static inline struct spinand_device *mtd_to_spinand(struct mtd_info *mtd)
1870 +{
1871 + return container_of(mtd_to_nanddev(mtd), struct spinand_device, base);
1872 +}
1873 +
1874 +/**
1875 + * spinand_to_mtd - Get the MTD device attached to the SPI NAND device
1876 + * @spinand: SPI NAND device
1877 + *
1878 + * Returns the MTD device attached to @spinand.
1879 + */
1880 +static inline struct mtd_info *spinand_to_mtd(struct spinand_device *spinand)
1881 +{
1882 + return nanddev_to_mtd(&spinand->base);
1883 +}
1884 +
1885 +/**
1886 + * nand_to_spinand - Get the SPI NAND device embedding a NAND object
1887 + * @nand: NAND object
1888 + *
1889 + * Returns the SPI NAND device embedding @nand.
1890 + */
1891 +static inline struct spinand_device *nand_to_spinand(struct nand_device *nand)
1892 +{
1893 + return container_of(nand, struct spinand_device, base);
1894 +}
1895 +
1896 +/**
1897 + * spinand_to_nand - Get the NAND device embedded in a SPI NAND object
1898 + * @spinand: SPI NAND device
1899 + *
1900 + * Returns the NAND device embedded in @spinand.
1901 + */
1902 +static inline struct nand_device *
1903 +spinand_to_nand(struct spinand_device *spinand)
1904 +{
1905 + return &spinand->base;
1906 +}
1907 +
1908 +/**
1909 + * nanddev_set_of_node - Attach a DT node to a NAND device
1910 + * @nand: NAND device
1911 + * @np: DT node
1912 + *
1913 + * Attach a DT node to a NAND device.
1914 + */
1915 +static inline void nanddev_set_of_node(struct nand_device *nand,
1916 + struct device_node *np)
1917 +{
1918 + mtd_set_of_node(&nand->mtd, np);
1919 +}
1920 +
1921 +/**
1922 + * spinand_set_of_node - Attach a DT node to a SPI NAND device
1923 + * @spinand: SPI NAND device
1924 + * @np: DT node
1925 + *
1926 + * Attach a DT node to a SPI NAND device.
1927 + */
1928 +static inline void spinand_set_of_node(struct spinand_device *spinand,
1929 + struct device_node *np)
1930 +{
1931 + nanddev_set_of_node(&spinand->base, np);
1932 +}
1933 +
1934 +#define SPINAND_MAX_ADDR_LEN 4
1935 +
1936 +/**
1937 + * struct spinand_op - SPI NAND operation description
1938 + * @cmd: opcode to send
1939 + * @n_addr: number of address bytes
1940 + * @addr_nbits: number of bits used to transfer the address
1941 + * @dummy_bytes: number of dummy bytes following the address
1942 + * @addr: address or dummy bytes buffer
1943 + * @n_tx: size of tx_buf
1944 + * @tx_buf: data to be written
1945 + * @n_rx: size of rx_buf
1946 + * @rx_buf: data to be read
1947 + * @data_nbits: number of bits used to transfer the data
1948 + */
1949 +struct spinand_op {
1950 + u8 cmd;
1951 + u8 n_addr;
1952 + u8 addr_nbits;
1953 + u8 dummy_bytes;
1954 + u8 addr[SPINAND_MAX_ADDR_LEN];
1955 + u32 n_tx;
1956 + const u8 *tx_buf;
1957 + u32 n_rx;
1958 + u8 *rx_buf;
1959 + u8 data_nbits;
1960 +};
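+/*
+ * Example: a minimal spinand_op for a READ ID transfer. This is only a
+ * sketch, assuming single-bit (X1) wiring, a chip that returns one dummy
+ * byte before the ID, and a caller-provided 'id_buf':
+ *
+ *	struct spinand_op op = { };
+ *
+ *	op.cmd = SPINAND_CMD_READ_ID;
+ *	op.addr_nbits = 1;
+ *	op.dummy_bytes = 1;
+ *	op.n_rx = 2;
+ *	op.rx_buf = id_buf;
+ *	op.data_nbits = 1;
+ */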
+
1961 +/**
1962 + * nanddev_neraseblocks - Get the total number of eraseblocks
1963 + * @nand: NAND device
1964 + *
1965 + * Return: the number of eraseblocks exposed by @nand.
1966 + */
1967 +static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
1968 +{
1969 + return (u64)nand->memorg.luns_per_target *
1970 + nand->memorg.eraseblocks_per_lun *
1971 + nand->memorg.ntargets;
1972 +}
1973 +
1974 +/* BBT related functions */
1975 +enum nand_bbt_block_status {
1976 + NAND_BBT_BLOCK_STATUS_UNKNOWN,
1977 + NAND_BBT_BLOCK_GOOD,
1978 + NAND_BBT_BLOCK_WORN,
1979 + NAND_BBT_BLOCK_RESERVED,
1980 + NAND_BBT_BLOCK_FACTORY_BAD,
1981 + NAND_BBT_BLOCK_NUM_STATUS,
1982 +};
1983 +int nanddev_bbt_init(struct nand_device *nand);
1984 +void nanddev_bbt_cleanup(struct nand_device *nand);
1985 +int nanddev_bbt_update(struct nand_device *nand);
1986 +int nanddev_bbt_get_block_status(const struct nand_device *nand,
1987 + unsigned int entry);
1988 +int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
1989 + enum nand_bbt_block_status status);
1990 +
1991 +/* SPI NAND supported OP mode */
1992 +#define SPINAND_RD_X1 BIT(0)
1993 +#define SPINAND_RD_X2 BIT(1)
1994 +#define SPINAND_RD_X4 BIT(2)
1995 +#define SPINAND_RD_DUAL BIT(3)
1996 +#define SPINAND_RD_QUAD BIT(4)
1997 +#define SPINAND_WR_X1 BIT(5)
1998 +#define SPINAND_WR_X2 BIT(6)
1999 +#define SPINAND_WR_X4 BIT(7)
2000 +#define SPINAND_WR_DUAL BIT(8)
2001 +#define SPINAND_WR_QUAD BIT(9)
2002 +
2003 +#define SPINAND_RD_COMMON (SPINAND_RD_X1 | SPINAND_RD_X2 | \
2004 + SPINAND_RD_X4 | SPINAND_RD_DUAL | \
2005 + SPINAND_RD_QUAD)
2006 +#define SPINAND_WR_COMMON (SPINAND_WR_X1 | SPINAND_WR_X4)
2007 +#define SPINAND_RW_COMMON (SPINAND_RD_COMMON | SPINAND_WR_COMMON)
2008 +
2009 +struct spinand_device *devm_spinand_alloc(struct device *dev);
2010 +int spinand_init(struct spinand_device *spinand, struct module *owner);
2011 +void spinand_cleanup(struct spinand_device *spinand);
2012 +
2013 +/**
2014 + * nanddev_page_size - Get NAND page size
2015 + * @nand: NAND device
2016 + *
2017 + * Return: the page size.
2018 + */
2019 +static inline size_t nanddev_page_size(const struct nand_device *nand)
2020 +{
2021 + return nand->memorg.pagesize;
2022 +}
2023 +
2024 +/**
2025 + * nanddev_per_page_oobsize - Get NAND OOB size
2026 + * @nand: NAND device
2027 + *
2028 + * Return: the OOB size.
2029 + */
2030 +static inline unsigned int
2031 +nanddev_per_page_oobsize(const struct nand_device *nand)
2032 +{
2033 + return nand->memorg.oobsize;
2034 +}
2035 +
2036 +/**
2037 + * nanddev_pages_per_eraseblock - Get the number of pages per eraseblock
2038 + * @nand: NAND device
2039 + *
2040 + * Return: the number of pages per eraseblock.
2041 + */
2042 +static inline unsigned int
2043 +nanddev_pages_per_eraseblock(const struct nand_device *nand)
2044 +{
2045 + return nand->memorg.pages_per_eraseblock;
2046 +}
2047 +
2048 +/**
2049 + * nanddev_eraseblock_size - Get NAND eraseblock size
2050 + * @nand: NAND device
2051 + *
2052 + * Return: the eraseblock size.
2053 + */
2054 +static inline size_t nanddev_eraseblock_size(const struct nand_device *nand)
2055 +{
2056 + return nand->memorg.pagesize * nand->memorg.pages_per_eraseblock;
2057 +}
2058 +
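+/**
+ * nanddev_target_size - Get the total size of a NAND target
+ * @nand: NAND device
+ *
+ * Return: the storage size (in bytes) of a single target (die) exposed
+ * by @nand.
+ */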
2059 +static inline u64 nanddev_target_size(const struct nand_device *nand)
2060 +{
2061 + return (u64)nand->memorg.luns_per_target *
2062 + nand->memorg.eraseblocks_per_lun *
2063 + nand->memorg.pages_per_eraseblock *
2064 + nand->memorg.pagesize;
2065 +}
2066 +
2067 +/**
2068 + * nanddev_ntargets - Get the total number of targets
2069 + * @nand: NAND device
2070 + *
2071 + * Return: the number of dies exposed by @nand.
2072 + */
2073 +static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
2074 +{
2075 + return nand->memorg.ntargets;
2076 +}
2077 +
2078 +/**
2079 + * nanddev_size - Get NAND size
2080 + * @nand: NAND device
2081 + *
2082 + * Return: the total size (in bytes) exposed by @nand.
2083 + */
2084 +static inline u64 nanddev_size(const struct nand_device *nand)
2085 +{
2086 + return nanddev_target_size(nand) * nanddev_ntargets(nand);
2087 +}
2088 +
2089 +/**
2090 + * nanddev_get_memorg - Extract memory organization info from a NAND device
2091 + * @nand: NAND device
2092 + *
2093 + * This can be used by the upper layer to fill the memorg info before calling
2094 + * nanddev_init().
2095 + *
2096 + * Return: the memorg object embedded in the NAND device.
2097 + */
2098 +static inline struct nand_memory_organization *
2099 +nanddev_get_memorg(struct nand_device *nand)
2100 +{
2101 + return &nand->memorg;
2102 +}
2103 +
2105 +static inline unsigned int nanddev_pos_to_row(struct nand_device *nand,
2106 + const struct nand_pos *pos)
2107 +{
2108 + return (pos->lun << nand->rowconv.lun_addr_shift) |
2109 + (pos->eraseblock << nand->rowconv.eraseblock_addr_shift) |
2110 + pos->page;
2111 +}
2112 +
2114 +static inline unsigned int nanddev_offs_to_pos(struct nand_device *nand,
2115 + loff_t offs,
2116 + struct nand_pos *pos)
2117 +{
2118 + unsigned int pageoffs;
2119 + u64 tmp = offs;
2120 +
2121 + pageoffs = do_div(tmp, nand->memorg.pagesize);
2122 + pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock);
2123 + pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun);
2124 + pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
2125 + pos->lun = do_div(tmp, nand->memorg.luns_per_target);
2126 + pos->target = tmp;
2127 +
2128 + return pageoffs;
2129 +}
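+/*
+ * Worked example, assuming 2048-byte pages and 64 pages per eraseblock:
+ * offs = 0x21400 is 66 full pages plus 0x400 bytes, so the helper
+ * returns pageoffs = 0x400 and fills pos with page = 2 and
+ * eraseblock = 1 (66 = 1 * 64 + 2); plane, lun and target are derived
+ * the same way from the remaining quotient.
+ */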
2130 +
2131 +static inline int nanddev_pos_cmp(const struct nand_pos *a,
2132 + const struct nand_pos *b)
2133 +{
2134 + if (a->target != b->target)
2135 + return a->target < b->target ? -1 : 1;
2136 +
2137 + if (a->lun != b->lun)
2138 + return a->lun < b->lun ? -1 : 1;
2139 +
2140 + if (a->eraseblock != b->eraseblock)
2141 + return a->eraseblock < b->eraseblock ? -1 : 1;
2142 +
2143 + if (a->page != b->page)
2144 + return a->page < b->page ? -1 : 1;
2145 +
2146 + return 0;
2147 +}
2148 +
2149 +static inline void nanddev_pos_next_target(struct nand_device *nand,
2150 + struct nand_pos *pos)
2151 +{
2152 + pos->page = 0;
2153 + pos->plane = 0;
2154 + pos->eraseblock = 0;
2155 + pos->lun = 0;
2156 + pos->target++;
2157 +}
2158 +
2159 +static inline void nanddev_pos_next_lun(struct nand_device *nand,
2160 + struct nand_pos *pos)
2161 +{
2162 + if (pos->lun >= nand->memorg.luns_per_target - 1)
2163 + return nanddev_pos_next_target(nand, pos);
2164 +
2165 + pos->lun++;
2166 + pos->page = 0;
2167 + pos->plane = 0;
2168 + pos->eraseblock = 0;
2169 +}
2170 +
2171 +static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
2172 + struct nand_pos *pos)
2173 +{
2174 + if (pos->eraseblock >= nand->memorg.eraseblocks_per_lun - 1)
2175 + return nanddev_pos_next_lun(nand, pos);
2176 +
2177 + pos->eraseblock++;
2178 + pos->page = 0;
2179 + pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
2180 +}
2181 +
2182 +static inline loff_t nanddev_pos_to_offs(struct nand_device *nand,
2183 + const struct nand_pos *pos)
2184 +{
2185 + unsigned int npages;
2186 +
2187 + npages = pos->page +
2188 + ((pos->eraseblock +
2189 + (pos->lun +
2190 + (pos->target * nand->memorg.luns_per_target)) *
2191 + nand->memorg.eraseblocks_per_lun) *
2192 + nand->memorg.pages_per_eraseblock);
2193 +
2194 + return (loff_t)npages * nand->memorg.pagesize;
2195 +}
2196 +
2197 +static inline void nanddev_pos_next_page(struct nand_device *nand,
2198 + struct nand_pos *pos)
2199 +{
2200 + if (pos->page >= nand->memorg.pages_per_eraseblock - 1)
2201 + return nanddev_pos_next_eraseblock(nand, pos);
2202 +
2203 + pos->page++;
2204 +}
2205 +
2206 +/**
2207 + * nanddev_io_iter_init - Initialize a NAND I/O iterator
2208 + * @nand: NAND device
2209 + * @offs: absolute offset
2210 + * @req: MTD request
2211 + * @iter: page iterator
2212 + */
2213 +static inline void nanddev_io_iter_init(struct nand_device *nand,
2214 + loff_t offs, struct mtd_oob_ops *req,
2215 + struct nand_io_iter *iter)
2216 +{
2217 + struct mtd_info *mtd = nanddev_to_mtd(nand);
2218 +
2219 + iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
2220 + iter->req.ooboffs = req->ooboffs;
2221 + iter->oobbytes_per_page = mtd_oobavail(mtd, req);
2222 + iter->dataleft = req->len;
2223 + iter->oobleft = req->ooblen;
2224 + iter->req.databuf.in = req->datbuf;
2225 + iter->req.datalen = min_t(unsigned int,
2226 + nand->memorg.pagesize - iter->req.dataoffs,
2227 + iter->dataleft);
2228 + iter->req.oobbuf.in = req->oobbuf;
2229 + iter->req.ooblen = min_t(unsigned int,
2230 + iter->oobbytes_per_page - iter->req.ooboffs,
2231 + iter->oobleft);
2232 +}
2233 +
2234 +/**
2235 + * nanddev_io_iter_next_page - Move to the next page
2236 + * @nand: NAND device
2237 + * @iter: page iterator
2238 + */
2239 +static inline void nanddev_io_iter_next_page(struct nand_device *nand,
2240 + struct nand_io_iter *iter)
2241 +{
2242 + nanddev_pos_next_page(nand, &iter->req.pos);
2243 + iter->dataleft -= iter->req.datalen;
2244 + iter->req.databuf.in += iter->req.datalen;
2245 + iter->oobleft -= iter->req.ooblen;
2246 + iter->req.oobbuf.in += iter->req.ooblen;
2247 + iter->req.dataoffs = 0;
2248 + iter->req.ooboffs = 0;
2249 + iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize,
2250 + iter->dataleft);
2251 + iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page,
2252 + iter->oobleft);
2253 +}
2254 +
2255 +/**
2256 + * nanddev_io_iter_end - Check whether the iteration is done
2257 + * @nand: NAND device
2258 + * @iter: page iterator
2259 + */
2260 +static inline bool nanddev_io_iter_end(struct nand_device *nand,
2261 + const struct nand_io_iter *iter)
2262 +{
2263 + if (iter->dataleft || iter->oobleft)
2264 + return false;
2265 +
2266 + return true;
2267 +}
2268 +
2269 +/**
2270 + * nanddev_io_for_each_page - Iterate over all NAND pages contained in an MTD I/O
2271 + * request
2272 + * @nand: NAND device
2273 + * @start: start address to read/write
2274 + * @req: MTD I/O request
2275 + * @iter: page iterator
2276 + */
2277 +#define nanddev_io_for_each_page(nand, start, req, iter) \
2278 + for (nanddev_io_iter_init(nand, start, req, iter); \
2279 + !nanddev_io_iter_end(nand, iter); \
2280 + nanddev_io_iter_next_page(nand, iter))
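+/*
+ * Usage sketch: split an MTD request into per-page requests. 'ops' is a
+ * filled-in struct mtd_oob_ops and do_page_read() is a hypothetical
+ * per-page helper, shown for illustration only:
+ *
+ *	struct nand_io_iter iter;
+ *	int ret = 0;
+ *
+ *	nanddev_io_for_each_page(nand, from, ops, &iter) {
+ *		ret = do_page_read(nand, &iter.req);
+ *		if (ret)
+ *			break;
+ *	}
+ */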
2281 +
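+/*
+ * One BBT entry per eraseblock; using only pos->eraseblock assumes a
+ * single-LUN, single-target device, which matches the chips listed in
+ * the manufacturer tables below.
+ */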
2282 +static inline unsigned int nanddev_bbt_pos_to_entry(struct nand_device *nand,
2283 + const struct nand_pos *pos)
2284 +{
2285 + return pos->eraseblock;
2286 +}
2287 +
2288 +static inline bool nanddev_bbt_is_initialized(struct nand_device *nand)
2289 +{
2290 + return !!nand->bbt.cache;
2291 +}
2292 +
2293 +int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
2294 + struct module *owner);
2295 +void nanddev_cleanup(struct nand_device *nand);
2296 +bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
2297 +bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
2298 +int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos);
2299 +int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);
2300 +
2301 +/* MTD -> NAND helper functions. */
2302 +int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo);
2303 +
2305 +#endif /* __LINUX_MTD_SPINAND_H */
2306 --- /dev/null
2307 +++ b/drivers/mtd/nand/spi_nand/generic-spinand-controller.c
2308 @@ -0,0 +1,182 @@
2309 +/*
2310 + * Copyright (c) 2016-2017 Micron Technology, Inc.
2311 + *
2312 + * This program is free software; you can redistribute it and/or
2313 + * modify it under the terms of the GNU General Public License
2314 + * as published by the Free Software Foundation; either version 2
2315 + * of the License, or (at your option) any later version.
2316 + *
2317 + * This program is distributed in the hope that it will be useful,
2318 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
2319 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2320 + * GNU General Public License for more details.
2321 + */
2322 +#include <linux/kernel.h>
2323 +#include <linux/module.h>
2324 +#include <linux/spi/spi.h>
2325 +#include <linux/mtd/mtd.h>
2326 +#include <linux/mtd/spinand.h>
2327 +
2328 +struct gen_spinand_controller {
2329 + struct spinand_controller ctrl;
2330 + struct spi_device *spi;
2331 +};
2332 +
2333 +#define to_gen_spinand_controller(c) \
2334 + container_of(c, struct gen_spinand_controller, ctrl)
2335 +
2336 +/*
2337 + * gen_spinand_controller_exec_op - execute an operation on the SPI NAND
2338 + * device over a generic SPI bus
2339 + * @spinand: SPI NAND device structure
2340 + * @op: SPI NAND operation descriptor
2341 + */
2342 +static int gen_spinand_controller_exec_op(struct spinand_device *spinand,
2343 + struct spinand_op *op)
2344 +{
2345 + struct spi_message message;
2346 + struct spi_transfer x[3];
2347 + struct spinand_controller *spinand_controller;
2348 + struct gen_spinand_controller *controller;
2349 +
2350 + spinand_controller = spinand->controller.controller;
2351 + controller = to_gen_spinand_controller(spinand_controller);
2352 + spi_message_init(&message);
2353 + memset(x, 0, sizeof(x));
2354 + x[0].len = 1;
2355 + x[0].tx_nbits = 1;
2356 + x[0].tx_buf = &op->cmd;
2357 + spi_message_add_tail(&x[0], &message);
2358 +
2359 + if (op->n_addr + op->dummy_bytes) {
2360 + x[1].len = op->n_addr + op->dummy_bytes;
2361 + x[1].tx_nbits = op->addr_nbits;
2362 + x[1].tx_buf = op->addr;
2364 + spi_message_add_tail(&x[1], &message);
2365 + }
2366 +
2367 + if (op->n_tx) {
2368 + x[2].len = op->n_tx;
2369 + x[2].tx_nbits = op->data_nbits;
2370 + x[2].tx_buf = op->tx_buf;
2371 + spi_message_add_tail(&x[2], &message);
2372 + } else if (op->n_rx) {
2373 + x[2].len = op->n_rx;
2374 + x[2].rx_nbits = op->data_nbits;
2375 + x[2].rx_buf = op->rx_buf;
2376 + spi_message_add_tail(&x[2], &message);
2377 + }
2378 +
2379 + return spi_sync(controller->spi, &message);
2380 +}
2381 +
2382 +static struct spinand_controller_ops gen_spinand_controller_ops = {
2383 + .exec_op = gen_spinand_controller_exec_op,
2384 +};
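+/*
+ * The SPI NAND core reaches this controller through the ops table; a
+ * minimal sketch of the call path, assuming probe has stored the
+ * controller in spinand->controller.controller:
+ *
+ *	struct spinand_controller *ctrl = spinand->controller.controller;
+ *	int ret = ctrl->ops->exec_op(spinand, &op);
+ */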
+extern int spinand_bbt_create(struct nand_device *nand);
+
2390 +static int gen_spinand_controller_probe(struct spi_device *spi)
2391 +{
2392 + struct spinand_device *spinand;
2393 + struct gen_spinand_controller *controller;
2394 + struct spinand_controller *spinand_controller;
2395 + struct device *dev = &spi->dev;
2396 + u16 mode = spi->mode;
2397 + int ret;
2398 +
2399 + spinand = devm_spinand_alloc(dev);
2400 + if (IS_ERR(spinand)) {
2401 + ret = PTR_ERR(spinand);
2402 + goto out;
2403 + }
2404 +
2405 + controller = devm_kzalloc(dev, sizeof(*controller), GFP_KERNEL);
2406 + if (!controller) {
2407 + ret = -ENOMEM;
2408 + goto out;
2409 + }
2410 +
2411 + controller->spi = spi;
2412 + spinand_controller = &controller->ctrl;
2413 + spinand_controller->ops = &gen_spinand_controller_ops;
2414 + spinand_controller->caps = SPINAND_CAP_RD_X1 | SPINAND_CAP_WR_X1;
2415 +
2416 + if ((mode & SPI_RX_QUAD) && (mode & SPI_TX_QUAD))
2417 + spinand_controller->caps |= SPINAND_CAP_RD_QUAD;
2418 +
2419 + if ((mode & SPI_RX_DUAL) && (mode & SPI_TX_DUAL))
2420 + spinand_controller->caps |= SPINAND_CAP_RD_DUAL;
2421 +
2422 + if (mode & SPI_RX_QUAD)
2423 + spinand_controller->caps |= SPINAND_CAP_RD_X4;
2424 +
2425 + if (mode & SPI_RX_DUAL)
2426 + spinand_controller->caps |= SPINAND_CAP_RD_X2;
2427 +
2428 + if (mode & SPI_TX_QUAD)
2429 + spinand_controller->caps |= SPINAND_CAP_WR_QUAD |
2430 + SPINAND_CAP_WR_X4;
2431 +
2432 + if (mode & SPI_TX_DUAL)
2433 + spinand_controller->caps |= SPINAND_CAP_WR_DUAL |
2434 + SPINAND_CAP_WR_X2;
2435 +
2436 + spinand->controller.controller = spinand_controller;
2437 + spi_set_drvdata(spi, spinand);
2438 +
2439 + ret = spinand_init(spinand, THIS_MODULE);
2440 + if (ret)
2441 + goto out;
2442 +
2443 + ret = mtd_device_register(spinand_to_mtd(spinand), NULL, 0);
+ if (ret)
+ goto out;
+
+ spinand_bbt_create(spinand_to_nand(spinand));
2460 +out:
2461 + return ret;
2462 +}
2463 +
2464 +static int gen_spinand_controller_remove(struct spi_device *spi)
2465 +{
2466 + struct spinand_device *spinand = spi_get_drvdata(spi);
2467 + int ret;
2468 +
2469 + ret = mtd_device_unregister(spinand_to_mtd(spinand));
2470 + if (ret)
2471 + return ret;
2472 +
2473 + spinand_cleanup(spinand);
2474 +
2475 + return 0;
2476 +}
2477 +
2478 +static struct spi_driver gen_spinand_controller_driver = {
2479 + .driver = {
2480 + .name = "generic-spinand-controller",
2481 + .owner = THIS_MODULE,
2482 + },
2483 + .probe = gen_spinand_controller_probe,
2484 + .remove = gen_spinand_controller_remove,
2485 +};
2486 +module_spi_driver(gen_spinand_controller_driver);
2487 +
2488 +MODULE_DESCRIPTION("Generic SPI NAND controller");
2489 +MODULE_AUTHOR("Peter Pan <peterpandong@micron.com>");
2490 +MODULE_LICENSE("GPL v2");
2491 --- /dev/null
2492 +++ b/drivers/mtd/nand/spi_nand/gigadevice.c
2493 @@ -0,0 +1,142 @@
2494 +/*
2495 + *
2496 + * Copyright (c) 2016-2017 Micron Technology, Inc.
2497 + *
2498 + * This program is free software; you can redistribute it and/or
2499 + * modify it under the terms of the GNU General Public License
2500 + * as published by the Free Software Foundation; either version 2
2501 + * of the License, or (at your option) any later version.
2502 + *
2503 + * This program is distributed in the hope that it will be useful,
2504 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
2505 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2506 + * GNU General Public License for more details.
2507 + */
2508 +
2509 +#include <linux/device.h>
2510 +#include <linux/kernel.h>
2511 +#include <linux/mtd/spinand.h>
2512 +
2513 +#define SPINAND_MFR_GIGA 0xC8
2514 +
2515 +struct giga_spinand_info {
2516 + char *name;
2517 + u8 dev_id;
2518 + struct nand_memory_organization memorg;
2519 + struct nand_ecc_req eccreq;
2520 + unsigned int rw_mode;
2521 +};
2522 +
2523 +#define GIGA_SPI_NAND_INFO(nm, did, mo, er, rwm) \
2524 + { \
2525 + .name = (nm), \
2526 + .dev_id = (did), \
2527 + .memorg = mo, \
2528 + .eccreq = er, \
2529 + .rw_mode = (rwm) \
2530 + }
2531 +
2532 +static const struct giga_spinand_info giga_spinand_table[] = {
2533 + GIGA_SPI_NAND_INFO("GIGAxxxx", 0xB1,
2534 + NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
2535 + NAND_ECCREQ(8, 512),
2536 + SPINAND_RW_COMMON),
2537 +};
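+/*
+ * Further chips are supported by appending table entries. The entry
+ * below is a hypothetical illustration only (device ID and geometry
+ * are placeholders, not taken from a datasheet):
+ *
+ *	GIGA_SPI_NAND_INFO("GIGAyyyy", 0xD1,
+ *			   NAND_MEMORG(1, 2048, 128, 64, 2048, 1, 1, 1),
+ *			   NAND_ECCREQ(8, 512),
+ *			   SPINAND_RW_COMMON),
+ */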
2538 +
2539 +static int giga_spinand_get_dummy(struct spinand_device *spinand,
2540 + struct spinand_op *op)
2541 +{
2542 + u8 opcode = op->cmd;
2543 +
2544 + switch (opcode) {
2545 + case SPINAND_CMD_READ_FROM_CACHE_FAST:
2546 + case SPINAND_CMD_READ_FROM_CACHE:
2547 + case SPINAND_CMD_READ_FROM_CACHE_X2:
2548 + case SPINAND_CMD_READ_FROM_CACHE_DUAL_IO:
2549 + case SPINAND_CMD_READ_FROM_CACHE_X4:
2550 + case SPINAND_CMD_READ_ID:
2551 + return 1;
2552 + case SPINAND_CMD_READ_FROM_CACHE_QUAD_IO:
2553 + return 2;
2554 +
2555 + default:
2556 + return 0;
2557 + }
2558 +}
2559 +
2560 +/**
2561 + * giga_spinand_scan_id_table - scan SPI NAND info in id table
2562 + * @spinand: SPI NAND device structure
2563 + * @dev_id: device ID byte read from the chip
2564 + * Description:
2565 + *   If the ID is found in the table, configure the device with the
+ *   table information.
2566 + */
2567 +static bool giga_spinand_scan_id_table(struct spinand_device *spinand,
2568 + u8 dev_id)
2569 +{
2570 + struct mtd_info *mtd = spinand_to_mtd(spinand);
2571 + struct nand_device *nand = mtd_to_nanddev(mtd);
2572 + const struct giga_spinand_info *item;
2573 + unsigned int i;
2574 +
2575 + for (i = 0; i < ARRAY_SIZE(giga_spinand_table); i++) {
2576 + item = &giga_spinand_table[i];
2577 + if (dev_id != item->dev_id)
2578 + continue;
2579 +
2580 + nand->memorg = item->memorg;
2581 + nand->eccreq = item->eccreq;
2582 + spinand->rw_mode = item->rw_mode;
2583 +
2584 + return true;
2585 + }
2586 +
2587 + return false;
2588 +}
2589 +
2590 +/**
2591 + * giga_spinand_detect - initialize the device-related part of
2592 + * spinand_device if it is a GigaDevice chip.
2593 + * @spinand: SPI NAND device structure
2594 + */
2595 +static bool giga_spinand_detect(struct spinand_device *spinand)
2596 +{
2597 + u8 *id = spinand->id.data;
2598 +
2599 + /*
2600 + * GigaDevice SPI NAND reads the ID without a dummy byte, so
2601 + * id[0] holds the manufacturer ID and id[1] the device ID.
2602 + */
2603 + if (id[0] != SPINAND_MFR_GIGA)
2604 + return false;
2605 +
2606 + return giga_spinand_scan_id_table(spinand, id[1]);
2607 +}
2608 +
2609 +/**
2610 + * giga_spinand_adjust_cache_op - fix up a cache operation.
2611 + * @spinand: SPI NAND device structure
2612 + * @req: pointer to the page I/O request
2613 + * @op: pointer to spinand_op struct
2615 + */
2616 +static void giga_spinand_adjust_cache_op(struct spinand_device *spinand,
2617 + const struct nand_page_io_req *req,
2618 + struct spinand_op *op)
2619 +{
2623 + op->dummy_bytes = giga_spinand_get_dummy(spinand, op);
2624 +}
2625 +
2626 +static const struct spinand_manufacturer_ops giga_spinand_manuf_ops = {
2627 + .detect = giga_spinand_detect,
2628 + .adjust_cache_op = giga_spinand_adjust_cache_op,
2629 +};
2630 +
2631 +const struct spinand_manufacturer giga_spinand_manufacturer = {
2632 + .id = SPINAND_MFR_GIGA,
2633 + .name = "Giga",
2634 + .ops = &giga_spinand_manuf_ops,
2635 +};
2636 --- /dev/null
2637 +++ b/drivers/mtd/nand/spi_nand/paragon.c
2638 @@ -0,0 +1,147 @@
2639 +/*
2640 + *
2641 + * Copyright (c) 2016-2017 Micron Technology, Inc.
2642 + *
2643 + * This program is free software; you can redistribute it and/or
2644 + * modify it under the terms of the GNU General Public License
2645 + * as published by the Free Software Foundation; either version 2
2646 + * of the License, or (at your option) any later version.
2647 + *
2648 + * This program is distributed in the hope that it will be useful,
2649 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
2650 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2651 + * GNU General Public License for more details.
2652 + */
2653 +
2654 +#include <linux/device.h>
2655 +#include <linux/kernel.h>
2656 +#include <linux/mtd/spinand.h>
2657 +
2658 +#define SPINAND_MFR_PARAGON 0xA1
2659 +
2660 +struct paragon_spinand_info {
2661 + char *name;
2662 + u8 dev_id;
2663 + struct nand_memory_organization memorg;
2664 + struct nand_ecc_req eccreq;
2665 + unsigned int rw_mode;
2666 +};
2667 +
2668 +#define PARAGON_SPI_NAND_INFO(nm, did, mo, er, rwm) \
2669 + { \
2670 + .name = (nm), \
2671 + .dev_id = (did), \
2672 + .memorg = mo, \
2673 + .eccreq = er, \
2674 + .rw_mode = (rwm) \
2675 + }
2676 +
2677 +static const struct paragon_spinand_info paragon_spinand_table[] = {
2678 + PARAGON_SPI_NAND_INFO("PARAGONxxxx", 0xe1,
2679 + NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
2680 + NAND_ECCREQ(8, 512),
2681 + SPINAND_RW_COMMON),
2682 +};
2683 +
2684 +static int paragon_spinand_get_dummy(struct spinand_device *spinand,
2685 + struct spinand_op *op)
2686 +{
2687 + u8 opcode = op->cmd;
2688 +
2689 + switch (opcode) {
2690 + case SPINAND_CMD_READ_FROM_CACHE_FAST:
2691 + case SPINAND_CMD_READ_FROM_CACHE:
2692 + case SPINAND_CMD_READ_FROM_CACHE_X2:
2693 + case SPINAND_CMD_READ_FROM_CACHE_DUAL_IO:
2694 + case SPINAND_CMD_READ_FROM_CACHE_X4:
2695 + case SPINAND_CMD_READ_ID:
2696 + return 1;
2697 +
2698 + case SPINAND_CMD_READ_FROM_CACHE_QUAD_IO:
2699 + return 2;
2700 +
2701 + default:
2702 + return 0;
2703 + }
2704 +}
2705 +
2706 +/**
2707 + * paragon_spinand_scan_id_table - scan SPI NAND info in id table
2708 + * @spinand: SPI NAND device structure
2709 + * @dev_id: device ID byte read from the chip
2710 + * Description:
2711 + *   If the ID is found in the table, configure the device with the
+ *   table information.
2712 + */
2713 +static bool paragon_spinand_scan_id_table(struct spinand_device *spinand,
2714 + u8 dev_id)
2715 +{
2716 + struct mtd_info *mtd = spinand_to_mtd(spinand);
2717 + struct nand_device *nand = mtd_to_nanddev(mtd);
2718 + const struct paragon_spinand_info *item;
2719 + unsigned int i;
2720 +
2721 + for (i = 0; i < ARRAY_SIZE(paragon_spinand_table); i++) {
2722 + item = &paragon_spinand_table[i];
2723 + if (dev_id != item->dev_id)
2724 + continue;
2725 +
2726 + nand->memorg = item->memorg;
2727 + nand->eccreq = item->eccreq;
2728 + spinand->rw_mode = item->rw_mode;
2729 +
2730 + return true;
2731 + }
2732 +
2733 + return false;
2734 +}
2735 +
2736 +/**
2737 + * paragon_spinand_detect - initialize the device-related part of
2738 + * spinand_device if it is a Paragon chip.
2739 + * @spinand: SPI NAND device structure
2740 + */
2741 +static bool paragon_spinand_detect(struct spinand_device *spinand)
2742 +{
2743 + u8 *id = spinand->id.data;
2744 +
2745 + /*
2746 + * Paragon SPI NAND read ID returns a dummy byte first, so the
2747 + * manufacturer ID is in id[1] and the device ID in id[2].
2748 + */
2749 + if (id[1] != SPINAND_MFR_PARAGON)
2750 + return false;
2751 +
2752 + return paragon_spinand_scan_id_table(spinand, id[2]);
2753 +}
2754 +
2755 +/**
2756 + * paragon_spinand_adjust_cache_op - fix up the address bytes of a cache
2757 + * operation.
2758 + * @spinand: SPI NAND device structure
2759 + * @req: pointer to the page I/O request
2760 + * @op: pointer to spinand_op struct
2761 + */
2762 +static void paragon_spinand_adjust_cache_op(struct spinand_device *spinand,
2763 + const struct nand_page_io_req *req,
2764 + struct spinand_op *op)
2765 +{
+ /* Paragon parts take a 2-byte address: drop the leading byte. */
2769 + op->n_addr = 2;
2770 + op->addr[0] = op->addr[1];
2771 + op->addr[1] = op->addr[2];
2772 + op->addr[2] = 0;
2773 + op->dummy_bytes = paragon_spinand_get_dummy(spinand, op);
2774 +}
2775 +
2776 +static const struct spinand_manufacturer_ops paragon_spinand_manuf_ops = {
2777 + .detect = paragon_spinand_detect,
2778 + .adjust_cache_op = paragon_spinand_adjust_cache_op,
2779 +};
2780 +
2781 +const struct spinand_manufacturer paragon_spinand_manufacturer = {
2782 + .id = SPINAND_MFR_PARAGON,
2783 + .name = "Paragon",
2784 + .ops = &paragon_spinand_manuf_ops,
2785 +};