From 997a8965db8417266bea3fbdcfa3e5655a1b52fa Mon Sep 17 00:00:00 2001
From: John Crispin <blogic@openwrt.org>
Date: Tue, 9 Sep 2014 23:12:15 +0200
Subject: [PATCH 18/36] MTD: nand: lots of xrx200 fixes

Signed-off-by: John Crispin <blogic@openwrt.org>
---
 drivers/mtd/nand/xway_nand.c | 63 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 63 insertions(+)

--- a/drivers/mtd/nand/xway_nand.c
+++ b/drivers/mtd/nand/xway_nand.c
@@ -63,6 +63,24 @@
 #define NAND_CON_CSMUX		(1 << 1)
 #define NAND_CON_NANDM		1
 
+#define DANUBE_PCI_REG32( addr )	(*(volatile u32 *)(addr))
+#define PCI_CR_PR_OFFSET	(KSEG1+0x1E105400)
+#define PCI_CR_PC_ARB		(PCI_CR_PR_OFFSET + 0x0080)
+
+/*
+ * req_mask provides a mechanism to prevent interference between
+ * nand and pci (probably only relevant for the BT Home Hub 2B).
+ * Setting it causes the corresponding pci req pins to be masked
+ * during nand access, and also moves ebu locking from the read/write
+ * functions to the chip select function to ensure that the whole
+ * operation runs with interrupts disabled.
+ * In addition it switches on some extra waiting in xway_cmd_ctrl().
+ * This seems to be necessary if the ebu_cs1 pin has open-drain disabled,
+ * which in turn seems to be necessary for the nor chip to be recognised
+ * reliably, on a board (Home Hub 2B again) which has both nor and nand.
+ */
+static __be32 req_mask = 0;
+
 struct xway_nand_data {
 	struct nand_chip	chip;
 	unsigned long		csflags;
@@ -94,10 +112,22 @@ static void xway_select_chip(struct mtd_
 	case -1:
 		ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON);
 		ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON);
+
+		if (req_mask) {
+			/* Unmask all external PCI request */
+			DANUBE_PCI_REG32(PCI_CR_PC_ARB) &= ~(req_mask << 16);
+		}
+
 		spin_unlock_irqrestore(&ebu_lock, data->csflags);
 		break;
 	case 0:
 		spin_lock_irqsave(&ebu_lock, data->csflags);
+
+		if (req_mask) {
+			/* Mask all external PCI request */
+			DANUBE_PCI_REG32(PCI_CR_PC_ARB) |= (req_mask << 16);
+		}
+
 		ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON);
 		ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON);
 		break;
@@ -108,6 +138,12 @@ static void xway_select_chip(struct mtd_
 
 static void xway_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
 {
+
+	if (req_mask) {
+		if (cmd != NAND_CMD_STATUS)
+			ltq_ebu_w32(0, EBU_NAND_WAIT);	/* Clear nand ready */
+	}
+
 	if (cmd == NAND_CMD_NONE)
 		return;
 
@@ -118,6 +154,24 @@ static void xway_cmd_ctrl(struct mtd_inf
 
 	while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
 		;
+
+	if (req_mask) {
+		/*
+		 * program and erase have their own busy handlers
+		 * status and sequential in needs no delay
+		 */
+		switch (cmd) {
+		case NAND_CMD_ERASE1:
+		case NAND_CMD_SEQIN:
+		case NAND_CMD_STATUS:
+		case NAND_CMD_READID:
+			return;
+		}
+
+		/* wait until command is processed */
+		while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_RD) == 0)
+			;
+	}
 }
 
 static int xway_dev_ready(struct mtd_info *mtd)
@@ -157,6 +211,7 @@ static int xway_nand_probe(struct platfo
 	int err;
 	u32 cs;
 	u32 cs_flag = 0;
+	const __be32 *req_mask_ptr;
 
 	/* Allocate memory for the device structure (and zero it) */
 	data = devm_kzalloc(&pdev->dev, sizeof(struct xway_nand_data),
@@ -192,6 +247,15 @@ static int xway_nand_probe(struct platfo
 	if (!err && cs == 1)
 		cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1;
 
+	req_mask_ptr = of_get_property(pdev->dev.of_node,
+					"req-mask", NULL);
+
+	/*
+	 * Load the PCI req lines to mask from the device tree. If the
+	 * property is not present, setting req_mask to 0 disables masking.
+	 */
+	req_mask = (req_mask_ptr ? *req_mask_ptr : 0);
+
 	/* setup the EBU to run in NAND mode on our base addr */
 	ltq_ebu_w32(CPHYSADDR(data->nandaddr)
 		| ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1);
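
Note: the "req-mask" property read in xway_nand_probe() above is a single
32-bit cell whose set bits select the PCI REQ lines to mask while the NAND
chip is selected (the value is shifted into the upper half of PCI_CR_PC_ARB).
A minimal sketch of how a board .dts might enable it follows; the node label
and the value <0x1> are illustrative assumptions, not part of this patch --
only the "req-mask" property name comes from the driver change above.

	/* hypothetical board .dts fragment, not part of the patch */
	&nand {
		/* mask the PCI REQ line(s) selected by this bitmask
		 * while the NAND chip is being accessed */
		req-mask = <0x1>;
	};

Omitting the property leaves req_mask at 0, so the driver behaves exactly as
before the patch.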