target/linux/lantiq/patches-4.19/0018-MTD-nand-lots-of-xrx200-fixes.patch

From 997a8965db8417266bea3fbdcfa3e5655a1b52fa Mon Sep 17 00:00:00 2001
From: John Crispin <blogic@openwrt.org>
Date: Tue, 9 Sep 2014 23:12:15 +0200
Subject: [PATCH 18/36] MTD: nand: lots of xrx200 fixes

Signed-off-by: John Crispin <blogic@openwrt.org>
---
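Note: "req-mask" is read from the NAND controller's device-tree node. A
board that wants the masking would carry a property along the lines of
req-mask = <0x1>; in that node; the value here is purely illustrative and
selects which PCI request lines to mask. A hedged sketch of an alternative
way to read the property follows the diff.
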
 drivers/mtd/nand/raw/xway_nand.c |   63 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 63 insertions(+)

--- a/drivers/mtd/nand/raw/xway_nand.c
+++ b/drivers/mtd/nand/raw/xway_nand.c
@@ -63,6 +63,24 @@
 #define NAND_CON_CSMUX		(1 << 1)
 #define NAND_CON_NANDM		1
 
+#define DANUBE_PCI_REG32(addr)	(*(volatile u32 *)(addr))
+#define PCI_CR_PR_OFFSET	(KSEG1 + 0x1E105400)
+#define PCI_CR_PC_ARB		(PCI_CR_PR_OFFSET + 0x0080)
+
+/*
+ * req_mask prevents interference between NAND and PCI (probably
+ * only relevant for the BT Home Hub 2B). Setting it masks the
+ * corresponding PCI req pins during NAND access and moves the EBU
+ * locking from the read/write functions into the chip-select
+ * function, so that the whole operation runs with interrupts
+ * disabled.
+ * It also enables extra waiting in xway_cmd_ctrl(), which seems to
+ * be necessary when the ebu_cs1 pin has open-drain disabled; that
+ * in turn seems to be needed for the NOR chip to be recognised
+ * reliably on a board (the Home Hub 2B again) with both NOR and NAND.
+ */
+static __be32 req_mask;
+
 struct xway_nand_data {
 	struct nand_chip	chip;
 	unsigned long		csflags;
@@ -94,10 +112,22 @@ static void xway_select_chip(struct mtd_
 	case -1:
 		ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON);
 		ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON);
+
+		if (req_mask) {
+			/* Unmask all external PCI requests */
+			DANUBE_PCI_REG32(PCI_CR_PC_ARB) &= ~(req_mask << 16);
+		}
+
 		spin_unlock_irqrestore(&ebu_lock, data->csflags);
 		break;
 	case 0:
 		spin_lock_irqsave(&ebu_lock, data->csflags);
+
+		if (req_mask) {
+			/* Mask all external PCI requests */
+			DANUBE_PCI_REG32(PCI_CR_PC_ARB) |= (req_mask << 16);
+		}
+
 		ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON);
 		ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON);
 		break;
@@ -108,6 +138,12 @@ static void xway_select_chip(struct mtd_
 
 static void xway_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
 {
+
+	if (req_mask) {
+		if (cmd != NAND_CMD_STATUS)
+			ltq_ebu_w32(0, EBU_NAND_WAIT); /* Clear NAND ready */
+	}
+
 	if (cmd == NAND_CMD_NONE)
 		return;
 
@@ -118,6 +154,24 @@ static void xway_cmd_ctrl(struct mtd_inf
 
 	while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
 		;
+
+	if (req_mask) {
+		/*
+		 * Program and erase have their own busy handlers;
+		 * status, sequential-in and read-id need no delay.
+		 */
+		switch (cmd) {
+		case NAND_CMD_ERASE1:
+		case NAND_CMD_SEQIN:
+		case NAND_CMD_STATUS:
+		case NAND_CMD_READID:
+			return;
+		}
+
+		/* wait until the command has been processed */
+		while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_RD) == 0)
+			;
+	}
 }
 
 static int xway_dev_ready(struct mtd_info *mtd)
@@ -157,6 +211,7 @@ static int xway_nand_probe(struct platfo
 	int err;
 	u32 cs;
 	u32 cs_flag = 0;
+	const __be32 *req_mask_ptr;
 
 	/* Allocate memory for the device structure (and zero it) */
 	data = devm_kzalloc(&pdev->dev, sizeof(struct xway_nand_data),
@@ -192,6 +247,15 @@ static int xway_nand_probe(struct platfo
 	if (!err && cs == 1)
 		cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1;
 
+	/*
+	 * Load the PCI req lines to mask from the device tree. If the
+	 * property is absent, req_mask stays 0 and masking is disabled.
+	 */
+	req_mask_ptr = of_get_property(pdev->dev.of_node,
+				       "req-mask", NULL);
+
+	req_mask = req_mask_ptr ? *req_mask_ptr : 0;
+
 	/* setup the EBU to run in NAND mode on our base addr */
 	ltq_ebu_w32(CPHYSADDR(data->nandaddr)
 		    | ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1);
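
For reference, a minimal sketch (not part of the patch) of reading the
"req-mask" property with the generic OF helper of_property_read_u32()
instead of of_get_property() plus a raw __be32 dereference. The helper
performs the be32-to-CPU conversion itself, avoiding the sparse endianness
warning; on the big-endian Lantiq SoCs the resulting value is the same.
The helper name xway_read_req_mask() is hypothetical.

#include <linux/of.h>
#include <linux/platform_device.h>

static u32 req_mask;	/* stays 0 (masking disabled) unless the DT sets it */

static void xway_read_req_mask(struct platform_device *pdev)
{
	u32 val;

	/*
	 * of_property_read_u32() returns 0 on success; on any error
	 * (property missing or malformed) req_mask keeps its default.
	 */
	if (!of_property_read_u32(pdev->dev.of_node, "req-mask", &val))
		req_mask = val;
}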