path: root/target/linux/leon/patches/015-dma_ops.patch
From c6d8f92cfd7f4f19eb3b16545b3b68c561978fe8 Mon Sep 17 00:00:00 2001
From: Kristoffer Glembo <kristoffer@gaisler.com>
Date: Mon, 7 Jun 2010 14:00:30 +0200
Subject: [PATCH] sparc32: Added LEON dma_ops.

Added leon3_dma_ops, used as dma_ops on LEON instead of sbus_dma_ops, and
turned mmu_inval_dma_area() into an inline function that flushes the data
cache only when LEON3 bus snooping is disabled. Also added pci32_unmap_page()
and a dma_set_mask() implementation.
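
As an illustration of the streaming-DMA path these ops serve, here is a
minimal sketch of hypothetical driver code (not part of this patch;
example_rx, my_dev, buf and len are made-up names):

	#include <linux/dma-mapping.h>

	static int example_rx(struct device *my_dev, void *buf, size_t len)
	{
		dma_addr_t ba;

		/* .map_page (pci32_map_page): the DMA address is simply the
		 * physical address; no cache maintenance at map time. */
		ba = dma_map_single(my_dev, buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(my_dev, ba))
			return -ENOMEM;

		/* ... the device DMAs into buf here ... */

		/* .unmap_page (leon3_unmap_page): flushes the whole D-cache
		 * via mmu_inval_dma_area(0, 0) unless snooping is enabled. */
		dma_unmap_single(my_dev, ba, len, DMA_FROM_DEVICE);
		return 0;
	}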
---
 arch/sparc/kernel/ioport.c |  139 +++++++++++++++++++++++++++++++------------
 1 files changed, 100 insertions(+), 39 deletions(-)

--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -50,10 +50,15 @@
 #include <asm/io-unit.h>
 #include <asm/leon.h>
 
-#ifdef CONFIG_SPARC_LEON
-#define mmu_inval_dma_area(p, l) leon_flush_dcache_all()
-#else
+#ifndef CONFIG_SPARC_LEON
 #define mmu_inval_dma_area(p, l)	/* Anton pulled it out for 2.4.0-xx */
+#else
+static inline void mmu_inval_dma_area(unsigned long va, unsigned long len)
+{
+	if (!sparc_leon3_snooping_enabled()) {
+		leon_flush_dcache_all();
+	}
+}
 #endif
 
 static struct resource *_sparc_find_resource(struct resource *r,
@@ -254,7 +259,7 @@ static void *sbus_alloc_coherent(struct
 				 dma_addr_t *dma_addrp, gfp_t gfp)
 {
 	struct platform_device *op = to_platform_device(dev);
-	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
+	unsigned long len_total = PAGE_ALIGN(len);
 	unsigned long va;
 	struct resource *res;
 	int order;
@@ -287,15 +292,19 @@ static void *sbus_alloc_coherent(struct
 	 * XXX That's where sdev would be used. Currently we load
 	 * all iommu tables with the same translations.
 	 */
-	if (mmu_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0)
-		goto err_noiommu;
-
-	res->name = op->dev.of_node->name;
+#ifdef CONFIG_SPARC_LEON
+	sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
+	*dma_addrp = virt_to_phys(va);
+#else
+	if (mmu_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0) {
+		release_resource(res);
+		goto err_nova;
+	}
+#endif
+	res->name = op->dev.of_node->name;
 
 	return (void *)(unsigned long)res->start;
 
-err_noiommu:
-	release_resource(res);
 err_nova:
 	free_pages(va, order);
 err_nomem:
@@ -321,7 +330,7 @@ static void sbus_free_coherent(struct de
 		return;
 	}
 
-	n = (n + PAGE_SIZE-1) & PAGE_MASK;
+	n = PAGE_ALIGN(n);
 	if ((res->end-res->start)+1 != n) {
 		printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
 		    (long)((res->end-res->start)+1), n);
@@ -333,7 +342,12 @@ static void sbus_free_coherent(struct de
 
 	/* mmu_inval_dma_area(va, n); */ /* it's consistent, isn't it */
 	pgv = virt_to_page(p);
-	mmu_unmap_dma_area(dev, ba, n);
+
+#ifdef CONFIG_SPARC_LEON
+	sparc_unmapiorange((unsigned long)p, n);
+#else
+	mmu_unmap_dma_area(dev, ba, n);
+#endif
 
 	__free_pages(pgv, get_order(n));
 }
@@ -408,9 +422,6 @@ struct dma_map_ops sbus_dma_ops = {
 	.sync_sg_for_device	= sbus_sync_sg_for_device,
 };
 
-struct dma_map_ops *dma_ops = &sbus_dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
 static int __init sparc_register_ioport(void)
 {
 	register_proc_sparc_ioport();
@@ -422,7 +433,7 @@ arch_initcall(sparc_register_ioport);
 
 #endif /* CONFIG_SBUS */
 
-#ifdef CONFIG_PCI
+#if defined(CONFIG_PCI) || defined(CONFIG_SPARC_LEON)
 
 /* Allocate and map kernel buffer using consistent mode DMA for a device.
  * hwdev should be valid struct pci_dev pointer for PCI devices.
@@ -430,7 +441,7 @@ arch_initcall(sparc_register_ioport);
 static void *pci32_alloc_coherent(struct device *dev, size_t len,
 				  dma_addr_t *pba, gfp_t gfp)
 {
-	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
+	unsigned long len_total = PAGE_ALIGN(len);
 	unsigned long va;
 	struct resource *res;
 	int order;
@@ -463,10 +474,6 @@ static void *pci32_alloc_coherent(struct
 		return NULL;
 	}
 	mmu_inval_dma_area(va, len_total);
-#if 0
-/* P3 */ printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %lx\n",
-  (long)va, (long)res->start, (long)virt_to_phys(va), len_total);
-#endif
 	sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
 
 	*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
@@ -498,7 +505,7 @@ static void pci32_free_coherent(struct d
 		return;
 	}
 
-	n = (n + PAGE_SIZE-1) & PAGE_MASK;
+	n = PAGE_ALIGN(n);
 	if ((res->end-res->start)+1 != n) {
 		printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
 		    (long)((res->end-res->start)+1), (long)n);
@@ -515,6 +522,14 @@ static void pci32_free_coherent(struct d
 	free_pages(pgp, get_order(n));
 }
 
+static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
+			     enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	if (dir != PCI_DMA_TODEVICE) {
+		mmu_inval_dma_area((unsigned long)phys_to_virt(ba), PAGE_ALIGN(size));
+	}
+}
+
 /*
  * Same as pci_map_single, but with pages.
  */
@@ -551,8 +566,7 @@ static int pci32_map_sg(struct device *d
 
 	/* IIep is write-through, not flushing. */
 	for_each_sg(sgl, sg, nents, n) {
-		BUG_ON(page_address(sg_page(sg)) == NULL);
-		sg->dma_address = virt_to_phys(sg_virt(sg));
+		sg->dma_address = sg_phys(sg);
 		sg->dma_length = sg->length;
 	}
 	return nents;
@@ -571,10 +585,7 @@ static void pci32_unmap_sg(struct device
 
 	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
-			BUG_ON(page_address(sg_page(sg)) == NULL);
-			mmu_inval_dma_area(
-			    (unsigned long) page_address(sg_page(sg)),
-			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
+			mmu_inval_dma_area((unsigned long)sg_virt(sg), PAGE_ALIGN(sg->length));
 		}
 	}
 }
@@ -594,7 +605,7 @@ static void pci32_sync_single_for_cpu(st
 {
 	if (dir != PCI_DMA_TODEVICE) {
 		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
-		    (size + PAGE_SIZE-1) & PAGE_MASK);
+				   PAGE_ALIGN(size));
 	}
 }
 
@@ -621,10 +632,7 @@ static void pci32_sync_sg_for_cpu(struct
 
 	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
-			BUG_ON(page_address(sg_page(sg)) == NULL);
-			mmu_inval_dma_area(
-			    (unsigned long) page_address(sg_page(sg)),
-			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
+			mmu_inval_dma_area((unsigned long)sg_virt(sg), PAGE_ALIGN(sg->length));
 		}
 	}
 }
@@ -637,18 +645,38 @@ static void pci32_sync_sg_for_device(str
 
 	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
-			BUG_ON(page_address(sg_page(sg)) == NULL);
-			mmu_inval_dma_area(
-			    (unsigned long) page_address(sg_page(sg)),
-			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
+			mmu_inval_dma_area((unsigned long)sg_virt(sg), PAGE_ALIGN(sg->length));
 		}
 	}
 }
 
+/* LEON3 unmapping functions.
+ *
+ * We can only invalidate the whole cache, so unmap_page and unmap_sg do the same thing.
+ */
+static void leon3_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
+			     enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	if (dir != PCI_DMA_TODEVICE) {
+		mmu_inval_dma_area(0, 0);
+	}
+}
+
+static void leon3_unmap_sg(struct device *dev, struct scatterlist *sgl,
+			   int nents, enum dma_data_direction dir,
+			   struct dma_attrs *attrs)
+{
+
+	if (dir != PCI_DMA_TODEVICE) {
+		mmu_inval_dma_area(0, 0);
+	}
+}
+
 struct dma_map_ops pci32_dma_ops = {
 	.alloc_coherent		= pci32_alloc_coherent,
 	.free_coherent		= pci32_free_coherent,
 	.map_page		= pci32_map_page,
+	.unmap_page		= pci32_unmap_page,
 	.map_sg			= pci32_map_sg,
 	.unmap_sg		= pci32_unmap_sg,
 	.sync_single_for_cpu	= pci32_sync_single_for_cpu,
@@ -658,7 +686,30 @@ struct dma_map_ops pci32_dma_ops = {
 };
 EXPORT_SYMBOL(pci32_dma_ops);
 
-#endif /* CONFIG_PCI */
+struct dma_map_ops leon3_dma_ops = {
+	.alloc_coherent		= sbus_alloc_coherent,
+	.free_coherent		= sbus_free_coherent,
+	.map_page		= pci32_map_page,
+	.unmap_page		= leon3_unmap_page,
+	.map_sg			= pci32_map_sg,
+	.unmap_sg		= leon3_unmap_sg,
+	.sync_single_for_cpu	= pci32_sync_single_for_cpu,
+	.sync_single_for_device	= pci32_sync_single_for_device,
+	.sync_sg_for_cpu	= pci32_sync_sg_for_cpu,
+	.sync_sg_for_device	= pci32_sync_sg_for_device,
+};
+
+#endif /* CONFIG_PCI || CONFIG_SPARC_LEON */
+
+#ifdef CONFIG_SPARC_LEON
+struct dma_map_ops *dma_ops = &leon3_dma_ops;
+#else
+struct dma_map_ops *dma_ops = &sbus_dma_ops;
+#endif
+
+#ifdef CONFIG_SBUS
+EXPORT_SYMBOL(dma_ops);
+#endif
 
 /*
  * Return whether the given PCI device DMA address mask can be
@@ -676,6 +727,16 @@ int dma_supported(struct device *dev, u6
 }
 EXPORT_SYMBOL(dma_supported);
 
+int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+#ifdef CONFIG_PCI
+	if (dev->bus == &pci_bus_type)
+		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
+#endif
+	return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(dma_set_mask);
+
 #ifdef CONFIG_PROC_FS
 
 static int sparc_io_proc_show(struct seq_file *m, void *v)
@@ -717,7 +778,7 @@ static const struct file_operations spar
 static struct resource *_sparc_find_resource(struct resource *root,
 					     unsigned long hit)
 {
-        struct resource *tmp;
+	struct resource *tmp;
 
 	for (tmp = root->child; tmp != 0; tmp = tmp->sibling) {
 		if (tmp->start <= hit && tmp->end >= hit)