path: root/target/linux/ubicom32/files/arch/ubicom32/mach-common/cachectl.c
blob: afb9dc4d4a5423d8b7465e3b81d87b664c5a9ff9 (plain)
/*
 * arch/ubicom32/mach-common/cachectl.c
 *   Architecture cache control support
 *
 * (C) Copyright 2009, Ubicom, Inc.
 *
 * This file is part of the Ubicom32 Linux Kernel Port.
 *
 * The Ubicom32 Linux Kernel Port is free software: you can redistribute
 * it and/or modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, either version 2 of the
 * License, or (at your option) any later version.
 *
 * The Ubicom32 Linux Kernel Port is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with the Ubicom32 Linux Kernel Port.  If not,
 * see <http://www.gnu.org/licenses/>.
 *
 * Ubicom32 implementation derived from (with many thanks):
 *   arch/m68knommu
 *   arch/blackfin
 *   arch/parisc
 */

#include <linux/types.h>
#include <linux/module.h>
#include <asm/cachectl.h>

/*
 * The write queue flush procedure in mem_cache_control() needs to make
 * DCACHE_WRITE_QUEUE_LENGTH writes to DDR (not OCM), so we reserve some
 * memory for that purpose here: an array of at least
 * DCACHE_WRITE_QUEUE_LENGTH + 1 words, rounded up to the nearest cache
 * line (see the worked example below the macro).
 */
#define CACHE_WRITE_QUEUE_FLUSH_AREA_SIZE \
	ALIGN(sizeof(int) * (DCACHE_WRITE_QUEUE_LENGTH + 1), CACHE_LINE_SIZE)
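
/*
 * A hedged illustration only (the real constants come from the headers
 * above and may differ): if DCACHE_WRITE_QUEUE_LENGTH were 6 and
 * CACHE_LINE_SIZE were 32, this would reserve
 * ALIGN(4 * (6 + 1), 32) = ALIGN(28, 32) = 32 bytes.
 */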

static char cache_write_queue_flush_area[CACHE_WRITE_QUEUE_FLUSH_AREA_SIZE]
	__attribute__((aligned(CACHE_LINE_SIZE)));

/*
 * ONE_CCR_ADDR_OP()
 *	Execute a single CCR operation: wait for the controller to become
 *	free, write the target address and the operation byte, set the valid
 *	bit to start the operation, then poll until the done bit is set.
 */
#define ONE_CCR_ADDR_OP(cc, op_addr, op)				\
	do {								\
		asm volatile (						\
		"	btst	"D(CCR_CTRL)"(%0), #"D(CCR_CTRL_VALID)"				\n\t" \
		"	jmpne.f	.-4								\n\t" \
		"	move.4	"D(CCR_ADDR)"(%0), %1						\n\t" \
		"	move.1	"D(CCR_CTRL+3)"(%0), %2						\n\t" \
		"	bset	"D(CCR_CTRL)"(%0), "D(CCR_CTRL)"(%0), #"D(CCR_CTRL_VALID)"	\n\t" \
		"	cycles	2								\n\t" \
		"	btst	"D(CCR_CTRL)"(%0), #"D(CCR_CTRL_DONE)"				\n\t" \
		"	jmpeq.f	.-4								\n\t" \
			:						\
			: "a"(cc), "r"(op_addr), "r"(op & 0xff)		\
			: "cc"						\
		);							\
	} while (0)

/*
 * mem_cache_control()
 *	Apply the cache control operation 'op' to every cache line covering
 *	the range begin_addr..end_addr of the cache controller at 'cc'.
 */
void mem_cache_control(unsigned long cc, unsigned long begin_addr,
		       unsigned long end_addr, unsigned long op)
{
	unsigned long op_addr;
	int dccr = cc == DCCR_BASE;

	if (dccr && op == CCR_CTRL_FLUSH_ADDR) {
		/*
		 * We ensure all previous writes have left the data cache write
		 * queue by sending DCACHE_WRITE_QUEUE_LENGTH writes (to
		 * different words) down the queue.  If this is not done it's
		 * possible that the data we are trying to flush hasn't even
		 * entered the data cache.
		 * The +1 ensures that the final 'flush' is actually a flush.
		 */
		int *flush_area = (int *)cache_write_queue_flush_area;
		asm volatile(
			"	.rept "D(DCACHE_WRITE_QUEUE_LENGTH + 1)"	\n\t"
			"	move.4 (%0)4++, d0				\n\t"
			"	.endr						\n\t"
			: "+a"(flush_area)
			);
	}

	if (dccr)
		UBICOM32_LOCK(DCCR_LOCK_BIT);
	else
		UBICOM32_LOCK(ICCR_LOCK_BIT);

	/*
	 * Calculate the cache lines we need to operate on that include
	 * begin_addr through end_addr.
	 */
	begin_addr = begin_addr & ~(CACHE_LINE_SIZE - 1);
	end_addr = (end_addr + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1);
	op_addr = begin_addr;

	do {
		ONE_CCR_ADDR_OP(cc, op_addr, op);
		op_addr += CACHE_LINE_SIZE;
	} while (likely(op_addr < end_addr));

	if (dccr && op == CCR_CTRL_FLUSH_ADDR) {
		/*
		 * It turns out that when flushing the data cache the last flush
		 * isn't actually complete at this point. This is because there
		 * is another write buffer on the DDR side of the cache that is
		 * arbitrated with the I-Cache.
		 *
		 * The only foolproof method that ensures that the last data
		 * cache flush *actually* completed is to do another flush on a
		 * dirty cache line. This flush will block until the DDR write
		 * buffer is empty.
		 *
		 * Rather than creating another dirty cache line, we use the
		 * flush_area above as we know that it is dirty from previous
		 * writes.
		 */
		ONE_CCR_ADDR_OP(cc, cache_write_queue_flush_area, op);
	}

	if (dccr)
		UBICOM32_UNLOCK(DCCR_LOCK_BIT);
	else
		UBICOM32_UNLOCK(ICCR_LOCK_BIT);
}
EXPORT_SYMBOL(mem_cache_control);
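
/*
 * Illustrative sketch only, not part of the original file: one way a caller
 * might use mem_cache_control() to flush a dirty buffer out to DDR, e.g.
 * before handing it to a DMA engine. The function below and its name are
 * hypothetical; DCCR_BASE and CCR_CTRL_FLUSH_ADDR are the same symbols from
 * asm/cachectl.h that are used above. Kept under #if 0 so it never builds.
 */
#if 0
static void example_flush_range_for_dma(const void *buf, size_t len)
{
	unsigned long start = (unsigned long)buf;

	/*
	 * Flush every data cache line covering the buffer; mem_cache_control()
	 * rounds the range out to cache line boundaries itself.
	 */
	mem_cache_control(DCCR_BASE, start, start + len, CCR_CTRL_FLUSH_ADDR);
}
#endif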