added code to selectively push areas of memory from the caches

Markus Fröschle
2013-08-12 21:06:36 +00:00
parent 6ee9b72191
commit 9ace9866c2
2 changed files with 106 additions and 0 deletions

View File

@@ -26,6 +26,7 @@
*/
#include <stdint.h>
#include <stddef.h>
/*
 * CACR Cache Control Register
@@ -54,9 +55,35 @@
#define CF_CACR_IDSP (0x00000080) /* Ins default supervisor-protect */
#define CF_CACR_EUSP (0x00000020) /* Switch stacks in user mode */
#define _DCACHE_SET_MASK ((DCACHE_SIZE/64-1)<<CACHE_WAYS)
#define _ICACHE_SET_MASK ((ICACHE_SIZE/64-1)<<CACHE_WAYS)
#define LAST_DCACHE_ADDR _DCACHE_SET_MASK
#define LAST_ICACHE_ADDR _ICACHE_SET_MASK
#define ICACHE_SIZE 0x8000 /* instruction - 32k */
#define DCACHE_SIZE 0x8000 /* data - 32k */
#define CACHE_LINE_SIZE 0x0010 /* 16 bytes */
#define CACHE_SETS 0x0200 /* 512 sets */
#define CACHE_WAYS 0x0004 /* 4 way */
#define CACHE_DISABLE_MODE (CF_CACR_DCINVA+ \
CF_CACR_BCINVA+ \
CF_CACR_ICINVA)
#define CACHE_INITIAL_MODE (CF_CACR_DEC+ \
CF_CACR_BEC+ \
CF_CACR_IEC+ \
CF_CACR_DESB+ \
CF_CACR_EUSP)
extern void flush_and_invalidate_caches(void);
extern uint32_t cacr_get(void);
extern void cacr_set(uint32_t);
extern void flush_icache_range(void *address, size_t size);
extern void flush_dcache_range(void *address, size_t size);
#endif /* _CACHE_H_ */
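For reference, a small standalone sketch (not part of the commit) of how the new set-mask macros map an address range onto cache set indices for the 32k, 4-way, 16-byte-line geometry defined above; the example address and size are made up for illustration:

#include <stdio.h>
#include <stdint.h>

/* geometry as defined in the header above */
#define DCACHE_SIZE      0x8000                               /* data - 32k */
#define CACHE_WAYS       0x0004                               /* 4 way */
/* 0x8000 / (16 bytes * 4 ways) = 512 sets; (512-1) << 4 = 0x1ff0 */
#define _DCACHE_SET_MASK ((DCACHE_SIZE/64-1) << CACHE_WAYS)

int main(void)
{
    /* hypothetical buffer, for illustration only */
    uint32_t address = 0x00301234;
    uint32_t size    = 0x00000100;

    uint32_t start_set = address & _DCACHE_SET_MASK;            /* 0x1230 */
    uint32_t end_set   = (address + size) & _DCACHE_SET_MASK;   /* 0x1330 */

    printf("mask=0x%04x start_set=0x%04x end_set=0x%04x\n",
           (unsigned) _DCACHE_SET_MASK, (unsigned) start_set, (unsigned) end_set);

    /* set indices repeat every 512 sets * 16 bytes = 0x2000 bytes, so a
     * range crossing such a boundary makes end_set wrap below start_set;
     * that is the case the first loop in flush_dcache_range() handles */
    return 0;
}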

View File

@@ -64,3 +64,82 @@ void flush_and_invalidate_caches(void)
/* clobber */ : "d0", "d1", "a0"
);
}
/*
* flush and invalidate a specific memory region from the instruction cache
*/
void flush_icache_range(void *address, size_t size)
{
    uint32_t set;
    uint32_t start_set;
    uint32_t end_set;
    void *endaddr = address + size;

    start_set = (uint32_t) address & _ICACHE_SET_MASK;
    end_set = (uint32_t) endaddr & _ICACHE_SET_MASK;

    if (start_set > end_set) {
        /*
         * the set index wrapped around: flush from the first set up to the
         * end of the range here, the loop below then covers start_set up
         * to the last set
         */
        for (set = 0; set <= end_set; set += (0x10 - 3)) {
            /* the four cpushl/addq pairs walk the four ways of one set;
             * the asm advances set by 3, the loop adds the remaining 13
             * to step to the next 16-byte cache line */
            asm volatile("cpushl ic,(%0)\n\t"
                         "addq.l #1,%0\n\t"
                         "cpushl ic,(%0)\n\t"
                         "addq.l #1,%0\n\t"
                         "cpushl ic,(%0)\n\t"
                         "addq.l #1,%0\n\t"
                         "cpushl ic,(%0)" : "+a" (set));
        }
        /* next loop will finish the cache, i.e. pass the hole */
        end_set = LAST_ICACHE_ADDR;
    }

    for (set = start_set; set <= end_set; set += (0x10 - 3)) {
        asm volatile("cpushl ic,(%0)\n\t"
                     "addq.l #1,%0\n\t"
                     "cpushl ic,(%0)\n\t"
                     "addq.l #1,%0\n\t"
                     "cpushl ic,(%0)\n\t"
                     "addq.l #1,%0\n\t"
                     "cpushl ic,(%0)" : "+a" (set));
    }
}
/*
* flush and invalidate a specific region from the data cache
*/
void flush_dcache_range(void *address, size_t size)
{
    uint32_t set;
    uint32_t start_set;
    uint32_t end_set;
    void *endaddr = address + size;

    start_set = (uint32_t) address & _DCACHE_SET_MASK;
    end_set = (uint32_t) endaddr & _DCACHE_SET_MASK;

    if (start_set > end_set) {
        /*
         * the set index wrapped around: flush from the first set up to the
         * end of the range here, the loop below then covers start_set up
         * to the last set
         */
        for (set = 0; set <= end_set; set += (0x10 - 3)) {
            asm volatile("cpushl dc,(%0)\n\t"
                         "addq.l #1,%0\n\t"
                         "cpushl dc,(%0)\n\t"
                         "addq.l #1,%0\n\t"
                         "cpushl dc,(%0)\n\t"
                         "addq.l #1,%0\n\t"
                         "cpushl dc,(%0)" : "+a" (set));
        }
        /* next loop will finish the cache, i.e. pass the hole */
        end_set = LAST_DCACHE_ADDR;
    }

    for (set = start_set; set <= end_set; set += (0x10 - 3)) {
        asm volatile("cpushl dc,(%0)\n\t"
                     "addq.l #1,%0\n\t"
                     "cpushl dc,(%0)\n\t"
                     "addq.l #1,%0\n\t"
                     "cpushl dc,(%0)\n\t"
                     "addq.l #1,%0\n\t"
                     "cpushl dc,(%0)" : "+a" (set));
    }
}
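As a usage illustration (again not part of the commit), a caller might push a freshly written buffer out of the data cache before handing it to a DMA engine, or drop stale instructions after copying code into RAM. The buffer, the surrounding function names and the header name "cache.h" (guessed from the _CACHE_H_ guard) are assumptions:

#include <stdint.h>
#include <stddef.h>
#include "cache.h"    /* header name assumed from the _CACHE_H_ guard */

/* hypothetical DMA descriptor buffer, for illustration only */
static uint8_t dma_buffer[512];

void dma_example(void)
{
    /* ... fill dma_buffer ... */

    /* push dirty lines to RAM so the DMA engine reads current data */
    flush_dcache_range(dma_buffer, sizeof(dma_buffer));
}

void run_loaded_code(void *code, size_t length)
{
    /* code was just copied into RAM: push it out of the data cache and
     * invalidate any stale lines in the instruction cache before jumping */
    flush_dcache_range(code, length);
    flush_icache_range(code, length);
}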