added function to flush only a portion of the caches
@@ -54,6 +54,7 @@
#define CF_CACR_ICINVA (0x00000100) /* Instr Cache Invalidate All */
#define CF_CACR_IDSP (0x00000080) /* Ins default supervisor-protect */
#define CF_CACR_EUSP (0x00000020) /* Switch stacks in user mode */
#define CF_CACR_DF (0x00000010) /* Disable FPU */

#define _DCACHE_SET_MASK ((DCACHE_SIZE/64-1)<<CACHE_WAYS)
#define _ICACHE_SET_MASK ((ICACHE_SIZE/64-1)<<CACHE_WAYS)
@@ -83,7 +84,7 @@ extern uint32_t cacr_get(void);
extern void cacr_set(uint32_t);
extern void flush_icache_range(void *address, size_t size);
extern void flush_dcache_range(void *address, size_t size);

extern void flush_cache_range(void *address, size_t size);


#endif /* _CACHE_H_ */
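The two set masks extract the set-index bits of an address for the cpushl loops in sys/cache.c: the inner asm steps through the four ways of one set by adding 1 to the address register, and the outer loop advances by 0x10 to reach the next set. A minimal sketch of the arithmetic, assuming a 32 KiB, 4-way cache with 16-byte lines (DCACHE_SIZE = 32768 and CACHE_WAYS = 4 are assumptions, not values from this commit, but they match the 512 index entries and the 0x10 per-set stride used below):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* assumed geometry, not taken from the commit: 32 KiB, 4 ways, 16-byte
 * lines -> 32768 / 64 = 512 sets, matching the "512 index entries"
 * mentioned in the flush_cache_range() comment further down */
#define DCACHE_SIZE 32768
#define CACHE_WAYS  4

#define _DCACHE_SET_MASK ((DCACHE_SIZE/64-1)<<CACHE_WAYS)

int main(void)
{
	uint32_t address = 0x00d01230;	/* arbitrary example address */
	size_t size = 4096;

	/* the same computation flush_cache_range() performs */
	uint32_t start_set = address & _DCACHE_SET_MASK;
	uint32_t end_set = (uint32_t) (address + size) & _DCACHE_SET_MASK;

	printf("_DCACHE_SET_MASK = 0x%04x\n", (unsigned) _DCACHE_SET_MASK);	/* 0x1ff0 */
	printf("start_set = 0x%04x, end_set = 0x%04x\n",
	       (unsigned) start_set, (unsigned) end_set);

	/* here start_set (0x1230) > end_set (0x0230): this is exactly the
	 * wrap-around case the two flush loops below handle separately */
	return 0;
}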
sys/cache.c
@@ -97,7 +97,8 @@ void flush_icache_range(void *address, size_t size)

	if (start_set > end_set) {
		/* from the beginning to the lowest address */
-		for (set = 0; set <= end_set; set += (0x10 - 3)) {
+		for (set = 0; set <= end_set; set += (0x10 - 3))
+		{
			__asm__ __volatile__(
				" cpushl ic,(%[set]) \n\t"
				" addq.l #1,%[set] \n\t"
@@ -182,3 +183,57 @@ void flush_dcache_range(void *address, size_t size)
		);
	}
}

/*
 * Flush and invalidate a specific region from both caches. We do not know if the area is cached
 * at all, nor in which of the four ways it is cached, but we do know the index at which it would
 * be cached if it is, so we need to flush and invalidate only a subset of the 512 index entries,
 * but across all four ways.
 */
void flush_cache_range(void *address, size_t size)
{
	unsigned long set;
	unsigned long start_set;
	unsigned long end_set;
	void *endaddr;

	endaddr = address + size;
	start_set = (uint32_t) address & _DCACHE_SET_MASK;
	end_set = (uint32_t) endaddr & _DCACHE_SET_MASK;

	if (start_set > end_set) {
		/* from the beginning to the lowest address */
		for (set = 0; set <= end_set; set += (0x10 - 3))
		{
			__asm__ __volatile__(
				" cpushl bc,(%[set]) \n\t"
				" addq.l #1,%[set] \n\t"
				" cpushl bc,(%[set]) \n\t"
				" addq.l #1,%[set] \n\t"
				" cpushl bc,(%[set]) \n\t"
				" addq.l #1,%[set] \n\t"
				" cpushl bc,(%[set]) \n\t"
				: /* output parameters */
				: [set] "a" (set)
				: "cc" /* clobbered registers */
			);
		}
		/* the next loop will finish the cache, i.e. continue past the hole */
		end_set = LAST_DCACHE_ADDR;
	}
	for (set = start_set; set <= end_set; set += (0x10 - 3))
	{
		__asm__ __volatile__(
			" cpushl bc,(%[set]) \n\t"
			" addq.l #1,%[set] \n\t"
			" cpushl bc,(%[set]) \n\t"
			" addq.l #1,%[set] \n\t"
			" cpushl bc,(%[set]) \n\t"
			" addq.l #1,%[set] \n\t"
			" cpushl bc,(%[set]) \n\t"
			: /* output parameters */
			: [set] "a" (set)
			: "cc" /* clobbered registers */
		);
	}
}
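Each asm block above pushes one line from each of the four ways (cpushl followed by three addq.l #1), and the loop then adds 0x10 - 3 so the next iteration starts at the following set. A possible caller is sketched below; it assumes an ordinarily cached buffer has to be made visible to a bus master before a transfer, and the dma_desc structure, dma_start() and the cache.h header name are placeholders, only flush_cache_range() itself comes from this commit:

#include <stddef.h>
#include <stdint.h>

#include "cache.h"	/* assumed name of the header patched above */

/* hypothetical descriptor handed to a DMA engine */
struct dma_desc {
	uint32_t src;
	uint32_t dst;
	uint32_t length;
};

static struct dma_desc desc;

void start_transfer(void *src, void *dst, size_t len)
{
	desc.src = (uint32_t) src;
	desc.dst = (uint32_t) dst;
	desc.length = (uint32_t) len;

	/* push the source data and the descriptor itself out of both caches
	 * so the bus master reads the freshly written values from memory */
	flush_cache_range(src, len);
	flush_cache_range(&desc, sizeof(desc));

	/* dma_start(&desc);	placeholder for whatever starts the transfer */
}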
@@ -347,7 +347,10 @@ void mmu_init(void)
	 * video RAM: read write execute normal write true
	 */
	flags.cache_mode = CACHE_WRITETHROUGH;
	flags.protection = SV_USER;
	flags.page_id = SCA_PAGE_ID;
	flags.access = ACCESS_READ | ACCESS_WRITE | ACCESS_EXECUTE;
	flags.locked = true;
	mmu_map_page(0x00d00000, 0x60d00000, MMU_PAGE_SIZE_1M, &flags);

	video_tlb = 0x2000;	/* set page as video page */
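Because the page is mapped write-through, CPU stores to the video RAM reach memory without an explicit flush. Further pages would reuse the same flags structure; a minimal sketch, where the 0x00e00000/0x60e00000 addresses and the idea of a second video megabyte are purely illustrative, only the call shown in the hunk above is part of the commit:

	/* illustrative only: a hypothetical second 1 MB video page with the
	 * same write-through, locked attributes as the page mapped above */
	flags.cache_mode = CACHE_WRITETHROUGH;
	flags.protection = SV_USER;
	flags.page_id = SCA_PAGE_ID;
	flags.access = ACCESS_READ | ACCESS_WRITE | ACCESS_EXECUTE;
	flags.locked = true;
	mmu_map_page(0x00e00000, 0x60e00000, MMU_PAGE_SIZE_1M, &flags);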