runs until EmuTOS scrolls the welcome screen?
@@ -90,6 +90,6 @@ extern long video_tlb;
extern long video_sbt;

extern void mmu_init(void);
extern int mmu_map_page(uint32_t adr);
extern int mmu_map_8k_page(uint32_t adr);

#endif /* _MMU_H_ */

@@ -193,7 +193,7 @@
/**********************************************************/
.altmacro
.macro  irq     vector,int_mask,clr_int
        //move.w  #0x2700,sr      // disable interrupt
        move.w  #0x2700,sr        // disable interrupt
        subq.l  #8,sp
        movem.l d0/a5,(sp)        // save registers

sys/mmu.c  (160 changed lines)
@@ -62,13 +62,13 @@
#error "unknown machine!"
#endif /* MACHINE_FIREBEE */

// #define DEBUG_MMU
#define DEBUG_MMU
#ifdef DEBUG_MMU
#define dbg(format, arg...) do { xprintf("DEBUG (%s()): " format, __FUNCTION__, ##arg);} while(0)
#else
#define dbg(format, arg...) do {;} while (0)
#endif /* DEBUG_MMU */
#define err(format, arg...) do { xprintf("ERROR (%s()): " format, __FUNCTION__, ##arg);} while(1)
#define err(format, arg...) do { xprintf("ERROR (%s()): " format, __FUNCTION__, ##arg); xprintf("system halted\r\n"); } while(0); while(1)

struct page_descriptor
{
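A note on the reworked err() macro above: the old definition wrapped the single xprintf() in do { ... } while(1), so the error message repeated forever; the new one prints the message once, reports "system halted", and only then spins in the empty while(1). A hypothetical call site (message and argument made up for illustration) would expand roughly like this:

    err("could not map page at 0x%08x\r\n", adr);

    /* expands (roughly) to: */
    do {
        xprintf("ERROR (%s()): " "could not map page at 0x%08x\r\n", __FUNCTION__, adr);
        xprintf("system halted\r\n");
    } while (0);
    while (1);
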
@@ -219,10 +219,11 @@ static struct phys_to_virt translation[] =
{
    { 0x00000000, 0x01000000, 0x60000000 },     /* map first 16 MByte to first 16 Mb of video ram */
    { 0x01000000, 0x10000000, 0x00000000 },     /* map rest of ram virt = phys */
    { 0x1fd00000, 0x01000000, 0x00000000 },     /* accessed by EmuTOS? */
};
static int num_translations = sizeof(translation) / sizeof(struct phys_to_virt);

static inline uint32_t lookup_virtual(uint32_t phys)
static inline uint32_t lookup_phys(uint32_t phys)
{
    int i;

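The hunk ends before the body of the renamed lookup, so here is a minimal sketch of what lookup_phys() presumably does with the translation[] windows above. The field names start, length and offset are placeholders (the real members of struct phys_to_virt are not visible in this diff), and whether the offset is applied in the phys-to-virt or virt-to-phys direction cannot be told from the hunk either:

    static inline uint32_t lookup_phys(uint32_t virt)
    {
        int i;

        for (i = 0; i < num_translations; i++)
        {
            /* address inside this window? then shift it by the window's offset */
            if (virt >= translation[i].start &&
                virt < translation[i].start + translation[i].length)
                return virt + translation[i].offset;
        }

        return virt;    /* no window matched: fall back to identity mapping */
    }
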
@@ -248,12 +249,13 @@ static inline uint32_t lookup_virtual(uint32_t phys)
 *
 *
 */
int mmu_map_page(uint32_t adr)
int mmu_map_8k_page(uint32_t virt)
{
    const int size_mask = 0xffffe000;                      /* 8k pagesize */
    int page_index = (adr & size_mask) / 4096;             /* index into page_descriptor array */
    int page_index = (virt & size_mask) / 4096;            /* index into page_descriptor array */
    struct page_descriptor *page = &pages[page_index];     /* attributes of page to map */
    uint32_t virt = lookup_virtual(adr);                   /* phys2virt translation of page */

    uint32_t adr = lookup_phys(virt);                      /* phys2virt translation of page */

    /*
     * add page to TLB
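To make the index arithmetic above concrete, a worked example with a made-up address:

    /*
     *   virt              = 0x01234567
     *   virt & 0xffffe000 = 0x01234000      (8k-aligned page base)
     *   0x01234000 / 4096 = 0x1234 = 4660   (index into pages[])
     *
     * Since the mask clears the low 13 bits, the index is always even, which suggests
     * pages[] is kept at 4k granularity while mappings are made 8k at a time.
     */
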
@@ -277,6 +279,85 @@ int mmu_map_page(uint32_t adr)
                    MCF_MMU_MMUOR_UAA;      /* update allocation address field */
    NOP();

    MCF_MMU_MMUOR = MCF_MMU_MMUOR_ITLB |    /* instruction */
                    MCF_MMU_MMUOR_ACC |     /* access TLB */
                    MCF_MMU_MMUOR_UAA;      /* update allocation address field */
    dbg("mapped virt=0x%08x to phys=0x%08x\r\n", virt, adr);

    return 1;
}

struct mmu_map_flags
{
    unsigned cache_mode:2;
    unsigned protection:1;
    unsigned page_id:8;
    unsigned access:3;
    unsigned locked:1;
    unsigned unused:17;
};

/*
 * map a page of memory using virt and phys as addresses with the Coldfire MMU.
 *
 * Theory of operation: the Coldfire MMU in the Firebee has 64 TLB entries, 32 for data (DTLB), 32 for
 * instructions (ITLB). Mappings can either be done locked (normal MMU TLB misses will not consider them
 * for replacement) or unlocked (mappings will reallocate using an LRU scheme when the MMU runs out of
 * TLB entries). For proper operation, the MMU needs at least two ITLBs and/or four free/allocatable DTLBs
 * per instruction as a minimum, more for performance. Thus locked pages (that can't be touched by the
 * LRU algorithm) should be used sparingly.
 *
 *
 */
int mmu_map_page(uint32_t virt, uint32_t phys, enum mmu_page_size sz, const struct mmu_map_flags *flags)
{
    int size_mask;

    switch (sz)
    {
        case MMU_PAGE_SIZE_1M:
            size_mask = 0xfff00000;
            break;

        case MMU_PAGE_SIZE_8K:
            size_mask = 0xffffe000;
            break;

        case MMU_PAGE_SIZE_4K:
            size_mask = 0xfffff000;
            break;

        case MMU_PAGE_SIZE_1K:
            size_mask = 0xfffff800;
            break;

        default:
            dbg("illegal map size %d\r\n", sz);
            return 0;
    }

    /*
     * add page to TLB
     */
    MCF_MMU_MMUTR = ((int) virt & size_mask) |              /* virtual address */
                    MCF_MMU_MMUTR_ID(flags->page_id) |      /* address space id (ASID) */
                    MCF_MMU_MMUTR_SG |                      /* shared global */
                    MCF_MMU_MMUTR_V;                        /* valid */
    NOP();

    MCF_MMU_MMUDR = ((int) phys & size_mask) |              /* physical address */
                    MCF_MMU_MMUDR_SZ(sz) |                  /* page size */
                    MCF_MMU_MMUDR_CM(flags->cache_mode) |
                    (flags->access & ACCESS_READ ? MCF_MMU_MMUDR_R : 0) |       /* read access enable */
                    (flags->access & ACCESS_WRITE ? MCF_MMU_MMUDR_W : 0) |      /* write access enable */
                    (flags->access & ACCESS_EXECUTE ? MCF_MMU_MMUDR_X : 0) |    /* execute access enable */
                    (flags->locked ? MCF_MMU_MMUDR_LK : 0);
    NOP();

    MCF_MMU_MMUOR = MCF_MMU_MMUOR_ACC |     /* access TLB, data */
                    MCF_MMU_MMUOR_UAA;      /* update allocation address field */
    NOP();

    MCF_MMU_MMUOR = MCF_MMU_MMUOR_ITLB |    /* instruction */
                    MCF_MMU_MMUOR_ACC |     /* access TLB */
                    MCF_MMU_MMUOR_UAA;      /* update allocation address field */
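As a usage sketch of the new four-argument mmu_map_page() above, modelled on the mmu_init() calls further down in this diff: the address 0x00e00000 (the TOS region mentioned below) and the flag values are purely illustrative, and the 1-on-success / 0-on-failure return convention is assumed to match mmu_map_8k_page():

    struct mmu_map_flags flags;

    flags.cache_mode = CACHE_COPYBACK;              /* cacheable, copyback */
    flags.protection = SV_PROTECT;                  /* supervisor-only access */
    flags.page_id = 0;                              /* ASID 0 */
    flags.access = ACCESS_READ | ACCESS_EXECUTE;    /* read-only, executable */
    flags.locked = true;                            /* keep the entry out of LRU replacement */

    if (!mmu_map_page(0x00e00000, 0x00e00000, MMU_PAGE_SIZE_1M, &flags))
        err("mapping the TOS page failed\r\n");
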
@@ -291,6 +372,22 @@ void mmu_init(void)
    uint32_t MMUBAR = (uint32_t) &_MMUBAR[0];
    extern uint8_t _TOS[];
    uint32_t TOS = (uint32_t) &_TOS[0];
    struct mmu_map_flags flags;
    int i;

    /*
     * preliminary initialization of page descriptor table
     */
    for (i = 0; i < sizeof(pages); i++)
    {
        pages[i].cache_mode = CACHE_WRITETHROUGH;
        pages[i].global = 1;
        pages[i].locked = 0;
        pages[i].read = 1;
        pages[i].write = 1;
        pages[i].execute = 1;
        pages[i].supervisor_protect = 0;
    }

    set_asid(0);    /* do not use address extension (ASID provides virtual 48 bit addresses) */

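One assumption worth flagging in the loop above: sizeof(pages) is a byte count. If pages[] is declared as a plain array of struct page_descriptor (its definition is not part of this hunk), the conventional element-count bound would be:

    for (i = 0; i < sizeof(pages) / sizeof(pages[0]); i++)
    {
        pages[i].cache_mode = CACHE_WRITETHROUGH;
        /* ... */
    }
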
@@ -386,54 +483,46 @@ void mmu_init(void)
     * Make the TOS (in SDRAM) read-only
     * This maps virtual 0x00e0'0000 - 0x00ef'ffff to the same physical address
     */
    //flags.cache_mode = CACHE_COPYBACK;
    //flags.page_id = 0;
    //flags.access = ACCESS_READ | ACCESS_EXECUTE;
    //mmu_map_page(TOS, TOS, MMU_PAGE_SIZE_1M, &flags);
    flags.cache_mode = CACHE_COPYBACK;
    flags.protection = SV_PROTECT;
    flags.page_id = 0;
    flags.access = ACCESS_READ | ACCESS_EXECUTE;
    flags.locked = true;
    mmu_map_page(TOS, TOS, MMU_PAGE_SIZE_1M, &flags);

#if defined(MACHINE_FIREBEE)
    /*
     * Map FireBee I/O area (0xfff0'0000 - 0xffff'ffff physical) to the Falcon-compatible I/O
     * area (0x00f0'0000 - 0x00ff'ffff virtual) for the FireBee
     */
    //flags.cache_mode = CACHE_NOCACHE_PRECISE;
    //flags.access = ACCESS_WRITE | ACCESS_READ;
    //mmu_map_page(0x00f00000, 0xfff00000, MMU_PAGE_SIZE_1M, &flags);
    flags.cache_mode = CACHE_NOCACHE_PRECISE;
    flags.access = ACCESS_WRITE | ACCESS_READ;
    mmu_map_page(0x00f00000, 0xfff00000, MMU_PAGE_SIZE_1M, &flags);
#endif /* MACHINE_FIREBEE */

    /*
     * Map (locked) the second last MB of physical SDRAM (this is where BaS .data and .bss reside) to the same
     * virtual address. This is also used (completely) when BaS is in RAM
     */
    //flags.cache_mode = CACHE_COPYBACK;
    //flags.access = ACCESS_READ | ACCESS_WRITE | ACCESS_EXECUTE;
    //mmu_map_page(SDRAM_START + SDRAM_SIZE - 0X00200000, SDRAM_START + SDRAM_SIZE - 0X00200000, MMU_PAGE_SIZE_1M, &flags);
    flags.cache_mode = CACHE_COPYBACK;
    flags.access = ACCESS_READ | ACCESS_WRITE | ACCESS_EXECUTE;
    mmu_map_page(SDRAM_START + SDRAM_SIZE - 0X00200000, SDRAM_START + SDRAM_SIZE - 0X00200000, MMU_PAGE_SIZE_1M, &flags);

    /*
     * Map (locked) the very last MB of physical SDRAM (this is where the driver buffers reside) to the same
     * virtual address. Used uncached for drivers.
     */
    //flags.cache_mode = CACHE_NOCACHE_PRECISE;
    //flags.access = ACCESS_READ | ACCESS_WRITE;
    //flags.protection = SV_PROTECT;
    //mmu_map_page(SDRAM_START + SDRAM_SIZE - 0x00100000, SDRAM_START + SDRAM_SIZE - 0x00100000, MMU_PAGE_SIZE_1M, &flags);
    flags.cache_mode = CACHE_NOCACHE_PRECISE;
    flags.access = ACCESS_READ | ACCESS_WRITE;
    flags.protection = SV_PROTECT;
    mmu_map_page(SDRAM_START + SDRAM_SIZE - 0x00100000, SDRAM_START + SDRAM_SIZE - 0x00100000, MMU_PAGE_SIZE_1M, &flags);
}

/*
static struct mmu_map_flags flags =
{
    .cache_mode = CACHE_COPYBACK,
    .protection = SV_USER,
    .page_id = 0,
    .access = ACCESS_READ | ACCESS_WRITE | ACCESS_EXECUTE,
    .locked = false
};
*/

void mmutr_miss(uint32_t address, uint32_t pc, uint32_t format_status)
{
    dbg("MMU TLB MISS accessing 0x%08x\r\nFS = 0x%08x\r\nPC = 0x%08x\r\n", address, format_status, pc);
    flush_cache_range((void *) address, 8192);
    flush_and_invalidate_caches();

    switch (address)
    {
@@ -451,7 +540,14 @@ void mmutr_miss(uint32_t address, uint32_t pc, uint32_t format_status)

        default:
            /* add missed page to TLB */
            mmu_map_page(address);
            mmu_map_8k_page(address);

            flush_and_invalidate_caches();

            // experimental; try to ensure that supervisor stack area stays in mmu TLBs
            register uint32_t sp asm("sp");
            mmu_map_8k_page(sp);

            dbg("DTLB: MCF_MMU_MMUOR = %08x\r\n", MCF_MMU_MMUOR);
            dbg("ITLB: MCF_MMU_MMUOR = %08x\r\n\r\n", MCF_MMU_MMUOR);
    }
