first (untested) version of the modified MMU handling and API
This commit is contained in:
509
sys/mmu.c
509
sys/mmu.c
@@ -1,5 +1,6 @@
|
||||
#include "mmu.h"
|
||||
#include "acia.h"
|
||||
#include "exceptions.h"
|
||||
|
||||
/*
|
||||
* mmu.c
|
||||
@@ -68,6 +69,7 @@
|
||||
#else
|
||||
#define dbg(format, arg...) do {;} while (0)
|
||||
#endif /* DEBUG_MMU */
|
||||
/*
 * report a fatal error via xprintf() and halt the system.
 *
 * FIX: the halt loop must live INSIDE the do/while(0). The original expansion
 * ended in "} while(0); while(1)", so in "if (cond) err(...);" the while(1)
 * was outside the if and executed unconditionally, and any following "else"
 * no longer paired with the if.
 */
#define err(format, arg...) do { xprintf("ERROR (%s()): " format, __FUNCTION__, ##arg); xprintf("system halted\r\n"); while (1); } while (0)
|
||||
|
||||
/*
|
||||
* set ASID register
|
||||
@@ -189,6 +191,160 @@ inline uint32_t set_mmubar(uint32_t value)
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
/*
 * translation table for virtual address ranges. Holds the physical_offset (which must be added to a virtual
 * address to get its physical counterpart) for memory ranges.
 */
struct virt_to_phys
{
    uint32_t start_address;     /* first virtual address of the range */
    uint32_t length;            /* length of the range in bytes */
    uint32_t physical_offset;   /* added to a virtual address inside the range to yield the physical address */
};
|
||||
|
||||
/*
 * static virtual-to-physical map used by lookup_phys(). Ranges are expected to
 * be non-overlapping; together they cover virtual 0x00000000 - 0x1fffffff.
 */
static struct virt_to_phys translation[] =
{
    /*  virtual   ,  length    ,  offset */
    { 0x00000000, 0x00e00000, 0x60000000 },     /* map first 14 MByte to first 14 Mb of video ram */
    { 0x00e00000, 0x00100000, 0x00000000 },     /* map TOS to SDRAM */
    { 0x00f00000, 0x00100000, 0xff000000 },     /* map Falcon I/O area to FPGA */
    { 0x01000000, 0x1f000000, 0x00000000 },     /* map rest of ram virt = phys */
};
static int num_translations = sizeof(translation) / sizeof(struct virt_to_phys);    /* number of entries in translation[] */
|
||||
|
||||
static inline uint32_t lookup_phys(uint32_t virt)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < num_translations; i++)
|
||||
{
|
||||
if (virt >= translation[i].start_address && virt < translation[i].start_address + translation[i].length)
|
||||
{
|
||||
return virt + translation[i].physical_offset;
|
||||
}
|
||||
}
|
||||
err("virtual address 0x%lx not found in translation table!\r\n", virt);
|
||||
return -1;
|
||||
}
|
||||
|
||||
/*
 * per-page attributes, packed into a single byte so the pages[] array stays
 * small. Each field corresponds to a bit (or bit field) written into the MMU
 * TLB registers by mmu_map_instruction_page()/mmu_map_data_page().
 */
struct page_descriptor
{
    uint8_t cache_mode : 2;             /* CACHE_* value, goes into MCF_MMU_MMUDR_CM() */
    uint8_t supervisor_protect : 1;     /* sets MCF_MMU_MMUDR_SP */
    uint8_t read : 1;                   /* sets MCF_MMU_MMUDR_R (read access enable) */
    uint8_t write : 1;                  /* sets MCF_MMU_MMUDR_W (write access enable) */
    uint8_t execute : 1;                /* sets MCF_MMU_MMUDR_X (execute access enable) */
    uint8_t global : 1;                 /* sets MCF_MMU_MMUTR_SG (shared global) */
    uint8_t locked : 1;                 /* sets MCF_MMU_MMUDR_LK (TLB entry locked) */
};
|
||||
|
||||
/*
 * page descriptors. Size depending on DEFAULT_PAGE_SIZE, either 1M (resulting in 512
 * bytes size) or 8k pages (64k descriptor array size)
 */
static struct page_descriptor pages[512UL * 1024 * 1024 / DEFAULT_PAGE_SIZE];   /* one descriptor per page of the first 512 MB of address space */
|
||||
|
||||
|
||||
/*
 * map the page containing virtual address virt into the instruction TLB (ITLB).
 *
 * The page attributes are taken from the pages[] descriptor array, the
 * physical address from the translation[] table via lookup_phys().
 *
 * virt - a virtual address inside the page to map
 * asid - address space id (ASID) the mapping is tagged with
 *
 * returns 1 on success, 0 if virt is not covered by the translation table
 * (note: lookup_phys() halts the system via err() in that case, so the
 * 0-return is effectively unreachable at present).
 */
int mmu_map_instruction_page(uint32_t virt, uint8_t asid)
{
    const uint32_t size_mask = ~ (DEFAULT_PAGE_SIZE - 1);       /* pagesize */
    int page_index = (virt & size_mask) / DEFAULT_PAGE_SIZE;    /* index into page_descriptor array */
    struct page_descriptor *page = &pages[page_index];          /* attributes of page to map */
    int ipl;
    uint32_t phys = lookup_phys(virt);                          /* virtual to physical translation of page */

    if (phys == -1)
        return 0;

#ifdef DBG_MMU
    register int sp asm("sp");
    dbg("page_descriptor: 0x%02x, ssp = 0x%08x\r\n", * (uint8_t *) page, sp);
#endif /* DBG_MMU */

    /*
     * add page to TLB
     */

    ipl = set_ipl(7);                                           /* do not disturb */

    /* NOTE(review): mmu_map_data_page() does not write MMUAR - confirm which variant is intended */
    MCF_MMU_MMUAR = (virt & size_mask);

    MCF_MMU_MMUTR = (virt & size_mask) |                        /* virtual address */
                    MCF_MMU_MMUTR_ID(asid) |                    /* address space id (ASID) */
                    (page->global ? MCF_MMU_MMUTR_SG : 0) |     /* shared global */
                    MCF_MMU_MMUTR_V;                            /* valid */

    MCF_MMU_MMUDR = (phys & size_mask) |                        /* physical address */
                    MCF_MMU_MMUDR_SZ(DEFAULT_PAGE_SIZE) |       /* page size */
                    MCF_MMU_MMUDR_CM(page->cache_mode) |        /* cache mode */
                    (page->supervisor_protect ? MCF_MMU_MMUDR_SP : 0) |     /* supervisor protect */
                    (page->read ? MCF_MMU_MMUDR_R : 0) |        /* read access enable */
                    (page->write ? MCF_MMU_MMUDR_W : 0) |       /* write access enable */
                    (page->execute ? MCF_MMU_MMUDR_X : 0) |     /* execute access enable */
                    (page->locked ? MCF_MMU_MMUDR_LK : 0);

    MCF_MMU_MMUOR = MCF_MMU_MMUOR_ITLB |                        /* instruction */
                    MCF_MMU_MMUOR_ACC |                         /* access TLB */
                    MCF_MMU_MMUOR_UAA;                          /* update allocation address field */

    set_ipl(ipl);

    dbg("mapped virt=0x%08x to phys=0x%08x\r\n", virt & size_mask, phys & size_mask);

    dbg("ITLB: MCF_MMU_MMUOR = %08x\r\n", MCF_MMU_MMUOR);

    return 1;
}
|
||||
|
||||
int mmu_map_data_page(uint32_t virt, uint8_t asid)
|
||||
{
|
||||
uint16_t ipl;
|
||||
const uint32_t size_mask = ~ (DEFAULT_PAGE_SIZE - 1); /* pagesize */
|
||||
int page_index = (virt & size_mask) / DEFAULT_PAGE_SIZE; /* index into page_descriptor array */
|
||||
struct page_descriptor *page = &pages[page_index]; /* attributes of page to map */
|
||||
|
||||
uint32_t phys = lookup_phys(virt); /* virtual to physical translation of page */
|
||||
|
||||
if (phys == -1)
|
||||
return 0;
|
||||
|
||||
#ifdef DBG_MMU
|
||||
register int sp asm("sp");
|
||||
dbg("page_descriptor: 0x%02x, ssp = 0x%08x\r\n", * (uint8_t *) page, sp);
|
||||
#endif /* DBG_MMU */
|
||||
|
||||
/*
|
||||
* add page to TLB
|
||||
*/
|
||||
|
||||
ipl = set_ipl(7); /* do not disturb */
|
||||
|
||||
MCF_MMU_MMUTR = (virt & size_mask) | /* virtual address */
|
||||
MCF_MMU_MMUTR_ID(asid) | /* address space id (ASID) */
|
||||
(page->global ? MCF_MMU_MMUTR_SG : 0) | /* shared global */
|
||||
MCF_MMU_MMUTR_V; /* valid */
|
||||
|
||||
MCF_MMU_MMUDR = (phys & size_mask) | /* physical address */
|
||||
MCF_MMU_MMUDR_SZ(DEFAULT_PAGE_SIZE) | /* page size */
|
||||
MCF_MMU_MMUDR_CM(page->cache_mode) | /* cache mode */
|
||||
(page->supervisor_protect ? MCF_MMU_MMUDR_SP : 0) | /* supervisor protect */
|
||||
(page->read ? MCF_MMU_MMUDR_R : 0) | /* read access enable */
|
||||
(page->write ? MCF_MMU_MMUDR_W : 0) | /* write access enable */
|
||||
(page->execute ? MCF_MMU_MMUDR_X : 0) | /* execute access enable */
|
||||
(page->locked ? MCF_MMU_MMUDR_LK : 0);
|
||||
|
||||
MCF_MMU_MMUOR = MCF_MMU_MMUOR_ACC | /* access TLB, data */
|
||||
MCF_MMU_MMUOR_UAA; /* update allocation address field */
|
||||
|
||||
set_ipl(ipl);
|
||||
dbg("mapped virt=0x%08x to phys=0x%08x\r\n", virt & size_mask, phys & size_mask);
|
||||
|
||||
dbg("DTLB: MCF_MMU_MMUOR = %08x\r\n", MCF_MMU_MMUOR);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* map a page of memory using virt and phys as addresses with the Coldfire MMU.
|
||||
*
|
||||
@@ -201,26 +357,27 @@ inline uint32_t set_mmubar(uint32_t value)
|
||||
*
|
||||
*
|
||||
*/
|
||||
int mmu_map_page(uint32_t virt, uint32_t phys, enum mmu_page_size sz, const struct mmu_map_flags *flags)
|
||||
int mmu_map_page(uint32_t virt, uint32_t phys, enum mmu_page_size sz, uint8_t page_id, const struct page_descriptor *flags)
|
||||
{
|
||||
int size_mask;
|
||||
int ipl;
|
||||
|
||||
switch (sz)
|
||||
{
|
||||
case MMU_PAGE_SIZE_1M:
|
||||
size_mask = 0xfff00000;
|
||||
size_mask = ~ (SIZE_1M - 1);
|
||||
break;
|
||||
|
||||
case MMU_PAGE_SIZE_8K:
|
||||
size_mask = 0xffffe000;
|
||||
size_mask = ~ (SIZE_8K - 1);
|
||||
break;
|
||||
|
||||
case MMU_PAGE_SIZE_4K:
|
||||
size_mask = 0xfffff000;
|
||||
size_mask = ~ (SIZE_4K - 1);
|
||||
break;
|
||||
|
||||
case MMU_PAGE_SIZE_1K:
|
||||
size_mask = 0xfffff800;
|
||||
size_mask = ~ (SIZE_1K - 1);
|
||||
break;
|
||||
|
||||
default:
|
||||
@@ -231,20 +388,21 @@ int mmu_map_page(uint32_t virt, uint32_t phys, enum mmu_page_size sz, const stru
|
||||
/*
|
||||
* add page to TLB
|
||||
*/
|
||||
|
||||
ipl = set_ipl(7); /* do not disturb */
|
||||
|
||||
MCF_MMU_MMUTR = ((int) virt & size_mask) | /* virtual address */
|
||||
MCF_MMU_MMUTR_ID(flags->page_id) | /* address space id (ASID) */
|
||||
MCF_MMU_MMUTR_SG | /* shared global */
|
||||
MCF_MMU_MMUTR_ID(page_id) | /* address space id (ASID) */
|
||||
(flags->global ? MCF_MMU_MMUTR_SG : 0) | /* shared global */
|
||||
MCF_MMU_MMUTR_V; /* valid */
|
||||
NOP();
|
||||
|
||||
MCF_MMU_MMUDR = ((int) phys & size_mask) | /* physical address */
|
||||
MCF_MMU_MMUDR_SZ(sz) | /* page size */
|
||||
MCF_MMU_MMUDR_CM(flags->cache_mode) |
|
||||
(flags->access & ACCESS_READ ? MCF_MMU_MMUDR_R : 0) | /* read access enable */
|
||||
(flags->access & ACCESS_WRITE ? MCF_MMU_MMUDR_W : 0) | /* write access enable */
|
||||
(flags->access & ACCESS_EXECUTE ? MCF_MMU_MMUDR_X : 0) | /* execute access enable */
|
||||
(flags->read ? MCF_MMU_MMUDR_R : 0) | /* read access enable */
|
||||
(flags->write ? MCF_MMU_MMUDR_W : 0) | /* write access enable */
|
||||
(flags->execute ? MCF_MMU_MMUDR_X : 0) | /* execute access enable */
|
||||
(flags->locked ? MCF_MMU_MMUDR_LK : 0);
|
||||
NOP();
|
||||
|
||||
MCF_MMU_MMUOR = MCF_MMU_MMUOR_ACC | /* access TLB, data */
|
||||
MCF_MMU_MMUOR_UAA; /* update allocation address field */
|
||||
@@ -253,6 +411,9 @@ int mmu_map_page(uint32_t virt, uint32_t phys, enum mmu_page_size sz, const stru
|
||||
MCF_MMU_MMUOR = MCF_MMU_MMUOR_ITLB | /* instruction */
|
||||
MCF_MMU_MMUOR_ACC | /* access TLB */
|
||||
MCF_MMU_MMUOR_UAA; /* update allocation address field */
|
||||
|
||||
set_ipl(ipl);
|
||||
|
||||
dbg("mapped virt=0x%08x to phys=0x%08x\r\n", virt, phys);
|
||||
|
||||
return 1;
|
||||
@@ -260,12 +421,66 @@ int mmu_map_page(uint32_t virt, uint32_t phys, enum mmu_page_size sz, const stru
|
||||
|
||||
void mmu_init(void)
|
||||
{
|
||||
struct mmu_map_flags flags;
|
||||
extern uint8_t _MMUBAR[];
|
||||
uint32_t MMUBAR = (uint32_t) &_MMUBAR[0];
|
||||
struct page_descriptor flags;
|
||||
int i;
|
||||
|
||||
extern uint8_t _MMUBAR[];
|
||||
uint32_t MMUBAR = (uint32_t) &_MMUBAR[0];
|
||||
extern uint8_t _TOS[];
|
||||
uint32_t TOS = (uint32_t) &_TOS[0];
|
||||
/*
|
||||
* clear all MMU TLB entries first
|
||||
*/
|
||||
MCF_MMU_MMUOR = MCF_MMU_MMUOR_CA; /* clears _all_ TLBs (including locked ones) */
|
||||
NOP();
|
||||
|
||||
/*
|
||||
 * preliminary initialization of page descriptor 0 (root) table
|
||||
*/
|
||||
for (i = 0; i < sizeof(pages) / sizeof(struct page_descriptor); i++)
|
||||
{
|
||||
uint32_t addr = i * DEFAULT_PAGE_SIZE;
|
||||
|
||||
if (addr >= 0x00f00000 && addr < 0x00ffffff)
|
||||
{
|
||||
pages[i].cache_mode = CACHE_NOCACHE_PRECISE;
|
||||
pages[i].execute = 0;
|
||||
pages[i].read = 1;
|
||||
pages[i].write = 1;
|
||||
pages[i].execute = 0;
|
||||
pages[i].global = 1;
|
||||
pages[i].supervisor_protect = 1;
|
||||
}
|
||||
else if (addr >= 0x0 && addr < 0x00e00000) /* ST-RAM, potential video memory */
|
||||
{
|
||||
pages[i].cache_mode = CACHE_WRITETHROUGH;
|
||||
pages[i].execute = 1;
|
||||
pages[i].supervisor_protect = 0;
|
||||
pages[i].read = 1;
|
||||
pages[i].write = 1;
|
||||
pages[i].execute = 1;
|
||||
pages[i].global = 1;
|
||||
}
|
||||
else if (addr >= 0x00e00000 && addr < 0x00f00000) /* EmuTOS */
|
||||
{
|
||||
pages[i].cache_mode = CACHE_COPYBACK;
|
||||
pages[i].execute = 1;
|
||||
pages[i].supervisor_protect = 1;
|
||||
pages[i].read = 1;
|
||||
pages[i].write = 0;
|
||||
pages[i].execute = 1;
|
||||
pages[i].global = 1;
|
||||
}
|
||||
else
|
||||
{
|
||||
pages[i].cache_mode = CACHE_COPYBACK;
|
||||
pages[i].execute = 1;
|
||||
pages[i].read = 1;
|
||||
pages[i].write = 1;
|
||||
pages[i].supervisor_protect = 0;
|
||||
pages[i].global = 1;
|
||||
}
|
||||
pages[i].locked = 0; /* not locked */
|
||||
pages[0].supervisor_protect = 0; /* protect system vectors */
|
||||
}
|
||||
|
||||
set_asid(0); /* do not use address extension (ASID provides virtual 48 bit addresses */
|
||||
|
||||
@@ -321,37 +536,38 @@ void mmu_init(void)
|
||||
ACR_ADMSK(0x7) |
|
||||
ACR_BA(0xe0000000));
|
||||
|
||||
/* disable ACR3 */
|
||||
/* disable ACR1 - 3, essentially disabling all of the above */
|
||||
set_acr1(0x0);
|
||||
set_acr2(0x0);
|
||||
set_acr3(0x0);
|
||||
|
||||
set_mmubar(MMUBAR + 1); /* set and enable MMUBAR */
|
||||
|
||||
/* clear all MMU TLB entries */
|
||||
MCF_MMU_MMUOR = MCF_MMU_MMUOR_CA;
|
||||
|
||||
/* create locked TLB entries */
|
||||
|
||||
flags.cache_mode = CACHE_COPYBACK;
|
||||
flags.protection = SV_USER;
|
||||
flags.page_id = 0;
|
||||
flags.access = ACCESS_READ | ACCESS_WRITE | ACCESS_EXECUTE;
|
||||
flags.supervisor_protect = 0;
|
||||
flags.read = 1;
|
||||
flags.write = 1;
|
||||
flags.execute = 1;
|
||||
flags.locked = true;
|
||||
|
||||
/* 0x0000_0000 - 0x000F_FFFF (first MB of physical memory) locked virt = phys */
|
||||
mmu_map_page(0x0, 0x0, MMU_PAGE_SIZE_1M, &flags);
|
||||
/* 0x00000000 - 0x00100000 (first MB of physical memory) locked virt = phys */
|
||||
mmu_map_page(0x0, 0x0, MMU_PAGE_SIZE_1M, 0, &flags);
|
||||
|
||||
#if defined(MACHINE_FIREBEE)
|
||||
/*
|
||||
* 0x00d0'0000 - 0x00df'ffff (last megabyte of ST RAM = Falcon video memory) locked ID = 6
|
||||
* 0x00d00000 - 0x00e00000 (last megabyte of ST RAM = Falcon video memory) locked ID = 6
|
||||
* mapped to physical address 0x60d0'0000 (FPGA video memory)
|
||||
* video RAM: read write execute normal write true
|
||||
*/
|
||||
flags.cache_mode = CACHE_WRITETHROUGH;
|
||||
flags.protection = SV_USER;
|
||||
flags.page_id = SCA_PAGE_ID;
|
||||
flags.access = ACCESS_READ | ACCESS_WRITE | ACCESS_EXECUTE;
|
||||
flags.supervisor_protect = 0;
|
||||
flags.read = 1;
|
||||
flags.write = 1;
|
||||
flags.execute = 1;
|
||||
flags.locked = true;
|
||||
mmu_map_page(0x00d00000, 0x60d00000, MMU_PAGE_SIZE_1M, &flags);
|
||||
mmu_map_page(0x00d00000, 0x60d00000, MMU_PAGE_SIZE_1M, SCA_PAGE_ID, &flags);
|
||||
|
||||
video_tlb = 0x2000; /* set page as video page */
|
||||
video_sbt = 0x0; /* clear time */
|
||||
@@ -362,9 +578,12 @@ void mmu_init(void)
|
||||
* This maps virtual 0x00e0'0000 - 0x00ef'ffff to the same virtual address
|
||||
*/
|
||||
flags.cache_mode = CACHE_COPYBACK;
|
||||
flags.page_id = 0;
|
||||
flags.access = ACCESS_READ | ACCESS_EXECUTE;
|
||||
mmu_map_page(TOS, TOS, MMU_PAGE_SIZE_1M, &flags);
|
||||
flags.supervisor_protect = 0;
|
||||
flags.read = 1;
|
||||
flags.write = 0;
|
||||
flags.execute = 1;
|
||||
flags.locked = 1;
|
||||
mmu_map_page(0xe00000, 0xe00000, MMU_PAGE_SIZE_1M, 0, &flags);
|
||||
|
||||
#if defined(MACHINE_FIREBEE)
|
||||
/*
|
||||
@@ -372,8 +591,12 @@ void mmu_init(void)
|
||||
* area (0x00f0'0000 - 0x00ff'ffff virtual) for the FireBee
|
||||
*/
|
||||
flags.cache_mode = CACHE_NOCACHE_PRECISE;
|
||||
flags.access = ACCESS_WRITE | ACCESS_READ;
|
||||
mmu_map_page(0x00f00000, 0xfff00000, MMU_PAGE_SIZE_1M, &flags);
|
||||
flags.supervisor_protect = 1;
|
||||
flags.read = 1;
|
||||
flags.write = 1;
|
||||
flags.execute = 0;
|
||||
flags.locked = 1;
|
||||
mmu_map_page(0x00f00000, 0xfff00000, MMU_PAGE_SIZE_1M, 0, &flags);
|
||||
#endif /* MACHINE_FIREBEE */
|
||||
|
||||
/*
|
||||
@@ -381,54 +604,206 @@ void mmu_init(void)
|
||||
* virtual address. This is also used (completely) when BaS is in RAM
|
||||
*/
|
||||
flags.cache_mode = CACHE_COPYBACK;
|
||||
flags.access = ACCESS_READ | ACCESS_WRITE | ACCESS_EXECUTE;
|
||||
mmu_map_page(SDRAM_START + SDRAM_SIZE - 0X00200000, SDRAM_START + SDRAM_SIZE - 0X00200000, MMU_PAGE_SIZE_1M, &flags);
|
||||
flags.supervisor_protect = 1;
|
||||
flags.read = 1;
|
||||
flags.write = 1;
|
||||
flags.execute = 1;
|
||||
flags.locked = 1;
|
||||
mmu_map_page(SDRAM_START + SDRAM_SIZE - 0x00200000, SDRAM_START + SDRAM_SIZE - 0x00200000, MMU_PAGE_SIZE_1M, 0, &flags);
|
||||
|
||||
/*
|
||||
* Map (locked) the very last MB of physical SDRAM (this is where the driver buffers reside) to the same
|
||||
* virtual address. Used uncached for drivers.
|
||||
*/
|
||||
flags.cache_mode = CACHE_NOCACHE_PRECISE;
|
||||
flags.access = ACCESS_READ | ACCESS_WRITE;
|
||||
flags.protection = SV_PROTECT;
|
||||
mmu_map_page(SDRAM_START + SDRAM_SIZE - 0x00100000, SDRAM_START + SDRAM_SIZE - 0x00100000, MMU_PAGE_SIZE_1M, &flags);
|
||||
flags.supervisor_protect = 1;
|
||||
flags.read = 1;
|
||||
flags.write = 1;
|
||||
flags.execute = 0;
|
||||
flags.locked = 1;
|
||||
mmu_map_page(SDRAM_START + SDRAM_SIZE - 0x00100000, SDRAM_START + SDRAM_SIZE - 0x00100000, MMU_PAGE_SIZE_1M, 0, &flags);
|
||||
}
|
||||
|
||||
static struct mmu_map_flags flags =
|
||||
{
|
||||
.cache_mode = CACHE_COPYBACK,
|
||||
.protection = SV_USER,
|
||||
.page_id = 0,
|
||||
.access = ACCESS_READ | ACCESS_WRITE | ACCESS_EXECUTE,
|
||||
.locked = false
|
||||
};
|
||||
|
||||
void mmutr_miss(uint32_t address, uint32_t pc, uint32_t format_status)
|
||||
uint32_t mmutr_miss(uint32_t mmu_sr, uint32_t fault_address, uint32_t pc,
|
||||
uint32_t format_status)
|
||||
{
|
||||
uint32_t fault = format_status & 0xc030000;
|
||||
|
||||
dbg("MMU TLB MISS accessing 0x%08x\r\nFS = 0x%08x\r\nPC = 0x%08x\r\n", address, format_status, pc);
|
||||
// flush_and_invalidate_caches();
|
||||
|
||||
switch (address)
|
||||
switch (fault)
|
||||
{
|
||||
case keyctl:
|
||||
case keybd:
|
||||
/* do something to emulate the IKBD access */
|
||||
dbg("IKBD access\r\n");
|
||||
break;
|
||||
/* if we have a real TLB miss, map the offending page */
|
||||
|
||||
case midictl:
|
||||
case midi:
|
||||
/* do something to emulate MIDI access */
|
||||
dbg("MIDI ACIA access\r\n");
|
||||
break;
|
||||
case 0x04010000: /* TLB miss on opword of instruction fetch */
|
||||
case 0x04020000: /* TLB miss on extension word of instruction fetch */
|
||||
dbg("MMU ITLB MISS accessing 0x%08x\r\n"
|
||||
"FS = 0x%08x\r\n"
|
||||
"MMUSR = 0x%08x\r\n"
|
||||
"PC = 0x%08x\r\n",
|
||||
fault_address, format_status, mmu_sr, pc);
|
||||
dbg("fault = 0x%08x\r\n", fault);
|
||||
mmu_map_instruction_page(pc, 0);
|
||||
|
||||
default:
|
||||
/* add missed page to TLB */
|
||||
mmu_map_page(address, address, MMU_PAGE_SIZE_1M, &flags);
|
||||
dbg("DTLB: MCF_MMU_MMUOR = %08x\r\n", MCF_MMU_MMUOR);
|
||||
dbg("ITLB: MCF_MMU_MMUOR = %08x\r\n\r\n", MCF_MMU_MMUOR);
|
||||
}
|
||||
/* due to prefetch, it makes sense to map the next adjacent page also for ITLBs */
|
||||
mmu_map_instruction_page(pc + DEFAULT_PAGE_SIZE, 0);
|
||||
break;
|
||||
|
||||
case 0x08020000: /* TLB miss on data write */
|
||||
case 0x0c020000: /* TLB miss on data read or read-modify-write */
|
||||
dbg("MMU DTLB MISS accessing 0x%08x\r\n"
|
||||
"FS = 0x%08x\r\n"
|
||||
"MMUSR = 0x%08x\r\n"
|
||||
"PC = 0x%08x\r\n",
|
||||
fault_address, format_status, mmu_sr, pc);
|
||||
dbg("fault = 0x%08x\r\n", fault);
|
||||
mmu_map_data_page(fault_address, 0);
|
||||
break;
|
||||
|
||||
/* else issue an bus error */
|
||||
default:
|
||||
dbg("bus error\r\n");
|
||||
return 1; /* signal bus error to caller */
|
||||
}
|
||||
#ifdef DBG_MMU
|
||||
xprintf("\r\n");
|
||||
#endif /* DBG_MMU */
|
||||
|
||||
return 0; /* signal TLB miss handled to caller */
|
||||
}
|
||||
|
||||
|
||||
/* TODO: implement */
|
||||
|
||||
/*
|
||||
* API-exposed, externally callable MMU functions
|
||||
*/
|
||||
|
||||
|
||||
/*
|
||||
* lock data page(s) with address space id asid from address virt to virt + size.
|
||||
*
|
||||
* FIXME: There is no check for "too many locked pages", currently.
|
||||
*
|
||||
* return: 0 if failed (page not in translation table), 1 otherwise
|
||||
*/
|
||||
int32_t mmu_map_data_page_locked(uint32_t virt, uint32_t size, int asid)
|
||||
{
|
||||
const uint32_t size_mask = ~ (DEFAULT_PAGE_SIZE - 1); /* pagesize */
|
||||
int page_index = (virt & size_mask) / DEFAULT_PAGE_SIZE; /* index into page_descriptor array */
|
||||
struct page_descriptor *page = &pages[page_index]; /* attributes of page to map */
|
||||
int i = 0;
|
||||
|
||||
while (page_index * DEFAULT_PAGE_SIZE < virt + size)
|
||||
{
|
||||
if (page->locked)
|
||||
{
|
||||
dbg("page at %p is already locked. Nothing to do\r\n", virt);
|
||||
}
|
||||
else
|
||||
{
|
||||
page->locked = 1;
|
||||
mmu_map_data_page(virt, 0);
|
||||
i++;
|
||||
}
|
||||
virt += DEFAULT_PAGE_SIZE;
|
||||
}
|
||||
|
||||
dbg("%d pages locked\r\n", i);
|
||||
|
||||
return 1; /* success */
|
||||
}
|
||||
|
||||
/*
|
||||
* the opposite: unlock data page(s) with address space id asid from address virt to virt + size_t
|
||||
*
|
||||
* return: 0 if failed (page not found), 1 otherwise
|
||||
*/
|
||||
int32_t mmu_unlock_data_page(uint32_t address, uint32_t size, int asid)
|
||||
{
|
||||
int curr_asid;
|
||||
const uint32_t size_mask = ~ (DEFAULT_PAGE_SIZE - 1);
|
||||
int page_index = (address & size_mask) / DEFAULT_PAGE_SIZE; /* index into page descriptor array */
|
||||
struct page_descriptor *page = &pages[page_index];
|
||||
|
||||
curr_asid = set_asid(asid); /* set asid to the one to search for */
|
||||
|
||||
/* TODO: check for pages[] array bounds */
|
||||
|
||||
while (page_index * DEFAULT_PAGE_SIZE < address + size)
|
||||
{
|
||||
MCF_MMU_MMUAR = address + page->supervisor_protect;
|
||||
MCF_MMU_MMUOR = MCF_MMU_MMUOR_STLB | /* search TLB */
|
||||
MCF_MMU_MMUOR_ADR |
|
||||
MCF_MMU_MMUOR_RW;
|
||||
if (MCF_MMU_MMUSR & MCF_MMU_MMUSR_HIT) /* found */
|
||||
{
|
||||
#ifdef DBG_MMU
|
||||
uint32_t tlb_aa = MCF_MMU_MMUOR >> 16; /* MMU internal allocation address for TLB */
|
||||
#endif /* DBG_MMU */
|
||||
|
||||
MCF_MMU_MMUDR &= ~MCF_MMU_MMUDR_LK; /* clear lock bit */
|
||||
MCF_MMU_MMUOR = MCF_MMU_MMUOR_UAA |
|
||||
MCF_MMU_MMUOR_ACC; /* update TLB */
|
||||
|
||||
dbg("DTLB %d unlocked\r\n", tlb_aa);
|
||||
}
|
||||
else
|
||||
{
|
||||
dbg("%p doesn't seem to be locked??\r\n");
|
||||
}
|
||||
page_index++;
|
||||
}
|
||||
set_asid(curr_asid);
|
||||
|
||||
return 1; /* success */
|
||||
}
|
||||
|
||||
int32_t mmu_report_locked_pages(uint32_t *num_itlb, uint32_t *num_dtlb)
|
||||
{
|
||||
int i;
|
||||
int li = 0;
|
||||
int ld = 0;
|
||||
|
||||
/* Coldfire V4e allocation addresses run from 0 to 63 */
|
||||
|
||||
for (i = 0; i < 31; i++) /* 0-31 = ITLB AA */
|
||||
{
|
||||
MCF_MMU_MMUAR = i;
|
||||
MCF_MMU_MMUOR = MCF_MMU_MMUOR_STLB |
|
||||
MCF_MMU_MMUOR_ITLB |
|
||||
MCF_MMU_MMUOR_RW; /* search ITLB */
|
||||
|
||||
if (MCF_MMU_MMUTR & MCF_MMU_MMUTR_V)
|
||||
{
|
||||
/* entry is valid */
|
||||
if (MCF_MMU_MMUDR & MCF_MMU_MMUDR_LK)
|
||||
{
|
||||
li++;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
for (i = 32; i < 64; i++) /* 32-63 = DTLB AA */
|
||||
{
|
||||
MCF_MMU_MMUAR = i;
|
||||
MCF_MMU_MMUOR = MCF_MMU_MMUOR_STLB |
|
||||
MCF_MMU_MMUOR_RW; /* search ITLB */
|
||||
|
||||
if (MCF_MMU_MMUTR & MCF_MMU_MMUTR_V)
|
||||
{
|
||||
/* entry is valid */
|
||||
if (MCF_MMU_MMUDR & MCF_MMU_MMUDR_LK)
|
||||
{
|
||||
ld++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
*num_itlb = li;
|
||||
*num_dtlb = ld;
|
||||
|
||||
return 1; /* success */
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user