fixed indexing into the page descriptor array, which used the wrong page size

Markus Fröschle
2014-09-27 06:19:43 +00:00
parent 51d36b45cb
commit bb7ae911dc
6 changed files with 537 additions and 612 deletions

View File

@@ -67,10 +67,13 @@ enum mmu_page_size
/*
* cache modes
*/
#define CACHE_WRITETHROUGH 0
#define CACHE_COPYBACK 1
#define CACHE_NOCACHE_PRECISE 2
#define CACHE_NOCACHE_IMPRECISE 3
enum mmu_cache_modes
{
CACHE_WRITETHROUGH = 0,
CACHE_COPYBACK = 1,
CACHE_NOCACHE_PRECISE = 2,
CACHE_NOCACHE_IMPRECISE = 3
};
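
The cache modes become an enum so they can be carried inside the page descriptor instead of as loose #defines. For orientation, a minimal sketch of what struct page_descriptor could look like; the field names are taken from the mmu.c hunks further down, but the bit-field widths are an assumption:

    /* sketch only: field names appear in the mmu.c changes below, widths are guesses */
    struct page_descriptor
    {
        unsigned cache_mode         : 2;    /* one of enum mmu_cache_modes */
        unsigned supervisor_protect : 1;    /* 1 = supervisor access only */
        unsigned read               : 1;    /* read access allowed */
        unsigned write              : 1;    /* write access allowed */
        unsigned execute            : 1;    /* instruction fetch allowed */
        unsigned global             : 1;    /* becomes MCF_MMU_MMUTR_SG */
        unsigned locked             : 1;    /* becomes MCF_MMU_MMUDR_LK */
    };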
/*

View File

@@ -306,6 +306,11 @@ void BaS(void)
mmu_init();
xprintf("finished\r\n");
xprintf("enable MMU: ");
MCF_MMU_MMUCR = MCF_MMU_MMUCR_EN; /* MMU on */
NOP(); /* force pipeline sync */
xprintf("finished\r\n");
xprintf("initialize exception vector table: ");
vec_init();
xprintf("finished\r\n");
@@ -313,10 +318,6 @@ void BaS(void)
xprintf("flush caches: ");
flush_and_invalidate_caches();
xprintf("finished\r\n");
xprintf("enable MMU: ");
MCF_MMU_MMUCR = MCF_MMU_MMUCR_EN; /* MMU on */
NOP(); /* force pipeline sync */
xprintf("finished\r\n");
#ifdef MACHINE_FIREBEE
xprintf("IDE reset: ");
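
The two hunks above move the MMU enable from after the cache flush up to directly after mmu_init(), presumably so that the rest of the start-up already runs with address translation active. The resulting order in BaS() then reads roughly like this (condensed sketch, xprintf() progress messages left out):

    mmu_init();                         /* MMUBAR, page descriptors, locked TLB entries */
    MCF_MMU_MMUCR = MCF_MMU_MMUCR_EN;   /* MMU on */
    NOP();                              /* force pipeline sync */
    vec_init();                         /* install the exception vector table */
    flush_and_invalidate_caches();      /* flush caches */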

View File

@@ -310,12 +310,12 @@ init_vec_loop:
*/
vector_table_start:
std_exc_vec:
//move.w #0x2700,sr // disable interrupt
move.w #0x2700,sr // disable interrupt
subq.l #8,sp
movem.l d0/a5,(sp) // save registers
move.w 8(sp),d0 // fetch vector
and.l #0x3fc,d0 // mask out vector number
#define DBG_EXC
#ifdef DBG_EXC
// printout vector number of exception
@@ -388,12 +388,6 @@ access_mmu:
move.l MCF_MMU_MMUSR,d0 // did the last fault hit in TLB?
btst #1,d0 // yes, it did. So we already mapped that page
bne bus_error // and this must be a real bus error
btst #5,d0 // supervisor protection fault?
bne bus_error
btst #4,d0 // read access fault?
bne bus_error
btst #3,d0 // write access fault?
bne bus_error
move.l MCF_MMU_MMUAR,d0
cmp.l #__FASTRAM_END,d0 // above max User RAM area?
@@ -690,11 +684,13 @@ handler_gpt0:
link a6,#-4 * 4 // make room for
movem.l d0-d1/a0-a1,(sp) // gcc scratch registers and save them,
// other registers will be handled by gcc itself
move.w 4(a6),d0 // fetch vector number from stack
move.l d0,-(sp) // push it
jsr _gpt0_interrupt_handler // call C handler
addq.l #4,sp // adjust stack
movem.l (sp),d0-d1/a0-a1 // restore registers
unlk a6
rte
#endif /* MACHINE_FIREBEE */
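
For reference, access_mmu above tells a plain TLB miss apart from a real bus error by inspecting MMUSR: if the last fault hit in the TLB, the page was already mapped and the access must be a genuine bus error; the extra checks of the supervisor/read/write fault bits are dropped by this commit. Rendered as C, roughly (the MCF_MMU_* macros come from the ColdFire headers already used in this tree; bus_error() and the mapping call stand in for the assembly labels, and the direction of the __FASTRAM_END comparison is an assumption since the hunk cuts off right after it):

    extern uint8_t __FASTRAM_END[];     /* linker symbol, as referenced in the .S file */
    extern void bus_error(void);        /* stand-in for the assembly label */

    void access_error(void)
    {
        uint32_t status = MCF_MMU_MMUSR;            /* result of the last MMU lookup */
        uint32_t addr   = MCF_MMU_MMUAR;            /* faulting address */

        if (status & (1 << 1))                      /* fault hit in the TLB? */
        {
            bus_error();                            /* page already mapped: real bus error */
        }
        else if (addr >= (uint32_t) __FASTRAM_END)  /* above the RAM we manage? */
        {
            bus_error();
        }
        else
        {
            mmu_map_8k_page(addr);                  /* plain TLB miss: map the page
                                                       (presumably reached via mmutr_miss() in mmu.c) */
        }
    }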

View File

@@ -430,110 +430,8 @@ bool irq6_interrupt_handler(uint32_t sf1, uint32_t sf2)
*/
void gpt0_interrupt_handler(void)
{
uint32_t video_address;
uint32_t video_end_address;
int page_number;
bool already_set;
extern uint32_t _STRAM_END;
dbg("screen base = 0x%x\r\n", vbasehi);
if (vbasehi < 2) /* screen base lower than 0x20000? */
{
goto rearm_trigger; /* do nothing */
}
else if (vbasehi >= 0xd0) /* higher than 0xd00000 (normal Falcon address)? */
{
video_sbt = MCF_SLT0_SCNT; /* FIXME: no idea why we need to save the time here */
}
video_address = (vbasehi << 16) | (vbasemid << 8) | vbaselow;
page_number = video_address >> 20; /* calculate a page number */
already_set = (video_tlb & (1 << page_number)); /* already in bitset? */
video_tlb |= page_number; /* set it */
if (! already_set) /* newly set page, need to copy contents */
{
flush_and_invalidate_caches();
dma_memcpy((uint8_t *) video_address + 0x60000000, (uint8_t *) video_address, 0x100000);
/*
* create an MMU TLB entry for the new video page
*/
/*
* first search for an existing entry with our address. If none is found,
* the MMU will propose a new one
*/
MCF_MMU_MMUAR = video_address;
MCF_MMU_MMUOR = 0x106;
NOP();
/*
* take this MMU TLB entry and set it to our video address and page mapping
*/
MCF_MMU_MMUAR = (MCF_MMU_MMUOR >> 16) & 0xffff; /* set TLB id */
MCF_MMU_MMUTR = video_address |
MCF_MMU_MMUTR_ID(sca_page_ID) | /* set video page ID */
MCF_MMU_MMUTR_SG | /* shared global */
MCF_MMU_MMUTR_V; /* valid */
MCF_MMU_MMUDR = (video_address + 0x60000000) | /* physical address */
MCF_MMU_MMUDR_SZ(0) | /* 1 MB page size */
MCF_MMU_MMUDR_CM(0) | /* writethrough */
MCF_MMU_MMUDR_R | /* readable */
MCF_MMU_MMUDR_W | /* writeable */
MCF_MMU_MMUDR_X; /* executable */
MCF_MMU_MMUOR = 0x10b; /* update TLB entry */
}
/*
* Calculate the effective screen memory size to see if we need to map another page
* in case the new screen spans more than one single page
*/
video_end_address = video_address + (vde - vdb) * vwrap;
if (video_end_address < _STRAM_END)
{
page_number = video_end_address >> 20; /* calculate a page number */
already_set = (video_tlb & (1 << page_number)); /* already in bitset? */
video_tlb |= page_number; /* set it */
if (! already_set) /* newly set page, need to copy contents */
{
flush_and_invalidate_caches();
dma_memcpy((uint8_t *) video_end_address + 0x60000000, (uint8_t *) video_end_address, 0x100000);
/*
* create an MMU TLB entry for the new video page
*/
/*
* first search for an existing entry with our address. If none is found,
* the MMU will propose a new one
*/
MCF_MMU_MMUAR = video_end_address;
MCF_MMU_MMUOR = 0x106;
NOP();
/*
* take this MMU TLB entry and set it to our video address and page mapping
*/
MCF_MMU_MMUAR = (MCF_MMU_MMUOR >> 16) & 0xffff; /* set TLB id */
MCF_MMU_MMUTR = video_end_address |
MCF_MMU_MMUTR_ID(sca_page_ID) | /* set video page ID */
MCF_MMU_MMUTR_SG | /* shared global */
MCF_MMU_MMUTR_V; /* valid */
MCF_MMU_MMUDR = (video_end_address + 0x60000000) | /* physical address */
MCF_MMU_MMUDR_SZ(0) | /* 1 MB page size */
MCF_MMU_MMUDR_CM(0) | /* writethrough */
MCF_MMU_MMUDR_R | /* readable */
MCF_MMU_MMUDR_W | /* writeable */
MCF_MMU_MMUDR_X; /* executable */
MCF_MMU_MMUOR = 0x10b; /* update TLB entry */
}
}
rearm_trigger:
MCF_GPT0_GMS &= ~1; /* rearm trigger */
NOP();
MCF_GPT0_GMS |= 1;
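
The handler code removed above tracked which 1 MB pages of screen memory had already been mirrored and mapped, using video_tlb as a bitmask indexed by video_address >> 20 (note that it tested the mask with 1 << page_number but then ORed in the bare page_number). The bookkeeping the test implies, as a small self-contained sketch:

    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t video_tlb;              /* one bit per 1 MB page, as in the removed handler */

    /*
     * true if the 1 MB page containing addr has not been handled yet;
     * assumes ST-RAM addresses, i.e. page numbers below 32
     */
    static bool video_page_is_new(uint32_t addr)
    {
        int page_number = addr >> 20;       /* 1 MB granularity */
        bool already_set = (video_tlb & (1u << page_number)) != 0;

        video_tlb |= 1u << page_number;     /* remember that this page is handled */
        return !already_set;
    }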

View File

@@ -62,7 +62,7 @@
#error "unknown machine!"
#endif /* MACHINE_FIREBEE */
#define DEBUG_MMU
//#define DEBUG_MMU
#ifdef DEBUG_MMU
#define dbg(format, arg...) do { xprintf("DEBUG (%s()): " format, __FUNCTION__, ##arg);} while(0)
#else
@@ -206,7 +206,7 @@ static struct virt_to_phys translation[] =
{
/* virtual , length , offset */
{ 0x00000000, 0x00e00000, 0x60000000 }, /* map first 14 MByte to first 14 Mb of video ram */
{ 0x00e00000, 0x00100000, 0x00000000 }, /* map TOS to SDRAM */
//{ 0x00e00000, 0x00100000, 0x00000000 }, /* map TOS to SDRAM */
{ 0x00f00000, 0x00100000, 0xff000000 }, /* map Falcon I/O area to FPGA */
{ 0x01000000, 0x10000000, 0x00000000 }, /* map rest of ram virt = phys */
{ 0x1fd00000, 0x01000000, 0x00000000 }, /* accessed by EmuTOS? */
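
lookup_phys(), used in mmu_map_8k_page() below, presumably walks this table and applies the per-range offset; its body is not part of this diff, so the following is only an illustrative sketch, with the struct field names guessed from the comment above the table:

    /* illustrative only -- the real lookup_phys() is not shown in this commit */
    static uint32_t lookup_phys(uint32_t virt)
    {
        unsigned int i;

        for (i = 0; i < sizeof(translation) / sizeof(translation[0]); i++)
        {
            if (virt >= translation[i].virt &&
                virt - translation[i].virt < translation[i].length)
            {
                return virt + translation[i].offset;    /* apply this range's offset */
            }
        }
        return virt;                                    /* no entry: assume identity mapping */
    }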
@@ -255,10 +255,10 @@ static struct page_descriptor pages[65536]; /* 512 Mb RAM */
int mmu_map_8k_page(uint32_t virt)
{
const int size_mask = 0xffffe000; /* 8k pagesize */
int page_index = (virt & size_mask) / 4096; /* index into page_descriptor array */
int page_index = (virt & size_mask) / DEFAULT_PAGE_SIZE; /* index into page_descriptor array */
struct page_descriptor *page = &pages[page_index]; /* attributes of page to map */
uint32_t addr = lookup_phys(virt); /* virtual to physical translation of page */
uint32_t phys = lookup_phys(virt); /* virtual to physical translation of page */
/*
* add page to TLB
@@ -269,7 +269,7 @@ int mmu_map_8k_page(uint32_t virt)
MCF_MMU_MMUTR_V; /* valid */
NOP();
MCF_MMU_MMUDR = (addr & size_mask) | /* physical address */
MCF_MMU_MMUDR = (phys & size_mask) | /* physical address */
MCF_MMU_MMUDR_SZ(MMU_PAGE_SIZE_8K) | /* page size */
MCF_MMU_MMUDR_CM(page->cache_mode) |
(page->read ? MCF_MMU_MMUDR_R : 0) | /* read access enable */
@@ -282,24 +282,19 @@ int mmu_map_8k_page(uint32_t virt)
MCF_MMU_MMUOR_UAA; /* update allocation address field */
NOP();
dbg("mapped virt=0x%08x to phys=0x%08x\r\n", virt & size_mask, phys & size_mask);
dbg("DTLB: MCF_MMU_MMUOR = %08x\r\n", MCF_MMU_MMUOR);
MCF_MMU_MMUOR = MCF_MMU_MMUOR_ITLB | /* instruction */
MCF_MMU_MMUOR_ACC | /* access TLB */
MCF_MMU_MMUOR_UAA; /* update allocation address field */
dbg("mapped virt=0x%08x to phys=0x%08x\r\n", virt, addr);
dbg("ITLB: MCF_MMU_MMUOR = %08x\r\n\r\n", MCF_MMU_MMUOR);
return 1;
}
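
This is the change the commit title refers to: pages[] holds 65536 descriptors for 512 MB of RAM, i.e. one per 8 KB page, so the array index has to be derived with the 8 KB page size (DEFAULT_PAGE_SIZE, presumably 8192), not with 4096. A quick check of the arithmetic, assuming pages[i] describes the 8 KB page starting at i * DEFAULT_PAGE_SIZE:

    /* 512 MB split into 8 KB pages:                                            */
    /*     0x20000000 / 0x2000 = 65536  -> pages[65536] covers it exactly       */
    /*                                                                          */
    /* old: virt / 4096, e.g. for the topmost page                              */
    /*     0x1fffe000 / 0x1000 = 131070 -> far beyond pages[65535], and for     */
    /*     lower addresses it reads the descriptor of 2 * virt instead of virt  */
    /*                                                                          */
    /* new: (virt & 0xffffe000) / DEFAULT_PAGE_SIZE                             */
    /*     0x1fffe000 / 0x2000 = 65535  -> the last valid index                 */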
struct mmu_map_flags
{
unsigned cache_mode:2;
unsigned protection:1;
unsigned page_id:8;
unsigned access:3;
unsigned locked:1;
unsigned unused:17;
};
/*
* map a page of memory using virt and phys as addresses with the Coldfire MMU.
*
@@ -312,7 +307,7 @@ struct mmu_map_flags
*
*
*/
int mmu_map_page(uint32_t virt, uint32_t phys, enum mmu_page_size sz, const struct mmu_map_flags *flags)
int mmu_map_page(uint32_t virt, uint32_t phys, enum mmu_page_size sz, uint8_t page_id, const struct page_descriptor *flags)
{
int size_mask;
@@ -341,18 +336,18 @@ int mmu_map_page(uint32_t virt, uint32_t phys, enum mmu_page_size sz, const stru
/*
* add page to TLB
*/
MCF_MMU_MMUTR = ((int) virt & size_mask) | /* virtual address */
MCF_MMU_MMUTR_ID(flags->page_id) | /* address space id (ASID) */
MCF_MMU_MMUTR_SG | /* shared global */
MCF_MMU_MMUTR = ((uint32_t) virt & size_mask) | /* virtual address */
MCF_MMU_MMUTR_ID(page_id) | /* address space id (ASID) */
(flags->global ? MCF_MMU_MMUTR_SG : 0) | /* shared global */
MCF_MMU_MMUTR_V; /* valid */
NOP();
MCF_MMU_MMUDR = ((int) phys & size_mask) | /* physical address */
MCF_MMU_MMUDR = ((uint32_t) phys & size_mask) | /* physical address */
MCF_MMU_MMUDR_SZ(sz) | /* page size */
MCF_MMU_MMUDR_CM(flags->cache_mode) |
(flags->access & ACCESS_READ ? MCF_MMU_MMUDR_R : 0) | /* read access enable */
(flags->access & ACCESS_WRITE ? MCF_MMU_MMUDR_W : 0) | /* write access enable */
(flags->access & ACCESS_EXECUTE ? MCF_MMU_MMUDR_X : 0) | /* execute access enable */
(flags->read ? MCF_MMU_MMUDR_R : 0) | /* read access enable */
(flags->write ? MCF_MMU_MMUDR_W : 0) | /* write access enable */
(flags->execute ? MCF_MMU_MMUDR_X : 0) | /* execute access enable */
(flags->locked ? MCF_MMU_MMUDR_LK : 0);
NOP();
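
With the new prototype the page attributes come from the same struct page_descriptor that backs the pages[] array, and the ASID is passed separately. A minimal call following the declared argument order (the concrete values are only an example, loosely modelled on the Falcon I/O mapping 0x00f00000 -> 0xff000000 from the translation table above):

    struct page_descriptor flags = { 0 };

    flags.cache_mode = CACHE_NOCACHE_PRECISE;   /* I/O space: never cache */
    flags.read = 1;
    flags.write = 1;
    flags.execute = 0;
    flags.supervisor_protect = 1;
    flags.global = 1;
    flags.locked = 0;

    /* virt, phys, page size, ASID, attributes */
    mmu_map_page(0x00f00000, 0xff000000, MMU_PAGE_SIZE_1M, 0, &flags);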
@@ -372,9 +367,15 @@ void mmu_init(void)
{
extern uint8_t _MMUBAR[];
uint32_t MMUBAR = (uint32_t) &_MMUBAR[0];
struct mmu_map_flags flags;
struct page_descriptor flags;
int i;
/*
* clear all MMU TLB entries first
*/
MCF_MMU_MMUOR = MCF_MMU_MMUOR_CA;
NOP();
/*
* preliminary initialization of page descriptor 0 (root) table
*/
@@ -385,17 +386,25 @@ void mmu_init(void)
if (addr >= 0x00f00000 && addr < 0x00ffffff)
{
pages[i].cache_mode = CACHE_NOCACHE_PRECISE;
pages[i].execute = 0;
pages[i].supervisor_protect = 1;
}
else if (addr >= 0x0 && addr < 0x00f00000) /* ST-RAM, potential video memory */
{
pages[i].cache_mode = CACHE_WRITETHROUGH;
pages[i].execute = 1;
pages[i].supervisor_protect = 0;
}
else
{
pages[i].cache_mode = CACHE_COPYBACK;
pages[i].execute = 1;
pages[i].supervisor_protect = 0;
}
pages[i].global = 1; /* all pages global by default */
pages[i].locked = 0; /* not locked */
pages[i].read = 1; /* readable, writable, executable */
pages[i].write = 1;
pages[i].execute = 1;
pages[i].supervisor_protect = 0; /* not supervisor protected */
}
set_asid(0); /* do not use address extension (ASID provides virtual 48 bit addresses) yet */
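
The loop above assigns every 8 KB descriptor a default cache mode based on its address range, plus common defaults for the access bits (global, unlocked, readable/writable). Condensed, the cache mode classification looks like this (a sketch, assuming pages[i] covers the page starting at i * DEFAULT_PAGE_SIZE):

    static enum mmu_cache_modes default_cache_mode(uint32_t addr)
    {
        if (addr >= 0x00f00000 && addr < 0x00ffffff)
        {
            return CACHE_NOCACHE_PRECISE;   /* Falcon I/O area mapped to the FPGA */
        }
        else if (addr < 0x00f00000)
        {
            return CACHE_WRITETHROUGH;      /* ST-RAM, potential video memory */
        }
        return CACHE_COPYBACK;              /* ordinary RAM */
    }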
@@ -467,18 +476,33 @@ void mmu_init(void)
* virtual address. This is also used (completely) when BaS is in RAM
*/
flags.cache_mode = CACHE_COPYBACK;
flags.access = ACCESS_READ | ACCESS_WRITE | ACCESS_EXECUTE;
flags.protection = SV_PROTECT; /* supervisor access only */
mmu_map_page(SDRAM_START + SDRAM_SIZE - 0X00200000, SDRAM_START + SDRAM_SIZE - 0X00200000, MMU_PAGE_SIZE_1M, &flags);
flags.read = 1;
flags.write = 1;
flags.execute = 1;
flags.supervisor_protect = 1; /* supervisor access only */
flags.locked = 1;
mmu_map_page(SDRAM_START + SDRAM_SIZE - 0x00200000, SDRAM_START + SDRAM_SIZE - 0x00200000, 0, MMU_PAGE_SIZE_1M, &flags);
/*
* map EmuTOS (locked for now)
*/
flags.read = 1;
flags.write = 1;
flags.execute = 1;
flags.locked = 1;
mmu_map_page(0xe00000, 0xe00000, MMU_PAGE_SIZE_1M, 0, &flags);
/*
* Map (locked) the very last MB of physical SDRAM (this is where the driver buffers reside) to the same
* virtual address. Used uncached for drivers.
*/
flags.cache_mode = CACHE_NOCACHE_PRECISE;
flags.access = ACCESS_READ | ACCESS_WRITE;
flags.protection = SV_PROTECT;
mmu_map_page(SDRAM_START + SDRAM_SIZE - 0x00100000, SDRAM_START + SDRAM_SIZE - 0x00100000, MMU_PAGE_SIZE_1M, &flags);
flags.read = 1;
flags.write = 1;
flags.execute = 0;
flags.supervisor_protect = 1;
flags.locked = 1;
mmu_map_page(SDRAM_START + SDRAM_SIZE - 0x00100000, SDRAM_START + SDRAM_SIZE - 0x00100000, 0, MMU_PAGE_SIZE_1M, &flags);
}
@@ -491,10 +515,14 @@ void mmutr_miss(uint32_t address, uint32_t pc, uint32_t format_status)
// experimental; try to ensure that supervisor stack area stays in mmu TLBs
// guess what: doesn't work...
register uint32_t sp asm("sp");
dbg("stack is at %p\r\n", sp);
if (sp < 0x02000000)
{
dbg("mapped stack at 0x%08x\r\n");
mmu_map_8k_page(sp);
flush_and_invalidate_caches();
#endif /* _NOT_USED_ */
//flush_and_invalidate_caches();
}
#endif /* _NOT_USED */
switch (address)
{
@@ -513,9 +541,6 @@ void mmutr_miss(uint32_t address, uint32_t pc, uint32_t format_status)
default:
/* add missed page to TLB */
mmu_map_8k_page(address);
dbg("DTLB: MCF_MMU_MMUOR = %08x\r\n", MCF_MMU_MMUOR);
dbg("ITLB: MCF_MMU_MMUOR = %08x\r\n\r\n", MCF_MMU_MMUOR);
}
}

View File

@@ -49,6 +49,8 @@ _rom_entry:
/* set stack pointer to end of SRAM */
lea __SUP_SP,a7
move.l #0,(sp)
subq.l #4,sp
move.l #0,(sp)
/*
* Initialize the processor caches.