From 8e2fe53faba601066ca08c342bd62c099c84a66d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Markus=20Fr=C3=B6schle?= Date: Mon, 29 Sep 2014 12:32:19 +0000 Subject: [PATCH] first (untested) version of the modified MMU handling and API --- if/driver_vec.c | 6 +- include/driver_vec.h | 6 +- include/mmu.h | 26 +-- sys/exceptions.S | 274 ++++------------------- sys/mmu.c | 509 +++++++++++++++++++++++++++++++++++++------ 5 files changed, 507 insertions(+), 314 deletions(-) diff --git a/if/driver_vec.c b/if/driver_vec.c index 96cb27f..8c38fd2 100644 --- a/if/driver_vec.c +++ b/if/driver_vec.c @@ -137,9 +137,9 @@ static struct framebuffer_driver_interface framebuffer_interface = */ static struct mmu_driver_interface mmu_interface = { - .map_page_locked = &mmu_map_page_locked, - .unlock_page = &mmu_unlock_page, - .report_locked_pages = &mmu_report_locked_pages + .map_page_locked = &mmu_map_data_page_locked, + .unlock_page = &mmu_unlock_data_page, + .report_locked_pages = &mmu_report_locked_pages }; static struct generic_interface interfaces[] = diff --git a/include/driver_vec.h b/include/driver_vec.h index b45e5fd..e9a9131 100644 --- a/include/driver_vec.h +++ b/include/driver_vec.h @@ -262,9 +262,9 @@ struct pci_bios_interface { struct mmu_driver_interface { - int32_t (*map_page_locked)(uint32_t address, uint32_t length); - int32_t (*unlock_page)(uint32_t address, uint32_t length); - int32_t (*report_locked_pages)(void); + int32_t (*map_page_locked)(uint32_t address, uint32_t length, int asid); + int32_t (*unlock_page)(uint32_t address, uint32_t length, int asid); + int32_t (*report_locked_pages)(uint32_t *num_itlb, uint32_t *num_dtlb); }; union interface diff --git a/include/mmu.h b/include/mmu.h index 6a99304..87c8eb5 100644 --- a/include/mmu.h +++ b/include/mmu.h @@ -64,6 +64,13 @@ enum mmu_page_size MMU_PAGE_SIZE_1K = 3 }; +#define SIZE_1M 0x100000 /* 1 Megabyte */ +#define SIZE_4K 0x1000 /* 4 KB */ +#define SIZE_8K 0x2000 /* 8 KB */ +#define SIZE_1K 0x400 /* 1 KB */ + +#define DEFAULT_PAGE_SIZE 0x00100000 /* 1M pagesize */ + /* * cache modes */ @@ -83,15 +90,6 @@ enum mmu_page_size #define ACCESS_WRITE (1 << 1) #define ACCESS_EXECUTE (1 << 2) -struct mmu_map_flags -{ - unsigned cache_mode:2; - unsigned protection:1; - unsigned page_id:8; - unsigned access:3; - unsigned locked:1; - unsigned unused:17; -}; /* * global variables from linker script @@ -99,14 +97,16 @@ struct mmu_map_flags extern long video_tlb; extern long video_sbt; +struct page_descriptor; + extern void mmu_init(void); -extern int mmu_map_page(uint32_t virt, uint32_t phys, enum mmu_page_size sz, const struct mmu_map_flags *flags); +extern int mmu_map_page(uint32_t virt, uint32_t phys, enum mmu_page_size sz, uint8_t page_id, const struct page_descriptor *flags); /* * API functions for the BaS driver interface */ -extern int32_t mmu_map_page_locked(uint32_t address, uint32_t length); -extern int32_t mmu_unlock_page(uint32_t address, uint32_t length); -extern int32_t mmu_report_locked_pages(void); +extern int32_t mmu_map_data_page_locked(uint32_t address, uint32_t length, int asid); +extern int32_t mmu_unlock_data_page(uint32_t address, uint32_t length, int asid); +extern int32_t mmu_report_locked_pages(uint32_t *num_itlb, uint32_t *num_dtlb); #endif /* _MMU_H_ */ diff --git a/sys/exceptions.S b/sys/exceptions.S index c11e03b..bef9d06 100644 --- a/sys/exceptions.S +++ b/sys/exceptions.S @@ -45,21 +45,33 @@ .extern _irq5_handler .extern _irq7_handler -/* Register read/write macros */ - -#define MCF_EPORT_EPPAR __MBAR+0xF00 -#define 
MCF_EPORT_EPDDR __MBAR+0xF04 -#define MCF_EPORT_EPIER __MBAR+0xF05 -#define MCF_EPORT_EPDR __MBAR+0xF08 -#define MCF_EPORT_EPPDR __MBAR+0xF09 -#define MCF_EPORT_EPFR __MBAR+0xF0C - -#define MCF_GPIO_PODR_FEC1L __MBAR+0xA07 - -#define MCF_PSC0_PSCTB_8BIT __MBAR+0x860C - .global _vec_init +/* Register read/write equates */ + + /* MMU */ + .equ MCF_MMU_MMUCR, __MMUBAR + .equ MCF_MMU_MMUOR, __MMUBAR+0x04 + .equ MCF_MMU_MMUSR, __MMUBAR+0x08 + .equ MCF_MMU_MMUAR, __MMUBAR+0x10 + .equ MCF_MMU_MMUTR, __MMUBAR+0x14 + .equ MCF_MMU_MMUDR, __MMUBAR+0x18 + + /* EPORT flag register */ + .equ MCF_EPORT_EPFR, __MBAR+0xf0c + + /* FEC1 port output data direction register */ + .equ MCF_GPIO_PODR_FEC1L, __MBAR+0xa07 + + /* PSC0 transmit buffer register */ + .equ MCF_PSC0_PSCTB_8BIT, __MBAR+0x860c + + /* GPT mode select register */ + .equ MCF_GPT0_GMS, __MBAR+0x800 + + /* Slice timer 0 count register */ + .equ MCF_SLT0_SCNT, __MBAR+0x908 + // interrupt sources .equ INT_SOURCE_EPORT_EPF1,1 // edge port flag 1 .equ INT_SOURCE_EPORT_EPF2,2 // edge port flag 2 @@ -115,82 +127,9 @@ // Atari register equates (provided by FPGA) .equ vbasehi, 0xffff8201 -//mmu --------------------------------------------------- - -/* Register read/write macros */ -#define MCF_MMU_MMUCR __MMUBAR -#define MCF_MMU_MMUOR __MMUBAR + 0x04 -#define MCF_MMU_MMUSR __MMUBAR + 0x08 -#define MCF_MMU_MMUAR __MMUBAR + 0x10 -#define MCF_MMU_MMUTR __MMUBAR + 0x14 -#define MCF_MMU_MMUDR __MMUBAR + 0x18 - - -/* Bit definitions and macros for MCF_MMU_MMUCR */ -#define MCF_MMU_MMUCR_EN (0x1) -#define MCF_MMU_MMUCR_ASM (0x2) - -/* Bit definitions and macros for MCF_MMU_MMUOR */ -#define MCF_MMU_MMUOR_UAA (0x1) /* update allocation address, i.e. write to TLB */ -#define MCF_MMU_MMUOR_ACC (0x2) /* activate access to TLB */ -#define MCF_MMU_MMUOR_RW (0x4) /* read/write TLB */ -#define MCF_MMU_MMUOR_ADR (0x8) /* search by address/TLB address */ -#define MCF_MMU_MMUOR_ITLB (0x10) /* act on instruction/data TLBs */ -#define MCF_MMU_MMUOR_CAS (0x20) /* clear all unlocked TLBs with matching ASID */ -#define MCF_MMU_MMUOR_CNL (0x40) /* clear all unlocked TLBs regardless of ASID */ -#define MCF_MMU_MMUOR_CA (0x80) /* clear all TLBs */ -#define MCF_MMU_MMUOR_STLB (0x100) /* search TLBs */ -#define MCF_MMU_MMUOR_AA(x) (((x) & 0xFFFF) << 0x10) /* TLB allocation address */ - -/* Bit definitions and macros for MCF_MMU_MMUSR */ -#define MCF_MMU_MMUSR_HIT (0x2) /* last lookup had a hit in TLB */ -#define MCF_MMU_MMUSR_WF (0x8) /* indicate write fault */ -#define MCF_MMU_MMUSR_RF (0x10) /* indicate read fault */ -#define MCF_MMU_MMUSR_SPF (0x20) /* indicate supervisor protect fault */ - -/* Bit definitions and macros for MCF_MMU_MMUAR */ -#define MCF_MMU_MMUAR_FA(x) (((x) & 0xFFFFFFFF) << 0) - -/* Bit definitions and macros for MCF_MMU_MMUTR */ -#define MCF_MMU_MMUTR_V (0x1) /* valid bit for TLB */ -#define MCF_MMU_MMUTR_SG (0x2) /* set page as shared global */ -#define MCF_MMU_MMUTR_ID(x) (((x) & 0xFF) << 0x2) /* ASID (address space id) of page */ -#define MCF_MMU_MMUTR_VA(x) (((x) & 0x3FFFFF) << 0xA) /* virtual address of page */ - -/* Bit definitions and macros for MCF_MMU_MMUDR */ -#define MCF_MMU_MMUDR_LK (0x2) /* lock page */ -#define MCF_MMU_MMUDR_X (0x4) /* allow code execution in memory page */ -#define MCF_MMU_MMUDR_W (0x8) /* allow write to memory page */ -#define MCF_MMU_MMUDR_R (0x10) /* allow read from memory page */ -#define MCF_MMU_MMUDR_SP (0x20) /* supervisor protect memory page */ -#define MCF_MMU_MMUDR_CM(x) (((x) & 0x3) << 0x6) /* cache mode */ 
-#define MCF_MMU_MMUDR_SZ(x) (((x) & 0x3) << 0x8) /* page size */ -#define MCF_MMU_MMUDR_PA(x) (((x) & 0x3FFFFF) << 0xA) /* page physical address */ - -#define std_mmutr (MCF_MMU_MMUTR_SG | MCF_MMU_MMUTR_V) -#define writethrough_mmudr (MCF_MMU_MMUDR_SZ(00) | MCF_MMU_MMUDR_CM(00) | MCF_MMU_MMUDR_R | MCF_MMU_MMUDR_W | MCF_MMU_MMUDR_X) -#define copyback_mmudr (MCF_MMU_MMUDR_SZ(00) | MCF_MMU_MMUDR_CM(01) | MCF_MMU_MMUDR_R | MCF_MMU_MMUDR_W | MCF_MMU_MMUDR_X) - /* - * - * General Purpose Timers (GPT) - * + * macros */ - -/* Register read/write macros */ -#define MCF_GPT0_GMS __MBAR+0x800 - -/* - * - * Slice Timers (SLT) - * - */ - -#define MCF_SLT0_SCNT __MBAR+0x908 - -/**********************************************************/ -// macros -/**********************************************************/ .altmacro .macro irq vector,int_mask,clr_int //move.w #0x2700,sr // disable interrupt @@ -207,26 +146,7 @@ rts .endm -/* - * FIXME: this is a GNU gas kludge. Ugly, but I just can't come up with any smarter solution - * - * GNU as does not support multi-character constants. At least I don't know of any way it would. - * The following might look more than strange, but I considered the statement - * - * mchar move.l, 'T,'E,'S,'T,-(SP) - * - * somewhat more readable than - * - * move.l #1413829460,-(SP) - * - * If anybody knows of any better way on how to do this - please do! - * - */ - .macro mchar st,a,b,c,d,tgt - \st #\a << 24|\b<<16|\c<<8|\d,\tgt - .endm - -.text + .text _vec_init: move.l a2,-(sp) // Backup registers @@ -349,11 +269,12 @@ noprint: move.l 4(sp),a5 // restore a5 move.l d0,4(sp) // store exception routine address + // FIXME: not clear why we would need the following? //move.w 10(sp),d0 // restore original SR //bset #13,d0 // set supervisor bit //move.w d0,sr // move.l (sp)+,d0 // restore d0 - rts // jump to exception routine + rts // jump to exception handler exception_text: .ascii "DEBUG: EXCEPTION %d caught at %p" @@ -369,54 +290,28 @@ reset_vector: access: move.w #0x2700,sr // disable interrupts - move.l d0,-(sp) // ++ vr - move.w 4(sp),d0 // get format_status word from stack - andi.l #0x0c03,d0 // mask out fault status bits - cmpi.l #0x0401,d0 // TLB miss on opword of instruction fetch? - beq access_mmu // yes - cmpi.l #0x0402,d0 // TLB miss on extension word of instruction fetch? - beq access_mmu // yes - cmpi.l #0x0802,d0 // TLB miss on data write? - beq access_mmu // yes - cmpi.l #0x0c02,d0 // TLB miss on data read, or read-modify-write? - beq access_mmu // yes + link a6,#-4 * 4 // make room for gcc scratch registers + movem.l d0-d1/a0-a1,(sp) // save them - bra bus_error // everything else is a classic bus error + move.l 4(a6),-(sp) // push format_status + move.l 8(a6),-(sp) // pc at exception + move.l MCF_MMU_MMUAR,-(sp) // MMU fault address + move.l MCF_MMU_MMUSR,-(sp) // MMU status regisrter + move.w #0x2300,sr // can lower interrupt mask now that MMU status is safe + jsr _mmutr_miss // call C routine + lea 4 * 4(sp),sp // adjust stack -access_mmu: - move.l MCF_MMU_MMUSR,d0 // did the last fault hit in TLB? - btst #1,d0 // yes, it did. So we already mapped that page - bne bus_error // and this must be a real bus error - btst #5,d0 // supervisor protection fault? - bne bus_error - btst #4,d0 // read access fault? - bne bus_error - btst #3,d0 // write access fault? + tst.l d0 // exception handler signals bus error bne bus_error - move.l MCF_MMU_MMUAR,d0 - cmp.l #__FASTRAM_END,d0 // above max User RAM area? 
- bge bus_error // -> bus error - - lea -3 * 4(sp),sp // save gcc scratch registers - movem.l d1/a0-a1,(sp) - - move.l 3 * 4 + 4 (sp),-(sp) // push exception stack frame - move.l 5 * 4 + 4 (sp),-(sp) // push program counter at exception - move.l d0,-(sp) // fault address - jsr _mmutr_miss // else we have an MMU TLB miss - add.l #3 * 4,sp // adjust stack - - movem.l (sp),d1/a0-a1 // restore gcc scratch registers - lea 3 * 4(sp),sp - - move.l (sp)+,d0 // restore register - + movem.l (sp),d0-d1/a0-a1 // restore registers + unlk a6 rte bus_error: - move.l (sp)+,d0 // restore register + movem.l (sp),d0-d1/a0-a1 // restore registers + unlk a6 bra std_exc_vec zero_divide: @@ -543,79 +438,6 @@ irq6: // MFP interrupt from FPGA lea MCF_EPORT_EPFR,a5 // clear int6 from edge port bset #6,(a5) - // screen adr change timed out? - move.l _video_sbt,d0 - beq irq6_non_sca // nothing to do if 0 - - sub.l #0x70000000,d0 // substract 14 seconds - lea MCF_SLT0_SCNT,a5 - cmp.l (a5),d0 // time reached? - ble irq6_non_sca // not yet - - lea -7 * 4(sp),sp // save more registers - movem.l d0-d4/a0-a1,(sp) // - clr.l d3 // beginn mit 0 - // jsr _flush_and_invalidate_caches FIXME: why should we need that? - - // eintrag suchen - irq6_next_sca: - move.l d3,d0 - move.l d0,MCF_MMU_MMUAR // addresse - move.l #0x106,d4 - move.l d4,MCF_MMU_MMUOR // suchen -> - nop - move.l MCF_MMU_MMUOR,d4 - clr.w d4 - swap d4 - move.l d4,MCF_MMU_MMUAR - mvz.w #0x10e,d4 - move.l d4,MCF_MMU_MMUOR // einträge holen aus mmu - nop - move.l MCF_MMU_MMUTR,d4 // ID holen - lsr.l #2,d4 // bit 9 bis 2 - cmp.w #sca_page_ID,d4 // ist screen change ID? - bne irq6_sca_pn // nein -> page keine screen area next -// eintrag �ndern - add.l #std_mmutr,d0 - move.l d3,d1 // page 0? - beq irq6_sca_pn0 // ja -> - add.l #copyback_mmudr,d1 // sonst page cb - bra irq6_sca_pn1c -irq6_sca_pn0: - add.l #writethrough_mmudr/*|MCF_MMU_MMUDR_LK*/,d1 // page wt and locked -irq6_sca_pn1c: - mvz.w #0x10b,d2 // MMU update - move.l d0,MCF_MMU_MMUTR - move.l d1,MCF_MMU_MMUDR - move.l d2,MCF_MMU_MMUOR // setze tlb data only - nop -// page copy - move.l d3,a0 - add.l #0x60000000,a0 - move.l d3,a1 - move.l #0x10000,d4 // one whole page (1 MB) - - -irq6_vcd0_loop: - move.l (a0)+,(a1)+ // page copy - move.l (a0)+,(a1)+ - move.l (a0)+,(a1)+ - move.l (a0)+,(a1)+ - subq.l #1,d4 - bne irq6_vcd0_loop - -irq6_sca_pn: - add.l #0x00100000,d3 // next - cmp.l #0x00d00000,d3 // ende? 
- blt irq6_next_sca // nein-> - - move.l #0x2000,d0 - move.l d0,_video_tlb // anfangszustand wieder herstellen - clr.l _video_sbt // zeit löschen - - movem.l (sp),d0-d4/a0-a1 // restore registers - lea 7 * 4(sp),sp - irq6_non_sca: // test auf acsi dma ----------------------------------------------------------------- lea 0xfffffa0b,a5 @@ -665,12 +487,6 @@ acsi_dma: // atari dma move.l a1,-(sp) move.l d1,-(sp) - lea MCF_PSC0_PSCTB_8BIT,a1 // ++ vr - mchar move.l, 'D,'M','A,'\ ,(a1) - //move.l #"DMA ",(a1) - mchar move.l,'I,'N,'T,'!,(a1) - // move.l #'INT!',(a1) - lea 0xf0020110,a5 // fifo daten acsi_dma_start: move.l -12(a5),a1 // dma adresse @@ -755,7 +571,6 @@ irq7: * when the video base address gets changed */ - handler_gpt0: .extern _gpt0_interrupt_handler @@ -768,6 +583,9 @@ handler_gpt0: move.l d0,-(sp) // push it jsr _gpt0_interrupt_handler // call C handler addq.l #4,sp // adjust stack + + movem.l (sp),d0-d1/a0-a1 // restore registers + unlk a6 rte #endif /* MACHINE_FIREBEE */ diff --git a/sys/mmu.c b/sys/mmu.c index c25fde1..bdcf8e7 100644 --- a/sys/mmu.c +++ b/sys/mmu.c @@ -1,5 +1,6 @@ #include "mmu.h" #include "acia.h" +#include "exceptions.h" /* * mmu.c @@ -68,6 +69,7 @@ #else #define dbg(format, arg...) do {;} while (0) #endif /* DEBUG_MMU */ +#define err(format, arg...) do { xprintf("ERROR (%s()): " format, __FUNCTION__, ##arg); xprintf("system halted\r\n"); } while(0); while(1) /* * set ASID register @@ -189,6 +191,160 @@ inline uint32_t set_mmubar(uint32_t value) return ret; } + +/* + * translation table for virtual address ranges. Holds the physical_offset (which must be added to a virtual + * address to get its physical counterpart) for memory ranges. + */ +struct virt_to_phys +{ + uint32_t start_address; + uint32_t length; + uint32_t physical_offset; +}; + +static struct virt_to_phys translation[] = +{ + /* virtual , length , offset */ + { 0x00000000, 0x00e00000, 0x60000000 }, /* map first 14 MByte to first 14 Mb of video ram */ + { 0x00e00000, 0x00100000, 0x00000000 }, /* map TOS to SDRAM */ + { 0x00f00000, 0x00100000, 0xff000000 }, /* map Falcon I/O area to FPGA */ + { 0x01000000, 0x1f000000, 0x00000000 }, /* map rest of ram virt = phys */ +}; +static int num_translations = sizeof(translation) / sizeof(struct virt_to_phys); + +static inline uint32_t lookup_phys(uint32_t virt) +{ + int i; + + for (i = 0; i < num_translations; i++) + { + if (virt >= translation[i].start_address && virt < translation[i].start_address + translation[i].length) + { + return virt + translation[i].physical_offset; + } + } + err("virtual address 0x%lx not found in translation table!\r\n", virt); + return -1; +} + +struct page_descriptor +{ + uint8_t cache_mode : 2; + uint8_t supervisor_protect : 1; + uint8_t read : 1; + uint8_t write : 1; + uint8_t execute : 1; + uint8_t global : 1; + uint8_t locked : 1; +}; + +/* + * page descriptors. 
Size depending on DEFAULT_PAGE_SIZE, either 1M (resulting in 512 + * bytes size) or 8k pages (64k descriptor array size) + */ +static struct page_descriptor pages[512UL * 1024 * 1024 / DEFAULT_PAGE_SIZE]; + + +int mmu_map_instruction_page(uint32_t virt, uint8_t asid) +{ + const uint32_t size_mask = ~ (DEFAULT_PAGE_SIZE - 1); /* pagesize */ + int page_index = (virt & size_mask) / DEFAULT_PAGE_SIZE; /* index into page_descriptor array */ + struct page_descriptor *page = &pages[page_index]; /* attributes of page to map */ + int ipl; + uint32_t phys = lookup_phys(virt); /* virtual to physical translation of page */ + + if (phys == -1) + return 0; + +#ifdef DBG_MMU + register int sp asm("sp"); + dbg("page_descriptor: 0x%02x, ssp = 0x%08x\r\n", * (uint8_t *) page, sp); +#endif /* DBG_MMU */ + + /* + * add page to TLB + */ + + ipl = set_ipl(7); /* do not disturb */ + + MCF_MMU_MMUAR = (virt & size_mask); + + MCF_MMU_MMUTR = (virt & size_mask) | /* virtual address */ + MCF_MMU_MMUTR_ID(asid) | /* address space id (ASID) */ + (page->global ? MCF_MMU_MMUTR_SG : 0) | /* shared global */ + MCF_MMU_MMUTR_V; /* valid */ + + MCF_MMU_MMUDR = (phys & size_mask) | /* physical address */ + MCF_MMU_MMUDR_SZ(DEFAULT_PAGE_SIZE) | /* page size */ + MCF_MMU_MMUDR_CM(page->cache_mode) | /* cache mode */ + (page->supervisor_protect ? MCF_MMU_MMUDR_SP : 0) | /* supervisor protect */ + (page->read ? MCF_MMU_MMUDR_R : 0) | /* read access enable */ + (page->write ? MCF_MMU_MMUDR_W : 0) | /* write access enable */ + (page->execute ? MCF_MMU_MMUDR_X : 0) | /* execute access enable */ + (page->locked ? MCF_MMU_MMUDR_LK : 0); + + MCF_MMU_MMUOR = MCF_MMU_MMUOR_ITLB | /* instruction */ + MCF_MMU_MMUOR_ACC | /* access TLB */ + MCF_MMU_MMUOR_UAA; /* update allocation address field */ + + set_ipl(ipl); + + dbg("mapped virt=0x%08x to phys=0x%08x\r\n", virt & size_mask, phys & size_mask); + + dbg("ITLB: MCF_MMU_MMUOR = %08x\r\n", MCF_MMU_MMUOR); + + return 1; +} + +int mmu_map_data_page(uint32_t virt, uint8_t asid) +{ + uint16_t ipl; + const uint32_t size_mask = ~ (DEFAULT_PAGE_SIZE - 1); /* pagesize */ + int page_index = (virt & size_mask) / DEFAULT_PAGE_SIZE; /* index into page_descriptor array */ + struct page_descriptor *page = &pages[page_index]; /* attributes of page to map */ + + uint32_t phys = lookup_phys(virt); /* virtual to physical translation of page */ + + if (phys == -1) + return 0; + +#ifdef DBG_MMU + register int sp asm("sp"); + dbg("page_descriptor: 0x%02x, ssp = 0x%08x\r\n", * (uint8_t *) page, sp); +#endif /* DBG_MMU */ + + /* + * add page to TLB + */ + + ipl = set_ipl(7); /* do not disturb */ + + MCF_MMU_MMUTR = (virt & size_mask) | /* virtual address */ + MCF_MMU_MMUTR_ID(asid) | /* address space id (ASID) */ + (page->global ? MCF_MMU_MMUTR_SG : 0) | /* shared global */ + MCF_MMU_MMUTR_V; /* valid */ + + MCF_MMU_MMUDR = (phys & size_mask) | /* physical address */ + MCF_MMU_MMUDR_SZ(DEFAULT_PAGE_SIZE) | /* page size */ + MCF_MMU_MMUDR_CM(page->cache_mode) | /* cache mode */ + (page->supervisor_protect ? MCF_MMU_MMUDR_SP : 0) | /* supervisor protect */ + (page->read ? MCF_MMU_MMUDR_R : 0) | /* read access enable */ + (page->write ? MCF_MMU_MMUDR_W : 0) | /* write access enable */ + (page->execute ? MCF_MMU_MMUDR_X : 0) | /* execute access enable */ + (page->locked ? 
MCF_MMU_MMUDR_LK : 0); + + MCF_MMU_MMUOR = MCF_MMU_MMUOR_ACC | /* access TLB, data */ + MCF_MMU_MMUOR_UAA; /* update allocation address field */ + + set_ipl(ipl); + dbg("mapped virt=0x%08x to phys=0x%08x\r\n", virt & size_mask, phys & size_mask); + + dbg("DTLB: MCF_MMU_MMUOR = %08x\r\n", MCF_MMU_MMUOR); + + return 1; +} + /* * map a page of memory using virt and phys as addresses with the Coldfire MMU. * @@ -201,26 +357,27 @@ inline uint32_t set_mmubar(uint32_t value) * * */ -int mmu_map_page(uint32_t virt, uint32_t phys, enum mmu_page_size sz, const struct mmu_map_flags *flags) +int mmu_map_page(uint32_t virt, uint32_t phys, enum mmu_page_size sz, uint8_t page_id, const struct page_descriptor *flags) { int size_mask; + int ipl; switch (sz) { case MMU_PAGE_SIZE_1M: - size_mask = 0xfff00000; + size_mask = ~ (SIZE_1M - 1); break; case MMU_PAGE_SIZE_8K: - size_mask = 0xffffe000; + size_mask = ~ (SIZE_8K - 1); break; case MMU_PAGE_SIZE_4K: - size_mask = 0xfffff000; + size_mask = ~ (SIZE_4K - 1); break; case MMU_PAGE_SIZE_1K: - size_mask = 0xfffff800; + size_mask = ~ (SIZE_1K - 1); break; default: @@ -231,20 +388,21 @@ int mmu_map_page(uint32_t virt, uint32_t phys, enum mmu_page_size sz, const stru /* * add page to TLB */ + + ipl = set_ipl(7); /* do not disturb */ + MCF_MMU_MMUTR = ((int) virt & size_mask) | /* virtual address */ - MCF_MMU_MMUTR_ID(flags->page_id) | /* address space id (ASID) */ - MCF_MMU_MMUTR_SG | /* shared global */ + MCF_MMU_MMUTR_ID(page_id) | /* address space id (ASID) */ + (flags->global ? MCF_MMU_MMUTR_SG : 0) | /* shared global */ MCF_MMU_MMUTR_V; /* valid */ - NOP(); MCF_MMU_MMUDR = ((int) phys & size_mask) | /* physical address */ MCF_MMU_MMUDR_SZ(sz) | /* page size */ MCF_MMU_MMUDR_CM(flags->cache_mode) | - (flags->access & ACCESS_READ ? MCF_MMU_MMUDR_R : 0) | /* read access enable */ - (flags->access & ACCESS_WRITE ? MCF_MMU_MMUDR_W : 0) | /* write access enable */ - (flags->access & ACCESS_EXECUTE ? MCF_MMU_MMUDR_X : 0) | /* execute access enable */ + (flags->read ? MCF_MMU_MMUDR_R : 0) | /* read access enable */ + (flags->write ? MCF_MMU_MMUDR_W : 0) | /* write access enable */ + (flags->execute ? MCF_MMU_MMUDR_X : 0) | /* execute access enable */ (flags->locked ? 
MCF_MMU_MMUDR_LK : 0); - NOP(); MCF_MMU_MMUOR = MCF_MMU_MMUOR_ACC | /* access TLB, data */ MCF_MMU_MMUOR_UAA; /* update allocation address field */ @@ -253,6 +411,9 @@ int mmu_map_page(uint32_t virt, uint32_t phys, enum mmu_page_size sz, const stru MCF_MMU_MMUOR = MCF_MMU_MMUOR_ITLB | /* instruction */ MCF_MMU_MMUOR_ACC | /* access TLB */ MCF_MMU_MMUOR_UAA; /* update allocation address field */ + + set_ipl(ipl); + dbg("mapped virt=0x%08x to phys=0x%08x\r\n", virt, phys); return 1; @@ -260,12 +421,66 @@ int mmu_map_page(uint32_t virt, uint32_t phys, enum mmu_page_size sz, const stru void mmu_init(void) { - struct mmu_map_flags flags; + extern uint8_t _MMUBAR[]; + uint32_t MMUBAR = (uint32_t) &_MMUBAR[0]; + struct page_descriptor flags; + int i; - extern uint8_t _MMUBAR[]; - uint32_t MMUBAR = (uint32_t) &_MMUBAR[0]; - extern uint8_t _TOS[]; - uint32_t TOS = (uint32_t) &_TOS[0]; + /* + * clear all MMU TLB entries first + */ + MCF_MMU_MMUOR = MCF_MMU_MMUOR_CA; /* clears _all_ TLBs (including locked ones) */ + NOP(); + + /* + * prelaminary initialization of page descriptor 0 (root) table + */ + for (i = 0; i < sizeof(pages) / sizeof(struct page_descriptor); i++) + { + uint32_t addr = i * DEFAULT_PAGE_SIZE; + + if (addr >= 0x00f00000 && addr < 0x00ffffff) + { + pages[i].cache_mode = CACHE_NOCACHE_PRECISE; + pages[i].execute = 0; + pages[i].read = 1; + pages[i].write = 1; + pages[i].execute = 0; + pages[i].global = 1; + pages[i].supervisor_protect = 1; + } + else if (addr >= 0x0 && addr < 0x00e00000) /* ST-RAM, potential video memory */ + { + pages[i].cache_mode = CACHE_WRITETHROUGH; + pages[i].execute = 1; + pages[i].supervisor_protect = 0; + pages[i].read = 1; + pages[i].write = 1; + pages[i].execute = 1; + pages[i].global = 1; + } + else if (addr >= 0x00e00000 && addr < 0x00f00000) /* EmuTOS */ + { + pages[i].cache_mode = CACHE_COPYBACK; + pages[i].execute = 1; + pages[i].supervisor_protect = 1; + pages[i].read = 1; + pages[i].write = 0; + pages[i].execute = 1; + pages[i].global = 1; + } + else + { + pages[i].cache_mode = CACHE_COPYBACK; + pages[i].execute = 1; + pages[i].read = 1; + pages[i].write = 1; + pages[i].supervisor_protect = 0; + pages[i].global = 1; + } + pages[i].locked = 0; /* not locked */ + pages[0].supervisor_protect = 0; /* protect system vectors */ + } set_asid(0); /* do not use address extension (ASID provides virtual 48 bit addresses */ @@ -321,37 +536,38 @@ void mmu_init(void) ACR_ADMSK(0x7) | ACR_BA(0xe0000000)); - /* disable ACR3 */ + /* disable ACR1 - 3, essentially disabling all of the above */ + set_acr1(0x0); + set_acr2(0x0); set_acr3(0x0); set_mmubar(MMUBAR + 1); /* set and enable MMUBAR */ - /* clear all MMU TLB entries */ - MCF_MMU_MMUOR = MCF_MMU_MMUOR_CA; - /* create locked TLB entries */ flags.cache_mode = CACHE_COPYBACK; - flags.protection = SV_USER; - flags.page_id = 0; - flags.access = ACCESS_READ | ACCESS_WRITE | ACCESS_EXECUTE; + flags.supervisor_protect = 0; + flags.read = 1; + flags.write = 1; + flags.execute = 1; flags.locked = true; - /* 0x0000_0000 - 0x000F_FFFF (first MB of physical memory) locked virt = phys */ - mmu_map_page(0x0, 0x0, MMU_PAGE_SIZE_1M, &flags); + /* 0x00000000 - 0x00100000 (first MB of physical memory) locked virt = phys */ + mmu_map_page(0x0, 0x0, MMU_PAGE_SIZE_1M, 0, &flags); #if defined(MACHINE_FIREBEE) /* - * 0x00d0'0000 - 0x00df'ffff (last megabyte of ST RAM = Falcon video memory) locked ID = 6 + * 0x00d00000 - 0x00e00000 (last megabyte of ST RAM = Falcon video memory) locked ID = 6 * mapped to physical address 0x60d0'0000 
(FPGA video memory) * video RAM: read write execute normal write true */ flags.cache_mode = CACHE_WRITETHROUGH; - flags.protection = SV_USER; - flags.page_id = SCA_PAGE_ID; - flags.access = ACCESS_READ | ACCESS_WRITE | ACCESS_EXECUTE; + flags.supervisor_protect = 0; + flags.read = 1; + flags.write = 1; + flags.execute = 1; flags.locked = true; - mmu_map_page(0x00d00000, 0x60d00000, MMU_PAGE_SIZE_1M, &flags); + mmu_map_page(0x00d00000, 0x60d00000, MMU_PAGE_SIZE_1M, SCA_PAGE_ID, &flags); video_tlb = 0x2000; /* set page as video page */ video_sbt = 0x0; /* clear time */ @@ -362,9 +578,12 @@ void mmu_init(void) * This maps virtual 0x00e0'0000 - 0x00ef'ffff to the same virtual address */ flags.cache_mode = CACHE_COPYBACK; - flags.page_id = 0; - flags.access = ACCESS_READ | ACCESS_EXECUTE; - mmu_map_page(TOS, TOS, MMU_PAGE_SIZE_1M, &flags); + flags.supervisor_protect = 0; + flags.read = 1; + flags.write = 0; + flags.execute = 1; + flags.locked = 1; + mmu_map_page(0xe00000, 0xe00000, MMU_PAGE_SIZE_1M, 0, &flags); #if defined(MACHINE_FIREBEE) /* @@ -372,8 +591,12 @@ void mmu_init(void) * area (0x00f0'0000 - 0x00ff'ffff virtual) for the FireBee */ flags.cache_mode = CACHE_NOCACHE_PRECISE; - flags.access = ACCESS_WRITE | ACCESS_READ; - mmu_map_page(0x00f00000, 0xfff00000, MMU_PAGE_SIZE_1M, &flags); + flags.supervisor_protect = 1; + flags.read = 1; + flags.write = 1; + flags.execute = 0; + flags.locked = 1; + mmu_map_page(0x00f00000, 0xfff00000, MMU_PAGE_SIZE_1M, 0, &flags); #endif /* MACHINE_FIREBEE */ /* @@ -381,54 +604,206 @@ void mmu_init(void) * virtual address. This is also used (completely) when BaS is in RAM */ flags.cache_mode = CACHE_COPYBACK; - flags.access = ACCESS_READ | ACCESS_WRITE | ACCESS_EXECUTE; - mmu_map_page(SDRAM_START + SDRAM_SIZE - 0X00200000, SDRAM_START + SDRAM_SIZE - 0X00200000, MMU_PAGE_SIZE_1M, &flags); + flags.supervisor_protect = 1; + flags.read = 1; + flags.write = 1; + flags.execute = 1; + flags.locked = 1; + mmu_map_page(SDRAM_START + SDRAM_SIZE - 0x00200000, SDRAM_START + SDRAM_SIZE - 0x00200000, MMU_PAGE_SIZE_1M, 0, &flags); /* * Map (locked) the very last MB of physical SDRAM (this is where the driver buffers reside) to the same * virtual address. Used uncached for drivers. 
*/ flags.cache_mode = CACHE_NOCACHE_PRECISE; - flags.access = ACCESS_READ | ACCESS_WRITE; - flags.protection = SV_PROTECT; - mmu_map_page(SDRAM_START + SDRAM_SIZE - 0x00100000, SDRAM_START + SDRAM_SIZE - 0x00100000, MMU_PAGE_SIZE_1M, &flags); + flags.supervisor_protect = 1; + flags.read = 1; + flags.write = 1; + flags.execute = 0; + flags.locked = 1; + mmu_map_page(SDRAM_START + SDRAM_SIZE - 0x00100000, SDRAM_START + SDRAM_SIZE - 0x00100000, MMU_PAGE_SIZE_1M, 0, &flags); } -static struct mmu_map_flags flags = -{ - .cache_mode = CACHE_COPYBACK, - .protection = SV_USER, - .page_id = 0, - .access = ACCESS_READ | ACCESS_WRITE | ACCESS_EXECUTE, - .locked = false -}; -void mmutr_miss(uint32_t address, uint32_t pc, uint32_t format_status) +uint32_t mmutr_miss(uint32_t mmu_sr, uint32_t fault_address, uint32_t pc, + uint32_t format_status) { + uint32_t fault = format_status & 0xc030000; + dbg("MMU TLB MISS accessing 0x%08x\r\nFS = 0x%08x\r\nPC = 0x%08x\r\n", address, format_status, pc); // flush_and_invalidate_caches(); - switch (address) + switch (fault) { - case keyctl: - case keybd: - /* do something to emulate the IKBD access */ - dbg("IKBD access\r\n"); - break; + /* if we have a real TLB miss, map the offending page */ - case midictl: - case midi: - /* do something to emulate MIDI access */ - dbg("MIDI ACIA access\r\n"); - break; + case 0x04010000: /* TLB miss on opword of instruction fetch */ + case 0x04020000: /* TLB miss on extension word of instruction fetch */ + dbg("MMU ITLB MISS accessing 0x%08x\r\n" + "FS = 0x%08x\r\n" + "MMUSR = 0x%08x\r\n" + "PC = 0x%08x\r\n", + fault_address, format_status, mmu_sr, pc); + dbg("fault = 0x%08x\r\n", fault); + mmu_map_instruction_page(pc, 0); - default: - /* add missed page to TLB */ - mmu_map_page(address, address, MMU_PAGE_SIZE_1M, &flags); - dbg("DTLB: MCF_MMU_MMUOR = %08x\r\n", MCF_MMU_MMUOR); - dbg("ITLB: MCF_MMU_MMUOR = %08x\r\n\r\n", MCF_MMU_MMUOR); - } + /* due to prefetch, it makes sense to map the next adjacent page also for ITLBs */ + mmu_map_instruction_page(pc + DEFAULT_PAGE_SIZE, 0); + break; + + case 0x08020000: /* TLB miss on data write */ + case 0x0c020000: /* TLB miss on data read or read-modify-write */ + dbg("MMU DTLB MISS accessing 0x%08x\r\n" + "FS = 0x%08x\r\n" + "MMUSR = 0x%08x\r\n" + "PC = 0x%08x\r\n", + fault_address, format_status, mmu_sr, pc); + dbg("fault = 0x%08x\r\n", fault); + mmu_map_data_page(fault_address, 0); + break; + + /* else issue an bus error */ + default: + dbg("bus error\r\n"); + return 1; /* signal bus error to caller */ + } +#ifdef DBG_MMU + xprintf("\r\n"); +#endif /* DBG_MMU */ + + return 0; /* signal TLB miss handled to caller */ } +/* TODO: implement */ +/* + * API-exposed, externally callable MMU functions + */ + + +/* + * lock data page(s) with address space id asid from address virt to virt + size. + * + * FIXME: There is no check for "too many locked pages", currently. + * + * return: 0 if failed (page not in translation table), 1 otherwise + */ +int32_t mmu_map_data_page_locked(uint32_t virt, uint32_t size, int asid) +{ + const uint32_t size_mask = ~ (DEFAULT_PAGE_SIZE - 1); /* pagesize */ + int page_index = (virt & size_mask) / DEFAULT_PAGE_SIZE; /* index into page_descriptor array */ + struct page_descriptor *page = &pages[page_index]; /* attributes of page to map */ + int i = 0; + + while (page_index * DEFAULT_PAGE_SIZE < virt + size) + { + if (page->locked) + { + dbg("page at %p is already locked. 
Nothing to do\r\n", virt); + } + else + { + page->locked = 1; + mmu_map_data_page(virt, 0); + i++; + } + virt += DEFAULT_PAGE_SIZE; + } + + dbg("%d pages locked\r\n", i); + + return 1; /* success */ +} + +/* + * the opposite: unlock data page(s) with address space id asid from address virt to virt + size_t + * + * return: 0 if failed (page not found), 1 otherwise + */ +int32_t mmu_unlock_data_page(uint32_t address, uint32_t size, int asid) +{ + int curr_asid; + const uint32_t size_mask = ~ (DEFAULT_PAGE_SIZE - 1); + int page_index = (address & size_mask) / DEFAULT_PAGE_SIZE; /* index into page descriptor array */ + struct page_descriptor *page = &pages[page_index]; + + curr_asid = set_asid(asid); /* set asid to the one to search for */ + + /* TODO: check for pages[] array bounds */ + + while (page_index * DEFAULT_PAGE_SIZE < address + size) + { + MCF_MMU_MMUAR = address + page->supervisor_protect; + MCF_MMU_MMUOR = MCF_MMU_MMUOR_STLB | /* search TLB */ + MCF_MMU_MMUOR_ADR | + MCF_MMU_MMUOR_RW; + if (MCF_MMU_MMUSR & MCF_MMU_MMUSR_HIT) /* found */ + { +#ifdef DBG_MMU + uint32_t tlb_aa = MCF_MMU_MMUOR >> 16; /* MMU internal allocation address for TLB */ +#endif /* DBG_MMU */ + + MCF_MMU_MMUDR &= ~MCF_MMU_MMUDR_LK; /* clear lock bit */ + MCF_MMU_MMUOR = MCF_MMU_MMUOR_UAA | + MCF_MMU_MMUOR_ACC; /* update TLB */ + + dbg("DTLB %d unlocked\r\n", tlb_aa); + } + else + { + dbg("%p doesn't seem to be locked??\r\n"); + } + page_index++; + } + set_asid(curr_asid); + + return 1; /* success */ +} + +int32_t mmu_report_locked_pages(uint32_t *num_itlb, uint32_t *num_dtlb) +{ + int i; + int li = 0; + int ld = 0; + + /* Coldfire V4e allocation addresses run from 0 to 63 */ + + for (i = 0; i < 31; i++) /* 0-31 = ITLB AA */ + { + MCF_MMU_MMUAR = i; + MCF_MMU_MMUOR = MCF_MMU_MMUOR_STLB | + MCF_MMU_MMUOR_ITLB | + MCF_MMU_MMUOR_RW; /* search ITLB */ + + if (MCF_MMU_MMUTR & MCF_MMU_MMUTR_V) + { + /* entry is valid */ + if (MCF_MMU_MMUDR & MCF_MMU_MMUDR_LK) + { + li++; + } + } + + } + for (i = 32; i < 64; i++) /* 32-63 = DTLB AA */ + { + MCF_MMU_MMUAR = i; + MCF_MMU_MMUOR = MCF_MMU_MMUOR_STLB | + MCF_MMU_MMUOR_RW; /* search ITLB */ + + if (MCF_MMU_MMUTR & MCF_MMU_MMUTR_V) + { + /* entry is valid */ + if (MCF_MMU_MMUDR & MCF_MMU_MMUDR_LK) + { + ld++; + } + } + } + + *num_itlb = li; + *num_dtlb = ld; + + return 1; /* success */ +}
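
A usage sketch for the revised driver interface (illustration only, not part of the diff above): it shows how a BaS client driver could lock the pages backing a DMA buffer and release them again through struct mmu_driver_interface, which now takes an explicit ASID and reports ITLB and DTLB lock counts separately. The lock_then_unlock_dma_buffer() helper and the way the interface pointer reaches the caller are assumptions made for the example; only the three callback signatures come from the modified driver_vec.h.

/*
 * Hypothetical caller of the revised MMU driver interface. Only the
 * three function pointer signatures below are defined by this patch;
 * everything else is illustrative scaffolding.
 */
#include <stdint.h>

struct mmu_driver_interface
{
    int32_t (*map_page_locked)(uint32_t address, uint32_t length, int asid);
    int32_t (*unlock_page)(uint32_t address, uint32_t length, int asid);
    int32_t (*report_locked_pages)(uint32_t *num_itlb, uint32_t *num_dtlb);
};

/*
 * Lock the pages covering a DMA buffer, read back the TLB lock counts,
 * then unlock again. ASID 0 is the address space this patch uses for
 * all BaS mappings.
 */
static int32_t lock_then_unlock_dma_buffer(struct mmu_driver_interface *mmu,
                                           uint32_t buffer, uint32_t length)
{
    uint32_t num_itlb, num_dtlb;

    if (mmu->map_page_locked(buffer, length, 0) != 1)
        return 0;       /* address not covered by the translation table */

    mmu->report_locked_pages(&num_itlb, &num_dtlb);
    /* num_dtlb now includes the page(s) locked above */

    mmu->unlock_page(buffer, length, 0);

    return 1;
}

Note that mmu_map_data_page_locked() does not yet check for "too many locked pages" (see the FIXME in sys/mmu.c), so callers should keep locked regions small.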
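
To make the new TLB miss path easier to follow, here is a small, self-contained sketch of the address arithmetic it relies on: the 1 MB page index into the pages[] descriptor array and the virtual-to-physical offset lookup. The table values are copied from sys/mmu.c above; main() and the sample address are test scaffolding only and not part of the patch.

/*
 * Standalone illustration of the lookup used by mmutr_miss() and the
 * mmu_map_*_page() helpers: index into pages[] and translate virtual
 * to physical via the offset table. Mirrors the patch; hosted build.
 */
#include <stdint.h>
#include <stdio.h>

#define DEFAULT_PAGE_SIZE 0x00100000    /* 1 MB pages, as in include/mmu.h */

struct virt_to_phys { uint32_t start_address, length, physical_offset; };

static const struct virt_to_phys translation[] =
{
    { 0x00000000, 0x00e00000, 0x60000000 },   /* ST-RAM -> FPGA video RAM */
    { 0x00e00000, 0x00100000, 0x00000000 },   /* TOS in SDRAM, virt = phys */
    { 0x00f00000, 0x00100000, 0xff000000 },   /* Falcon I/O -> FPGA */
    { 0x01000000, 0x1f000000, 0x00000000 },   /* rest of RAM, virt = phys */
};

static uint32_t lookup_phys(uint32_t virt)
{
    int i;

    for (i = 0; i < (int) (sizeof(translation) / sizeof(translation[0])); i++)
    {
        if (virt >= translation[i].start_address &&
            virt < translation[i].start_address + translation[i].length)
            return virt + translation[i].physical_offset;
    }
    return (uint32_t) -1;   /* unmapped: the real handler reports a bus error */
}

int main(void)
{
    uint32_t virt = 0x00d40000;     /* somewhere in Falcon video RAM */
    uint32_t mask = ~ (uint32_t) (DEFAULT_PAGE_SIZE - 1);
    uint32_t index = (virt & mask) / DEFAULT_PAGE_SIZE;

    /* prints: virt 0x00d40000 -> pages[13], phys 0x60d40000 */
    printf("virt 0x%08x -> pages[%u], phys 0x%08x\n",
           (unsigned) virt, (unsigned) index, (unsigned) lookup_phys(virt));

    return 0;
}

With DEFAULT_PAGE_SIZE at 1 MB the descriptor array covers the whole 512 MB range in 512 bytes, which is why the patch indexes it directly instead of walking a page table.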