Compare commits

...

11 Commits

Author SHA1 Message Date
Markus Fröschle
c70dc9ae0d for ITLB misses, map following page also since sometimes (prefetch?) the
fault PC reported by the access error exceptions seems to be off a few
instructions.
2014-09-28 16:44:39 +00:00
Markus Fröschle
c427fea43a lowered interrupt mask where not needed 2014-09-28 16:36:55 +00:00
Markus Fröschle
69539e93a6 replaced constant values with symbolic names 2014-09-28 16:04:15 +00:00
Markus Fröschle
aa3ae8ecba fixed debug printouts. Removed unused ACR settings 2014-09-28 12:31:14 +00:00
Markus Fröschle
4d68242185 first version with C page table handling that works 2014-09-28 11:27:07 +00:00
Markus Fröschle
68b4240355 fixed indexing into page descriptor array with wrong page size 2014-09-27 06:19:43 +00:00
Markus Fröschle
83666ba2f5 fixed (rough) comments 2014-09-26 05:59:02 +00:00
Markus Fröschle
f044fbbe72 for some reason the mapping of EmuTOS doesn't seem to work anymore? 2014-09-25 20:23:28 +00:00
Markus Fröschle
6a1bcae947 runs until EmuTOS scrolls the welcome screen? 2014-09-25 18:41:26 +00:00
Markus Fröschle
a1c4fdff47 experimental branch to use 8k memory pages in MMU TLBs - might ease
implementing memory protection for Coldfire in MiNT
2014-09-25 13:50:07 +00:00
Markus Fröschle
8d6154e69b new experimental branch with changed MMU behaviour 2014-09-25 08:00:36 +00:00
12 changed files with 1321 additions and 1344 deletions

View File

@@ -143,7 +143,6 @@ CSRCS= \
ASRCS= \
startcf.S \
printf_helper.S \
exceptions.S \
xhdi_vec.S \
pci_wrappers.S

View File

@@ -78,7 +78,6 @@ SECTIONS
OBJDIR/s19reader.o(.text)
OBJDIR/bas_printf.o(.text)
OBJDIR/bas_string.o(.text)
OBJDIR/printf_helper.o(.text)
OBJDIR/cache.o(.text)
OBJDIR/dma.o(.text)
OBJDIR/MCD_dmaApi.o(.text)

View File

@@ -55,11 +55,6 @@
#define CF_CACR_EUSP (0x00000020) /* Switch stacks in user mode */
#define CF_CACR_DF (0x00000010) /* Disable FPU */
#define _DCACHE_SET_MASK ((DCACHE_SIZE/64-1)<<CACHE_WAYS)
#define _ICACHE_SET_MASK ((ICACHE_SIZE/64-1)<<CACHE_WAYS)
#define LAST_DCACHE_ADDR _DCACHE_SET_MASK
#define LAST_ICACHE_ADDR _ICACHE_SET_MASK
#define ICACHE_SIZE 0x8000 /* instruction - 32k */
#define DCACHE_SIZE 0x8000 /* data - 32k */
@@ -67,6 +62,10 @@
#define CACHE_SETS 0x0200 /* 512 sets */
#define CACHE_WAYS 0x0004 /* 4 way */
#define _DCACHE_SET_MASK ((DCACHE_SIZE / 64 - 1) << CACHE_WAYS)
#define _ICACHE_SET_MASK ((ICACHE_SIZE / 64 - 1) << CACHE_WAYS)
#define LAST_DCACHE_ADDR _DCACHE_SET_MASK
#define LAST_ICACHE_ADDR _ICACHE_SET_MASK
#define CACHE_DISABLE_MODE (CF_CACR_DCINVA+ \
CF_CACR_BCINVA+ \

View File

@@ -51,7 +51,7 @@
*/
#define SCA_PAGE_ID 6 /* indicates video memory page */
#define DEFAULT_PAGE_SIZE 0x2000 /* use 8k pages for MiNT compatibility */
/*
* MMU page sizes
*/
@@ -67,10 +67,13 @@ enum mmu_page_size
/*
* cache modes
*/
#define CACHE_WRITETHROUGH 0
#define CACHE_COPYBACK 1
#define CACHE_NOCACHE_PRECISE 2
#define CACHE_NOCACHE_IMPRECISE 3
enum mmu_cache_modes
{
CACHE_WRITETHROUGH = 0,
CACHE_COPYBACK = 1,
CACHE_NOCACHE_PRECISE = 2,
CACHE_NOCACHE_IMPRECISE = 3
};
/*
@@ -83,23 +86,14 @@ enum mmu_page_size
#define ACCESS_WRITE (1 << 1)
#define ACCESS_EXECUTE (1 << 2)
struct mmu_map_flags
{
unsigned cache_mode:2;
unsigned protection:1;
unsigned page_id:8;
unsigned access:3;
unsigned locked:1;
unsigned unused:17;
};
/*
* global variables from linker script
*/
extern long video_tlb;
extern long video_sbt;
extern void mmu_enable(void);
extern void mmu_init(void);
extern int mmu_map_page(uint32_t virt, uint32_t phys, enum mmu_page_size sz, const struct mmu_map_flags *flags);
extern int mmu_map_8k_page(uint32_t adr, uint8_t asid);
#endif /* _MMU_H_ */
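
A hypothetical call site for the prototypes above (illustration only, not part of the patch; the address and ASID values are made up):

#include "mmu.h"

void mmu_bringup_example(void)
{
    mmu_init();                              /* set up ACRs and the locked TLB entries */
    mmu_enable();                            /* turn the MMU on */

    /* later, e.g. from the TLB-miss path: map the 8k page containing an address for ASID 0 */
    if (!mmu_map_8k_page(0x00012000, 0))
    {
        /* lookup failed: no translation known for this address -> treat as a bus error */
    }
}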

View File

@@ -306,6 +306,12 @@ void BaS(void)
mmu_init();
xprintf("finished\r\n");
xprintf("enable MMU: ");
mmu_enable(); /* force pipeline sync */
xprintf("finished\r\n");
xprintf("initialize exception vector table: ");
vec_init();
xprintf("finished\r\n");
@@ -313,10 +319,6 @@ void BaS(void)
xprintf("flush caches: ");
flush_and_invalidate_caches();
xprintf("finished\r\n");
xprintf("enable MMU: ");
MCF_MMU_MMUCR = MCF_MMU_MMUCR_EN; /* MMU on */
NOP(); /* force pipeline sync */
xprintf("finished\r\n");
#ifdef MACHINE_FIREBEE
xprintf("IDE reset: ");

View File

@@ -95,7 +95,8 @@ void flush_icache_range(void *address, size_t size)
start_set = (uint32_t) address & _ICACHE_SET_MASK;
end_set = (uint32_t) endaddr & _ICACHE_SET_MASK;
if (start_set > end_set) {
if (start_set > end_set)
{
/* from the beginning to the lowest address */
for (set = 0; set <= end_set; set += (0x10 - 3))
{
@@ -115,7 +116,8 @@ void flush_icache_range(void *address, size_t size)
/* next loop will finish the cache, i.e. pass the hole */
end_set = LAST_ICACHE_ADDR;
}
for (set = start_set; set <= end_set; set += (0x10 - 3)) {
for (set = start_set; set <= end_set; set += (0x10 - 3))
{
__asm__ __volatile__(
" cpushl ic,(%[set]) \n\t"
" addq.l #1,%[set] \n\t"

View File

@@ -45,22 +45,37 @@
.extern _irq5_handler
.extern _irq7_handler
/* Register read/write macros */
#define MCF_EPORT_EPPAR __MBAR+0xF00
#define MCF_EPORT_EPDDR __MBAR+0xF04
#define MCF_EPORT_EPIER __MBAR+0xF05
#define MCF_EPORT_EPDR __MBAR+0xF08
#define MCF_EPORT_EPPDR __MBAR+0xF09
#define MCF_EPORT_EPFR __MBAR+0xF0C
#define MCF_GPIO_PODR_FEC1L __MBAR+0xA07
#define MCF_PSC0_PSCTB_8BIT __MBAR+0x860C
.global _vec_init
// interrupt sources
/* Register read/write equates */
/* MMU */
.equ MCF_MMU_MMUCR, __MMUBAR
.equ MCF_MMU_MMUOR, __MMUBAR+0x04
.equ MCF_MMU_MMUSR, __MMUBAR+0x08
.equ MCF_MMU_MMUAR, __MMUBAR+0x10
.equ MCF_MMU_MMUTR, __MMUBAR+0x14
.equ MCF_MMU_MMUDR, __MMUBAR+0x18
/* EPORT flag register */
.equ MCF_EPORT_EPFR, __MBAR+0xf0c
/* FEC1 port output data direction register */
.equ MCF_GPIO_PODR_FEC1L, __MBAR+0xa07
/* PSC0 transmit buffer register */
.equ MCF_PSC0_PSCTB_8BIT, __MBAR+0x860c
/* GPT mode select register */
.equ MCF_GPT0_GMS, __MBAR+0x800
/* Slice timer 0 count register */
.equ MCF_SLT0_SCNT, __MBAR+0x908
/* interrupt sources */
.equ INT_SOURCE_EPORT_EPF1,1 // edge port flag 1
.equ INT_SOURCE_EPORT_EPF2,2 // edge port flag 2
.equ INT_SOURCE_EPORT_EPF3,3 // edge port flag 3
@@ -115,85 +130,15 @@
// Atari register equates (provided by FPGA)
.equ vbasehi, 0xffff8201
//mmu ---------------------------------------------------
/* Register read/write macros */
#define MCF_MMU_MMUCR __MMUBAR
#define MCF_MMU_MMUOR __MMUBAR + 0x04
#define MCF_MMU_MMUSR __MMUBAR + 0x08
#define MCF_MMU_MMUAR __MMUBAR + 0x10
#define MCF_MMU_MMUTR __MMUBAR + 0x14
#define MCF_MMU_MMUDR __MMUBAR + 0x18
/* Bit definitions and macros for MCF_MMU_MMUCR */
#define MCF_MMU_MMUCR_EN (0x1)
#define MCF_MMU_MMUCR_ASM (0x2)
/* Bit definitions and macros for MCF_MMU_MMUOR */
#define MCF_MMU_MMUOR_UAA (0x1) /* update allocation address, i.e. write to TLB */
#define MCF_MMU_MMUOR_ACC (0x2) /* activate access to TLB */
#define MCF_MMU_MMUOR_RW (0x4) /* read/write TLB */
#define MCF_MMU_MMUOR_ADR (0x8) /* search by address/TLB address */
#define MCF_MMU_MMUOR_ITLB (0x10) /* act on instruction/data TLBs */
#define MCF_MMU_MMUOR_CAS (0x20) /* clear all unlocked TLBs with matching ASID */
#define MCF_MMU_MMUOR_CNL (0x40) /* clear all unlocked TLBs regardless of ASID */
#define MCF_MMU_MMUOR_CA (0x80) /* clear all TLBs */
#define MCF_MMU_MMUOR_STLB (0x100) /* search TLBs */
#define MCF_MMU_MMUOR_AA(x) (((x) & 0xFFFF) << 0x10) /* TLB allocation address */
/* Bit definitions and macros for MCF_MMU_MMUSR */
#define MCF_MMU_MMUSR_HIT (0x2) /* last lookup had a hit in TLB */
#define MCF_MMU_MMUSR_WF (0x8) /* indicate write fault */
#define MCF_MMU_MMUSR_RF (0x10) /* indicate read fault */
#define MCF_MMU_MMUSR_SPF (0x20) /* indicate supervisor protect fault */
/* Bit definitions and macros for MCF_MMU_MMUAR */
#define MCF_MMU_MMUAR_FA(x) (((x) & 0xFFFFFFFF) << 0)
/* Bit definitions and macros for MCF_MMU_MMUTR */
#define MCF_MMU_MMUTR_V (0x1) /* valid bit for TLB */
#define MCF_MMU_MMUTR_SG (0x2) /* set page as shared global */
#define MCF_MMU_MMUTR_ID(x) (((x) & 0xFF) << 0x2) /* ASID (address space id) of page */
#define MCF_MMU_MMUTR_VA(x) (((x) & 0x3FFFFF) << 0xA) /* virtual address of page */
/* Bit definitions and macros for MCF_MMU_MMUDR */
#define MCF_MMU_MMUDR_LK (0x2) /* lock page */
#define MCF_MMU_MMUDR_X (0x4) /* allow code execution in memory page */
#define MCF_MMU_MMUDR_W (0x8) /* allow write to memory page */
#define MCF_MMU_MMUDR_R (0x10) /* allow read from memory page */
#define MCF_MMU_MMUDR_SP (0x20) /* supervisor protect memory page */
#define MCF_MMU_MMUDR_CM(x) (((x) & 0x3) << 0x6) /* cache mode */
#define MCF_MMU_MMUDR_SZ(x) (((x) & 0x3) << 0x8) /* page size */
#define MCF_MMU_MMUDR_PA(x) (((x) & 0x3FFFFF) << 0xA) /* page physical address */
#define std_mmutr (MCF_MMU_MMUTR_SG | MCF_MMU_MMUTR_V)
#define writethrough_mmudr (MCF_MMU_MMUDR_SZ(00) | MCF_MMU_MMUDR_CM(00) | MCF_MMU_MMUDR_R | MCF_MMU_MMUDR_W | MCF_MMU_MMUDR_X)
#define copyback_mmudr (MCF_MMU_MMUDR_SZ(00) | MCF_MMU_MMUDR_CM(01) | MCF_MMU_MMUDR_R | MCF_MMU_MMUDR_W | MCF_MMU_MMUDR_X)
/*
*
* General Purpose Timers (GPT)
*
* macros
*/
/* Register read/write macros */
#define MCF_GPT0_GMS __MBAR+0x800
/*
*
* Slice Timers (SLT)
*
*/
#define MCF_SLT0_SCNT __MBAR+0x908
/**********************************************************/
// macros
/**********************************************************/
.altmacro
.macro irq vector,int_mask,clr_int
//move.w #0x2700,sr // disable interrupt
move.w #0x2700,sr // disable interrupt
subq.l #8,sp
movem.l d0/a5,(sp) // save registers
@@ -207,25 +152,6 @@
rts
.endm
/*
* FIXME: this is a GNU gas kludge. Ugly, but I just can't come up with any smarter solution
*
* GNU as does not support multi-character constants. At least I don't know of any way it would.
* The following might look more than strange, but I considered the statement
*
* mchar move.l, 'T,'E,'S,'T,-(SP)
*
* somewhat more readable than
*
* move.l #1413829460,-(SP)
*
* If anybody knows of any better way on how to do this - please do!
*
*/
.macro mchar st,a,b,c,d,tgt
\st #\a << 24|\b<<16|\c<<8|\d,\tgt
.endm
.text
_vec_init:
move.l a2,-(sp) // Backup registers
@@ -310,12 +236,12 @@ init_vec_loop:
*/
vector_table_start:
std_exc_vec:
//move.w #0x2700,sr // disable interrupt
move.w #0x2700,sr // disable interrupt
subq.l #8,sp
movem.l d0/a5,(sp) // save registers
move.w 8(sp),d0 // fetch vector
and.l #0x3fc,d0 // mask out vector number
#define DBG_EXC
#ifdef DBG_EXC
// printout vector number of exception
@@ -323,16 +249,17 @@ std_exc_vec:
movem.l d0-d1/a0-a1,(sp) // save gcc scratch registers
lsr.l #2,d0 // shift vector number in place
cmp.l #33,d0
beq noprint
cmp.l #34,d0
cmp.l #33,d0 // do not debug-print various traps
beq noprint // this would slow down interrupt
cmp.l #34,d0 // processing enormously
beq noprint
cmp.l #45,d0
beq noprint
cmp.l #46,d0
beq noprint
move.l 4 * 4 + 8 + 4(sp),-(sp) // pc at exception
move.l d0,-(sp) // provide it to xprintf()
move.l d0,-(sp) // vector number
pea exception_text
jsr _xprintf // call xprintf()
add.l #3*4,sp // adjust stack
@@ -349,14 +276,15 @@ noprint:
move.l 4(sp),a5 // restore a5
move.l d0,4(sp) // store exception routine address
//move.w 10(sp),d0 // restore original SR
//bset #13,d0 // set supervisor bit
//move.w d0,sr //
// FIXME: what does this do and why?
move.w 10(sp),d0 // restore original SR
bset #13,d0 // set supervisor bit
move.w d0,sr //
move.l (sp)+,d0 // restore d0
rts // jump to exception routine
exception_text:
.ascii "DEBUG: EXCEPTION %d caught at %p"
.ascii "DEBUG: EXCEPTION 0x%x caught at %p"
.byte 13, 10, 0
.align 4
@@ -369,54 +297,28 @@ reset_vector:
access:
move.w #0x2700,sr // disable interrupts
move.l d0,-(sp) // ++ vr
move.w 4(sp),d0 // get format_status word from stack
andi.l #0x0c03,d0 // mask out fault status bits
cmpi.l #0x0401,d0 // TLB miss on opword of instruction fetch?
beq access_mmu // yes
cmpi.l #0x0402,d0 // TLB miss on extension word of instruction fetch?
beq access_mmu // yes
cmpi.l #0x0802,d0 // TLB miss on data write?
beq access_mmu // yes
cmpi.l #0x0c02,d0 // TLB miss on data read, or read-modify-write?
beq access_mmu // yes
link a6,#-4 * 4 // make room for gcc scratch registers
movem.l d0-d1/a0-a1,(sp) // and save them
bra bus_error // everything else is a classic bus error
move.l 4(a6),-(sp) // get format_status longword
move.l 8(a6),-(sp) // PC at exception
move.l MCF_MMU_MMUAR,-(sp) // fault address
move.l MCF_MMU_MMUSR,-(sp) // MMU status register
move.w #0x2300,sr // can lower interrupt mask once MMU status is safe
jsr _mmutr_miss
lea 4 * 4(sp),sp // adjust stack
access_mmu:
move.l MCF_MMU_MMUSR,d0 // did the last fault hit in TLB?
btst #1,d0 // yes, it did. So we already mapped that page
bne bus_error // and this must be a real bus error
btst #5,d0 // supervisor protection fault?
bne bus_error
btst #4,d0 // read access fault?
bne bus_error
btst #3,d0 // write access fault?
tst.l d0 // exception handler signals bus error
bne bus_error
move.l MCF_MMU_MMUAR,d0
cmp.l #__FASTRAM_END,d0 // above max User RAM area?
bge bus_error // -> bus error
lea -3 * 4(sp),sp // save gcc scratch registers
movem.l d1/a0-a1,(sp)
move.l 3 * 4 + 4 (sp),-(sp) // push exception stack frame
move.l 5 * 4 + 4 (sp),-(sp) // push program counter at exception
move.l d0,-(sp) // fault address
jsr _mmutr_miss // else we have an MMU TLB miss
add.l #3 * 4,sp // adjust stack
movem.l (sp),d1/a0-a1 // restore gcc scratch registers
lea 3 * 4(sp),sp
move.l (sp)+,d0 // restore register
movem.l (sp),d0-d1/a0-a1 // restore stack
unlk a6
rte
bus_error:
move.l (sp)+,d0 // restore register
movem.l (sp),d0-d1/a0-a1
unlk a6
bra std_exc_vec
zero_divide:
@@ -543,79 +445,6 @@ irq6: // MFP interrupt from FPGA
lea MCF_EPORT_EPFR,a5 // clear int6 from edge port
bset #6,(a5)
// screen adr change timed out?
move.l _video_sbt,d0
beq irq6_non_sca // nothing to do if 0
sub.l #0x70000000,d0 // subtract 14 seconds
lea MCF_SLT0_SCNT,a5
cmp.l (a5),d0 // time reached?
ble irq6_non_sca // not yet
lea -7 * 4(sp),sp // save more registers
movem.l d0-d4/a0-a1,(sp) //
clr.l d3 // start with 0
// jsr _flush_and_invalidate_caches FIXME: why should we need that?
// search for the entry
irq6_next_sca:
move.l d3,d0
move.l d0,MCF_MMU_MMUAR // address
move.l #0x106,d4
move.l d4,MCF_MMU_MMUOR // search ->
nop
move.l MCF_MMU_MMUOR,d4
clr.w d4
swap d4
move.l d4,MCF_MMU_MMUAR
mvz.w #0x10e,d4
move.l d4,MCF_MMU_MMUOR // fetch entries from the MMU
nop
move.l MCF_MMU_MMUTR,d4 // fetch ID
lsr.l #2,d4 // bits 9 to 2
cmp.w #sca_page_ID,d4 // is this the screen change ID?
bne irq6_sca_pn // no -> page is not a screen area, next
// modify the entry
add.l #std_mmutr,d0
move.l d3,d1 // page 0?
beq irq6_sca_pn0 // yes ->
add.l #copyback_mmudr,d1 // otherwise make the page copyback
bra irq6_sca_pn1c
irq6_sca_pn0:
add.l #writethrough_mmudr/*|MCF_MMU_MMUDR_LK*/,d1 // page wt and locked
irq6_sca_pn1c:
mvz.w #0x10b,d2 // MMU update
move.l d0,MCF_MMU_MMUTR
move.l d1,MCF_MMU_MMUDR
move.l d2,MCF_MMU_MMUOR // set TLB data only
nop
// page copy
move.l d3,a0
add.l #0x60000000,a0
move.l d3,a1
move.l #0x10000,d4 // one whole page (1 MB)
irq6_vcd0_loop:
move.l (a0)+,(a1)+ // page copy
move.l (a0)+,(a1)+
move.l (a0)+,(a1)+
move.l (a0)+,(a1)+
subq.l #1,d4
bne irq6_vcd0_loop
irq6_sca_pn:
add.l #0x00100000,d3 // next
cmp.l #0x00d00000,d3 // end?
blt irq6_next_sca // no ->
move.l #0x2000,d0
move.l d0,_video_tlb // restore the initial state
clr.l _video_sbt // clear the time
movem.l (sp),d0-d4/a0-a1 // restore registers
lea 7 * 4(sp),sp
irq6_non_sca:
// test for ACSI DMA -----------------------------------------------------------------
lea 0xfffffa0b,a5
@@ -666,10 +495,6 @@ acsi_dma: // atari dma
move.l d1,-(sp)
lea MCF_PSC0_PSCTB_8BIT,a1 // ++ vr
mchar move.l,'D,'M,'A,'\ ,(a1)
//move.l #"DMA ",(a1)
mchar move.l,'I,'N,'T,'!,(a1)
// move.l #'INT!',(a1)
lea 0xf0020110,a5 // FIFO data
acsi_dma_start:
@@ -752,7 +577,10 @@ irq7:
*
* GPT0 is used as input trigger. It is connected to the TIN0 signal of
* the FPGA and triggers every time vbasehi is written to, i.e.
* when the video base address gets changed
* when the video base address gets changed. In the "MiNT-compatible MMU" version this
* currently does nothing, but
* TODO: could be used for e.g. activating copyback cache mode on those ST-RAM pages
* that aren't video pages.
*/
@@ -763,11 +591,13 @@ handler_gpt0:
link a6,#-4 * 4 // make room for
movem.l d0-d1/a0-a1,(sp) // gcc scratch registers and save them,
// other registers will be handled by gcc itself
move.w 4(a6),d0 // fetch vector number from stack
move.l d0,-(sp) // push it
jsr _gpt0_interrupt_handler // call C handler
addq.l #4,sp // adjust stack
movem.l (sp),d0-d1/a0-a1 // restore registers
unlk a6
rte
#endif /* MACHINE_FIREBEE */
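
The access-error classification performed above on the stacked format/status word can be restated in C roughly as follows (illustrative sketch only; the mask and fault codes are the ones used by the handler and by mmutr_miss() below, the helper name is made up):

#include <stdint.h>

/* sketch: 1 if the access error is a TLB miss that mmutr_miss() should handle, 0 for a real bus error */
static int is_tlb_miss(uint32_t format_status)
{
    switch (format_status & 0x0c030000)     /* isolate the fault status bits */
    {
        case 0x04010000:                    /* TLB miss on opword of instruction fetch */
        case 0x04020000:                    /* TLB miss on extension word of instruction fetch */
        case 0x08020000:                    /* TLB miss on data write */
        case 0x0c020000:                    /* TLB miss on data read or read-modify-write */
            return 1;
        default:
            return 0;                       /* everything else is a classic bus error */
    }
}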

View File

@@ -430,110 +430,8 @@ bool irq6_interrupt_handler(uint32_t sf1, uint32_t sf2)
*/
void gpt0_interrupt_handler(void)
{
uint32_t video_address;
uint32_t video_end_address;
int page_number;
bool already_set;
extern uint32_t _STRAM_END;
dbg("screen base = 0x%x\r\n", vbasehi);
if (vbasehi < 2) /* screen base lower than 0x20000? */
{
goto rearm_trigger; /* do nothing */
}
else if (vbasehi >= 0xd0) /* higher than 0xd00000 (normal Falcon address)? */
{
video_sbt = MCF_SLT0_SCNT; /* FIXME: no idea why we need to save the time here */
}
video_address = (vbasehi << 16) | (vbasemid << 8) | vbaselow;
page_number = video_address >> 20; /* calculate a page number */
already_set = (video_tlb & (1 << page_number)); /* already in bitset? */
video_tlb |= page_number; /* set it */
if (! already_set) /* newly set page, need to copy contents */
{
flush_and_invalidate_caches();
dma_memcpy((uint8_t *) video_address + 0x60000000, (uint8_t *) video_address, 0x100000);
/*
* create an MMU TLB entry for the new video page
*/
/*
* first search for an existing entry with our address. If none is found,
* the MMU will propose a new one
*/
MCF_MMU_MMUAR = video_address;
MCF_MMU_MMUOR = 0x106;
NOP();
/*
* take this MMU TLB entry and set it to our video address and page mapping
*/
MCF_MMU_MMUAR = (MCF_MMU_MMUOR >> 16) & 0xffff; /* set TLB id */
MCF_MMU_MMUTR = video_address |
MCF_MMU_MMUTR_ID(sca_page_ID) | /* set video page ID */
MCF_MMU_MMUTR_SG | /* shared global */
MCF_MMU_MMUTR_V; /* valid */
MCF_MMU_MMUDR = (video_address + 0x60000000) | /* physical address */
MCF_MMU_MMUDR_SZ(0) | /* 1 MB page size */
MCF_MMU_MMUDR_CM(0) | /* writethrough */
MCF_MMU_MMUDR_R | /* readable */
MCF_MMU_MMUDR_W | /* writeable */
MCF_MMU_MMUDR_X; /* executable */
MCF_MMU_MMUOR = 0x10b; /* update TLB entry */
}
/*
* Calculate the effective screen memory size to see if we need to map another page
* in case the new screen spans more than one single page
*/
video_end_address = video_address + (vde - vdb) * vwrap;
if (video_end_address < _STRAM_END)
{
page_number = video_end_address >> 20; /* calculate a page number */
already_set = (video_tlb & (1 << page_number)); /* already in bitset? */
video_tlb |= page_number; /* set it */
if (! already_set) /* newly set page, need to copy contents */
{
flush_and_invalidate_caches();
dma_memcpy((uint8_t *) video_end_address + 0x60000000, (uint8_t *) video_end_address, 0x100000);
/*
* create an MMU TLB entry for the new video page
*/
/*
* first search for an existing entry with our address. If none is found,
* the MMU will propose a new one
*/
MCF_MMU_MMUAR = video_end_address;
MCF_MMU_MMUOR = 0x106;
NOP();
/*
* take this MMU TLB entry and set it to our video address and page mapping
*/
MCF_MMU_MMUAR = (MCF_MMU_MMUOR >> 16) & 0xffff; /* set TLB id */
MCF_MMU_MMUTR = video_end_address |
MCF_MMU_MMUTR_ID(sca_page_ID) | /* set video page ID */
MCF_MMU_MMUTR_SG | /* shared global */
MCF_MMU_MMUTR_V; /* valid */
MCF_MMU_MMUDR = (video_end_address + 0x60000000) | /* physical address */
MCF_MMU_MMUDR_SZ(0) | /* 1 MB page size */
MCF_MMU_MMUDR_CM(0) | /* writethrough */
MCF_MMU_MMUDR_R | /* readable */
MCF_MMU_MMUDR_W | /* writeable */
MCF_MMU_MMUDR_X; /* executable */
MCF_MMU_MMUOR = 0x10b; /* update TLB entry */
}
}
rearm_trigger:
MCF_GPT0_GMS &= ~1; /* rearm trigger */
NOP();
MCF_GPT0_GMS |= 1;
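
For orientation, the screen-address arithmetic used by the removed handler (and by the assembly irq6 path) works out as in this standalone sketch (hypothetical register values):

#include <stdint.h>

/* sketch of the video address / page bookkeeping above (values are examples only) */
uint32_t video_page_example(void)
{
    uint8_t vbasehi = 0xd0, vbasemid = 0x00, vbaselow = 0x00;   /* assume the normal Falcon screen base */

    uint32_t video_address = ((uint32_t) vbasehi << 16) |
                             ((uint32_t) vbasemid << 8) |
                             vbaselow;                          /* 0x00d00000 */
    uint32_t page_number = video_address >> 20;                 /* 1 MB pages -> page 13 */

    /* (1 << 13) == 0x2000, which is exactly the reset value written to video_tlb elsewhere */
    return 1 << page_number;
}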

sys/mmu.c
View File

@@ -1,5 +1,6 @@
#include "mmu.h"
#include "acia.h"
#include "exceptions.h"
/*
* mmu.c
@@ -62,12 +63,14 @@
#error "unknown machine!"
#endif /* MACHINE_FIREBEE */
// #define DEBUG_MMU
#ifdef DEBUG_MMU
//#define DBG_MMU
#ifdef DBG_MMU
#define dbg(format, arg...) do { xprintf("DEBUG (%s()): " format, __FUNCTION__, ##arg);} while(0)
#else
#define dbg(format, arg...) do {;} while (0)
#endif /* DEBUG_MMU */
#endif /* DBG_MMU */
#define err(format, arg...) do { xprintf("ERROR (%s()): " format, __FUNCTION__, ##arg); xprintf("system halted\r\n"); } while(0); while(1)
/*
* set ASID register
@@ -82,7 +85,7 @@ inline uint32_t set_asid(uint32_t value)
"movec %[value],ASID\n\t"
: /* no output */
: [value] "r" (value)
:
: "memory"
);
rt_asid = value;
@@ -104,7 +107,7 @@ inline uint32_t set_acr0(uint32_t value)
"movec %[value],ACR0\n\t"
: /* no output */
: [value] "r" (value)
:
: "memory"
);
rt_acr0 = value;
@@ -124,7 +127,7 @@ inline uint32_t set_acr1(uint32_t value)
"movec %[value],ACR1\n\t"
: /* no output */
: [value] "r" (value)
:
: "memory"
);
rt_acr1 = value;
@@ -145,7 +148,7 @@ inline uint32_t set_acr2(uint32_t value)
"movec %[value],ACR2\n\t"
: /* no output */
: [value] "r" (value)
:
: "memory"
);
rt_acr2 = value;
@@ -165,7 +168,7 @@ inline uint32_t set_acr3(uint32_t value)
"movec %[value],ACR3\n\t"
: /* no output */
: [value] "r" (value)
:
: "memory"
);
rt_acr3 = value;
@@ -181,7 +184,7 @@ inline uint32_t set_mmubar(uint32_t value)
"movec %[value],MMUBAR\n\t"
: /* no output */
: [value] "r" (value)
: /* no clobber */
: "memory"
);
rt_mmubar = value;
NOP();
@@ -189,6 +192,218 @@ inline uint32_t set_mmubar(uint32_t value)
return ret;
}
/*
* translation table for virtual address ranges. Holds the physical_offset (which must be added to a virtual
* address to get its physical counterpart) for memory ranges.
*/
struct virt_to_phys
{
uint32_t start_address;
uint32_t length;
uint32_t physical_offset;
};
static struct virt_to_phys translation[] =
{
/* virtual , length , offset */
{ 0x00000000, 0x00e00000, 0x60000000 }, /* map first 14 MB to the first 14 MB of video RAM */
{ 0x00e00000, 0x00100000, 0x00000000 }, /* map TOS to SDRAM */
{ 0x00f00000, 0x00100000, 0xff000000 }, /* map Falcon I/O area to FPGA */
{ 0x01000000, 0x1f000000, 0x00000000 }, /* map rest of ram virt = phys */
};
static int num_translations = sizeof(translation) / sizeof(struct virt_to_phys);
static inline uint32_t lookup_phys(uint32_t virt)
{
int i;
for (i = 0; i < num_translations; i++)
{
if (virt >= translation[i].start_address && virt < translation[i].start_address + translation[i].length)
{
return virt + translation[i].physical_offset;
}
}
err("virtual address 0x%lx not found in translation table!\r\n", virt);
return -1;
}
struct page_descriptor
{
uint8_t cache_mode : 2;
uint8_t supervisor_protect : 1;
uint8_t read : 1;
uint8_t write : 1;
uint8_t execute : 1;
uint8_t global : 1;
uint8_t locked : 1;
};
static struct page_descriptor pages[65536]; /* covers 512 MB RAM (65536 * 8 KiB pages) */
/*
* map a page of memory using virt addresses with the Coldfire MMU.
*
* Theory of operation: the Coldfire MMU in the Firebee has 64 TLB entries, 32 for data (DTLB), 32 for
* instructions (ITLB). Mappings can either be done locked (normal MMU TLB misses will not consider them
* for replacement) or unlocked (mappings will reallocate using an LRU scheme when the MMU runs out of
* TLB entries). For proper operation, the MMU needs at least two ITLBs and/or four free/allocatable DTLBs
* per instruction as a minimum, more for performance. Thus locked pages (that can't be touched by the
* LRU algorithm) should be used sparingly.
*
*
*/
int mmu_map_8k_page(uint32_t virt, uint8_t asid)
{
const uint32_t size_mask = 0xffffe000; /* 8k pagesize */
int page_index = (virt & size_mask) / DEFAULT_PAGE_SIZE; /* index into page_descriptor array */
struct page_descriptor *page = &pages[page_index]; /* attributes of page to map */
uint32_t phys = lookup_phys(virt); /* virtual to physical translation of page */
if (phys == -1)
return 0;
#ifdef DBG_MMU
register int sp asm("sp");
dbg("page_descriptor: 0x%02x, ssp = 0x%08x\r\n", * (uint8_t *) page, sp);
#endif /* DBG_MMU */
/*
* add page to TLB
*/
MCF_MMU_MMUTR = (virt & 0xfffffc00) | /* virtual address */
MCF_MMU_MMUTR_ID(asid) | /* address space id (ASID) */
(page->global ? MCF_MMU_MMUTR_SG : 0) | /* shared global */
MCF_MMU_MMUTR_V; /* valid */
MCF_MMU_MMUDR = (phys & 0xfffffc00) | /* physical address */
MCF_MMU_MMUDR_SZ(MMU_PAGE_SIZE_8K) | /* page size */
MCF_MMU_MMUDR_CM(page->cache_mode) | /* cache mode */
(page->supervisor_protect ? MCF_MMU_MMUDR_SP : 0) | /* supervisor protect */
(page->read ? MCF_MMU_MMUDR_R : 0) | /* read access enable */
(page->write ? MCF_MMU_MMUDR_W : 0) | /* write access enable */
(page->execute ? MCF_MMU_MMUDR_X : 0) | /* execute access enable */
(page->locked ? MCF_MMU_MMUDR_LK : 0);
MCF_MMU_MMUOR = MCF_MMU_MMUOR_ITLB | /* instruction */
MCF_MMU_MMUOR_ACC | /* access TLB */
MCF_MMU_MMUOR_UAA; /* update allocation address field */
dbg("ITLB: MCF_MMU_MMUOR = %08x\r\n", MCF_MMU_MMUOR);
MCF_MMU_MMUOR = MCF_MMU_MMUOR_ACC | /* access TLB, data */
MCF_MMU_MMUOR_UAA; /* update allocation address field */
dbg("mapped virt=0x%08x to phys=0x%08x\r\n", virt & size_mask, phys & size_mask);
dbg("DTLB: MCF_MMU_MMUOR = %08x\r\n", MCF_MMU_MMUOR);
return 1;
}
int mmu_map_8k_instruction_page(uint32_t virt, uint8_t asid)
{
const uint32_t size_mask = ~ (DEFAULT_PAGE_SIZE - 1); /* 8k pagesize */
int page_index = (virt & size_mask) / DEFAULT_PAGE_SIZE; /* index into page_descriptor array */
struct page_descriptor *page = &pages[page_index]; /* attributes of page to map */
int ipl;
uint32_t phys = lookup_phys(virt); /* virtual to physical translation of page */
if (phys == -1)
return 0;
#ifdef DBG_MMU
register int sp asm("sp");
dbg("page_descriptor: 0x%02x, ssp = 0x%08x\r\n", * (uint8_t *) page, sp);
#endif /* DBG_MMU */
/*
* add page to TLB
*/
ipl = set_ipl(7); /* do not disturb */
MCF_MMU_MMUAR = (virt & size_mask);
MCF_MMU_MMUTR = (virt & size_mask) | /* virtual address */
MCF_MMU_MMUTR_ID(asid) | /* address space id (ASID) */
(page->global ? MCF_MMU_MMUTR_SG : 0) | /* shared global */
MCF_MMU_MMUTR_V; /* valid */
__asm__ __volatile("" : : : "memory"); /* MMU commands must be exactly in sequence */
MCF_MMU_MMUDR = (phys & size_mask) | /* physical address */
MCF_MMU_MMUDR_SZ(MMU_PAGE_SIZE_8K) | /* page size */
MCF_MMU_MMUDR_CM(page->cache_mode) | /* cache mode */
(page->supervisor_protect ? MCF_MMU_MMUDR_SP : 0) | /* supervisor protect */
(page->read ? MCF_MMU_MMUDR_R : 0) | /* read access enable */
(page->write ? MCF_MMU_MMUDR_W : 0) | /* write access enable */
(page->execute ? MCF_MMU_MMUDR_X : 0) | /* execute access enable */
(page->locked ? MCF_MMU_MMUDR_LK : 0);
__asm__ __volatile("" : : : "memory"); /* MMU commands must be exactly in sequence */
MCF_MMU_MMUOR = MCF_MMU_MMUOR_ITLB | /* instruction */
MCF_MMU_MMUOR_ACC | /* access TLB */
MCF_MMU_MMUOR_UAA; /* update allocation address field */
__asm__ __volatile("" : : : "memory"); /* MMU commands must be exactly in sequence */
set_ipl(ipl);
dbg("mapped virt=0x%08x to phys=0x%08x\r\n", virt & size_mask, phys & size_mask);
dbg("ITLB: MCF_MMU_MMUOR = %08x\r\n", MCF_MMU_MMUOR);
return 1;
}
int mmu_map_8k_data_page(uint32_t virt, uint8_t asid)
{
uint16_t ipl;
const uint32_t size_mask = ~ (DEFAULT_PAGE_SIZE - 1); /* 8k pagesize */
int page_index = (virt & size_mask) / DEFAULT_PAGE_SIZE; /* index into page_descriptor array */
struct page_descriptor *page = &pages[page_index]; /* attributes of page to map */
uint32_t phys = lookup_phys(virt); /* virtual to physical translation of page */
if (phys == -1)
return 0;
#ifdef DBG_MMU
register int sp asm("sp");
dbg("page_descriptor: 0x%02x, ssp = 0x%08x\r\n", * (uint8_t *) page, sp);
#endif /* DBG_MMU */
/*
* add page to TLB
*/
ipl = set_ipl(7); /* do not disturb */
MCF_MMU_MMUTR = (virt & size_mask) | /* virtual address */
MCF_MMU_MMUTR_ID(asid) | /* address space id (ASID) */
(page->global ? MCF_MMU_MMUTR_SG : 0) | /* shared global */
MCF_MMU_MMUTR_V; /* valid */
MCF_MMU_MMUDR = (phys & size_mask) | /* physical address */
MCF_MMU_MMUDR_SZ(MMU_PAGE_SIZE_8K) | /* page size */
MCF_MMU_MMUDR_CM(page->cache_mode) | /* cache mode */
(page->supervisor_protect ? MCF_MMU_MMUDR_SP : 0) | /* supervisor protect */
(page->read ? MCF_MMU_MMUDR_R : 0) | /* read access enable */
(page->write ? MCF_MMU_MMUDR_W : 0) | /* write access enable */
(page->execute ? MCF_MMU_MMUDR_X : 0) | /* execute access enable */
(page->locked ? MCF_MMU_MMUDR_LK : 0);
MCF_MMU_MMUOR = MCF_MMU_MMUOR_ACC | /* access TLB, data */
MCF_MMU_MMUOR_UAA; /* update allocation address field */
set_ipl(ipl);
dbg("mapped virt=0x%08x to phys=0x%08x\r\n", virt & size_mask, phys & size_mask);
dbg("DTLB: MCF_MMU_MMUOR = %08x\r\n", MCF_MMU_MMUOR);
return 1;
}
/*
* map a page of memory using virt and phys as addresses with the Coldfire MMU.
*
@@ -201,48 +416,51 @@ inline uint32_t set_mmubar(uint32_t value)
*
*
*/
int mmu_map_page(uint32_t virt, uint32_t phys, enum mmu_page_size sz, const struct mmu_map_flags *flags)
int mmu_map_page(uint32_t virt, uint32_t phys, enum mmu_page_size sz, uint8_t page_id, const struct page_descriptor *flags)
{
int size_mask;
int ipl;
switch (sz)
{
case MMU_PAGE_SIZE_1M:
size_mask = 0xfff00000;
size_mask = ~ (0x00100000 - 1);
break;
case MMU_PAGE_SIZE_8K:
size_mask = 0xffffe000;
size_mask = ~ (0x2000 - 1);
break;
case MMU_PAGE_SIZE_4K:
size_mask = 0xfffff000;
size_mask = ~ (0x1000 - 1);
break;
case MMU_PAGE_SIZE_1K:
size_mask = 0xfffff800;
size_mask = ~ (0x400 - 1);
break;
default:
dbg("illegal map size %d\r\n", sz);
return 0;
err("illegal map size %d\r\n", sz);
}
/*
* add page to TLB
*/
MCF_MMU_MMUTR = ((int) virt & size_mask) | /* virtual address */
MCF_MMU_MMUTR_ID(flags->page_id) | /* address space id (ASID) */
MCF_MMU_MMUTR_SG | /* shared global */
ipl = set_ipl(7);
MCF_MMU_MMUTR = ((uint32_t) virt & size_mask) | /* virtual address */
MCF_MMU_MMUTR_ID(page_id) | /* address space id (ASID) */
(flags->global ? MCF_MMU_MMUTR_SG : 0) | /* shared global */
MCF_MMU_MMUTR_V; /* valid */
NOP();
MCF_MMU_MMUDR = ((int) phys & size_mask) | /* physical address */
MCF_MMU_MMUDR = ((uint32_t) phys & size_mask) | /* physical address */
MCF_MMU_MMUDR_SZ(sz) | /* page size */
MCF_MMU_MMUDR_CM(flags->cache_mode) |
(flags->access & ACCESS_READ ? MCF_MMU_MMUDR_R : 0) | /* read access enable */
(flags->access & ACCESS_WRITE ? MCF_MMU_MMUDR_W : 0) | /* write access enable */
(flags->access & ACCESS_EXECUTE ? MCF_MMU_MMUDR_X : 0) | /* execute access enable */
(flags->read ? MCF_MMU_MMUDR_R : 0) | /* read access enable */
(flags->write ? MCF_MMU_MMUDR_W : 0) | /* write access enable */
(flags->execute ? MCF_MMU_MMUDR_X : 0) | /* execute access enable */
(flags->locked ? MCF_MMU_MMUDR_LK : 0);
NOP();
@@ -253,6 +471,9 @@ int mmu_map_page(uint32_t virt, uint32_t phys, enum mmu_page_size sz, const stru
MCF_MMU_MMUOR = MCF_MMU_MMUOR_ITLB | /* instruction */
MCF_MMU_MMUOR_ACC | /* access TLB */
MCF_MMU_MMUOR_UAA; /* update allocation address field */
set_ipl(ipl);
dbg("mapped virt=0x%08x to phys=0x%08x\r\n", virt, phys);
return 1;
@@ -260,19 +481,74 @@ int mmu_map_page(uint32_t virt, uint32_t phys, enum mmu_page_size sz, const stru
void mmu_init(void)
{
struct mmu_map_flags flags;
extern uint8_t _MMUBAR[];
uint32_t MMUBAR = (uint32_t) &_MMUBAR[0];
extern uint8_t _TOS[];
uint32_t TOS = (uint32_t) &_TOS[0];
struct page_descriptor flags;
int i;
/*
* clear all MMU TLB entries first
*/
MCF_MMU_MMUOR = MCF_MMU_MMUOR_CA; /* clears _all_ TLBs (including locked ones) */
NOP();
/*
* preliminary initialization of the page descriptor 0 (root) table
*/
for (i = 0; i < sizeof(pages) / sizeof(struct page_descriptor); i++)
{
uint32_t addr = i * DEFAULT_PAGE_SIZE;
if (addr >= 0x00f00000 && addr < 0x00ffffff)
{
pages[i].cache_mode = CACHE_NOCACHE_PRECISE;
pages[i].execute = 0;
pages[i].read = 1;
pages[i].write = 1;
pages[i].execute = 0;
pages[i].global = 1;
pages[i].supervisor_protect = 1;
}
else if (addr >= 0x0 && addr < 0x00e00000) /* ST-RAM, potential video memory */
{
pages[i].cache_mode = CACHE_WRITETHROUGH;
pages[i].execute = 1;
pages[i].supervisor_protect = 0;
pages[i].read = 1;
pages[i].write = 1;
pages[i].execute = 1;
pages[i].global = 1;
}
else if (addr >= 0x00e00000 && addr < 0x00f00000) /* EmuTOS */
{
pages[i].cache_mode = CACHE_COPYBACK;
pages[i].execute = 1;
pages[i].supervisor_protect = 1;
pages[i].read = 1;
pages[i].write = 0;
pages[i].execute = 1;
pages[i].global = 1;
}
else
{
pages[i].cache_mode = CACHE_COPYBACK;
pages[i].execute = 1;
pages[i].read = 1;
pages[i].write = 1;
pages[i].supervisor_protect = 0;
pages[i].global = 1;
}
pages[i].locked = 0; /* not locked */
pages[0].supervisor_protect = 0; /* protect system vectors */
}
set_asid(0); /* do not use address extension (ASID provides virtual 48 bit addresses) yet */
set_asid(0); /* do not use address extension (ASID provides virtual 48 bit addresses) */
/* set data access attributes in ACR0 and ACR1 */
set_acr0(ACR_W(0) | /* read and write accesses permitted */
ACR_SP(0) | /* supervisor and user mode access permitted */
ACR_CM(ACR_CM_CACHE_INH_PRECISE) | /* cache inhibit, precise */
ACR_CM(ACR_CM_CACHE_INH_PRECISE) | /* cache inhibit, precise (i/o area!) */
ACR_AMM(0) | /* control region > 16 MB */
ACR_S(ACR_S_ALL) | /* match addresses in user and supervisor mode */
ACR_E(1) | /* enable ACR */
@@ -284,43 +560,49 @@ void mmu_init(void)
ACR_BA(0x80000000));
#elif defined(MACHINE_M54455)
ACR_ADMSK(0x7f) |
ACR_BA(0x80000000)); /* FIXME: not determined yet */
ACR_BA(0x80000000)); /* FIXME: not determined yet for this machine */
#else
#error unknown machine!
#endif /* MACHINE_FIREBEE */
// set_acr1(0x601fc000);
set_acr1(ACR_W(0) |
ACR_SP(0) |
ACR_CM(0) |
#if defined(MACHINE_FIREBEE)
ACR_CM(ACR_CM_CACHEABLE_WT) | /* video RAM on the Firebee */
ACR_CM(ACR_CM_CACHE_INH_PRECISE) | /* ST RAM on the Firebee */
#elif defined(MACHINE_M5484LITE)
ACR_CM(ACR_CM_CACHE_INH_PRECISE) | /* Compact Flash on the M548xLITE */
#elif defined(MACHINE_M54455)
ACR_CM(ACR_CM_CACHE_INH_PRECISE) | /* FIXME: not determined yet */
ACR_CM(ACR_CM_CACHE_INH_PRECISE) | /* FIXME: not determined yet for this machine */
#else
#error unknown machine!
#endif /* MACHINE_FIREBEE */
ACR_AMM(0) |
ACR_S(ACR_S_ALL) |
ACR_E(1) |
ACR_ADMSK(0x1f) |
ACR_BA(0x60000000));
ACR_ADMSK(0x7f) |
ACR_BA(0x00100000));
#ifdef _NOT_USED_
/* set instruction access attributes in ACR2 and ACR3 */
//set_acr2(0xe007c400);
//set_acr2(0xe007c400); /* flash area */
set_acr2(ACR_W(0) |
ACR_SP(0) |
ACR_CM(0) |
ACR_CM(ACR_CM_CACHEABLE_WT) |
ACR_CM(ACR_CM_CACHE_INH_PRECISE) |
ACR_AMM(1) |
ACR_S(ACR_S_ALL) |
ACR_E(1) |
ACR_ADMSK(0x7) |
ACR_BA(0xe0000000));
#endif /* _NOT_USED_ */
set_acr1(0x0);
set_acr2(0x0);
/* disable ACR3 */
set_acr3(0x0);
@@ -331,103 +613,106 @@ void mmu_init(void)
/* create locked TLB entries */
flags.cache_mode = CACHE_COPYBACK;
flags.protection = SV_USER;
flags.page_id = 0;
flags.access = ACCESS_READ | ACCESS_WRITE | ACCESS_EXECUTE;
flags.locked = true;
/* 0x0000_0000 - 0x000F_FFFF (first MB of physical memory) locked virt = phys */
mmu_map_page(0x0, 0x0, MMU_PAGE_SIZE_1M, &flags);
#if defined(MACHINE_FIREBEE)
/*
* 0x00d0'0000 - 0x00df'ffff (last megabyte of ST RAM = Falcon video memory) locked ID = 6
* mapped to physical address 0x60d0'0000 (FPGA video memory)
* video RAM: read write execute normal write true
*/
flags.cache_mode = CACHE_WRITETHROUGH;
flags.protection = SV_USER;
flags.page_id = SCA_PAGE_ID;
flags.access = ACCESS_READ | ACCESS_WRITE | ACCESS_EXECUTE;
flags.locked = true;
mmu_map_page(0x00d00000, 0x60d00000, MMU_PAGE_SIZE_1M, &flags);
video_tlb = 0x2000; /* set page as video page */
video_sbt = 0x0; /* clear time */
#endif /* MACHINE_FIREBEE */
/*
* Make the TOS (in SDRAM) read-only
* This maps virtual 0x00e0'0000 - 0x00ef'ffff to the same virtual address
*/
flags.cache_mode = CACHE_COPYBACK;
flags.page_id = 0;
flags.access = ACCESS_READ | ACCESS_EXECUTE;
mmu_map_page(TOS, TOS, MMU_PAGE_SIZE_1M, &flags);
#if defined(MACHINE_FIREBEE)
/*
* Map FireBee I/O area (0xfff0'0000 - 0xffff'ffff physical) to the Falcon-compatible I/O
* area (0x00f0'0000 - 0x00ff'ffff virtual) for the FireBee
*/
flags.cache_mode = CACHE_NOCACHE_PRECISE;
flags.access = ACCESS_WRITE | ACCESS_READ;
mmu_map_page(0x00f00000, 0xfff00000, MMU_PAGE_SIZE_1M, &flags);
#endif /* MACHINE_FIREBEE */
/*
* Map (locked) the second-to-last MB of physical SDRAM (this is where BaS .data and .bss reside) to the same
* virtual address. This is also used (completely) when BaS is in RAM
*/
flags.cache_mode = CACHE_COPYBACK;
flags.access = ACCESS_READ | ACCESS_WRITE | ACCESS_EXECUTE;
mmu_map_page(SDRAM_START + SDRAM_SIZE - 0X00200000, SDRAM_START + SDRAM_SIZE - 0X00200000, MMU_PAGE_SIZE_1M, &flags);
flags.cache_mode = CACHE_NOCACHE_PRECISE;
flags.read = 1;
flags.write = 1;
flags.execute = 1;
flags.supervisor_protect = 1; /* supervisor access only */
flags.locked = 1;
mmu_map_page(SDRAM_START + SDRAM_SIZE - 0x00200000, SDRAM_START + SDRAM_SIZE - 0x00200000, 0, MMU_PAGE_SIZE_1M, &flags);
/*
* map EmuTOS (locked for now)
*/
flags.read = 1;
flags.write = 1;
flags.execute = 1;
flags.locked = 1;
//mmu_map_page(0xe00000, 0xe00000, MMU_PAGE_SIZE_1M, 0, &flags);
/*
* Map (locked) the very last MB of physical SDRAM (this is where the driver buffers reside) to the same
* virtual address. Used uncached for drivers.
*/
flags.cache_mode = CACHE_NOCACHE_PRECISE;
flags.access = ACCESS_READ | ACCESS_WRITE;
flags.protection = SV_PROTECT;
mmu_map_page(SDRAM_START + SDRAM_SIZE - 0x00100000, SDRAM_START + SDRAM_SIZE - 0x00100000, MMU_PAGE_SIZE_1M, &flags);
flags.read = 1;
flags.write = 1;
flags.execute = 0;
flags.supervisor_protect = 1;
flags.locked = 1;
mmu_map_page(SDRAM_START + SDRAM_SIZE - 0x00100000, SDRAM_START + SDRAM_SIZE - 0x00100000, 0, MMU_PAGE_SIZE_1M, &flags);
}
static struct mmu_map_flags flags =
/*
* enable the MMU. The Coldfire MMU can be used in two different modes
* ... FIXME:
*/
void mmu_enable(void)
{
.cache_mode = CACHE_COPYBACK,
.protection = SV_USER,
.page_id = 0,
.access = ACCESS_READ | ACCESS_WRITE | ACCESS_EXECUTE,
.locked = false
};
MCF_MMU_MMUCR = MCF_MMU_MMUCR_EN; /* MMU on */
NOP(); /* force pipeline sync */
}
void mmutr_miss(uint32_t address, uint32_t pc, uint32_t format_status)
#ifdef DBG_MMU
void verify_mapping(uint32_t address)
{
dbg("MMU TLB MISS accessing 0x%08x\r\nFS = 0x%08x\r\nPC = 0x%08x\r\n", address, format_status, pc);
// flush_and_invalidate_caches();
/* retrieve mapped page from MMU and make sure everything is correct */
int ds;
switch (address)
ds = * (int *) address;
dbg("found 0x%08x at address\r\n", ds);
}
#endif /* DBG_MMU */
uint32_t mmutr_miss(uint32_t mmu_sr, uint32_t fault_address, uint32_t pc,
uint32_t format_status)
{
case keyctl:
case keybd:
/* do something to emulate the IKBD access */
dbg("IKBD access\r\n");
uint32_t fault = format_status & 0x0c030000;
switch (fault)
{
/* if we have a real TLB miss, map the offending page */
case 0x04010000: /* TLB miss on opword of instruction fetch */
case 0x04020000: /* TLB miss on extension word of instruction fetch */
dbg("MMU ITLB MISS accessing 0x%08x\r\n"
"FS = 0x%08x\r\n"
"MMUSR = 0x%08x\r\n"
"PC = 0x%08x\r\n",
fault_address, format_status, mmu_sr, pc);
dbg("fault = 0x%08x\r\n", fault);
mmu_map_8k_instruction_page(pc, 0);
/* due to prefetch, it makes sense to map the next adjacent page also for ITLBs */
mmu_map_8k_instruction_page(pc + DEFAULT_PAGE_SIZE, 0);
break;
case midictl:
case midi:
/* do something to emulate MIDI access */
dbg("MIDI ACIA access\r\n");
case 0x08020000: /* TLB miss on data write */
case 0x0c020000: /* TLB miss on data read or read-modify-write */
dbg("MMU DTLB MISS accessing 0x%08x\r\n"
"FS = 0x%08x\r\n"
"MMUSR = 0x%08x\r\n"
"PC = 0x%08x\r\n",
fault_address, format_status, mmu_sr, pc);
dbg("fault = 0x%08x\r\n", fault);
mmu_map_8k_data_page(fault_address, 0);
break;
/* else issue a bus error */
default:
/* add missed page to TLB */
mmu_map_page(address, address, MMU_PAGE_SIZE_1M, &flags);
dbg("DTLB: MCF_MMU_MMUOR = %08x\r\n", MCF_MMU_MMUOR);
dbg("ITLB: MCF_MMU_MMUOR = %08x\r\n\r\n", MCF_MMU_MMUOR);
dbg("bus error\r\n");
return 1; /* signal bus error to caller */
}
#ifdef DBG_MMU
xprintf("\r\n");
#endif /* DBG_MMU */
return 0; /* signal TLB miss handled to caller */
}
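
To make the new translation table above more concrete, here is a self-contained sketch that reproduces the four ranges and checks a few sample lookups (illustration only, not part of the patch):

#include <stdint.h>
#include <assert.h>

struct range { uint32_t start, length, offset; };

static const struct range table[] =
{
    { 0x00000000, 0x00e00000, 0x60000000 },     /* ST-RAM -> FPGA video RAM */
    { 0x00e00000, 0x00100000, 0x00000000 },     /* TOS -> SDRAM (identity) */
    { 0x00f00000, 0x00100000, 0xff000000 },     /* Falcon I/O area -> FPGA */
    { 0x01000000, 0x1f000000, 0x00000000 },     /* rest of RAM, virt == phys */
};

static uint32_t to_phys(uint32_t virt)          /* same lookup as lookup_phys() above */
{
    unsigned i;

    for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
        if (virt >= table[i].start && virt < table[i].start + table[i].length)
            return virt + table[i].offset;
    return (uint32_t) -1;                       /* no translation known */
}

int main(void)
{
    assert(to_phys(0x00d00000) == 0x60d00000);  /* Falcon video memory */
    assert(to_phys(0x00e00000) == 0x00e00000);  /* EmuTOS */
    assert(to_phys(0x00f00010) == 0xfff00010);  /* Falcon I/O registers */
    assert(to_phys(0x10000000) == 0x10000000);  /* plain RAM */
    return 0;
}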

View File

@@ -49,6 +49,8 @@ _rom_entry:
/* set stack pointer to end of SRAM */
lea __SUP_SP,a7
move.l #0,(sp)
subq.l #4,sp
move.l #0,(sp)
/*
* Initialize the processor caches.

View File

@@ -63,9 +63,14 @@ void xputchar(int c)
{
__asm__ __volatile__
(
".extern printf_helper\n\t"
" .extern __MBAR \n\t"
" move.b %0,d0 \n\t"
"bsr printf_helper\n\t"
".wait_txready: \n\t"
" move.w __MBAR + 0x8604,d2 \n\t" // PSCSCR0 status register
" btst #10,d2 \n\t" // space left in TX fifo?
" beq.s .wait_txready \n\t" // no, loop
" lea __MBAR + 0x860C,a0 \n\t" // PSCSTB0 transmitter buffer register
" move.b d0,(a0) \n\t" // send byte
/* output */:
/* input */: "r" (c)
/* clobber */: "d0","d2","a0","memory"
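
The inline assembly above polls the PSC0 status register until there is room in the transmit FIFO and then writes the byte; a rough C equivalent, assuming the peripheral base is visible to C as _MBAR (mirroring the _MMUBAR / __MMUBAR naming used in sys/mmu.c):

#include <stdint.h>

extern uint8_t _MBAR[];                                     /* assumed C-visible name of the __MBAR linker symbol */

/* approximate C rendering of the polling loop above (illustration only, not the actual code) */
static void xputchar_sketch(int c)
{
    volatile uint16_t *pscsr0 = (volatile uint16_t *) (_MBAR + 0x8604);   /* PSC0 status register */
    volatile uint8_t  *psctb0 = (volatile uint8_t *)  (_MBAR + 0x860C);   /* PSC0 transmit buffer */

    while (!(*pscsr0 & (1 << 10)))                          /* wait for space in the TX FIFO */
        ;
    *psctb0 = (uint8_t) c;                                  /* send the byte */
}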

View File

@@ -1,38 +0,0 @@
/*
* printf_helper.S
*
* assembler trampoline to let printf (compiled -mpcrel) indirectly reference __MBAR
*
* This file is part of BaS_gcc.
*
* BaS_gcc is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* BaS_gcc is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with BaS_gcc. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2010 - 2012 F. Aschwanden
* Copyright 2011 - 2012 V. Riviere
* Copyright 2012 M. Froeschle
*/
.global printf_helper
printf_helper:
.extern __MBAR
.wait_txready:
move.w __MBAR+0x8604,d2 // PSCSCR0 status register
btst #10,d2 // space left in TX fifo?
beq.s .wait_txready // no, loop
lea __MBAR+0x860C,a0 // PSCSTB0 transmitter buffer register
move.b d0,(a0) // send byte
rts
// vim: set syntax=asm68k :