translated more of the assembler code into C

Markus Fröschle
2014-02-02 21:35:46 +00:00
parent 0b39fef36b
commit 9b4c437bef
5 changed files with 178 additions and 87 deletions

View File

@@ -32,7 +32,7 @@ NATIVECC=gcc
 INCLUDE=-Iinclude
 CFLAGS=-mcpu=5474 \
     -Wall \
-    -Os \
+    -g3 \
     -fomit-frame-pointer \
     -ffreestanding \
     -fleading-underscore \
@@ -260,7 +260,7 @@ endif
 $(1)_MAPFILE_RAM=$(1)/$$(basename $$(RAM_EXEC)).map
 $(1)/$$(RAM_EXEC): $(1)/$(LIBBAS) $(LDCSRC)
     $(CPP) $(INCLUDE) -DCOMPILE_RAM -DOBJDIR=$(1)/objs -P -DFORMAT_ELF=$(FORMAT_ELF) -D$$(MACHINE) $(LDCSRC) -o $(1)/$$(LDRFILE)
-    $(LD) --oformat $$(FORMAT) -Map $$($(1)_MAPFILE_RAM) --cref -T $(1)/$$(LDRFILE) -o $$@
+    $(LD) -g --oformat $$(FORMAT) -Map $$($(1)_MAPFILE_RAM) --cref -T $(1)/$$(LDRFILE) -o $$@
 ifeq ($(COMPILE_ELF),Y)
     $(OBJCOPY) -O srec $$@ $$(basename $$@).s19
 else

View File

@@ -179,6 +179,7 @@ SECTIONS
 #else
 __FASTRAM_END = TARGET_ADDRESS;
 #endif
+__FASTRAM_SIZE = __FASTRAM_END - __FASTRAM;
 /* Init CS0 (BootFLASH @ E000_0000 - E07F_FFFF 8Mbytes) */
 ___BOOT_FLASH = BOOTFLASH_BASE_ADDRESS;

View File

@@ -20,6 +20,8 @@
  * Author: Markus Fröschle
  */
+#define DBG_EXC
 #include "startcf.h"
 #if MACHINE_FIREBEE
 #include "firebee.h"
@@ -388,6 +390,9 @@ reset_vector:
 // This is either a "classic" bus error or the MMU hit a "legal" page not yet mapped.
 //
 access_exception:
+        move.w  #0x2700,sr                      // avoid us being interrupted by the video handler
+                                                // (this would probably overwrite the MMUAR register)
         // save gcc scratch registers, others will be handled by called function
         lea     -4*4(sp),sp
         movem.l d0-d1/a0-a1,(sp)
@@ -501,8 +506,40 @@ irq7text:
 irq5:
         irq     0x74,5,0x20
+        .extern _irq6_interrupt_handler         // highlevel C handler
 irq6:                                           // MFP interrupt from FPGA
-        move.w  #0x2700,sr                      // disable interrupt
+        move.w  #0x2700,sr                      // disable interrupts
+        lea     -4 * 4(sp),sp                   // save gcc scratch registers
+        movem.l d0-d1/a0-a1,(sp)
+        move.l  4 * 4(sp),-(sp)                 // push original exception stack frame
+        move.l  5 * 4(sp),-(sp)
+        jsr     _irq6_interrupt_handler         // call highlevel C handler
+        lea.l   2 * 4(sp),sp
+        tst.l   d0                              // completely handled?
+        movem.l (sp),d0-d1/a0-a1                // restore registers saved above
+        lea     4 * 4(sp),sp                    // adjust stack
+        beq     irq6_os                         // call OS handler
+        rte
+
+irq6_os:                                        // call native OS irq6 handler
+        move.l  a5,-(sp)                        // save registers: TODO: this could be done more effective
+        move.l  d0,-(sp)
+        move.l  0xf0020000,a5                   // fetch vector
+        add.l   _rt_vbr,a5                      // add vector base
+        move.l  (a5),d0                         // fetch handler
+        move.l  4(sp),a5                        // restore a5
+        move.l  d0,4(sp)                        // prepare indirect return
+        move.l  (sp)+,d0                        // restore d0
+        move.w  #0x2600,sr                      // set interrupt mask
+        rts
+
+#ifdef _NOT_USED_
         subq.l  #8,a7
         movem.l d0/a5,(a7)                      // save registers
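Aside (a sketch, not part of the committed change): the new irq6 stub above relies on a simple contract with the C side. The prototype below is the one added later in this commit; the parameter names are just the two exception stack frame longwords the stub pushes before the jsr:

        /* Return nonzero ("true") when the interrupt was completely handled in C;
         * the stub then executes rte. A zero ("false") return makes the stub branch
         * to irq6_os and chain to the native OS irq6 vector instead. */
        bool irq6_interrupt_handler(uint32_t sf1, uint32_t sf2);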
@@ -720,6 +757,8 @@ acsi_dma_end:
         move.l  (a7)+,d1
         move.l  (a7)+,a1
         rts
+#endif /* _NOT_USED_ */
 /*
  * irq 7 = pseudo bus error
  */
@@ -762,7 +801,7 @@ handler_psc3:
 /*
  * general purpose timer 0 (GPT0): video change, later also others. GPT0 is used as
- * input trigger. It is connected to the TIN0 signal of the FPGA and triggers everytime
+ * input trigger. It is connected to the TIN0 signal of the FPGA which triggers it everytime
  * vbasehi is written to, i.e. when the video base address gets changed
  */
 handler_gpt0:
@@ -781,6 +820,7 @@ handler_gpt0:
         lea     MCF_SLT0_SCNT,a0
         move.l  (a0),_video_sbt                 // save time
+        bra     video_chg_end
         // FIXME: don't we need to get out here?
 sca_other:
@@ -809,11 +849,12 @@ video_copy_data:
         add.l   #0x60000000,a1
         move.l  #0x10000,d4                     // whole page
+#define _DO_CPU_COPY
 #ifndef _DO_CPU_COPY
         // experiment: do video page copy using Coldfire DMA
-        lea     -15 * 4(sp),sp
+        lea     -4 * 4(sp),sp
         movem.l d0-d1/a0-a1,(sp)                // save gcc scratch registers
         clr.l   -(sp)                           // no special functions
@@ -838,7 +879,7 @@ video_copy_data:
         bne     .wait_dma_finished
         movem.l (sp),d0-d1/a0-a1                // restore gcc scratch registers
-        lea     15 * 4(sp),sp                   // adjust stack
+        lea     4 * 4(sp),sp                    // adjust stack
 #else

View File

@@ -234,3 +234,23 @@ void pic_interrupt_handler(void)
     }
 }
+
+void video_addr_timeout(void)
+{
+    dbg("%s:\r\n", __FUNCTION__);
+}
+
+extern int32_t video_sbt;
+
+bool irq6_interrupt_handler(uint32_t sf1, uint32_t sf2)
+{
+    MCF_EPORT_EPFR = 0x40;      /* clear int6 from edge port */
+
+    dbg("%s: irq6!\r\n", __FUNCTION__);
+    if (video_sbt != 0 && (video_sbt - 0x70000000) > MCF_SLT0_SCNT)
+    {
+        video_addr_timeout();
+    }
+    return false;
+}
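Aside (a sketch, not part of the committed change): the EPFR write above assumes the ColdFire edge-port flag register is write-one-to-clear, so storing 0x40 acknowledges only the IRQ6 edge. Spelled out under that assumption:

    MCF_EPORT_EPFR = (1 << 6);  /* write 1 to bit 6 only: clears the EPF6 (IRQ6) flag, leaves the other flags untouched */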

View File

@@ -169,70 +169,6 @@ inline uint32_t set_mmubar(uint32_t value)
     return ret;
 }
-
-void mmu_init(void)
-{
-    extern uint8_t _MMUBAR[];
-    uint32_t MMUBAR = (uint32_t) &_MMUBAR[0];
-
-    set_asid(0);        /* do not use address extension (ASID provides virtual 48 bit addresses */
-
-    /*
-     * need to set data ACRs in a way that supervisor access to all memory regions
-     * becomes possible. Otherways it might be that the supervisor stack ends up in an unmapped
-     * region when further MMU TLB entries force a page steal. This would lead to a double
-     * fault since the CPU wouldn't be able to push its exception stack frame during an access
-     * exception
-     */
-
-    /* set data access attributes in ACR0 and ACR1 */
-    set_acr0(ACR_WRITE_PROTECT(0) |                 /* read and write accesses permitted */
-        ACR_SUPERVISOR_PROTECT(0) |                 /* supervisor and user mode access permitted */
-        ACR_CACHE_MODE(CACHE_WRITETHROUGH) |        /* cacheable, write through */
-        ACR_ADDRESS_MASK_MODE(1) |                  /* region 13 MByte */
-        ACR_S(ACR_S_SUPERVISOR_MODE) |              /* memory only visible from supervisor mode */
-        ACR_E(1) |                                  /* enable ACR */
-        ACR_ADMSK(0x0d) |                           /* cover 12 MByte from 0x0 */
-        ACR_BA(0));                                 /* start from 0x0 */
-
-    set_acr1(ACR_WRITE_PROTECT(0) |                 /* read and write accesses permitted */
-        ACR_SUPERVISOR_PROTECT(0) |                 /* supervisor and user mode access permitted */
-        ACR_CACHE_MODE(CACHE_WRITETHROUGH) |        /* cacheable, write through */
-        ACR_ADDRESS_MASK_MODE(0) |                  /* region > 16 MByte */
-        ACR_S(ACR_S_SUPERVISOR_MODE) |              /* memory only visible from supervisor mode */
-        ACR_E(1) |                                  /* enable ACR */
-        ACR_ADMSK(0x1f) |                           /* cover 495 MByte from 0x1000000 */
-        ACR_BA(0x01000000));                        /* all Fast RAM */
-
-    /*
-     * set instruction access attributes in ACR2 and ACR3. This is the same as above, basically:
-     * enable supervisor access to all SDRAM
-     */
-    set_acr2(ACR_WRITE_PROTECT(0) |
-        ACR_SUPERVISOR_PROTECT(0) |
-        ACR_CACHE_MODE(CACHE_WRITETHROUGH) |
-        ACR_ADDRESS_MASK_MODE(1) |
-        ACR_S(ACR_S_SUPERVISOR_MODE) |
-        ACR_E(1) |
-        ACR_ADMSK(0x0c) |
-        ACR_BA(0x0));
-
-    set_acr3(ACR_WRITE_PROTECT(0) |
-        ACR_SUPERVISOR_PROTECT(0) |
-        ACR_CACHE_MODE(CACHE_WRITETHROUGH) |
-        ACR_ADDRESS_MASK_MODE(0) |
-        ACR_S(ACR_S_SUPERVISOR_MODE) |
-        ACR_E(1) |
-        ACR_ADMSK(0x1f) |
-        ACR_BA(0x0f));
-
-    set_mmubar(MMUBAR + 1);     /* set and enable MMUBAR */
-
-    /* clear all MMU TLB entries */
-    MCF_MMU_MMUOR = MCF_MMU_MMUOR_CA;
-}
 /*
@@ -241,6 +177,7 @@ void mmu_init(void)
 extern uint8_t _SYS_SRAM[];
 #define SYS_SRAM_ADDRESS ((uint32_t) &_SYS_SRAM[0])
 extern uint8_t _SYS_SRAM_SIZE[];
+extern uint8_t _FASTRAM_END[];
 struct mmu_mapping
 {
@@ -251,6 +188,20 @@ struct mmu_mapping
     struct map_flags flags;
 };
+
+static struct mmu_mapping locked_map[] =
+{
+    {
+        /* Falcon video memory. Needs special care */
+        0xd00000,
+        0x60d00000,
+        0x100000,
+        MMU_PAGE_SIZE_1M,
+        { CACHE_WRITETHROUGH, SV_USER, SCA_PAGE_ID, ACCESS_READ | ACCESS_WRITE | ACCESS_EXECUTE },
+    },
+};
+static int num_locked_mmu_maps = sizeof(locked_map) / sizeof(struct mmu_mapping);
+
 static struct mmu_mapping memory_map[] =
 {
     /* map system vectors supervisor-protected */
@@ -284,14 +235,7 @@ static struct mmu_mapping memory_map[] =
         MMU_PAGE_SIZE_1M,
         { CACHE_WRITETHROUGH, SV_USER, 0, ACCESS_READ | ACCESS_WRITE | ACCESS_EXECUTE },
     },
-    {
-        /* Falcon video memory. Needs special care */
-        0xd00000,
-        0x60d00000,
-        0x100000,
-        MMU_PAGE_SIZE_8K,
-        { CACHE_WRITETHROUGH, SV_USER, SCA_PAGE_ID, ACCESS_READ | ACCESS_WRITE | ACCESS_EXECUTE },
-    },
+    /* Falcon video ram left out intentionally here (see above) */
     {
         /* ROM */
         0xe00000,
@@ -300,6 +244,14 @@ static struct mmu_mapping memory_map[] =
         MMU_PAGE_SIZE_1M,
         { CACHE_WRITETHROUGH, SV_USER, 0, ACCESS_READ | ACCESS_EXECUTE },
     },
+    {
+        /* FASTRAM */
+        0x1000000,
+        0x1000000,
+        _FASTRAM_END - 0x1000000,
+        MMU_PAGE_SIZE_1M,
+        { CACHE_WRITETHROUGH, SV_USER, 0, ACCESS_READ | ACCESS_WRITE | ACCESS_EXECUTE },
+    },
     {
         /* MBAR */
         MBAR_ADDRESS,
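Aside (a sketch, not part of the committed change): the size field of the new FASTRAM entry uses the linker-provided symbol _FASTRAM_END declared earlier in this file. Such symbols carry only an address, which is why they are declared as arrays; roughly, with a hypothetical local for illustration:

    extern uint8_t _FASTRAM_END[];                                  /* defined by the linker script; the symbol's address is its value */
    uint32_t fastram_bytes = (uint32_t) _FASTRAM_END - 0x1000000;   /* FASTRAM size above the 16 MByte boundary */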
@@ -376,6 +328,84 @@ static struct mmu_mapping *lookup_mapping(uint32_t virt)
     return NULL;
 }
+
+void mmu_init(void)
+{
+    extern uint8_t _MMUBAR[];
+    uint32_t MMUBAR = (uint32_t) &_MMUBAR[0];
+    int i;
+
+    set_asid(0);        /* do not use address extension (ASID provides virtual 48 bit addresses */
+
+    /*
+     * need to set data ACRs in a way that supervisor access to all memory regions
+     * becomes possible. Otherways it might be that the supervisor stack ends up in an unmapped
+     * region when further MMU TLB entries force a page steal. This would lead to a double
+     * fault since the CPU wouldn't be able to push its exception stack frame during an access
+     * exception
+     */
+
+    /* set data access attributes in ACR0 and ACR1 */
+    set_acr0(ACR_WRITE_PROTECT(0) |                 /* read and write accesses permitted */
+        ACR_SUPERVISOR_PROTECT(0) |                 /* supervisor and user mode access permitted */
+        ACR_CACHE_MODE(CACHE_WRITETHROUGH) |        /* cacheable, write through */
+        ACR_ADDRESS_MASK_MODE(1) |                  /* region 13 MByte */
+        ACR_S(ACR_S_SUPERVISOR_MODE) |              /* memory only visible from supervisor mode */
+        ACR_E(1) |                                  /* enable ACR */
+        ACR_ADMSK(0x0a) |                           /* cover 12 MByte from 0x0 */
+        ACR_BA(0));                                 /* start from 0x0 */
+
+    set_acr1(ACR_WRITE_PROTECT(0) |                 /* read and write accesses permitted */
+        ACR_SUPERVISOR_PROTECT(0) |                 /* supervisor and user mode access permitted */
+        ACR_CACHE_MODE(CACHE_WRITETHROUGH) |        /* cacheable, write through */
+        ACR_ADDRESS_MASK_MODE(0) |                  /* region > 16 MByte */
+        ACR_S(ACR_S_SUPERVISOR_MODE) |              /* memory only visible from supervisor mode */
+        ACR_E(1) |                                  /* enable ACR */
+        ACR_ADMSK(0x1f) |                           /* cover 495 MByte from 0x1000000 */
+        ACR_BA(0x01000000));                        /* all Fast RAM */
+
+    /*
+     * set instruction access attributes in ACR2 and ACR3. This is the same as above, basically:
+     * enable supervisor access to all SDRAM
+     */
+    set_acr2(ACR_WRITE_PROTECT(0) |
+        ACR_SUPERVISOR_PROTECT(0) |
+        ACR_CACHE_MODE(CACHE_WRITETHROUGH) |
+        ACR_ADDRESS_MASK_MODE(1) |
+        ACR_S(ACR_S_SUPERVISOR_MODE) |
+        ACR_E(1) |
+        ACR_ADMSK(0x0c) |
+        ACR_BA(0x0));
+
+    set_acr3(ACR_WRITE_PROTECT(0) |
+        ACR_SUPERVISOR_PROTECT(0) |
+        ACR_CACHE_MODE(CACHE_WRITETHROUGH) |
+        ACR_ADDRESS_MASK_MODE(0) |
+        ACR_S(ACR_S_SUPERVISOR_MODE) |
+        ACR_E(1) |
+        ACR_ADMSK(0x1f) |
+        ACR_BA(0x0f));
+
+    set_mmubar(MMUBAR + 1);     /* set and enable MMUBAR */
+
+    /* clear all MMU TLB entries */
+    MCF_MMU_MMUOR = MCF_MMU_MMUOR_CA;
+
+    /* map locked TLB entries */
+    for (i = 0; i < num_locked_mmu_maps; i++)
+    {
+        mmu_map_page(locked_map[i].virt, locked_map[i].phys, locked_map->pagesize, locked_map->flags);
+        if (locked_map[i].flags.page_id == SCA_PAGE_ID)
+        {
+            video_tlb = 0x2000;
+            video_sbt = 0x0;
+        }
+    }
+}
 /*
  * handle an access error
  * upper level routine called from access_exception inside exceptions.S
@@ -384,11 +414,13 @@ bool access_exception(uint32_t pc, uint32_t format_status)
 {
     int fault_status;
     uint32_t fault_address;
+    uint32_t mmu_status;
 
     /*
      * extract fault status from format_status exception stack field
      */
     fault_status = format_status & 0xc030000;
+    mmu_status = MCF_MMU_MMUSR;
 
     /*
      * determine if access fault was caused by a TLB miss
@@ -397,8 +429,11 @@ bool access_exception(uint32_t pc, uint32_t format_status)
     {
     case 0x4010000:     /* TLB miss on opword of instruction fetch */
     case 0x4020000:     /* TLB miss on extension word of instruction fetch */
+        fault_address = pc;
+        break;
     case 0x8020000:     /* TLB miss on data write */
     case 0xc020000:     /* TLB miss on data read or read-modify-write */
+        fault_address = MCF_MMU_MMUAR;
         //dbg("%s: access fault - TLB miss at %p. Fault status = 0x0%x\r\n", __FUNCTION__, pc, fault_status);
         break;
@@ -406,7 +441,8 @@ bool access_exception(uint32_t pc, uint32_t format_status)
         return false;
     }
 
-    if (MCF_MMU_MMUSR & 2)                  /* did the last fault hit in TLB? */
+    if (mmu_status & MCF_MMU_MMUSR_HIT)     /* did the last fault hit in TLB? */
     {
         /*
          * if yes, then we already mapped that page during a previous turn and this is in fact a bus error
@@ -417,7 +453,6 @@ bool access_exception(uint32_t pc, uint32_t format_status)
     {
         struct mmu_mapping *map;
-        fault_address = MCF_MMU_MMUAR;
         if ((map = lookup_mapping(fault_address)) != NULL)
         {
@@ -439,13 +474,7 @@ bool access_exception(uint32_t pc, uint32_t format_status)
                 break;
             }
-            mmu_map_page(map->phys & mask, map->virt & mask, map->pagesize, map->flags);
-            if (map->flags.page_id == SCA_PAGE_ID)
-            {
-                video_tlb = 0x2000;
-                video_sbt = 0x0;
-            }
+            mmu_map_page(fault_address & mask, fault_address & mask, map->pagesize, map->flags);
             return true;
         }
     }
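Aside (a sketch, not part of the committed change): the net effect of the access_exception changes above is that the fault address is now chosen per miss type instead of always being read from MCF_MMU_MMUAR. Read as a small helper, with hypothetical naming:

    static uint32_t tlb_miss_address(uint32_t pc, int fault_status)
    {
        switch (fault_status)
        {
        case 0x4010000:                 /* TLB miss on opword of instruction fetch */
        case 0x4020000:                 /* TLB miss on extension word of instruction fetch */
            return pc;                  /* instruction misses fault at the PC itself */
        case 0x8020000:                 /* TLB miss on data write */
        case 0xc020000:                 /* TLB miss on data read or read-modify-write */
            return MCF_MMU_MMUAR;       /* data misses latch the faulting address in MMUAR */
        default:
            return 0;                   /* not a TLB miss; the caller bails out */
        }
    }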