Add support for mips64n32

This only needs support for saving and loading state with 64-bit registers.
Since pointers remain 32-bit, no extra changes are needed in the
dynarec. Verified with qemu (qemu-mipsn32el) and miniretro.
David Guillen Fandos 2021-06-21 19:17:19 +02:00
parent e0a31952db
commit f8d4276e12
2 changed files with 50 additions and 27 deletions
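A quick C sketch (illustration only, not part of the commit; saved_reg_t is an invented name) of the n32 property the message relies on: general-purpose registers are 64 bits wide, so every spilled register needs an 8-byte slot, while pointers stay 32-bit, which is why the dynarec's address handling can stay as it is.

#include <stdint.h>

#ifdef __mips64                  /* same test the assembly stub uses below */
typedef uint64_t saved_reg_t;    /* a spilled GPR needs 8 bytes (sd / ld)  */
#else
typedef uint32_t saved_reg_t;    /* o32: 4 bytes per spilled GPR (sw / lw) */
#endif

#ifdef _ABIN32
/* n32 keeps 32-bit pointers, so generated code addresses memory as before */
_Static_assert(sizeof(void *) == 4, "n32 pointers are 32 bit");
#endif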


@@ -379,6 +379,16 @@ else ifeq ($(platform), mips32)
 HAVE_DYNAREC := 1
 CPU_ARCH := mips
 
+# MIPS64
+else ifeq ($(platform), mips64n32)
+TARGET := $(TARGET_NAME)_libretro.so
+SHARED := -shared -nostdlib -Wl,--version-script=link.T
+fpic := -fPIC -DPIC
+CFLAGS += -fomit-frame-pointer -ffast-math -march=mips64 -mabi=n32 -mhard-float
+CFLAGS += -fno-caller-saves
+HAVE_DYNAREC := 1
+CPU_ARCH := mips
+
 # emscripten
 else ifeq ($(platform), emscripten)
 TARGET := $(TARGET_NAME)_libretro_$(platform).bc
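
The block above mirrors the existing mips32 platform; the ABI switch is done entirely by -march=mips64 -mabi=n32. As a hedged sanity check (not present in the repo; MIPS64N32_BUILD is a hypothetical define used only for this illustration), a guard like this would fail the build early if the toolchain does not actually honor those flags:

#ifdef MIPS64N32_BUILD            /* hypothetical define, illustration only */
#  if !defined(__mips64) || !defined(_ABIN32)
#    error "platform=mips64n32 expects a compiler targeting -march=mips64 -mabi=n32"
#  endif
#endif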


@@ -18,7 +18,19 @@
 #include "../gpsp_config.h"
 
-.set mips32r2
+// This is also defined in sys/asm.h but doesn't seem portable?
+#ifdef __mips64
+.set mips64
+#define SZREG 8
+#define REG_L ld
+#define REG_S sd
+#else
+.set mips32r2
+#define SZREG 4
+#define REG_L lw
+#define REG_S sw
+#endif
 
 .align 4
 .global mips_update_gba
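
The SZREG / REG_L / REG_S trio is the usual width-agnostic load/store abstraction (glibc's <sys/asm.h> defines the same names, as the new comment points out): 64-bit builds move whole registers with ld/sd in 8-byte slots, 32-bit builds keep lw/sw and 4-byte slots. A C mirror of the selection, purely for illustration (SLOT is an invented helper):

#ifdef __mips64
#define SZREG 8               /* REG_L -> ld, REG_S -> sd */
#else
#define SZREG 4               /* REG_L -> lw, REG_S -> sw */
#endif
#define SLOT(n) ((n) * SZREG) /* byte offset of the n-th saved register */
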
@@ -117,6 +129,7 @@
 .equ COMPLETED_FRAME, (32 * 4)
 .equ OAM_UPDATED, (33 * 4)
 .equ GP_SAVE, (34 * 4)
+.equ GP_SAVE_HI, (35 * 4)
 
 .equ SPSR_BASE, (0x100 + 0x400 * 3)
 .equ REGMODE_BASE, (SPSR_BASE + 24)
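
GP_SAVE_HI reserves the 4-byte slot right after GP_SAVE. A sketch of why (field names are illustrative, not the emulator's real structure): $gp is now written with REG_S, which is an 8-byte sd on mips64, so the store covers two of these 32-bit slots, and the extra .equ keeps the second half from overlapping whatever would otherwise come next. Offset 34 * 4 = 136 is 8-byte aligned, so a single sd is legal.

#include <stdint.h>

/* Illustrative layout of the tail of the per-context save area. */
struct ctx_tail_sketch {
  uint32_t completed_frame;  /* COMPLETED_FRAME (32 * 4)                      */
  uint32_t oam_updated;      /* OAM_UPDATED     (33 * 4)                      */
  uint32_t gp_save;          /* GP_SAVE         (34 * 4): first half of $gp   */
  uint32_t gp_save_hi;       /* GP_SAVE_HI      (35 * 4): second half from sd */
};
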
@@ -190,7 +203,7 @@
 sw $28, REG_R13($16)
 sw $30, REG_R14($16)
 
-lw $28, GP_SAVE($16)
+REG_L $28, GP_SAVE($16)
 .endm
 
 .macro restore_registers
@@ -279,20 +292,19 @@ mips_cheat_hook:
 # Loads the main context and returns to it.
 # ARM regs must be saved before branching here
 
 return_to_main:
-lw $28, GP_SAVE($16)       # Restore previous state
-lw $s0, 0($sp)
-lw $s1, 4($sp)
-lw $s2, 8($sp)
-lw $s3, 12($sp)
-lw $s4, 16($sp)
-lw $s5, 20($sp)
-lw $s6, 24($sp)
-lw $s7, 28($sp)
-lw $fp, 32($sp)
-lw $ra, 36($sp)
+REG_L $28, GP_SAVE($16)    # Restore previous state
+REG_L $s0, 0*SZREG($sp)
+REG_L $s1, 1*SZREG($sp)
+REG_L $s2, 2*SZREG($sp)
+REG_L $s3, 3*SZREG($sp)
+REG_L $s4, 4*SZREG($sp)
+REG_L $s5, 5*SZREG($sp)
+REG_L $s6, 6*SZREG($sp)
+REG_L $s7, 7*SZREG($sp)
+REG_L $fp, 8*SZREG($sp)
+REG_L $ra, 9*SZREG($sp)
 
 jr $ra                     # Return to main
-addiu $sp, $sp, 48         # Restore stack pointer (delay slot)
+addiu $sp, $sp, 80         # Restore stack pointer (delay slot)
 
 # Perform an indirect branch.
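
The epilogue now walks 8-byte slots (n * SZREG) and pops an 80-byte frame: 10 saved registers times SZREG = 8, which also keeps $sp 16-byte aligned as n32 requires; on 32-bit builds the same 80 simply leaves the upper half of the frame unused. A C sketch of the equivalent frame (the struct name is invented):

#include <stdint.h>

#ifdef __mips64
typedef uint64_t reg_slot_t;    /* SZREG == 8 */
#else
typedef uint32_t reg_slot_t;    /* SZREG == 4 */
#endif

struct main_ctx_frame {
  reg_slot_t s[8];              /* $s0..$s7 at 0*SZREG .. 7*SZREG */
  reg_slot_t fp;                /* $fp at 8*SZREG                 */
  reg_slot_t ra;                /* $ra at 9*SZREG                 */
};                              /* 80 bytes when SZREG == 8       */
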
@@ -577,20 +589,21 @@ asr_shift_high:
 # $5: pointer to reg
 
 execute_arm_translate_internal:
-addiu $sp, $sp, -48        # Store the main thread context
-sw $s0, 0($sp)
-sw $s1, 4($sp)
-sw $s2, 8($sp)
-sw $s3, 12($sp)
-sw $s4, 16($sp)
-sw $s5, 20($sp)
-sw $s6, 24($sp)
-sw $s7, 28($sp)
-sw $fp, 32($sp)
-sw $ra, 36($sp)
+addiu $sp, $sp, -80        # Store the main thread context
+REG_S $s0, 0*SZREG($sp)
+REG_S $s1, 1*SZREG($sp)
+REG_S $s2, 2*SZREG($sp)
+REG_S $s3, 3*SZREG($sp)
+REG_S $s4, 4*SZREG($sp)
+REG_S $s5, 5*SZREG($sp)
+REG_S $s6, 6*SZREG($sp)
+REG_S $s7, 7*SZREG($sp)
+REG_S $fp, 8*SZREG($sp)
+REG_S $ra, 9*SZREG($sp)
 
 move $16, $5
-sw $28, GP_SAVE($16)
+REG_S $28, GP_SAVE($16)
 
 addu $17, $4, $0           # load cycle counter register
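
The prologue is the mirror image: it reserves the same 80-byte frame, stores the callee-saved registers with REG_S, and stashes $gp in GP_SAVE. Going by the register comments ($4 = cycle counter, $5 = pointer to reg), the C-side view of this entry point is roughly the declaration below; this is an inference and the exact prototype in the repo may differ.

#include <stdint.h>

/* Hedged sketch: the first two arguments arrive in $4 and $5 under both o32
 * and n32, and since n32 pointers are 32 bit the reg pointer needs no change. */
extern void execute_arm_translate_internal(uint32_t cycles, void *reg_base);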