1
0
mirror of https://github.com/golang/go synced 2024-11-11 23:20:24 -07:00

runtime: add support for linux/riscv64

Based on riscv-go port.

Updates #27532

Change-Id: If522807a382130be3c8d40f4b4c1131d1de7c9e3
Reviewed-on: https://go-review.googlesource.com/c/go/+/204632
Run-TryBot: Joel Sing <joel@sing.id.au>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
This commit is contained in:
Joel Sing 2019-11-04 04:58:37 +11:00
parent cbaa666682
commit 8e0be05ec7
20 changed files with 2090 additions and 3 deletions

670
src/runtime/asm_riscv64.s Normal file
View File

@ -0,0 +1,670 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "go_asm.h"
#include "funcdata.h"
#include "textflag.h"
// func rt0_go()
// Runtime bootstrap: establish g0's stack bounds on the OS-provided
// stack, optionally hand control to cgo's initializer, wire m0 and g0
// together, run the init sequence, and start the scheduler. Never returns.
TEXT runtime·rt0_go(SB),NOSPLIT,$0
// X2 = stack; A0 = argc; A1 = argv
ADD $-24, X2
MOV A0, 8(X2) // argc
MOV A1, 16(X2) // argv
// create istack out of the given (operating system) stack.
// _cgo_init may update stackguard.
MOV $runtime·g0(SB), g
MOV $(-64*1024), T0 // g0 gets 64 KB of the OS stack
ADD T0, X2, T1
MOV T1, g_stackguard0(g)
MOV T1, g_stackguard1(g)
MOV T1, (g_stack+stack_lo)(g)
MOV X2, (g_stack+stack_hi)(g)
// if there is a _cgo_init, call it using the gcc ABI.
MOV _cgo_init(SB), T0
BEQ T0, ZERO, nocgo
MOV ZERO, A3 // arg 3: not used
MOV ZERO, A2 // arg 2: not used
MOV $setg_gcc<>(SB), A1 // arg 1: setg
MOV g, A0 // arg 0: G
JALR RA, T0
nocgo:
// update stackguard after _cgo_init
MOV (g_stack+stack_lo)(g), T0
ADD $const__StackGuard, T0
MOV T0, g_stackguard0(g)
MOV T0, g_stackguard1(g)
// set the per-goroutine and per-mach "registers"
MOV $runtime·m0(SB), T0
// save m->g0 = g0
MOV g, m_g0(T0)
// save m0 to g0->m
MOV T0, g_m(g)
CALL runtime·check(SB)
// args are already prepared
CALL runtime·args(SB)
CALL runtime·osinit(SB)
CALL runtime·schedinit(SB)
// create a new goroutine to start program
MOV $runtime·mainPC(SB), T0 // entry
ADD $-24, X2
MOV T0, 16(X2) // arg: fn (funcval for runtime.main)
MOV ZERO, 8(X2)
MOV ZERO, 0(X2)
CALL runtime·newproc(SB)
ADD $24, X2
// start this M
CALL runtime·mstart(SB)
WORD $0 // crash if reached
RET
// void setg_gcc(G*); set g called from gcc with g in A0
TEXT setg_gcc<>(SB),NOSPLIT,$0-0
MOV A0, g
CALL runtime·save_g(SB)
RET
// func cputicks() int64
// Reads the time CSR as a cheap monotonic tick source for profiling.
TEXT runtime·cputicks(SB),NOSPLIT,$0-8
WORD $0xc0102573 // rdtime a0
MOV A0, ret+0(FP)
RET
// systemstack_switch is a dummy routine that systemstack leaves at the bottom
// of the G stack. We need to distinguish the routine that
// lives at the bottom of the G stack from the one that lives
// at the top of the system stack because the one at the top of
// the system stack terminates the stack walk (see topofstack()).
TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0
UNDEF
JALR RA, ZERO // make sure this function is not leaf
RET
// func systemstack(fn func())
// Runs fn on the system (g0) stack, then switches back to the
// calling goroutine's stack. If already on g0/gsignal, calls fn directly.
TEXT runtime·systemstack(SB), NOSPLIT, $0-8
MOV fn+0(FP), CTXT // CTXT = fn
MOV g_m(g), T0 // T0 = m
MOV m_gsignal(T0), T1 // T1 = gsignal
BEQ g, T1, noswitch
MOV m_g0(T0), T1 // T1 = g0
BEQ g, T1, noswitch
MOV m_curg(T0), T2
BEQ g, T2, switch
// Bad: g is not gsignal, not g0, not curg. What is it?
// Hide call from linker nosplit analysis.
MOV $runtime·badsystemstack(SB), T1
JALR RA, T1
switch:
// save our state in g->sched. Pretend to
// be systemstack_switch if the G stack is scanned.
MOV $runtime·systemstack_switch(SB), T2
ADD $8, T2 // get past prologue
MOV T2, (g_sched+gobuf_pc)(g)
MOV X2, (g_sched+gobuf_sp)(g)
MOV ZERO, (g_sched+gobuf_lr)(g)
MOV g, (g_sched+gobuf_g)(g)
// switch to g0
MOV T1, g
CALL runtime·save_g(SB)
MOV (g_sched+gobuf_sp)(g), T0
// make it look like mstart called systemstack on g0, to stop traceback
ADD $-8, T0
MOV $runtime·mstart(SB), T1
MOV T1, 0(T0)
MOV T0, X2
// call target function
MOV 0(CTXT), T1 // code pointer
JALR RA, T1
// switch back to g
MOV g_m(g), T0
MOV m_curg(T0), g
CALL runtime·save_g(SB)
MOV (g_sched+gobuf_sp)(g), X2
MOV ZERO, (g_sched+gobuf_sp)(g)
RET
noswitch:
// already on m stack, just call directly
// Using a tail call here cleans up tracebacks since we won't stop
// at an intermediate systemstack.
MOV 0(CTXT), T1 // code pointer
ADD $8, X2
JMP (T1)
// func getcallerpc() uintptr
// NOFRAME: with no frame of our own, the caller's saved return
// address is the word at 0(X2).
TEXT runtime·getcallerpc(SB),NOSPLIT|NOFRAME,$0-8
MOV 0(X2), T0 // LR saved by caller
MOV T0, ret+0(FP)
RET
/*
 * support for morestack
 */
// Called during function prolog when more stack is needed.
// Caller state on entry (see the stores into g->sched below):
// T0 = f's PC (ctxt), RA = f's LR, CTXT = f's closure context.
// NOTE(review): the original comment here ("R1: framesize, R2: argsize,
// R3: LR") appears inherited from another port — confirm against the
// riscv64 compiler prologue.
//
// The traceback routines see morestack on a g0 as being
// the top of a stack (for example, morestack calling newstack
// calling the scheduler calling newm calling gc), so we must
// record an argument size. For that purpose, it has no arguments.
// func morestack()
TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0
// Cannot grow scheduler stack (m->g0).
MOV g_m(g), A0
MOV m_g0(A0), A1
BNE g, A1, 3(PC)
CALL runtime·badmorestackg0(SB)
CALL runtime·abort(SB)
// Cannot grow signal stack (m->gsignal).
MOV m_gsignal(A0), A1
BNE g, A1, 3(PC)
CALL runtime·badmorestackgsignal(SB)
CALL runtime·abort(SB)
// Called from f.
// Set g->sched to context in f.
MOV X2, (g_sched+gobuf_sp)(g)
MOV T0, (g_sched+gobuf_pc)(g)
MOV RA, (g_sched+gobuf_lr)(g)
MOV CTXT, (g_sched+gobuf_ctxt)(g)
// Called from f.
// Set m->morebuf to f's caller.
MOV RA, (m_morebuf+gobuf_pc)(A0) // f's caller's PC
MOV X2, (m_morebuf+gobuf_sp)(A0) // f's caller's SP
MOV g, (m_morebuf+gobuf_g)(A0)
// Call newstack on m->g0's stack.
MOV m_g0(A0), g
CALL runtime·save_g(SB)
MOV (g_sched+gobuf_sp)(g), X2
// Create a stack frame on g0 to call newstack.
MOV ZERO, -8(X2) // Zero saved LR in frame
ADD $-8, X2
CALL runtime·newstack(SB)
// Not reached, but make sure the return PC from the call to newstack
// is still in this function, and not the beginning of the next.
UNDEF
// func morestack_noctxt()
// Variant used when the caller has no closure context to preserve.
TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
MOV ZERO, CTXT
JMP runtime·morestack(SB)
// AES hashing not implemented for riscv64
// (no hardware AES), so all hash entry points tail-call the
// portable Go fallbacks.
TEXT runtime·memhash(SB),NOSPLIT|NOFRAME,$0-32
JMP runtime·memhashFallback(SB)
TEXT runtime·strhash(SB),NOSPLIT|NOFRAME,$0-24
JMP runtime·strhashFallback(SB)
TEXT runtime·memhash32(SB),NOSPLIT|NOFRAME,$0-24
JMP runtime·memhash32Fallback(SB)
TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-24
JMP runtime·memhash64Fallback(SB)
// func return0()
// Sets the ABI return register to 0; used by the runtime where a
// zero return must be forced without Go code.
TEXT runtime·return0(SB), NOSPLIT, $0
MOV $0, A0
RET
// restore state from Gobuf; longjmp
// func gogo(buf *gobuf)
// Resumes the goroutine described by buf: reloads SP/LR/ret/ctxt,
// clears the buffer's pointer slots (GC hygiene), and jumps to buf.pc.
TEXT runtime·gogo(SB), NOSPLIT, $16-8
MOV buf+0(FP), T0
MOV gobuf_g(T0), g // make sure g is not nil
CALL runtime·save_g(SB)
MOV (g), ZERO // make sure g is not nil
MOV gobuf_sp(T0), X2
MOV gobuf_lr(T0), RA
MOV gobuf_ret(T0), A0
MOV gobuf_ctxt(T0), CTXT
MOV ZERO, gobuf_sp(T0)
MOV ZERO, gobuf_ret(T0)
MOV ZERO, gobuf_lr(T0)
MOV ZERO, gobuf_ctxt(T0)
MOV gobuf_pc(T0), T0
JALR ZERO, T0
// func jmpdefer(fv *funcval, argp uintptr)
// called from deferreturn
// 1. grab stored return address from the caller's frame
// 2. sub 12 bytes to get back to JAL deferreturn
// 3. JMP to fn
// TODO(sorear): There are shorter jump sequences. This function will need to be updated when we use them.
TEXT runtime·jmpdefer(SB), NOSPLIT|NOFRAME, $0-16
MOV 0(X2), RA
ADD $-12, RA // rewind RA so deferreturn is re-executed after fn returns
MOV fv+0(FP), CTXT
MOV argp+8(FP), X2
ADD $-8, X2
MOV 0(CTXT), T0
JALR ZERO, T0
// func procyield(cycles uint32)
// No cheap spin-wait hint on riscv64; this is a no-op.
TEXT runtime·procyield(SB),NOSPLIT,$0-0
RET
// Switch to m->g0's stack, call fn(g).
// Fn must never return. It should gogo(&g->sched)
// to keep running g.
// func mcall(fn func(*g))
TEXT runtime·mcall(SB), NOSPLIT|NOFRAME, $0-8
// Save caller state in g->sched
MOV X2, (g_sched+gobuf_sp)(g)
MOV RA, (g_sched+gobuf_pc)(g)
MOV ZERO, (g_sched+gobuf_lr)(g)
MOV g, (g_sched+gobuf_g)(g)
// Switch to m->g0 & its stack, call fn.
MOV g, T0
MOV g_m(g), T1
MOV m_g0(T1), g
CALL runtime·save_g(SB)
BNE g, T0, 2(PC) // mcall must not be called while already on g0
JMP runtime·badmcall(SB)
MOV fn+0(FP), CTXT // context
MOV 0(CTXT), T1 // code pointer
MOV (g_sched+gobuf_sp)(g), X2 // sp = m->g0->sched.sp
ADD $-16, X2
MOV T0, 8(X2) // arg: the old g
MOV ZERO, 0(X2)
JALR RA, T1
JMP runtime·badmcall2(SB)
// func gosave(buf *gobuf)
// save state in Gobuf; setjmp
TEXT runtime·gosave(SB), NOSPLIT|NOFRAME, $0-8
MOV buf+0(FP), T1
MOV X2, gobuf_sp(T1)
MOV RA, gobuf_pc(T1)
MOV g, gobuf_g(T1)
MOV ZERO, gobuf_lr(T1)
MOV ZERO, gobuf_ret(T1)
// Assert ctxt is zero. See func save.
MOV gobuf_ctxt(T1), T1
BEQ T1, ZERO, 2(PC)
CALL runtime·badctxt(SB)
RET
// func asmcgocall(fn, arg unsafe.Pointer) int32
TEXT ·asmcgocall(SB),NOSPLIT,$0-20
// TODO(jsing): Add support for cgo - issue #36641.
WORD $0 // crash
// func asminit()
// Nothing to initialize on riscv64.
TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0
RET
// reflectcall: call a function with the given argument list
// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
// we don't have variable-sized frames, so we use a small number
// of constant-sized-frame functions to encode a few bits of size in the pc.
// Caution: ugly multiline assembly macros in your future!
// DISPATCH jumps to runtime·NAME when the argument size (in T0)
// fits in MAXSIZE; otherwise falls through to the next DISPATCH.
// NOTE(review): the first two macro lines end in bare "\" while the
// third uses ";" — confirm the assembler's macro continuation accepts
// this mix (CALLFN below uses ";" throughout).
#define DISPATCH(NAME,MAXSIZE) \
MOV $MAXSIZE, T1 \
BLTU T1, T0, 3(PC) \
MOV $NAME(SB), T2; \
JALR ZERO, T2
// Note: can't just "BR NAME(SB)" - bad inlining results.
// func call(argtype *rtype, fn, arg unsafe.Pointer, n uint32, retoffset uint32)
TEXT reflect·call(SB), NOSPLIT, $0-0
JMP ·reflectcall(SB)
// func reflectcall(argtype *_type, fn, arg unsafe.Pointer, argsize uint32, retoffset uint32)
TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32
MOVWU argsize+24(FP), T0
DISPATCH(runtime·call32, 32)
DISPATCH(runtime·call64, 64)
DISPATCH(runtime·call128, 128)
DISPATCH(runtime·call256, 256)
DISPATCH(runtime·call512, 512)
DISPATCH(runtime·call1024, 1024)
DISPATCH(runtime·call2048, 2048)
DISPATCH(runtime·call4096, 4096)
DISPATCH(runtime·call8192, 8192)
DISPATCH(runtime·call16384, 16384)
DISPATCH(runtime·call32768, 32768)
DISPATCH(runtime·call65536, 65536)
DISPATCH(runtime·call131072, 131072)
DISPATCH(runtime·call262144, 262144)
DISPATCH(runtime·call524288, 524288)
DISPATCH(runtime·call1048576, 1048576)
DISPATCH(runtime·call2097152, 2097152)
DISPATCH(runtime·call4194304, 4194304)
DISPATCH(runtime·call8388608, 8388608)
DISPATCH(runtime·call16777216, 16777216)
DISPATCH(runtime·call33554432, 33554432)
DISPATCH(runtime·call67108864, 67108864)
DISPATCH(runtime·call134217728, 134217728)
DISPATCH(runtime·call268435456, 268435456)
DISPATCH(runtime·call536870912, 536870912)
DISPATCH(runtime·call1073741824, 1073741824)
MOV $runtime·badreflectcall(SB), T2
JALR ZERO, T2
// CALLFN defines one fixed-frame-size reflectcall trampoline:
// copy args byte-by-byte onto our frame, call fn, then let callRet
// copy results back through reflectcallmove (for write barriers).
// NOTE(review): the frame references mix argument names
// (argsize+24(FP) vs n+24(FP)) for the same slot — confirm vet/asmdecl
// accepts this on riscv64.
#define CALLFN(NAME,MAXSIZE) \
TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \
NO_LOCAL_POINTERS; \
/* copy arguments to stack */ \
MOV arg+16(FP), A1; \
MOVWU argsize+24(FP), A2; \
MOV X2, A3; \
ADD $8, A3; \
ADD A3, A2; \
BEQ A3, A2, 6(PC); \
MOVBU (A1), A4; \
ADD $1, A1; \
MOVB A4, (A3); \
ADD $1, A3; \
JMP -5(PC); \
/* call function */ \
MOV f+8(FP), CTXT; \
MOV (CTXT), A4; \
PCDATA $PCDATA_StackMapIndex, $0; \
JALR RA, A4; \
/* copy return values back */ \
MOV argtype+0(FP), A5; \
MOV arg+16(FP), A1; \
MOVWU n+24(FP), A2; \
MOVWU retoffset+28(FP), A4; \
ADD $8, X2, A3; \
ADD A4, A3; \
ADD A4, A1; \
SUB A4, A2; \
CALL callRet<>(SB); \
RET
// callRet copies return values back at the end of call*. This is a
// separate function so it can allocate stack space for the arguments
// to reflectcallmove. It does not follow the Go ABI; it expects its
// arguments in registers.
TEXT callRet<>(SB), NOSPLIT, $32-0
MOV A5, 8(X2) // argtype
MOV A1, 16(X2) // destination (caller's arg area)
MOV A3, 24(X2) // source (our frame)
MOV A2, 32(X2) // size
CALL runtime·reflectcallmove(SB)
RET
CALLFN(·call16, 16)
CALLFN(·call32, 32)
CALLFN(·call64, 64)
CALLFN(·call128, 128)
CALLFN(·call256, 256)
CALLFN(·call512, 512)
CALLFN(·call1024, 1024)
CALLFN(·call2048, 2048)
CALLFN(·call4096, 4096)
CALLFN(·call8192, 8192)
CALLFN(·call16384, 16384)
CALLFN(·call32768, 32768)
CALLFN(·call65536, 65536)
CALLFN(·call131072, 131072)
CALLFN(·call262144, 262144)
CALLFN(·call524288, 524288)
CALLFN(·call1048576, 1048576)
CALLFN(·call2097152, 2097152)
CALLFN(·call4194304, 4194304)
CALLFN(·call8388608, 8388608)
CALLFN(·call16777216, 16777216)
CALLFN(·call33554432, 33554432)
CALLFN(·call67108864, 67108864)
CALLFN(·call134217728, 134217728)
CALLFN(·call268435456, 268435456)
CALLFN(·call536870912, 536870912)
CALLFN(·call1073741824, 1073741824)
// func goexit(neverCallThisFunction)
// The top-most function running on a goroutine
// returns to goexit+PCQuantum.
TEXT runtime·goexit(SB),NOSPLIT|NOFRAME,$0-0
MOV ZERO, ZERO // NOP
JMP runtime·goexit1(SB) // does not return
// traceback from goexit1 must hit code range of goexit
MOV ZERO, ZERO // NOP
// func cgocallback_gofunc(fv uintptr, frame uintptr, framesize, ctxt uintptr)
TEXT ·cgocallback_gofunc(SB),NOSPLIT,$24-32
// TODO(jsing): Add support for cgo - issue #36641.
WORD $0 // crash
// Trap into the debugger / raise SIGTRAP.
TEXT runtime·breakpoint(SB),NOSPLIT|NOFRAME,$0-0
EBREAK
RET
// Hard-stop the process; used for unrecoverable runtime errors.
TEXT runtime·abort(SB),NOSPLIT|NOFRAME,$0-0
EBREAK
RET
// void setg(G*); set g. for use by needm.
TEXT runtime·setg(SB), NOSPLIT, $0-8
MOV gg+0(FP), g
// This only happens if iscgo, so jump straight to save_g
CALL runtime·save_g(SB)
RET
// func checkASM() bool
// Reports whether assembly-level sanity checks pass; always true on riscv64.
TEXT ·checkASM(SB),NOSPLIT,$0-1
MOV $1, T0
// ret is a 1-byte bool in a $0-1 frame: a full-width MOV would store
// 8 bytes past the slot. Store only the low byte.
MOVB T0, ret+0(FP)
RET
// gcWriteBarrier performs a heap pointer write and informs the GC.
//
// gcWriteBarrier does NOT follow the Go ABI. It takes two arguments:
// - T0 is the destination of the write
// - T1 is the value being written at T0.
// It clobbers T6 (X31), the linker temp register (REG_TMP).
// The act of CALLing gcWriteBarrier will clobber RA (LR).
// It does not clobber any other general-purpose registers,
// but may clobber others (e.g., floating point registers).
TEXT runtime·gcWriteBarrier(SB),NOSPLIT,$296
// Save the registers clobbered by the fast path.
MOV A0, 280(X2)
MOV A1, 288(X2)
MOV g_m(g), A0
MOV m_p(A0), A0
MOV (p_wbBuf+wbBuf_next)(A0), A1
// Increment wbBuf.next position.
ADD $16, A1 // two 8-byte records: value and *slot
MOV A1, (p_wbBuf+wbBuf_next)(A0)
MOV (p_wbBuf+wbBuf_end)(A0), A0
MOV A0, T6 // T6 is linker temp register (REG_TMP)
// Record the write.
MOV T1, -16(A1) // Record value
MOV (T0), A0 // TODO: This turns bad writes into bad reads.
MOV A0, -8(A1) // Record *slot
// Is the buffer full?
BEQ A1, T6, flush
ret:
MOV 280(X2), A0
MOV 288(X2), A1
// Do the write.
MOV T1, (T0)
RET
flush:
// Save all general purpose registers since these could be
// clobbered by wbBufFlush and were not saved by the caller.
MOV T0, 8(X2) // Also first argument to wbBufFlush
MOV T1, 16(X2) // Also second argument to wbBufFlush
// TODO: Optimise
// X3 is g.
// X5 already saved (T0)
// X6 already saved (T1)
// X10 already saved (A0)
// X11 already saved (A1)
// X31 is tmp register.
// (All 32 GPR slots are spilled wholesale for simplicity; stores of
// ZERO/SP are harmless and their restores are no-ops by construction.)
MOV X0, 24(X2)
MOV X1, 32(X2)
MOV X2, 40(X2)
MOV X3, 48(X2)
MOV X4, 56(X2)
MOV X5, 64(X2)
MOV X6, 72(X2)
MOV X7, 80(X2)
MOV X8, 88(X2)
MOV X9, 96(X2)
MOV X10, 104(X2)
MOV X11, 112(X2)
MOV X12, 120(X2)
MOV X13, 128(X2)
MOV X14, 136(X2)
MOV X15, 144(X2)
MOV X16, 152(X2)
MOV X17, 160(X2)
MOV X18, 168(X2)
MOV X19, 176(X2)
MOV X20, 184(X2)
MOV X21, 192(X2)
MOV X22, 200(X2)
MOV X23, 208(X2)
MOV X24, 216(X2)
MOV X25, 224(X2)
MOV X26, 232(X2)
MOV X27, 240(X2)
MOV X28, 248(X2)
MOV X29, 256(X2)
MOV X30, 264(X2)
MOV X31, 272(X2)
// This takes arguments T0 and T1.
CALL runtime·wbBufFlush(SB)
MOV 24(X2), X0
MOV 32(X2), X1
MOV 40(X2), X2 // restores SP with its own saved value (no-op)
MOV 48(X2), X3
MOV 56(X2), X4
MOV 64(X2), X5
MOV 72(X2), X6
MOV 80(X2), X7
MOV 88(X2), X8
MOV 96(X2), X9
MOV 104(X2), X10
MOV 112(X2), X11
MOV 120(X2), X12
MOV 128(X2), X13
MOV 136(X2), X14
MOV 144(X2), X15
MOV 152(X2), X16
MOV 160(X2), X17
MOV 168(X2), X18
MOV 176(X2), X19
MOV 184(X2), X20
MOV 192(X2), X21
MOV 200(X2), X22
MOV 208(X2), X23
MOV 216(X2), X24
MOV 224(X2), X25
MOV 232(X2), X26
MOV 240(X2), X27
MOV 248(X2), X28
MOV 256(X2), X29
MOV 264(X2), X30
MOV 272(X2), X31
JMP ret
// Note: these functions use a special calling convention to save generated code space.
// Arguments are passed in registers, but the space for those arguments are allocated
// in the caller's stack frame. These stubs write the args into that stack space and
// then tail call to the corresponding runtime handler.
// The tail call makes these stubs disappear in backtraces.
// (Which registers carry x and y varies per bounds-check kind; the
// compiler's bounds-check lowering picks them.)
TEXT runtime·panicIndex(SB),NOSPLIT,$0-16
MOV T0, x+0(FP)
MOV T1, y+8(FP)
JMP runtime·goPanicIndex(SB)
TEXT runtime·panicIndexU(SB),NOSPLIT,$0-16
MOV T0, x+0(FP)
MOV T1, y+8(FP)
JMP runtime·goPanicIndexU(SB)
TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-16
MOV T1, x+0(FP)
MOV T2, y+8(FP)
JMP runtime·goPanicSliceAlen(SB)
TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-16
MOV T1, x+0(FP)
MOV T2, y+8(FP)
JMP runtime·goPanicSliceAlenU(SB)
TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-16
MOV T1, x+0(FP)
MOV T2, y+8(FP)
JMP runtime·goPanicSliceAcap(SB)
TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-16
MOV T1, x+0(FP)
MOV T2, y+8(FP)
JMP runtime·goPanicSliceAcapU(SB)
TEXT runtime·panicSliceB(SB),NOSPLIT,$0-16
MOV T0, x+0(FP)
MOV T1, y+8(FP)
JMP runtime·goPanicSliceB(SB)
TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-16
MOV T0, x+0(FP)
MOV T1, y+8(FP)
JMP runtime·goPanicSliceBU(SB)
TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-16
MOV T2, x+0(FP)
MOV T3, y+8(FP)
JMP runtime·goPanicSlice3Alen(SB)
TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-16
MOV T2, x+0(FP)
MOV T3, y+8(FP)
JMP runtime·goPanicSlice3AlenU(SB)
TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-16
MOV T2, x+0(FP)
MOV T3, y+8(FP)
JMP runtime·goPanicSlice3Acap(SB)
TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-16
MOV T2, x+0(FP)
MOV T3, y+8(FP)
JMP runtime·goPanicSlice3AcapU(SB)
TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-16
MOV T1, x+0(FP)
MOV T2, y+8(FP)
JMP runtime·goPanicSlice3B(SB)
TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-16
MOV T1, x+0(FP)
MOV T2, y+8(FP)
JMP runtime·goPanicSlice3BU(SB)
TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-16
MOV T0, x+0(FP)
MOV T1, y+8(FP)
JMP runtime·goPanicSlice3C(SB)
TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-16
MOV T0, x+0(FP)
MOV T1, y+8(FP)
JMP runtime·goPanicSlice3CU(SB)
// mainPC holds the entry point handed to newproc in rt0_go.
DATA runtime·mainPC+0(SB)/8,$runtime·main(SB)
GLOBL runtime·mainPC(SB),RODATA,$8

View File

@ -0,0 +1,12 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
// Full memory fence (fence iorw,iorw), hand-encoded.
#define FENCE WORD $0x0ff0000f
// func publicationBarrier()
// Memory barrier ensuring prior stores are visible before a pointer
// publishing them is stored.
TEXT ·publicationBarrier(SB),NOSPLIT|NOFRAME,$0-0
FENCE
RET

View File

@ -0,0 +1,209 @@
// Generated using cgo, then manually converted into appropriate naming and code
// for the Go runtime.
// go tool cgo -godefs defs_linux.go defs1_linux.go defs2_linux.go
package runtime
// Linux/riscv64 kernel ABI constants: errnos, mmap/madvise flags,
// sigaction flags, signal numbers, si_code values, itimer and epoll
// constants. Values mirror the Linux UAPI headers.
const (
	_EINTR = 0x4
	_EAGAIN = 0xb
	_ENOMEM = 0xc
	_ENOSYS = 0x26
	_PROT_NONE = 0x0
	_PROT_READ = 0x1
	_PROT_WRITE = 0x2
	_PROT_EXEC = 0x4
	_MAP_ANON = 0x20
	_MAP_PRIVATE = 0x2
	_MAP_FIXED = 0x10
	_MADV_DONTNEED = 0x4
	_MADV_FREE = 0x8
	_MADV_HUGEPAGE = 0xe
	_MADV_NOHUGEPAGE = 0xf
	_SA_RESTART = 0x10000000
	_SA_ONSTACK = 0x8000000
	_SA_RESTORER = 0x0
	_SA_SIGINFO = 0x4
	_SIGHUP = 0x1
	_SIGINT = 0x2
	_SIGQUIT = 0x3
	_SIGILL = 0x4
	_SIGTRAP = 0x5
	_SIGABRT = 0x6
	_SIGBUS = 0x7
	_SIGFPE = 0x8
	_SIGKILL = 0x9
	_SIGUSR1 = 0xa
	_SIGSEGV = 0xb
	_SIGUSR2 = 0xc
	_SIGPIPE = 0xd
	_SIGALRM = 0xe
	_SIGSTKFLT = 0x10
	_SIGCHLD = 0x11
	_SIGCONT = 0x12
	_SIGSTOP = 0x13
	_SIGTSTP = 0x14
	_SIGTTIN = 0x15
	_SIGTTOU = 0x16
	_SIGURG = 0x17
	_SIGXCPU = 0x18
	_SIGXFSZ = 0x19
	_SIGVTALRM = 0x1a
	_SIGPROF = 0x1b
	_SIGWINCH = 0x1c
	_SIGIO = 0x1d
	_SIGPWR = 0x1e
	_SIGSYS = 0x1f
	_FPE_INTDIV = 0x1
	_FPE_INTOVF = 0x2
	_FPE_FLTDIV = 0x3
	_FPE_FLTOVF = 0x4
	_FPE_FLTUND = 0x5
	_FPE_FLTRES = 0x6
	_FPE_FLTINV = 0x7
	_FPE_FLTSUB = 0x8
	_BUS_ADRALN = 0x1
	_BUS_ADRERR = 0x2
	_BUS_OBJERR = 0x3
	_SEGV_MAPERR = 0x1
	_SEGV_ACCERR = 0x2
	_ITIMER_REAL = 0x0
	_ITIMER_VIRTUAL = 0x1
	_ITIMER_PROF = 0x2
	_EPOLLIN = 0x1
	_EPOLLOUT = 0x4
	_EPOLLERR = 0x8
	_EPOLLHUP = 0x10
	_EPOLLRDHUP = 0x2000
	_EPOLLET = 0x80000000
	_EPOLL_CLOEXEC = 0x80000
	_EPOLL_CTL_ADD = 0x1
	_EPOLL_CTL_DEL = 0x2
	_EPOLL_CTL_MOD = 0x3
)
// timespec mirrors the kernel's struct timespec (64-bit fields on riscv64).
type timespec struct {
	tv_sec int64
	tv_nsec int64
}
// setNsec splits a nanosecond count into seconds and nanoseconds.
//go:nosplit
func (ts *timespec) setNsec(ns int64) {
	ts.tv_sec = ns / 1e9
	ts.tv_nsec = ns % 1e9
}
// timeval mirrors the kernel's struct timeval.
type timeval struct {
	tv_sec int64
	tv_usec int64
}
// set_usec sets the microseconds field from a 32-bit value.
func (tv *timeval) set_usec(x int32) {
	tv.tv_usec = int64(x)
}
// sigactiont mirrors the kernel's struct sigaction
// (field order as used by the rt_sigaction syscall).
type sigactiont struct {
	sa_handler uintptr
	sa_flags uint64
	sa_restorer uintptr
	sa_mask uint64
}
// siginfo mirrors the leading fields of the kernel's siginfo_t.
type siginfo struct {
	si_signo int32
	si_errno int32
	si_code int32
	// below here is a union; si_addr is the only field we use
	si_addr uint64
}
// itimerval mirrors the kernel's struct itimerval (setitimer).
type itimerval struct {
	it_interval timeval
	it_value timeval
}
// epollevent mirrors the kernel's struct epoll_event.
type epollevent struct {
	events uint32
	pad_cgo_0 [4]byte
	data [8]byte // unaligned uintptr
}
// open(2) flag constants used by the runtime.
const (
	_O_RDONLY = 0x0
	_O_NONBLOCK = 0x800
	_O_CLOEXEC = 0x80000
)
// user_regs_struct mirrors the kernel's riscv64 general-purpose
// register set as exposed in the signal context (pc first, then x1..x31
// by ABI name).
type user_regs_struct struct {
	pc uint64
	ra uint64
	sp uint64
	gp uint64
	tp uint64
	t0 uint64
	t1 uint64
	t2 uint64
	s0 uint64
	s1 uint64
	a0 uint64
	a1 uint64
	a2 uint64
	a3 uint64
	a4 uint64
	a5 uint64
	a6 uint64
	a7 uint64
	s2 uint64
	s3 uint64
	s4 uint64
	s5 uint64
	s6 uint64
	s7 uint64
	s8 uint64
	s9 uint64
	s10 uint64
	s11 uint64
	t3 uint64
	t4 uint64
	t5 uint64
	t6 uint64
}
// user_fpregs_struct holds the floating-point state as an opaque blob.
type user_fpregs_struct struct {
	f [528]byte
}
// usigset mirrors the kernel sigset_t.
type usigset struct {
	us_x__val [16]uint64
}
// sigcontext is the machine context delivered with a signal.
type sigcontext struct {
	sc_regs user_regs_struct
	sc_fpregs user_fpregs_struct
}
// stackt mirrors stack_t (sigaltstack).
type stackt struct {
	ss_sp *byte
	ss_flags int32
	ss_size uintptr
}
// ucontext mirrors the kernel's struct ucontext for signal handlers.
type ucontext struct {
	uc_flags uint64
	uc_link *ucontext
	uc_stack stackt
	uc_sigmask usigset
	uc_x__unused [0]uint8
	uc_pad_cgo_0 [8]byte
	uc_mcontext sigcontext
}

View File

@ -179,7 +179,7 @@ func infoBigStruct() []byte {
typeScalar, typeScalar, typeScalar, typeScalar, // t int; y uint16; u uint64
typePointer, typeScalar, // i string
}
case "arm64", "amd64", "mips64", "mips64le", "ppc64", "ppc64le", "s390x", "wasm":
case "arm64", "amd64", "mips64", "mips64le", "ppc64", "ppc64le", "riscv64", "s390x", "wasm":
return []byte{
typePointer, // q *int
typeScalar, typeScalar, typeScalar, // w byte; e [17]byte

View File

@ -6,7 +6,7 @@
// xxhash: https://code.google.com/p/xxhash/
// cityhash: https://code.google.com/p/cityhash/
// +build amd64 arm64 mips64 mips64le ppc64 ppc64le s390x wasm
// +build amd64 arm64 mips64 mips64le ppc64 ppc64le riscv64 s390x wasm
package runtime

View File

@ -0,0 +1,67 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Go declarations for the riscv64 atomic primitives implemented in
// atomic_riscv64.s.
package atomic
import "unsafe"
//go:noescape
func Xadd(ptr *uint32, delta int32) uint32
//go:noescape
func Xadd64(ptr *uint64, delta int64) uint64
//go:noescape
func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
//go:noescape
func Xchg(ptr *uint32, new uint32) uint32
//go:noescape
func Xchg64(ptr *uint64, new uint64) uint64
//go:noescape
func Xchguintptr(ptr *uintptr, new uintptr) uintptr
//go:noescape
func Load(ptr *uint32) uint32
//go:noescape
func Load8(ptr *uint8) uint8
//go:noescape
func Load64(ptr *uint64) uint64
// NO go:noescape annotation; *ptr escapes if result escapes (#31525)
func Loadp(ptr unsafe.Pointer) unsafe.Pointer
//go:noescape
func LoadAcq(ptr *uint32) uint32
//go:noescape
func Or8(ptr *uint8, val uint8)
//go:noescape
func And8(ptr *uint8, val uint8)
//go:noescape
func Cas64(ptr *uint64, old, new uint64) bool
//go:noescape
func CasRel(ptr *uint32, old, new uint32) bool
//go:noescape
func Store(ptr *uint32, val uint32)
//go:noescape
func Store8(ptr *uint8, val uint8)
//go:noescape
func Store64(ptr *uint64, val uint64)
// NO go:noescape annotation; see atomic_pointer.go.
func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer)
//go:noescape
func StoreRel(ptr *uint32, val uint32)

View File

@ -0,0 +1,242 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// RISC-V's atomic operations have two bits, aq ("acquire") and rl ("release"),
// which may be toggled on and off. Their precise semantics are defined in
// section 6.3 of the specification, but the basic idea is as follows:
//
// - If neither aq nor rl is set, the CPU may reorder the atomic arbitrarily.
// It guarantees only that it will execute atomically.
//
// - If aq is set, the CPU may move the instruction backward, but not forward.
//
// - If rl is set, the CPU may move the instruction forward, but not backward.
//
// - If both are set, the CPU may not reorder the instruction at all.
//
// These four modes correspond to other well-known memory models on other CPUs.
// On ARM, aq corresponds to a dmb ishst, aq+rl corresponds to a dmb ish. On
// Intel, aq corresponds to an lfence, rl to an sfence, and aq+rl to an mfence
// (or a lock prefix).
//
// Go's memory model requires that
// - if a read happens after a write, the read must observe the write, and
// that
// - if a read happens concurrently with a write, the read may observe the
// write.
// aq is sufficient to guarantee this, so that's what we use here. (This jibes
// with ARM, which uses dmb ishst.)
#include "textflag.h"
// AMOWSC/AMODSC hand-encode 32-/64-bit AMO instructions with rd, rs1,
// rs2 and the funct5 op folded in.
// NOTE(review): base 0x0600202f / 0x0600302f sets BOTH the aq (bit 26)
// and rl (bit 25) bits, i.e. aqrl ordering, despite the prose above and
// the per-use ".aq" comments — confirm intended ordering.
#define AMOWSC(op,rd,rs1,rs2) WORD $0x0600202f+rd<<7+rs1<<15+rs2<<20+op<<27
#define AMODSC(op,rd,rs1,rs2) WORD $0x0600302f+rd<<7+rs1<<15+rs2<<20+op<<27
#define ADD_ 0
#define SWAP_ 1
#define LR_ 2
#define SC_ 3
#define OR_ 8
#define AND_ 12
// Full memory fence (fence iorw,iorw), hand-encoded.
#define FENCE WORD $0x0ff0000f
// func Cas(ptr *uint32, old, new uint32) bool
// Atomically:
// if(*val == *old){
// *val = new;
// return 1;
// } else {
// return 0;
// }
TEXT ·Cas(SB), NOSPLIT, $0-17
MOV ptr+0(FP), A0
MOVW old+8(FP), A1
MOVW new+12(FP), A2
cas_again:
AMOWSC(LR_,13,10,0) // lr.w a3,(a0)
BNE A3, A1, cas_fail
AMOWSC(SC_,14,10,12) // sc.w a4,a2,(a0)
BNE A4, ZERO, cas_again // store-conditional failed; retry
MOV $1, A0
MOVB A0, ret+16(FP)
RET
cas_fail:
MOV $0, A0
// ret is a 1-byte bool in a $0-17 frame: a full-width MOV would write
// 8 bytes into the 1-byte slot. Store one byte, matching both the
// success path above and Cas64's failure path.
MOVB A0, ret+16(FP)
RET
// func Cas64(ptr *uint64, old, new uint64) bool
// 64-bit compare-and-swap via an LR/SC loop.
TEXT ·Cas64(SB), NOSPLIT, $0-25
MOV ptr+0(FP), A0
MOV old+8(FP), A1
MOV new+16(FP), A2
cas_again:
AMODSC(LR_,13,10,0) // lr.d.aq a3,(a0)
BNE A3, A1, cas_fail
AMODSC(SC_,14,10,12) // sc.d.aq a4,a2,(a0)
BNE A4, ZERO, cas_again // store-conditional failed; retry
MOV $1, A0
MOVB A0, ret+24(FP)
RET
cas_fail:
MOVB ZERO, ret+24(FP)
RET
// func Load(ptr *uint32) uint32
// Atomic load via LR (result lands in rd = a0).
TEXT ·Load(SB),NOSPLIT|NOFRAME,$0-12
MOV ptr+0(FP), A0
AMOWSC(LR_,10,10,0)
MOVW A0, ret+8(FP)
RET
// func Load8(ptr *uint8) uint8
// No byte-sized AMO; a plain load bracketed by fences.
TEXT ·Load8(SB),NOSPLIT|NOFRAME,$0-9
MOV ptr+0(FP), A0
FENCE
MOVBU (A0), A1
FENCE
MOVB A1, ret+8(FP)
RET
// func Load64(ptr *uint64) uint64
TEXT ·Load64(SB),NOSPLIT|NOFRAME,$0-16
MOV ptr+0(FP), A0
AMODSC(LR_,10,10,0)
MOV A0, ret+8(FP)
RET
// func Store(ptr *uint32, val uint32)
// Atomic store via AMOSWAP with rd = x0 (old value discarded).
TEXT ·Store(SB), NOSPLIT, $0-12
MOV ptr+0(FP), A0
MOVW val+8(FP), A1
AMOWSC(SWAP_,0,10,11)
RET
// func Store8(ptr *uint8, val uint8)
// No byte-sized AMO; a plain store bracketed by fences.
TEXT ·Store8(SB), NOSPLIT, $0-9
MOV ptr+0(FP), A0
MOVBU val+8(FP), A1
FENCE
MOVB A1, (A0)
FENCE
RET
// func Store64(ptr *uint64, val uint64)
TEXT ·Store64(SB), NOSPLIT, $0-16
MOV ptr+0(FP), A0
MOV val+8(FP), A1
AMODSC(SWAP_,0,10,11)
RET
// Pointer/uintptr/int aliases: 64-bit on riscv64, so they tail-call
// the 64-bit (or 32-bit, for CasRel) primitives above.
TEXT ·Casp1(SB), NOSPLIT, $0-25
JMP ·Cas64(SB)
TEXT ·Casuintptr(SB),NOSPLIT,$0-25
JMP ·Cas64(SB)
TEXT ·CasRel(SB), NOSPLIT, $0-17
JMP ·Cas(SB)
TEXT ·Loaduintptr(SB),NOSPLIT,$0-16
JMP ·Load64(SB)
TEXT ·Storeuintptr(SB),NOSPLIT,$0-16
JMP ·Store64(SB)
TEXT ·Loaduint(SB),NOSPLIT,$0-16
JMP ·Loaduintptr(SB)
TEXT ·Loadint64(SB),NOSPLIT,$0-16
JMP ·Loaduintptr(SB)
// func Xaddint64(ptr *int64, delta int64) int64
// Atomically adds delta to *ptr and returns the new value.
TEXT ·Xaddint64(SB),NOSPLIT,$0-24
MOV ptr+0(FP), A0
MOV delta+8(FP), A1
WORD $0x04b5352f // amoadd.d.aq a0,a1,(a0)
ADD A0, A1, A0 // new = old + delta
// The return value is a full int64: MOVW would truncate/sign-extend
// it to 32 bits. Store all 8 bytes (same as Xadd64's return path).
MOV A0, ret+16(FP)
RET
// Acquire-load and pointer-store variants map onto the plain
// (already ordered) primitives above.
TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$0-12
JMP ·Load(SB)
// func Loadp(ptr unsafe.Pointer) unsafe.Pointer
TEXT ·Loadp(SB),NOSPLIT,$0-16
JMP ·Load64(SB)
// func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer)
TEXT ·StorepNoWB(SB), NOSPLIT, $0-16
JMP ·Store64(SB)
TEXT ·StoreRel(SB), NOSPLIT, $0-12
JMP ·Store(SB)
// func Xchg(ptr *uint32, new uint32) uint32
// Atomic exchange; returns the old value (amoswap rd = a1).
TEXT ·Xchg(SB), NOSPLIT, $0-20
MOV ptr+0(FP), A0
MOVW new+8(FP), A1
AMOWSC(SWAP_,11,10,11)
MOVW A1, ret+16(FP)
RET
// func Xchg64(ptr *uint64, new uint64) uint64
TEXT ·Xchg64(SB), NOSPLIT, $0-24
MOV ptr+0(FP), A0
MOV new+8(FP), A1
AMODSC(SWAP_,11,10,11)
MOV A1, ret+16(FP)
RET
// Atomically:
// *val += delta;
// return *val;
// func Xadd(ptr *uint32, delta int32) uint32
TEXT ·Xadd(SB), NOSPLIT, $0-20
MOV ptr+0(FP), A0
MOVW delta+8(FP), A1
AMOWSC(ADD_,12,10,11) // amoadd: a2 = old value
ADD A2,A1,A0 // new = old + delta
MOVW A0, ret+16(FP)
RET
// func Xadd64(ptr *uint64, delta int64) uint64
TEXT ·Xadd64(SB), NOSPLIT, $0-24
MOV ptr+0(FP), A0
MOV delta+8(FP), A1
AMODSC(ADD_,12,10,11) // amoadd: a2 = old value
ADD A2,A1,A0 // new = old + delta
MOV A0, ret+16(FP)
RET
// func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
TEXT ·Xadduintptr(SB), NOSPLIT, $0-24
JMP ·Xadd64(SB)
// func Xchguintptr(ptr *uintptr, new uintptr) uintptr
TEXT ·Xchguintptr(SB), NOSPLIT, $0-24
JMP ·Xchg64(SB)
// func And8(ptr *uint8, val uint8)
// No byte AMO: align ptr down to a word, build a 32-bit mask that is
// all-ones except for val's bits in the target byte, then amoand.
TEXT ·And8(SB), NOSPLIT, $0-9
MOV ptr+0(FP), A0
MOVBU val+8(FP), A1
AND $3, A0, A2 // A2 = byte offset within word
AND $-4, A0 // A0 = word-aligned address
SLL $3, A2 // A2 = bit shift for target byte
XOR $255, A1 // invert val's low byte...
SLL A2, A1 // ...shift into position...
XOR $-1, A1 // ...re-invert: ones everywhere but target byte
AMOWSC(AND_,0,10,11)
RET
// func Or8(ptr *uint8, val uint8)
// Same aligned-word trick as And8: shift val into its byte lane and amoor.
TEXT ·Or8(SB), NOSPLIT, $0-9
MOV ptr+0(FP), A0
MOVBU val+8(FP), A1
AND $3, A0, A2 // A2 = byte offset within word
AND $-4, A0 // A0 = word-aligned address
SLL $3, A2 // A2 = bit shift for target byte
SLL A2, A1 // val shifted into position; other lanes zero
AMOWSC(OR_,0,10,11)
RET

View File

@ -14,6 +14,7 @@ const (
MIPS
MIPS64
PPC64
RISCV64
S390X
WASM
)

View File

@ -0,0 +1,18 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sys
// Architecture constants for riscv64.
const (
	ArchFamily = RISCV64
	BigEndian = false
	CacheLineSize = 64
	DefaultPhysPageSize = 4096
	PCQuantum = 4 // minimum instruction size (no compressed insns assumed)
	Int64Align = 8
	HugePageSize = 1 << 21
	MinFrameSize = 8
)
// Uintreg is the size of a machine register.
type Uintreg uint64

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64 arm64 mips64 mips64le ppc64 ppc64le s390x wasm
// +build amd64 arm64 mips64 mips64le ppc64 ppc64le riscv64 s390x wasm
package runtime

View File

@ -502,6 +502,7 @@ func mallocinit() {
// allocation at 0x40 << 32 because when using 4k pages with 3-level
// translation buffers, the user address space is limited to 39 bits
// On darwin/arm64, the address space is even smaller.
//
// On AIX, mmaps starts at 0x0A00000000000000 for 64-bit.
// processes.
for i := 0x7f; i >= 0; i-- {

44
src/runtime/memclr_riscv64.s Executable file
View File

@ -0,0 +1,44 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
// void runtime·memclrNoHeapPointers(void*, uintptr)
// Zeroes n bytes at ptr. Byte loop until 8-aligned, then word loop,
// then a byte loop for the tail. T1 = cursor, T4 = end, T5 = last
// word-store start.
TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT,$0-16
MOV ptr+0(FP), T1
MOV n+8(FP), T2
ADD T1, T2, T4 // T4 = ptr + n (end)
// If less than eight bytes, do one byte at a time.
SLTU $8, T2, T3
BNE T3, ZERO, outcheck
// Do one byte at a time until eight-aligned.
JMP aligncheck
align:
MOVB ZERO, (T1)
ADD $1, T1
aligncheck:
AND $7, T1, T3
BNE T3, ZERO, align
// Do eight bytes at a time as long as there is room.
ADD $-7, T4, T5 // T5 = last address where a full word fits
JMP wordscheck
words:
MOV ZERO, (T1)
ADD $8, T1
wordscheck:
SLTU T5, T1, T3
BNE T3, ZERO, words
JMP outcheck
out:
MOVB ZERO, (T1)
ADD $1, T1
outcheck:
BNE T1, T4, out
done:
RET

96
src/runtime/memmove_riscv64.s Executable file
View File

@ -0,0 +1,96 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
// void runtime·memmove(void*, void*, uintptr)
// Overlap-safe copy of n bytes from 'from' (T1) to 'to' (T0).
// Forward copy when to < from; backward copy otherwise.
// T5 = from + n; forward path aligns on 'from'.
TEXT runtime·memmove(SB),NOSPLIT,$-0-24
MOV to+0(FP), T0
MOV from+8(FP), T1
MOV n+16(FP), T2
ADD T1, T2, T5 // T5 = from + n
// If the destination is ahead of the source, start at the end of the
// buffer and go backward.
BLTU T1, T0, b
// If less than eight bytes, do one byte at a time.
SLTU $8, T2, T3
BNE T3, ZERO, f_outcheck
// Do one byte at a time until from is eight-aligned.
JMP f_aligncheck
f_align:
MOVB (T1), T3
MOVB T3, (T0)
ADD $1, T0
ADD $1, T1
f_aligncheck:
AND $7, T1, T3
BNE T3, ZERO, f_align
// Do eight bytes at a time as long as there is room.
ADD $-7, T5, T6 // T6 = last address where a full word read fits
JMP f_wordscheck
f_words:
MOV (T1), T3
MOV T3, (T0)
ADD $8, T0
ADD $8, T1
f_wordscheck:
SLTU T6, T1, T3
BNE T3, ZERO, f_words
// Finish off the remaining partial word.
JMP f_outcheck
f_out:
MOVB (T1), T3
MOVB T3, (T0)
ADD $1, T0
ADD $1, T1
f_outcheck:
BNE T1, T5, f_out
RET
b:
// Backward copy: T4 = to + n, T5 = from + n; cursors decrement.
ADD T0, T2, T4
// If less than eight bytes, do one byte at a time.
SLTU $8, T2, T3
BNE T3, ZERO, b_outcheck
// Do one byte at a time until from+n is eight-aligned.
JMP b_aligncheck
b_align:
ADD $-1, T4
ADD $-1, T5
MOVB (T5), T3
MOVB T3, (T4)
b_aligncheck:
AND $7, T5, T3
BNE T3, ZERO, b_align
// Do eight bytes at a time as long as there is room.
ADD $7, T1, T6 // T6 = lowest address where a full word read fits
JMP b_wordscheck
b_words:
ADD $-8, T4
ADD $-8, T5
MOV (T5), T3
MOV T3, (T4)
b_wordscheck:
SLTU T5, T6, T3
BNE T3, ZERO, b_words
// Finish off the remaining partial word.
JMP b_outcheck
b_out:
ADD $-1, T4
ADD $-1, T5
MOVB (T5), T3
MOVB T3, (T4)
b_outcheck:
BNE T5, T1, b_out
RET

View File

@ -116,6 +116,13 @@ const (
_CLONE_NEWUTS = 0x4000000
_CLONE_NEWIPC = 0x8000000
// As of QEMU 2.8.0 (5ea2fc84d), user emulation requires all six of these
// flags to be set when creating a thread; attempts to share the other
// five but leave SYSVSEM unshared will fail with -EINVAL.
//
// In non-QEMU environments CLONE_SYSVSEM is inconsequential as we do not
// use System V semaphores.
cloneFlags = _CLONE_VM | /* share memory */
_CLONE_FS | /* share cwd, etc */
_CLONE_FILES | /* share fd table */

View File

@ -0,0 +1,14 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
// _rt0_riscv64_linux is the ELF entry point. The kernel leaves argc at
// the top of the stack with argv immediately above it.
TEXT _rt0_riscv64_linux(SB),NOSPLIT|NOFRAME,$0
	MOV	0(X2), A0	// argc, from the top of the OS-provided stack
	ADD	$8, X2, A1	// argv = &stack[1]
	JMP	main(SB)
TEXT main(SB),NOSPLIT|NOFRAME,$0
	MOV	$runtime·rt0_go(SB), T0
	JALR	ZERO, T0	// tail-jump (no link register) into runtime startup

View File

@ -0,0 +1,68 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"runtime/internal/sys"
"unsafe"
)
// sigctxt wraps the machine-dependent context delivered to a signal
// handler: the siginfo record and the opaque ucontext pointer.
type sigctxt struct {
	info *siginfo       // signal information (si_code, si_addr, ...)
	ctxt unsafe.Pointer // *ucontext from the kernel
}
//go:nosplit
//go:nowritebarrierrec
func (c *sigctxt) regs() *sigcontext { return &(*ucontext)(c.ctxt).uc_mcontext }
// Accessors for the RISC-V general-purpose registers saved at the time
// of the signal (ABI names: ra, sp, gp, tp, t0-t6, s0-s11, a0-a7).
func (c *sigctxt) ra() uint64 { return c.regs().sc_regs.ra }
func (c *sigctxt) sp() uint64 { return c.regs().sc_regs.sp }
func (c *sigctxt) gp() uint64 { return c.regs().sc_regs.gp }
func (c *sigctxt) tp() uint64 { return c.regs().sc_regs.tp }
func (c *sigctxt) t0() uint64 { return c.regs().sc_regs.t0 }
func (c *sigctxt) t1() uint64 { return c.regs().sc_regs.t1 }
func (c *sigctxt) t2() uint64 { return c.regs().sc_regs.t2 }
func (c *sigctxt) s0() uint64 { return c.regs().sc_regs.s0 }
func (c *sigctxt) s1() uint64 { return c.regs().sc_regs.s1 }
func (c *sigctxt) a0() uint64 { return c.regs().sc_regs.a0 }
func (c *sigctxt) a1() uint64 { return c.regs().sc_regs.a1 }
func (c *sigctxt) a2() uint64 { return c.regs().sc_regs.a2 }
func (c *sigctxt) a3() uint64 { return c.regs().sc_regs.a3 }
func (c *sigctxt) a4() uint64 { return c.regs().sc_regs.a4 }
func (c *sigctxt) a5() uint64 { return c.regs().sc_regs.a5 }
func (c *sigctxt) a6() uint64 { return c.regs().sc_regs.a6 }
func (c *sigctxt) a7() uint64 { return c.regs().sc_regs.a7 }
func (c *sigctxt) s2() uint64 { return c.regs().sc_regs.s2 }
func (c *sigctxt) s3() uint64 { return c.regs().sc_regs.s3 }
func (c *sigctxt) s4() uint64 { return c.regs().sc_regs.s4 }
func (c *sigctxt) s5() uint64 { return c.regs().sc_regs.s5 }
func (c *sigctxt) s6() uint64 { return c.regs().sc_regs.s6 }
func (c *sigctxt) s7() uint64 { return c.regs().sc_regs.s7 }
func (c *sigctxt) s8() uint64 { return c.regs().sc_regs.s8 }
func (c *sigctxt) s9() uint64 { return c.regs().sc_regs.s9 }
func (c *sigctxt) s10() uint64 { return c.regs().sc_regs.s10 }
func (c *sigctxt) s11() uint64 { return c.regs().sc_regs.s11 }
func (c *sigctxt) t3() uint64 { return c.regs().sc_regs.t3 }
func (c *sigctxt) t4() uint64 { return c.regs().sc_regs.t4 }
func (c *sigctxt) t5() uint64 { return c.regs().sc_regs.t5 }
func (c *sigctxt) t6() uint64 { return c.regs().sc_regs.t6 }
//go:nosplit
//go:nowritebarrierrec
func (c *sigctxt) pc() uint64 { return c.regs().sc_regs.pc }
func (c *sigctxt) sigcode() uint32 { return uint32(c.info.si_code) }
func (c *sigctxt) sigaddr() uint64 { return c.info.si_addr }
// Setters used by the runtime to redirect execution out of a signal
// handler (e.g. into sigpanic).
func (c *sigctxt) set_pc(x uint64) { c.regs().sc_regs.pc = x }
func (c *sigctxt) set_ra(x uint64) { c.regs().sc_regs.ra = x }
func (c *sigctxt) set_sp(x uint64) { c.regs().sc_regs.sp = x }
func (c *sigctxt) set_gp(x uint64) { c.regs().sc_regs.gp = x }
func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
func (c *sigctxt) set_sigaddr(x uint64) {
	// Writes si_addr in place; the code assumes it sits two
	// pointer-sized words into siginfo — confirm against the
	// siginfo layout if that struct changes.
	*(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
}

View File

@ -0,0 +1,85 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux,riscv64
package runtime
import (
"runtime/internal/sys"
"unsafe"
)
// dumpregs prints the full register state captured in the signal
// context, two registers per output line, for crash diagnostics.
func dumpregs(c *sigctxt) {
	print("ra  ", hex(c.ra()), "\t")
	print("sp  ", hex(c.sp()), "\n")
	print("gp  ", hex(c.gp()), "\t")
	print("tp  ", hex(c.tp()), "\n")
	print("t0  ", hex(c.t0()), "\t")
	print("t1  ", hex(c.t1()), "\n")
	print("t2  ", hex(c.t2()), "\t")
	print("s0  ", hex(c.s0()), "\n")
	print("s1  ", hex(c.s1()), "\t")
	print("a0  ", hex(c.a0()), "\n")
	print("a1  ", hex(c.a1()), "\t")
	print("a2  ", hex(c.a2()), "\n")
	print("a3  ", hex(c.a3()), "\t")
	print("a4  ", hex(c.a4()), "\n")
	print("a5  ", hex(c.a5()), "\t")
	print("a6  ", hex(c.a6()), "\n")
	print("a7  ", hex(c.a7()), "\t")
	print("s2  ", hex(c.s2()), "\n")
	print("s3  ", hex(c.s3()), "\t")
	print("s4  ", hex(c.s4()), "\n")
	print("s5  ", hex(c.s5()), "\t")
	print("s6  ", hex(c.s6()), "\n")
	print("s7  ", hex(c.s7()), "\t")
	print("s8  ", hex(c.s8()), "\n")
	print("s9  ", hex(c.s9()), "\t")
	print("s10 ", hex(c.s10()), "\n")
	print("s11 ", hex(c.s11()), "\t")
	print("t3  ", hex(c.t3()), "\n")
	print("t4  ", hex(c.t4()), "\t")
	print("t5  ", hex(c.t5()), "\n")
	print("t6  ", hex(c.t6()), "\t")
	print("pc  ", hex(c.pc()), "\n")
}
//go:nosplit
//go:nowritebarrierrec
func (c *sigctxt) sigpc() uintptr { return uintptr(c.pc()) }
// sigsp/siglr expose the faulting goroutine's stack pointer and return
// address; fault is the address that triggered the signal.
func (c *sigctxt) sigsp() uintptr { return uintptr(c.sp()) }
func (c *sigctxt) siglr() uintptr { return uintptr(c.ra()) }
func (c *sigctxt) fault() uintptr { return uintptr(c.sigaddr()) }
// preparePanic sets up the stack to look like a call to sigpanic.
func (c *sigctxt) preparePanic(sig uint32, gp *g) {
	// We arrange RA, and pc to pretend the panicking
	// function calls sigpanic directly.
	// Always save RA to stack so that panics in leaf
	// functions are correctly handled. This smashes
	// the stack frame but we're not going back there
	// anyway.
	sp := c.sp() - sys.PtrSize // make room for the saved RA
	c.set_sp(sp)
	*(*uint64)(unsafe.Pointer(uintptr(sp))) = c.ra()
	pc := gp.sigpc
	if shouldPushSigpanic(gp, pc, uintptr(c.ra())) {
		// Make it look the like faulting PC called sigpanic.
		c.set_ra(uint64(pc))
	}
	// In case we are panicking from external C code
	c.set_gp(uint64(uintptr(unsafe.Pointer(gp))))
	c.set_pc(uint64(funcPC(sigpanic)))
}
// pushCall (injecting a call into a stopped goroutine) is not yet
// implemented on riscv64, so async preemption paths that need it
// must check pushCallSupported first.
const pushCallSupported = false
func (c *sigctxt) pushCall(targetPC uintptr) {
	throw("unimplemented")
}

View File

@ -0,0 +1,517 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// System calls and other sys.stuff for riscv64, Linux
//
#include "textflag.h"
#include "go_asm.h"
#define AT_FDCWD -100
#define SYS_brk 214
#define SYS_clock_gettime 113
#define SYS_clone 220
#define SYS_close 57
#define SYS_connect 203
#define SYS_epoll_create1 20
#define SYS_epoll_ctl 21
#define SYS_epoll_pwait 22
#define SYS_exit 93
#define SYS_exit_group 94
#define SYS_faccessat 48
#define SYS_fcntl 25
#define SYS_futex 98
#define SYS_getpid 172
#define SYS_getrlimit 163
#define SYS_gettid 178
#define SYS_gettimeofday 169
#define SYS_kill 129
#define SYS_madvise 233
#define SYS_mincore 232
#define SYS_mmap 222
#define SYS_munmap 215
#define SYS_nanosleep 101
#define SYS_openat 56
#define SYS_pipe2 59
#define SYS_pselect6 72
#define SYS_read 63
#define SYS_rt_sigaction 134
#define SYS_rt_sigprocmask 135
#define SYS_rt_sigreturn 139
#define SYS_sched_getaffinity 123
#define SYS_sched_yield 124
#define SYS_setitimer 103
#define SYS_sigaltstack 132
#define SYS_socket 198
#define SYS_tgkill 131
#define SYS_tkill 130
#define SYS_write 64
#define FENCE WORD $0x0ff0000f
// func exit(code int32)
// Terminates the whole process via exit_group.
TEXT runtime·exit(SB),NOSPLIT|NOFRAME,$0-4
	MOVW	code+0(FP), A0
	MOV	$SYS_exit_group, A7
	ECALL
	RET
// func exitThread(wait *uint32)
// Exits only the current thread. *wait is cleared (between fences) to
// signal that this thread's stack may be freed.
TEXT runtime·exitThread(SB),NOSPLIT|NOFRAME,$0-8
	MOV	wait+0(FP), A0
	// We're done using the stack.
	FENCE
	MOVW	ZERO, (A0)	// publish "stack no longer in use"
	FENCE
	MOV	$0, A0	// exit code
	MOV	$SYS_exit, A7
	ECALL
	JMP	0(PC)	// spin; the syscall should not return
// func open(name *byte, mode, perm int32) int32
// Implemented with openat(AT_FDCWD, ...), which is equivalent to open.
TEXT runtime·open(SB),NOSPLIT|NOFRAME,$0-20
	MOV	$AT_FDCWD, A0
	MOV	name+0(FP), A1
	MOVW	mode+8(FP), A2
	MOVW	perm+12(FP), A3
	MOV	$SYS_openat, A7
	ECALL
	// Kernel returns -errno; unsigned values >= -4096 mean error.
	MOV	$-4096, T0
	BGEU	T0, A0, 2(PC)	// skip next instruction on success
	MOV	$-1, A0	// error: normalize to -1
	MOVW	A0, ret+16(FP)
	RET
// func closefd(fd int32) int32
// Returns -1 on error, otherwise the syscall result.
TEXT runtime·closefd(SB),NOSPLIT|NOFRAME,$0-12
	MOVW	fd+0(FP), A0
	MOV	$SYS_close, A7
	ECALL
	MOV	$-4096, T0
	BGEU	T0, A0, 2(PC)	// skip next instruction on success
	MOV	$-1, A0
	MOVW	A0, ret+8(FP)
	RET
// func write1(fd uintptr, p unsafe.Pointer, n int32) int32
// Raw result (possibly -errno) is returned unmodified.
TEXT runtime·write1(SB),NOSPLIT|NOFRAME,$0-28
	MOV	fd+0(FP), A0
	MOV	p+8(FP), A1
	MOVW	n+16(FP), A2
	MOV	$SYS_write, A7
	ECALL
	MOVW	A0, ret+24(FP)
	RET
// func read(fd int32, p unsafe.Pointer, n int32) int32
TEXT runtime·read(SB),NOSPLIT|NOFRAME,$0-28
	MOVW	fd+0(FP), A0
	MOV	p+8(FP), A1
	MOVW	n+16(FP), A2
	MOV	$SYS_read, A7
	ECALL
	MOVW	A0, ret+24(FP)
	RET
// func pipe() (r, w int32, errno int32)
// pipe2(&r, 0): the kernel writes both fds into the result slots.
TEXT runtime·pipe(SB),NOSPLIT|NOFRAME,$0-12
	MOV	$r+0(FP), A0	// address of the r/w result pair
	MOV	ZERO, A1	// flags = 0
	MOV	$SYS_pipe2, A7
	ECALL
	MOVW	A0, errno+8(FP)
	RET
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20
	MOV	$r+8(FP), A0	// address of the r/w result pair
	MOVW	flags+0(FP), A1
	MOV	$SYS_pipe2, A7
	ECALL
	MOVW	A0, errno+16(FP)
	RET
// func getrlimit(kind int32, limit unsafe.Pointer) int32
TEXT runtime·getrlimit(SB),NOSPLIT|NOFRAME,$0-20
	MOVW	kind+0(FP), A0
	MOV	limit+8(FP), A1
	MOV	$SYS_getrlimit, A7
	ECALL
	MOVW	A0, ret+16(FP)
	RET
// func usleep(usec uint32)
// Sleeps via nanosleep. A timespec is built on the local frame:
// sec at 8(X2), nsec at 16(X2).
TEXT runtime·usleep(SB),NOSPLIT,$24-4
	MOVWU	usec+0(FP), A0
	MOV	$1000, A1
	MUL	A1, A0, A0	// A0 = usec * 1000 = total nanoseconds
	MOV	$1000000000, A1
	DIV	A1, A0, A2	// A2 = seconds
	MOV	A2, 8(X2)
	REM	A1, A0, A3	// A3 = leftover nanoseconds
	MOV	A3, 16(X2)
	ADD	$8, X2, A0	// arg 1: &timespec
	MOV	ZERO, A1	// arg 2: no remainder wanted
	MOV	$SYS_nanosleep, A7
	ECALL
	RET
// func gettid() uint32
TEXT runtime·gettid(SB),NOSPLIT,$0-4
	MOV	$SYS_gettid, A7
	ECALL
	MOVW	A0, ret+0(FP)
	RET
// func raise(sig uint32)
// Sends sig to the current thread (gettid + tkill).
TEXT runtime·raise(SB),NOSPLIT|NOFRAME,$0
	MOV	$SYS_gettid, A7
	ECALL
	// arg 1 tid - already in A0
	MOVW	sig+0(FP), A1	// arg 2
	MOV	$SYS_tkill, A7
	ECALL
	RET
// func raiseproc(sig uint32)
// Sends sig to the whole process (getpid + kill).
TEXT runtime·raiseproc(SB),NOSPLIT|NOFRAME,$0
	MOV	$SYS_getpid, A7
	ECALL
	// arg 1 pid - already in A0
	MOVW	sig+0(FP), A1	// arg 2
	MOV	$SYS_kill, A7
	ECALL
	RET
// func getpid() int
TEXT ·getpid(SB),NOSPLIT|NOFRAME,$0-8
	MOV	$SYS_getpid, A7
	ECALL
	MOV	A0, ret+0(FP)
	RET
// func tgkill(tgid, tid, sig int)
TEXT ·tgkill(SB),NOSPLIT|NOFRAME,$0-24
	MOV	tgid+0(FP), A0
	MOV	tid+8(FP), A1
	MOV	sig+16(FP), A2
	MOV	$SYS_tgkill, A7
	ECALL
	RET
// func setitimer(mode int32, new, old *itimerval)
TEXT runtime·setitimer(SB),NOSPLIT|NOFRAME,$0-24
	MOVW	mode+0(FP), A0
	MOV	new+8(FP), A1
	MOV	old+16(FP), A2
	MOV	$SYS_setitimer, A7
	ECALL
	RET
// func mincore(addr unsafe.Pointer, n uintptr, dst *byte) int32
TEXT runtime·mincore(SB),NOSPLIT|NOFRAME,$0-28
	MOV	addr+0(FP), A0
	MOV	n+8(FP), A1
	MOV	dst+16(FP), A2
	MOV	$SYS_mincore, A7
	ECALL
	MOVW	A0, ret+24(FP)
	RET
// func walltime1() (sec int64, nsec int32)
// clock_gettime(CLOCK_REALTIME) into a timespec on the local frame
// (sec at 8(X2), nsec at 16(X2)).
TEXT runtime·walltime1(SB),NOSPLIT,$24-12
	MOV	$0, A0	// CLOCK_REALTIME
	MOV	$8(X2), A1	// &timespec on our frame
	MOV	$SYS_clock_gettime, A7
	ECALL
	MOV	8(X2), T0	// sec
	MOV	16(X2), T1	// nsec
	MOV	T0, sec+0(FP)
	MOVW	T1, nsec+8(FP)
	RET
// func nanotime1() int64
// clock_gettime(CLOCK_MONOTONIC), folded into a single nanosecond count.
TEXT runtime·nanotime1(SB),NOSPLIT,$24-8
	MOV	$1, A0	// CLOCK_MONOTONIC
	MOV	$8(X2), A1	// &timespec on our frame
	MOV	$SYS_clock_gettime, A7
	ECALL
	MOV	8(X2), T0	// sec
	MOV	16(X2), T1	// nsec
	// sec is in T0, nsec in T1
	// return nsec in T0
	MOV	$1000000000, T2
	MUL	T2, T0	// T0 = sec * 1e9
	ADD	T1, T0	// T0 += nsec
	MOV	T0, ret+0(FP)
	RET
// func rtsigprocmask(how int32, new, old *sigset, size int32)
// Crashes (undefined instruction) if the kernel reports an error,
// since a failing sigprocmask leaves the runtime in a bad state.
TEXT runtime·rtsigprocmask(SB),NOSPLIT|NOFRAME,$0-28
	MOVW	how+0(FP), A0
	MOV	new+8(FP), A1
	MOV	old+16(FP), A2
	MOVW	size+24(FP), A3
	MOV	$SYS_rt_sigprocmask, A7
	ECALL
	MOV	$-4096, T0
	BLTU	A0, T0, 2(PC)	// success: skip the crash
	WORD	$0	// crash
	RET
// func rt_sigaction(sig uintptr, new, old *sigactiont, size uintptr) int32
TEXT runtime·rt_sigaction(SB),NOSPLIT|NOFRAME,$0-36
	MOV	sig+0(FP), A0
	MOV	new+8(FP), A1
	MOV	old+16(FP), A2
	MOV	size+24(FP), A3
	MOV	$SYS_rt_sigaction, A7
	ECALL
	MOVW	A0, ret+32(FP)
	RET
// func sigfwd(fn uintptr, sig uint32, info *siginfo, ctx unsafe.Pointer)
// Forwards a signal to a non-Go handler fn(sig, info, ctx).
TEXT runtime·sigfwd(SB),NOSPLIT,$0-32
	MOVW	sig+8(FP), A0
	MOV	info+16(FP), A1
	MOV	ctx+24(FP), A2
	MOV	fn+0(FP), T1
	JALR	RA, T1	// indirect call to the handler
	RET
// func sigtramp(signo, ureg, ctxt unsafe.Pointer)
// Kernel-invoked signal trampoline: stashes the three arguments as
// stack arguments for sigtrampgo and calls it.
TEXT runtime·sigtramp(SB),NOSPLIT,$64
	MOVW	A0, 8(X2)	// signo
	MOV	A1, 16(X2)	// info
	MOV	A2, 24(X2)	// ctx
	// this might be called in external code context,
	// where g is not set.
	MOVBU	runtime·iscgo(SB), A0
	BEQ	A0, ZERO, 2(PC)
	CALL	runtime·load_g(SB)
	MOV	$runtime·sigtrampgo(SB), A0
	JALR	RA, A0
	RET
// func cgoSigtramp()
// Without full cgo support this simply tail-jumps to sigtramp.
TEXT runtime·cgoSigtramp(SB),NOSPLIT,$0
	MOV	$runtime·sigtramp(SB), T1
	JALR	ZERO, T1	// jump, no link
// func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (p unsafe.Pointer, err int)
// On error returns (nil, errno); on success (addr, 0).
TEXT runtime·mmap(SB),NOSPLIT|NOFRAME,$0
	MOV	addr+0(FP), A0
	MOV	n+8(FP), A1
	MOVW	prot+16(FP), A2
	MOVW	flags+20(FP), A3
	MOVW	fd+24(FP), A4
	MOVW	off+28(FP), A5
	MOV	$SYS_mmap, A7
	ECALL
	MOV	$-4096, T0
	BGEU	T0, A0, 5(PC)	// success: branch to the ok: path
	SUB	A0, ZERO, A0	// negate -errno into a positive errno
	MOV	ZERO, p+32(FP)
	MOV	A0, err+40(FP)
	RET
ok:
	MOV	A0, p+32(FP)
	MOV	ZERO, err+40(FP)
	RET
// func munmap(addr unsafe.Pointer, n uintptr)
// Crashes on failure: unmapping runtime memory must not fail silently.
TEXT runtime·munmap(SB),NOSPLIT|NOFRAME,$0
	MOV	addr+0(FP), A0
	MOV	n+8(FP), A1
	MOV	$SYS_munmap, A7
	ECALL
	MOV	$-4096, T0
	BLTU	A0, T0, 2(PC)	// success: skip the crash
	WORD	$0	// crash
	RET
// func madvise(addr unsafe.Pointer, n uintptr, flags int32)
TEXT runtime·madvise(SB),NOSPLIT|NOFRAME,$0
	MOV	addr+0(FP), A0
	MOV	n+8(FP), A1
	MOVW	flags+16(FP), A2
	MOV	$SYS_madvise, A7
	ECALL
	MOVW	A0, ret+24(FP)
	RET
// func futex(addr unsafe.Pointer, op int32, val uint32, ts, addr2 unsafe.Pointer, val3 uint32) int32
TEXT runtime·futex(SB),NOSPLIT|NOFRAME,$0
	MOV	addr+0(FP), A0
	MOVW	op+8(FP), A1
	MOVW	val+12(FP), A2
	MOV	ts+16(FP), A3
	MOV	addr2+24(FP), A4
	MOVW	val3+32(FP), A5
	MOV	$SYS_futex, A7
	ECALL
	MOVW	A0, ret+40(FP)
	RET
// func clone(flags int32, stk, mp, gp, fn unsafe.Pointer) int32
// Creates a new thread. mp/gp/fn plus a 1234 sentinel are copied below
// the child's stack pointer before the syscall, since the child cannot
// see the parent's frame; the sentinel verifies the child really is
// running on the new stack.
TEXT runtime·clone(SB),NOSPLIT|NOFRAME,$0
	MOVW	flags+0(FP), A0
	MOV	stk+8(FP), A1
	// Copy mp, gp, fn off parent stack for use by child.
	MOV	mp+16(FP), T0
	MOV	gp+24(FP), T1
	MOV	fn+32(FP), T2
	MOV	T0, -8(A1)
	MOV	T1, -16(A1)
	MOV	T2, -24(A1)
	MOV	$1234, T0	// stack sanity sentinel
	MOV	T0, -32(A1)
	MOV	$SYS_clone, A7
	ECALL
	// In parent, return.
	BEQ	ZERO, A0, child
	MOVW	ZERO, ret+40(FP)
	RET
child:
	// In child, on new stack.
	MOV	-32(X2), T0
	MOV	$1234, A0
	BEQ	A0, T0, good
	WORD	$0	// crash: sentinel missing, wrong stack
good:
	// Initialize m->procid to Linux tid
	MOV	$SYS_gettid, A7
	ECALL
	MOV	-24(X2), T2	// fn
	MOV	-16(X2), T1	// g
	MOV	-8(X2), T0	// m
	BEQ	ZERO, T0, nog
	BEQ	ZERO, T1, nog
	MOV	A0, m_procid(T0)
	// In child, set up new stack
	MOV	T0, g_m(T1)
	MOV	T1, g
nog:
	// Call fn
	JALR	RA, T2
	// It shouldn't return. If it does, exit this thread.
	MOV	$111, A0
	MOV	$SYS_exit, A7
	ECALL
	JMP	-3(PC)	// keep exiting
// func sigaltstack(new, old *stackt)
// Crashes on failure, like rtsigprocmask/munmap.
TEXT runtime·sigaltstack(SB),NOSPLIT|NOFRAME,$0
	MOV	new+0(FP), A0
	MOV	old+8(FP), A1
	MOV	$SYS_sigaltstack, A7
	ECALL
	MOV	$-4096, T0
	BLTU	A0, T0, 2(PC)	// success: skip the crash
	WORD	$0	// crash
	RET
// func osyield()
TEXT runtime·osyield(SB),NOSPLIT|NOFRAME,$0
	MOV	$SYS_sched_yield, A7
	ECALL
	RET
// func sched_getaffinity(pid, len uintptr, buf *uintptr) int32
TEXT runtime·sched_getaffinity(SB),NOSPLIT|NOFRAME,$0
	MOV	pid+0(FP), A0
	MOV	len+8(FP), A1
	MOV	buf+16(FP), A2
	MOV	$SYS_sched_getaffinity, A7
	ECALL
	MOV	A0, ret+24(FP)
	RET
// func epollcreate(size int32) int32
// size is ignored: implemented via epoll_create1(0), as on other
// modern ports (the legacy epoll_create syscall does not exist here).
TEXT runtime·epollcreate(SB),NOSPLIT|NOFRAME,$0
	MOV	$0, A0
	MOV	$SYS_epoll_create1, A7
	ECALL
	MOVW	A0, ret+8(FP)
	RET
// func epollcreate1(flags int32) int32
TEXT runtime·epollcreate1(SB),NOSPLIT|NOFRAME,$0
	MOVW	flags+0(FP), A0
	MOV	$SYS_epoll_create1, A7
	ECALL
	MOVW	A0, ret+8(FP)
	RET
// func epollctl(epfd, op, fd int32, ev *epollevent) int32
TEXT runtime·epollctl(SB),NOSPLIT|NOFRAME,$0
	MOVW	epfd+0(FP), A0
	MOVW	op+4(FP), A1
	MOVW	fd+8(FP), A2
	MOV	ev+16(FP), A3
	MOV	$SYS_epoll_ctl, A7
	ECALL
	MOVW	A0, ret+24(FP)
	RET
// func epollwait(epfd int32, ev *epollevent, nev, timeout int32) int32
// Uses epoll_pwait with a nil sigmask (plain epoll_wait is absent).
TEXT runtime·epollwait(SB),NOSPLIT|NOFRAME,$0
	MOVW	epfd+0(FP), A0
	MOV	ev+8(FP), A1
	MOVW	nev+16(FP), A2
	MOVW	timeout+20(FP), A3
	MOV	$0, A4	// sigmask = nil
	MOV	$SYS_epoll_pwait, A7
	ECALL
	MOVW	A0, ret+24(FP)
	RET
// func closeonexec(int32)
// fcntl(fd, F_SETFD, FD_CLOEXEC)
TEXT runtime·closeonexec(SB),NOSPLIT|NOFRAME,$0
	MOVW	fd+0(FP), A0	// fd
	MOV	$2, A1	// F_SETFD
	MOV	$1, A2	// FD_CLOEXEC
	MOV	$SYS_fcntl, A7
	ECALL
	RET
// func runtime·setNonblock(int32 fd)
// Read current flags with F_GETFL, OR in O_NONBLOCK, write back
// with F_SETFL.
TEXT runtime·setNonblock(SB),NOSPLIT|NOFRAME,$0-4
	MOVW	fd+0(FP), A0	// fd
	MOV	$3, A1	// F_GETFL
	MOV	$0, A2
	MOV	$SYS_fcntl, A7
	ECALL
	MOV	$0x800, A2	// O_NONBLOCK
	OR	A0, A2	// A2 = current flags | O_NONBLOCK
	MOVW	fd+0(FP), A0	// fd
	MOV	$4, A1	// F_SETFL
	MOV	$SYS_fcntl, A7
	ECALL
	RET
// func sbrk0() uintptr
// Returns the current program break.
TEXT runtime·sbrk0(SB),NOSPLIT,$0-8
	// Implemented as brk(NULL).
	MOV	$0, A0
	MOV	$SYS_brk, A7
	ECALL
	// The result is a full 64-bit address and the return type is
	// uintptr (an 8-byte slot: frame is $0-8), so store the whole
	// register. The previous MOVW wrote only the low 32 bits,
	// leaving the upper half of the return slot stale.
	MOV	A0, ret+0(FP)
	RET

View File

@ -0,0 +1,18 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import "unsafe"
// adjust Gobuf as if it executed a call to fn with context ctxt
// and then did an immediate Gosave.
func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) {
	if buf.lr != 0 {
		// A nonzero saved return address means the buffer already
		// describes a call in progress; overwriting it would lose it.
		throw("invalid use of gostartcall")
	}
	// Simulate the call: the old PC becomes the return address and
	// fn becomes the PC to resume at.
	buf.lr = buf.pc
	buf.pc = uintptr(fn)
	buf.ctxt = ctxt
}

18
src/runtime/tls_riscv64.s Normal file
View File

@ -0,0 +1,18 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "go_asm.h"
#include "go_tls.h"
#include "funcdata.h"
#include "textflag.h"
// If !iscgo, this is a no-op.
//
// NOTE: mcall() assumes this clobbers only R23 (REGTMP).
// FIXME: cgo
// save_g/load_g would save/restore the g register to/from TLS for
// cgo callbacks; until cgo is supported on riscv64 they are stubs.
TEXT runtime·save_g(SB),NOSPLIT|NOFRAME,$0-0
	RET
TEXT runtime·load_g(SB),NOSPLIT|NOFRAME,$0-0
	RET