// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build msan

#include "go_asm.h"
#include "go_tls.h"
#include "funcdata.h"
#include "textflag.h"
|
|
|
|
|
|
|
|
// This is like race_amd64.s, but for the msan calls.
// See race_amd64.s for detailed comments.
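//
// The Go-side declarations for these entry points live in runtime/msan.go.
// As a rough, non-authoritative sketch (details may differ), the read path
// there looks something like:
//
//	//go:nosplit
//	func msanread(addr unsafe.Pointer, sz uintptr) {
//		gp := getg()
//		if gp == nil || gp.m == nil || gp == gp.m.g0 || gp == gp.m.gsignal {
//			return
//		}
//		domsanread(addr, sz)
//	}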
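// The RARG* macros below name the first four integer argument registers of
// the platform C calling convention: CX, DX, R8, R9 on Windows (x64
// convention), and DI, SI, DX, CX elsewhere (System V AMD64 ABI).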
#ifdef GOOS_windows
#define RARG0 CX
#define RARG1 DX
#define RARG2 R8
#define RARG3 R9
#else
#define RARG0 DI
#define RARG1 SI
#define RARG2 DX
#define RARG3 CX
#endif

// func runtime·domsanread(addr unsafe.Pointer, sz uintptr)
// Called from msanread.
TEXT runtime·domsanread(SB), NOSPLIT, $0-16
MOVQ addr+0(FP), RARG0
MOVQ sz+8(FP), RARG1
// void __msan_read_go(void *addr, uintptr_t sz);
MOVQ $__msan_read_go(SB), AX
JMP msancall<>(SB)

// func runtime·msanwrite(addr unsafe.Pointer, sz uintptr)
// Called from instrumented code.
TEXT runtime·msanwrite(SB), NOSPLIT, $0-16
MOVQ addr+0(FP), RARG0
MOVQ sz+8(FP), RARG1
// void __msan_write_go(void *addr, uintptr_t sz);
MOVQ $__msan_write_go(SB), AX
JMP msancall<>(SB)

// func runtime·msanmalloc(addr unsafe.Pointer, sz uintptr)
TEXT runtime·msanmalloc(SB), NOSPLIT, $0-16
MOVQ addr+0(FP), RARG0
MOVQ sz+8(FP), RARG1
// void __msan_malloc_go(void *addr, uintptr_t sz);
MOVQ $__msan_malloc_go(SB), AX
JMP msancall<>(SB)

// func runtime·msanfree(addr unsafe.Pointer, sz uintptr)
TEXT runtime·msanfree(SB), NOSPLIT, $0-16
MOVQ addr+0(FP), RARG0
MOVQ sz+8(FP), RARG1
// void __msan_free_go(void *addr, uintptr_t sz);
MOVQ $__msan_free_go(SB), AX
JMP msancall<>(SB)

// func runtime·msanmove(dst, src unsafe.Pointer, sz uintptr)
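// Called for bulk data copies (the compiler instruments moves with this).
// Unlike __msan_read_go/__msan_write_go, __msan_memmove propagates the
// shadow (initialized-ness) state from src to dst without checking that src
// is fully initialized, so copying a struct with uninitialized padding bytes
// does not trigger a report. Note that __msan_memmove is technically not a
// public MSan API, though C compilers also emit direct calls to it.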
TEXT runtime·msanmove(SB), NOSPLIT, $0-24
MOVQ dst+0(FP), RARG0
MOVQ src+8(FP), RARG1
MOVQ sz+16(FP), RARG2
// void __msan_memmove(void *dst, void *src, uintptr_t sz);
MOVQ $__msan_memmove(SB), AX
JMP msancall<>(SB)

// Switches SP to g0 stack and calls (AX). Arguments already set.
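// The __msan_* entry points are C code that does not know about Go's
// stack-growth protocol, so the call is made on the fixed g0 (system)
// stack rather than a small, growable goroutine stack.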
TEXT msancall<>(SB), NOSPLIT, $0-0
get_tls(R12)
MOVQ g(R12), R14
MOVQ SP, R12 // callee-saved, preserved across the CALL
CMPQ R14, $0
JE call // no g; still on a system stack

MOVQ g_m(R14), R13
// Switch to g0 stack.
MOVQ m_g0(R13), R10
CMPQ R10, R14
JE call // already on g0

MOVQ (g_sched+gobuf_sp)(R10), SP
call:
ANDQ $~15, SP // alignment for gcc ABI
CALL AX
MOVQ R12, SP
RET