07cba70d57
Currently, for data moving, we generate an msanread of the source, followed by an msanwrite of the destination. msanread checks that the source is initialized. This has a problem: if the source is an aggregate type containing alignment padding, the padding bytes may not be considered initialized by MSAN. If we copy the aggregate type by value and the copy counts as a read, MSAN reports a use of uninitialized data. This CL changes it to use __msan_memmove for data copying, which tells MSAN to propagate initialized-ness but not check for it.

Caveat: technically __msan_memmove is not a public API of MSAN, although the C compiler does generate direct calls to it.

Also, when instrumenting a load of a struct, split the instrumentation into fields, instead of generating an msanread for the whole struct. This skips padding bytes, which may not be considered initialized in MSAN.

Fixes #42820.

Change-Id: Id861c8bbfd94cfcccefcc58eaf9e4eb43b4d85c6
Reviewed-on: https://go-review.googlesource.com/c/go/+/270859
Trust: Cherry Zhang <cherryyz@google.com>
Run-TryBot: Cherry Zhang <cherryyz@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
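To make the padding problem concrete, here is a minimal Go sketch (not code from this CL; the type and function names are made up). On amd64, the struct below carries 7 bytes of alignment padding between its fields, and before this change a by-value copy was instrumented as a checked read of the whole object.

package main

// padded has 7 bytes of alignment padding between a and b on amd64,
// because b (int64) must be 8-byte aligned. The struct is 16 bytes,
// but only 9 of them are ever written by ordinary field assignments.
type padded struct {
	a byte
	b int64
}

// copyByValue copies the whole struct, padding included.
// Under -msan, this used to be instrumented roughly as
//	msanread(src, sizeof(padded))   // checks the padding bytes too
//	msanwrite(dst, sizeof(padded))
// which could report a false "use of uninitialized value" for the padding.
// After this CL the copy is instrumented with msanmove (__msan_memmove),
// which propagates shadow state without checking it, and plain loads of
// struct values are instrumented field by field, skipping the padding.
func copyByValue(dst, src *padded) {
	*dst = *src
}

func main() {
	s := padded{a: 1, b: 2}
	var d padded
	copyByValue(&d, &s)
	_ = d
}

The runtime entry point for the new instrumentation is runtime·msanmove in the file below, which forwards to __msan_memmove on the g0 stack.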
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build msan

#include "go_asm.h"
#include "go_tls.h"
#include "funcdata.h"
#include "textflag.h"

// This is like race_amd64.s, but for the msan calls.
// See race_amd64.s for detailed comments.

#ifdef GOOS_windows
#define RARG0 CX
#define RARG1 DX
#define RARG2 R8
#define RARG3 R9
#else
#define RARG0 DI
#define RARG1 SI
#define RARG2 DX
#define RARG3 CX
#endif

// func runtime·domsanread(addr unsafe.Pointer, sz uintptr)
// Called from msanread.
TEXT	runtime·domsanread(SB), NOSPLIT, $0-16
	MOVQ	addr+0(FP), RARG0
	MOVQ	size+8(FP), RARG1
	// void __msan_read_go(void *addr, uintptr_t sz);
	MOVQ	$__msan_read_go(SB), AX
	JMP	msancall<>(SB)

// func runtime·msanwrite(addr unsafe.Pointer, sz uintptr)
// Called from instrumented code.
TEXT	runtime·msanwrite(SB), NOSPLIT, $0-16
	MOVQ	addr+0(FP), RARG0
	MOVQ	size+8(FP), RARG1
	// void __msan_write_go(void *addr, uintptr_t sz);
	MOVQ	$__msan_write_go(SB), AX
	JMP	msancall<>(SB)

// func runtime·msanmalloc(addr unsafe.Pointer, sz uintptr)
TEXT	runtime·msanmalloc(SB), NOSPLIT, $0-16
	MOVQ	addr+0(FP), RARG0
	MOVQ	size+8(FP), RARG1
	// void __msan_malloc_go(void *addr, uintptr_t sz);
	MOVQ	$__msan_malloc_go(SB), AX
	JMP	msancall<>(SB)

// func runtime·msanfree(addr unsafe.Pointer, sz uintptr)
TEXT	runtime·msanfree(SB), NOSPLIT, $0-16
	MOVQ	addr+0(FP), RARG0
	MOVQ	size+8(FP), RARG1
	// void __msan_free_go(void *addr, uintptr_t sz);
	MOVQ	$__msan_free_go(SB), AX
	JMP	msancall<>(SB)

// func runtime·msanmove(dst, src unsafe.Pointer, sz uintptr)
TEXT	runtime·msanmove(SB), NOSPLIT, $0-24
	MOVQ	dst+0(FP), RARG0
	MOVQ	src+8(FP), RARG1
	MOVQ	size+16(FP), RARG2
	// void __msan_memmove(void *dst, void *src, uintptr_t sz);
	MOVQ	$__msan_memmove(SB), AX
	JMP	msancall<>(SB)

// Switches SP to g0 stack and calls (AX). Arguments already set.
TEXT	msancall<>(SB), NOSPLIT, $0-0
	get_tls(R12)
	MOVQ	g(R12), R14
	MOVQ	SP, R12		// callee-saved, preserved across the CALL
	CMPQ	R14, $0
	JE	call	// no g; still on a system stack

	MOVQ	g_m(R14), R13
	// Switch to g0 stack.
	MOVQ	m_g0(R13), R10
	CMPQ	R10, R14
	JE	call	// already on g0

	MOVQ	(g_sched+gobuf_sp)(R10), SP
call:
	ANDQ	$~15, SP	// alignment for gcc ABI
	CALL	AX
	MOVQ	R12, SP
	RET
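For reference, the Go-side prototypes that the TEXT symbols above implement can be sketched as follows. The signatures are taken from the "// func runtime·..." comments in this file; the build tag, the //go:noescape pragmas, and the file placement are assumptions of this sketch rather than the runtime's actual declarations.

// +build msan

package runtime

import "unsafe"

// Declarations whose bodies are the assembly routines above.
// Signatures mirror the "// func runtime·..." comments in msan_amd64.s;
// the pragmas are an assumption of this sketch.

//go:noescape
func domsanread(addr unsafe.Pointer, sz uintptr) // __msan_read_go

//go:noescape
func msanwrite(addr unsafe.Pointer, sz uintptr) // __msan_write_go

//go:noescape
func msanmalloc(addr unsafe.Pointer, sz uintptr) // __msan_malloc_go

//go:noescape
func msanfree(addr unsafe.Pointer, sz uintptr) // __msan_free_go

//go:noescape
func msanmove(dst, src unsafe.Pointer, sz uintptr) // __msan_memmove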