diff --git a/go/analysis/cmd/vet/vet.go b/go/analysis/cmd/vet/vet.go index d00fb64659..c149315bf0 100644 --- a/go/analysis/cmd/vet/vet.go +++ b/go/analysis/cmd/vet/vet.go @@ -20,6 +20,7 @@ import ( "golang.org/x/tools/go/analysis/passes/asmdecl" "golang.org/x/tools/go/analysis/passes/assign" "golang.org/x/tools/go/analysis/passes/atomic" + "golang.org/x/tools/go/analysis/passes/atomicalign" "golang.org/x/tools/go/analysis/passes/bools" "golang.org/x/tools/go/analysis/passes/buildtag" "golang.org/x/tools/go/analysis/passes/cgocall" @@ -51,6 +52,7 @@ func main() { asmdecl.Analyzer, assign.Analyzer, atomic.Analyzer, + atomicalign.Analyzer, bools.Analyzer, buildtag.Analyzer, cgocall.Analyzer, diff --git a/go/analysis/passes/atomicalign/atomicalign.go b/go/analysis/passes/atomicalign/atomicalign.go new file mode 100644 index 0000000000..d3fc3e2d73 --- /dev/null +++ b/go/analysis/passes/atomicalign/atomicalign.go @@ -0,0 +1,126 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package atomicalign defines an Analyzer that checks for non-64-bit-aligned +// arguments to sync/atomic functions. On non-32-bit platforms, those functions +// panic if their argument variables are not 64-bit aligned. It is therefore +// the caller's responsibility to arrange for 64-bit alignment of such variables. 
+// See https://golang.org/pkg/sync/atomic/#pkg-note-BUG
+package atomicalign
+
+import (
+	"go/ast"
+	"go/token"
+	"go/types"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/passes/inspect"
+	"golang.org/x/tools/go/ast/inspector"
+)
+
+var Analyzer = &analysis.Analyzer{
+	Name:     "atomicalign",
+	Doc:      "check for non-64-bits-aligned arguments to sync/atomic functions",
+	Requires: []*analysis.Analyzer{inspect.Analyzer},
+	Run:      run,
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+	if 8*pass.TypesSizes.Sizeof(types.Typ[types.Uintptr]) == 64 {
+		return nil, nil // 64-bit platform
+	}
+	if imports(pass.Pkg, "sync/atomic") == nil {
+		return nil, nil // doesn't directly import sync/atomic
+	}
+
+	inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+	nodeFilter := []ast.Node{
+		(*ast.CallExpr)(nil),
+	}
+
+	inspect.Preorder(nodeFilter, func(node ast.Node) {
+		call := node.(*ast.CallExpr)
+		sel, ok := call.Fun.(*ast.SelectorExpr)
+		if !ok {
+			return
+		}
+		pkgIdent, ok := sel.X.(*ast.Ident)
+		if !ok {
+			return
+		}
+		pkgName, ok := pass.TypesInfo.Uses[pkgIdent].(*types.PkgName)
+		if !ok || pkgName.Imported().Path() != "sync/atomic" {
+			return
+		}
+
+		switch sel.Sel.Name {
+		case "AddInt64", "AddUint64",
+			"LoadInt64", "LoadUint64",
+			"StoreInt64", "StoreUint64",
+			"SwapInt64", "SwapUint64",
+			"CompareAndSwapInt64", "CompareAndSwapUint64":
+
+			// For all the listed functions, the expression to check is always the first function argument.
+			check64BitAlignment(pass, sel.Sel.Name, call.Args[0])
+		}
+	})
+
+	return nil, nil
+}
+
+func check64BitAlignment(pass *analysis.Pass, funcName string, arg ast.Expr) {
+	// Check that the argument is the address operator (&) applied to
+	// a struct field (as opposed to a variable, as the first word of
+	// uint64 and int64 variables can be relied upon to be 64-bit aligned).
+ unary, ok := arg.(*ast.UnaryExpr) + if !ok || unary.Op != token.AND { + return + } + + // Retrieve the types.Struct in order to get the offset of the + // atomically accessed field. + sel, ok := unary.X.(*ast.SelectorExpr) + if !ok { + return + } + tvar, ok := pass.TypesInfo.Selections[sel].Obj().(*types.Var) + if !ok || !tvar.IsField() { + return + } + + stype, ok := pass.TypesInfo.Types[sel.X].Type.Underlying().(*types.Struct) + if !ok { + return + } + + var offset int64 + var fields []*types.Var + for i := 0; i < stype.NumFields(); i++ { + f := stype.Field(i) + fields = append(fields, f) + if f == tvar { + // We're done, this is the field we were looking for, + // no need to fill the fields slice further. + offset = pass.TypesSizes.Offsetsof(fields)[i] + break + } + } + if offset&7 == 0 { + return // 64-bit aligned + } + + pass.Reportf(arg.Pos(), "address of non 64-bit aligned field .%s passed to atomic.%s", tvar.Name(), funcName) +} + +// imports reports whether pkg has path among its direct imports. +// It returns the imported package if so, or nil if not. +// copied from passes/cgocall. +func imports(pkg *types.Package, path string) *types.Package { + for _, imp := range pkg.Imports() { + if imp.Path() == path { + return imp + } + } + return nil +} diff --git a/go/analysis/passes/atomicalign/atomicalign_test.go b/go/analysis/passes/atomicalign/atomicalign_test.go new file mode 100644 index 0000000000..44dd93358f --- /dev/null +++ b/go/analysis/passes/atomicalign/atomicalign_test.go @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package atomicalign_test + +import ( + "testing" + + "golang.org/x/tools/go/analysis/analysistest" + "golang.org/x/tools/go/analysis/passes/atomicalign" +) + +func Test(t *testing.T) { + testdata := analysistest.TestData() + analysistest.Run(t, testdata, atomicalign.Analyzer, "a", "b") +} diff --git a/go/analysis/passes/atomicalign/testdata/src/a/a.go b/go/analysis/passes/atomicalign/testdata/src/a/a.go new file mode 100644 index 0000000000..45dd73d3ac --- /dev/null +++ b/go/analysis/passes/atomicalign/testdata/src/a/a.go @@ -0,0 +1,230 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains tests for the atomic alignment checker. + +// +build arm 386 + +package testdata + +import ( + "io" + "sync/atomic" +) + +func intsAlignment() { + var s struct { + a bool + b uint8 + c int8 + d byte + f int16 + g int16 + h int64 + i byte + j uint64 + } + atomic.AddInt64(&s.h, 9) + atomic.AddUint64(&s.j, 0) // want "address of non 64-bit aligned field .j passed to atomic.AddUint64" +} + +func floatAlignment() { + var s struct { + a float32 + b int64 + c float32 + d float64 + e uint64 + } + atomic.LoadInt64(&s.b) // want "address of non 64-bit aligned field .b passed to atomic.LoadInt64" + atomic.LoadUint64(&s.e) +} + +func uintptrAlignment() { + var s struct { + a uintptr + b int64 + c int + d uint + e int32 + f uint64 + } + atomic.StoreInt64(&s.b, 0) // want "address of non 64-bit aligned field .b passed to atomic.StoreInt64" + atomic.StoreUint64(&s.f, 0) +} + +func runeAlignment() { + var s struct { + a rune + b int64 + _ rune + c uint64 + } + atomic.SwapInt64(&s.b, 0) // want "address of non 64-bit aligned field .b passed to atomic.SwapInt64" + atomic.SwapUint64(&s.c, 0) +} + +func complexAlignment() { + var s struct { + a complex64 + b int64 + c complex128 + d uint64 + } + atomic.CompareAndSwapInt64(&s.b, 0, 1) + 
atomic.CompareAndSwapUint64(&s.d, 0, 1)
+}
+
+// TODO: continue with the tests here
+
+func channelAlignment() {
+	var a struct {
+		a chan struct{}
+		b int64
+		c <-chan struct{}
+		d uint64
+	}
+
+	atomic.AddInt64(&a.b, 0) // want "address of non 64-bit aligned field .b passed to atomic.AddInt64"
+	atomic.AddUint64(&a.d, 0)
+}
+
+func arrayAlignment() {
+	var a struct {
+		a [1]uint16
+		b int64
+		_ [2]uint16
+		c int64
+		d [1]uint16
+		e uint64
+	}
+
+	atomic.LoadInt64(&a.b) // want "address of non 64-bit aligned field .b passed to atomic.LoadInt64"
+	atomic.LoadInt64(&a.c)
+	atomic.LoadUint64(&a.e) // want "address of non 64-bit aligned field .e passed to atomic.LoadUint64"
+}
+
+func anonymousFieldAlignment() {
+	var f struct {
+		a, b int32
+		c, d int64
+		_    bool
+		e, f uint64
+	}
+
+	atomic.StoreInt64(&f.c, 12)
+	atomic.StoreInt64(&f.d, 27)
+	atomic.StoreUint64(&f.e, 6) // want "address of non 64-bit aligned field .e passed to atomic.StoreUint64"
+	atomic.StoreUint64(&f.f, 79) // want "address of non 64-bit aligned field .f passed to atomic.StoreUint64"
+}
+
+type ts struct {
+	e  int64
+	e2 []int
+	f  uint64
+}
+
+func typedStructAlignment() {
+	var b ts
+	atomic.SwapInt64(&b.e, 9)
+	atomic.SwapUint64(&b.f, 9) // want "address of non 64-bit aligned field .f passed to atomic.SwapUint64"
+}
+
+func aliasAlignment() {
+	type (
+		mybytea uint8
+		mybyteb byte
+		mybytec = uint8
+		mybyted = byte
+	)
+
+	var e struct {
+		a    byte
+		b    mybytea
+		c    mybyteb
+		e    mybytec
+		f    int64
+		g, h uint16
+		i    uint64
+	}
+
+	atomic.CompareAndSwapInt64(&e.f, 0, 1) // want "address of non 64-bit aligned field .f passed to atomic.CompareAndSwapInt64"
+	atomic.CompareAndSwapUint64(&e.i, 1, 2)
+}
+
+func stringAlignment() {
+	var a struct {
+		a uint32
+		b string
+		c int64
+	}
+	atomic.AddInt64(&a.c, 10) // want "address of non 64-bit aligned field .c passed to atomic.AddInt64"
+}
+
+func sliceAlignment() {
+	var s struct {
+		a []int32
+		b int64
+		c uint32
+		d uint64
+	}
+
+	atomic.LoadInt64(&s.b) // want
"address of non 64-bit aligned field .b passed to atomic.LoadInt64" + atomic.LoadUint64(&s.d) +} + +func interfaceAlignment() { + var s struct { + a interface{} + b int64 + c io.Writer + e int64 + _ int32 + f uint64 + } + + atomic.StoreInt64(&s.b, 9) + atomic.StoreInt64(&s.e, 9) + atomic.StoreUint64(&s.f, 9) // want "address of non 64-bit aligned field .f passed to atomic.StoreUint64" +} + +func pointerAlignment() { + var s struct { + a, b *int + c int64 + d *interface{} + e uint64 + } + + atomic.SwapInt64(&s.c, 9) + atomic.SwapUint64(&s.e, 9) // want "address of non 64-bit aligned field .e passed to atomic.SwapUint64" +} + +// non-struct fields are already 64-bits correctly aligned per Go spec +func nonStructFields() { + var ( + a *int64 + b [2]uint64 + c int64 + ) + + atomic.CompareAndSwapInt64(a, 10, 11) + atomic.CompareAndSwapUint64(&b[0], 5, 23) + atomic.CompareAndSwapInt64(&c, -1, -15) +} + +func embeddedStructFields() { + var s1 struct { + _ struct{ _ int32 } + a int64 + _ struct{} + b uint64 + _ struct{ _ [2]uint16 } + c int64 + } + + atomic.AddInt64(&s1.a, 9) // want "address of non 64-bit aligned field .a passed to atomic.AddInt64" + atomic.AddUint64(&s1.b, 9) // want "address of non 64-bit aligned field .b passed to atomic.AddUint64" + atomic.AddInt64(&s1.c, 9) +} diff --git a/go/analysis/passes/atomicalign/testdata/src/a/stub.go b/go/analysis/passes/atomicalign/testdata/src/a/stub.go new file mode 100644 index 0000000000..09454f59ae --- /dev/null +++ b/go/analysis/passes/atomicalign/testdata/src/a/stub.go @@ -0,0 +1,7 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// This file is only here to not trigger "build constraints exclude all Go files" during tests + +package testdata diff --git a/go/analysis/passes/atomicalign/testdata/src/b/b.go b/go/analysis/passes/atomicalign/testdata/src/b/b.go new file mode 100644 index 0000000000..adb352aafb --- /dev/null +++ b/go/analysis/passes/atomicalign/testdata/src/b/b.go @@ -0,0 +1,19 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !arm,!386 + +package testdata + +import ( + "sync/atomic" +) + +func nonAffectedArchs() { + var s struct { + _ bool + a uint64 + } + atomic.SwapUint64(&s.a, 9) // ok on 64-bit architectures +} diff --git a/go/analysis/passes/atomicalign/testdata/src/b/stub.go b/go/analysis/passes/atomicalign/testdata/src/b/stub.go new file mode 100644 index 0000000000..09454f59ae --- /dev/null +++ b/go/analysis/passes/atomicalign/testdata/src/b/stub.go @@ -0,0 +1,7 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is only here to not trigger "build constraints exclude all Go files" during tests + +package testdata