Skip to content

Commit 8628bf9

Browse files
committed
cmd/compile: resurrect clobberdead mode
This CL resurrects the clobberdead debugging mode (CL 23924). When -clobberdead flag is set (TODO: make it GOEXPERIMENT?), the compiler inserts code that clobbers all dead stack slots that contains pointers. Mark windows syscall functions cgo_unsafe_args, as the code actually does that, by taking the address of one argument and passing it to cgocall. Change-Id: Ie09a015f4bd14ae6053cc707866e30ae509b9d6f Reviewed-on: https://go-review.googlesource.com/c/go/+/301791 Trust: Cherry Zhang <cherryyz@google.com> Run-TryBot: Cherry Zhang <cherryyz@google.com> TryBot-Result: Go Bot <gobot@golang.org> Reviewed-by: David Chase <drchase@google.com> Reviewed-by: Than McIntosh <thanm@google.com>
1 parent 0bd308f commit 8628bf9

File tree

5 files changed

+251
-3
lines changed

5 files changed

+251
-3
lines changed

src/cmd/compile/internal/base/flag.go

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -90,6 +90,7 @@ type CmdFlags struct {
9090
BuildID string "help:\"record `id` as the build id in the export metadata\""
9191
CPUProfile string "help:\"write cpu profile to `file`\""
9292
Complete bool "help:\"compiling complete package (no C or assembly)\""
93+
ClobberDead bool "help:\"clobber dead stack slots (for debugging)\""
9394
Dwarf bool "help:\"generate DWARF symbols\""
9495
DwarfBASEntries *bool "help:\"use base address selection entries in DWARF\"" // &Ctxt.UseBASEntries, set below
9596
DwarfLocationLists *bool "help:\"add location lists to DWARF in optimized mode\"" // &Ctxt.Flag_locationlists, set below

src/cmd/compile/internal/liveness/plive.go

Lines changed: 162 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,9 @@ package liveness
1616

1717
import (
1818
"crypto/md5"
19+
"crypto/sha1"
1920
"fmt"
21+
"os"
2022
"sort"
2123
"strings"
2224

@@ -30,6 +32,7 @@ import (
3032
"cmd/compile/internal/types"
3133
"cmd/internal/obj"
3234
"cmd/internal/objabi"
35+
"cmd/internal/src"
3336
)
3437

3538
// OpVarDef is an annotation for the liveness analysis, marking a place
@@ -123,9 +126,9 @@ type liveness struct {
123126
unsafePoints bitvec.BitVec
124127

125128
// An array with a bit vector for each safe point in the
126-
// current Block during Liveness.epilogue. Indexed in Value
129+
// current Block during liveness.epilogue. Indexed in Value
127130
// order for that block. Additionally, for the entry block
128-
// livevars[0] is the entry bitmap. Liveness.compact moves
131+
// livevars[0] is the entry bitmap. liveness.compact moves
129132
// these to stackMaps.
130133
livevars []bitvec.BitVec
131134

@@ -136,6 +139,8 @@ type liveness struct {
136139
stackMaps []bitvec.BitVec
137140

138141
cache progeffectscache
142+
143+
doClobber bool // Whether to clobber dead stack slots in this function.
139144
}
140145

141146
// Map maps from *ssa.Value to LivenessIndex.
@@ -387,6 +392,9 @@ func newliveness(fn *ir.Func, f *ssa.Func, vars []*ir.Name, idx map[*ir.Name]int
387392
lv.livenessMap.reset()
388393

389394
lv.markUnsafePoints()
395+
396+
lv.enableClobber()
397+
390398
return lv
391399
}
392400

@@ -820,6 +828,10 @@ func (lv *liveness) epilogue() {
820828
live.Or(*live, liveout)
821829
}
822830

831+
if lv.doClobber {
832+
lv.clobber(b)
833+
}
834+
823835
// The liveness maps for this block are now complete. Compact them.
824836
lv.compact(b)
825837
}
@@ -873,7 +885,7 @@ func (lv *liveness) compact(b *ssa.Block) {
873885
}
874886
for _, v := range b.Values {
875887
hasStackMap := lv.hasStackMap(v)
876-
isUnsafePoint := lv.allUnsafe || lv.unsafePoints.Get(int32(v.ID))
888+
isUnsafePoint := lv.allUnsafe || v.Op != ssa.OpClobber && lv.unsafePoints.Get(int32(v.ID))
877889
idx := objw.LivenessIndex{StackMapIndex: objw.StackMapDontCare, IsUnsafePoint: isUnsafePoint}
878890
if hasStackMap {
879891
idx.StackMapIndex = lv.stackMapSet.add(lv.livevars[pos])
@@ -888,6 +900,153 @@ func (lv *liveness) compact(b *ssa.Block) {
888900
lv.livevars = lv.livevars[:0]
889901
}
890902

903+
// enableClobber decides whether the clobberdead debugging mode applies to
// this function and records the decision in lv.doClobber. It declines for
// functions whose frame layout must not be disturbed, for very large
// functions, and (optionally) for functions whose name hash does not match
// the GOCLOBBERDEADHASH pattern.
func (lv *liveness) enableClobber() {
	// The clobberdead experiment inserts code to clobber pointer slots in all
	// the dead variables (locals and args) at every synchronous safepoint.
	if !base.Flag.ClobberDead {
		// Mode not requested via the -clobberdead compiler flag.
		return
	}
	if lv.fn.Pragma&ir.CgoUnsafeArgs != 0 {
		// C or assembly code uses the exact frame layout. Don't clobber.
		return
	}
	if len(lv.vars) > 10000 || len(lv.f.Blocks) > 10000 {
		// Be careful to avoid doing too much work.
		// Bail if >10000 variables or >10000 blocks.
		// Otherwise, giant functions make this experiment generate too much code.
		return
	}
	if lv.f.Name == "forkAndExecInChild" || lv.f.Name == "wbBufFlush" {
		// forkAndExecInChild calls vfork on some platforms.
		// The code we add here clobbers parts of the stack in the child.
		// When the parent resumes, it is using the same stack frame. But the
		// child has clobbered stack variables that the parent needs. Boom!
		// In particular, the sys argument gets clobbered.
		//
		// runtime.wbBufFlush must not modify its arguments. See the comments
		// in runtime/mwbbuf.go:wbBufFlush.
		return
	}
	if h := os.Getenv("GOCLOBBERDEADHASH"); h != "" {
		// Clobber only functions where the hash of the function name matches a pattern.
		// Useful for binary searching for a miscompiled function.
		hstr := ""
		for _, b := range sha1.Sum([]byte(lv.f.Name)) {
			// Build the hash as a binary string so the pattern can select
			// an arbitrary suffix of bits.
			hstr += fmt.Sprintf("%08b", b)
		}
		if !strings.HasSuffix(hstr, h) {
			return
		}
		// Print which functions are being clobbered, to aid the binary search.
		fmt.Printf("\t\t\tCLOBBERDEAD %s\n", lv.f.Name)
	}
	lv.doClobber = true
}
944+
945+
// Inserts code to clobber pointer slots in all the dead variables (locals and args)
// at every synchronous safepoint in b. The block's value schedule is rebuilt
// with clobbering instructions inserted immediately before each safepoint.
func (lv *liveness) clobber(b *ssa.Block) {
	// Copy block's values to a temporary.
	oldSched := append([]*ssa.Value{}, b.Values...)
	b.Values = b.Values[:0]
	idx := 0 // index into lv.livevars, advanced once per safepoint handled

	// Clobber pointer slots in all dead variables at entry.
	if b == lv.f.Entry {
		for len(oldSched) > 0 && len(oldSched[0].Args) == 0 {
			// Skip argless ops. We need to skip at least
			// the lowered ClosurePtr op, because it
			// really wants to be first. This will also
			// skip ops like InitMem and SP, which are ok.
			b.Values = append(b.Values, oldSched[0])
			oldSched = oldSched[1:]
		}
		// livevars[0] is the entry bitmap for the entry block.
		clobber(lv, b, lv.livevars[0])
		idx++
	}

	// Copy values into schedule, adding clobbering around safepoints.
	for _, v := range oldSched {
		if !lv.hasStackMap(v) {
			// Not a safepoint: copy through unchanged.
			b.Values = append(b.Values, v)
			continue
		}
		// Clobber what is dead according to this safepoint's liveness map,
		// then emit the safepoint itself.
		clobber(lv, b, lv.livevars[idx])
		b.Values = append(b.Values, v)
		idx++
	}
}
978+
979+
// clobber generates code to clobber pointer slots in all dead variables
980+
// (those not marked in live). Clobbering instructions are added to the end
981+
// of b.Values.
982+
func clobber(lv *liveness, b *ssa.Block, live bitvec.BitVec) {
983+
for i, n := range lv.vars {
984+
if !live.Get(int32(i)) && !n.Addrtaken() {
985+
// Don't clobber stack objects (address-taken). They are
986+
// tracked dynamically.
987+
clobberVar(b, n)
988+
}
989+
}
990+
}
991+
992+
// clobberVar generates code to trash the pointers in v.
// Clobbering instructions are added to the end of b.Values.
// It delegates to clobberWalk, starting at offset 0 with v's full type.
func clobberVar(b *ssa.Block, v *ir.Name) {
	clobberWalk(b, v, 0, v.Type())
}
997+
998+
// clobberWalk recursively walks the layout of a variable, emitting a clobber
// for every pointer word it contains. Non-pointer words (string/slice lengths,
// slice capacities, scalar fields) are left untouched.
//
// b = block to which we append instructions
// v = variable
// offset = offset of (sub-portion of) variable to clobber (in bytes)
// t = type of sub-portion of v.
func clobberWalk(b *ssa.Block, v *ir.Name, offset int64, t *types.Type) {
	if !t.HasPointers() {
		// Nothing to clobber in a pointer-free subobject.
		return
	}
	switch t.Kind() {
	case types.TPTR,
		types.TUNSAFEPTR,
		types.TFUNC,
		types.TCHAN,
		types.TMAP:
		// A single pointer word.
		clobberPtr(b, v, offset)

	case types.TSTRING:
		// struct { byte *str; int len; }
		clobberPtr(b, v, offset)

	case types.TINTER:
		// struct { Itab *tab; void *data; }
		// or, when isnilinter(t)==true:
		// struct { Type *type; void *data; }
		clobberPtr(b, v, offset)
		clobberPtr(b, v, offset+int64(types.PtrSize))

	case types.TSLICE:
		// struct { byte *array; int len; int cap; }
		clobberPtr(b, v, offset)

	case types.TARRAY:
		// Clobber each element in turn at its own offset.
		for i := int64(0); i < t.NumElem(); i++ {
			clobberWalk(b, v, offset+i*t.Elem().Size(), t.Elem())
		}

	case types.TSTRUCT:
		// Clobber each field at its field offset.
		for _, t1 := range t.Fields().Slice() {
			clobberWalk(b, v, offset+t1.Offset, t1.Type)
		}

	default:
		// HasPointers returned true for a kind we don't know how to walk;
		// that is a compiler bug.
		base.Fatalf("clobberWalk: unexpected type, %v", t)
	}
}
1043+
1044+
// clobberPtr generates a clobber of the pointer at offset offset in v.
// The clobber instruction is added at the end of b.
// The OpClobber value carries no position (src.NoXPos) since it corresponds
// to no source statement; the backend lowers it to a store of a recognizable
// garbage constant (see test/codegen/clobberdead.go — presumably 0xdeaddead;
// confirm against the architecture lowering rules).
func clobberPtr(b *ssa.Block, v *ir.Name, offset int64) {
	b.NewValue0IA(src.NoXPos, ssa.OpClobber, types.TypeVoid, offset, v)
}
1049+
8911050
func (lv *liveness) showlive(v *ssa.Value, live bitvec.BitVec) {
8921051
if base.Flag.Live == 0 || ir.FuncName(lv.fn) == "init" || strings.HasPrefix(ir.FuncName(lv.fn), ".") {
8931052
return
src/cmd/compile/internal/test/clobberdead_test.go

Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
1+
// Copyright 2021 The Go Authors. All rights reserved.
2+
// Use of this source code is governed by a BSD-style
3+
// license that can be found in the LICENSE file.
4+
5+
package test
6+
7+
import (
8+
"internal/testenv"
9+
"io/ioutil"
10+
"os/exec"
11+
"path/filepath"
12+
"testing"
13+
)
14+
15+
const helloSrc = `
16+
package main
17+
import "fmt"
18+
func main() { fmt.Println("hello") }
19+
`
20+
21+
func TestClobberDead(t *testing.T) {
22+
// Test that clobberdead mode generates correct program.
23+
24+
if testing.Short() {
25+
// This test rebuilds the runtime with a special flag, which
26+
// takes a while.
27+
t.Skip("skip in short mode")
28+
}
29+
testenv.MustHaveGoRun(t)
30+
t.Parallel()
31+
32+
tmpdir := t.TempDir()
33+
src := filepath.Join(tmpdir, "x.go")
34+
err := ioutil.WriteFile(src, []byte(helloSrc), 0644)
35+
if err != nil {
36+
t.Fatalf("write file failed: %v", err)
37+
}
38+
39+
cmd := exec.Command(testenv.GoToolPath(t), "run", "-gcflags=all=-clobberdead", src)
40+
out, err := cmd.CombinedOutput()
41+
if err != nil {
42+
t.Fatalf("go run failed: %v\n%s", err, out)
43+
}
44+
if string(out) != "hello\n" {
45+
t.Errorf("wrong output: got %q, want %q", out, "hello\n")
46+
}
47+
}

src/runtime/syscall_windows.go

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -263,6 +263,7 @@ const _LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x00000800
263263
// to the full path inside of system32 for use with vanilla LoadLibrary.
264264
//go:linkname syscall_loadsystemlibrary syscall.loadsystemlibrary
265265
//go:nosplit
266+
//go:cgo_unsafe_args
266267
func syscall_loadsystemlibrary(filename *uint16, absoluteFilepath *uint16) (handle, err uintptr) {
267268
lockOSThread()
268269
c := &getg().m.syscall
@@ -293,6 +294,7 @@ func syscall_loadsystemlibrary(filename *uint16, absoluteFilepath *uint16) (hand
293294

294295
//go:linkname syscall_loadlibrary syscall.loadlibrary
295296
//go:nosplit
297+
//go:cgo_unsafe_args
296298
func syscall_loadlibrary(filename *uint16) (handle, err uintptr) {
297299
lockOSThread()
298300
defer unlockOSThread()
@@ -310,6 +312,7 @@ func syscall_loadlibrary(filename *uint16) (handle, err uintptr) {
310312

311313
//go:linkname syscall_getprocaddress syscall.getprocaddress
312314
//go:nosplit
315+
//go:cgo_unsafe_args
313316
func syscall_getprocaddress(handle uintptr, procname *byte) (outhandle, err uintptr) {
314317
lockOSThread()
315318
defer unlockOSThread()
@@ -327,6 +330,7 @@ func syscall_getprocaddress(handle uintptr, procname *byte) (outhandle, err uint
327330

328331
//go:linkname syscall_Syscall syscall.Syscall
329332
//go:nosplit
333+
//go:cgo_unsafe_args
330334
func syscall_Syscall(fn, nargs, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
331335
lockOSThread()
332336
defer unlockOSThread()
@@ -340,6 +344,7 @@ func syscall_Syscall(fn, nargs, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
340344

341345
//go:linkname syscall_Syscall6 syscall.Syscall6
342346
//go:nosplit
347+
//go:cgo_unsafe_args
343348
func syscall_Syscall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
344349
lockOSThread()
345350
defer unlockOSThread()
@@ -353,6 +358,7 @@ func syscall_Syscall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err ui
353358

354359
//go:linkname syscall_Syscall9 syscall.Syscall9
355360
//go:nosplit
361+
//go:cgo_unsafe_args
356362
func syscall_Syscall9(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, err uintptr) {
357363
lockOSThread()
358364
defer unlockOSThread()
@@ -366,6 +372,7 @@ func syscall_Syscall9(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1
366372

367373
//go:linkname syscall_Syscall12 syscall.Syscall12
368374
//go:nosplit
375+
//go:cgo_unsafe_args
369376
func syscall_Syscall12(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12 uintptr) (r1, r2, err uintptr) {
370377
lockOSThread()
371378
defer unlockOSThread()
@@ -379,6 +386,7 @@ func syscall_Syscall12(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11,
379386

380387
//go:linkname syscall_Syscall15 syscall.Syscall15
381388
//go:nosplit
389+
//go:cgo_unsafe_args
382390
func syscall_Syscall15(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) {
383391
lockOSThread()
384392
defer unlockOSThread()
@@ -392,6 +400,7 @@ func syscall_Syscall15(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11,
392400

393401
//go:linkname syscall_Syscall18 syscall.Syscall18
394402
//go:nosplit
403+
//go:cgo_unsafe_args
395404
func syscall_Syscall18(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 uintptr) (r1, r2, err uintptr) {
396405
lockOSThread()
397406
defer unlockOSThread()

test/codegen/clobberdead.go

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
// asmcheck -gcflags=-clobberdead
2+
3+
// +build amd64
4+
5+
// Copyright 2021 The Go Authors. All rights reserved.
6+
// Use of this source code is governed by a BSD-style
7+
// license that can be found in the LICENSE file.
8+
9+
package codegen
10+
11+
type T [2]*int // contain pointer, not SSA-able (so locals are not registerized)
12+
13+
var p1, p2, p3 T
14+
15+
// F exercises clobberdead code generation: dead pointer-containing slots are
// clobbered at entry and at each call safepoint, while the address-taken
// local z (a stack object) is never clobbered. The `// amd64:` comments below
// are asmcheck directives and must not be edited.
func F() {
	// 3735936685 is 0xdeaddead
	// clobber x, y at entry. not clobber z (stack object).
	// amd64:`MOVL\t\$3735936685, ""\.x`, `MOVL\t\$3735936685, ""\.y`, -`MOVL\t\$3735936685, ""\.z`
	x, y, z := p1, p2, p3
	addrTaken(&z)
	// x is dead at the call (the value of x is loaded before the CALL), y is not
	// amd64:`MOVL\t\$3735936685, ""\.x`, -`MOVL\t\$3735936685, ""\.y`
	use(x)
	// amd64:`MOVL\t\$3735936685, ""\.x`, `MOVL\t\$3735936685, ""\.y`
	use(y)
}
27+
28+
// use consumes a T; marked noinline so the call (and its safepoint)
// survives in the generated code for asmcheck to inspect.
//go:noinline
func use(T) {}

// addrTaken takes a *T, forcing its argument at the call site to be
// address-taken (a stack object) and therefore exempt from clobbering.
//go:noinline
func addrTaken(*T) {}

0 commit comments

Comments
 (0)