@@ -16,7 +16,9 @@ package liveness
1616
1717import (
1818 "crypto/md5"
19+ "crypto/sha1"
1920 "fmt"
21+ "os"
2022 "sort"
2123 "strings"
2224
@@ -30,6 +32,7 @@ import (
3032 "cmd/compile/internal/types"
3133 "cmd/internal/obj"
3234 "cmd/internal/objabi"
35+ "cmd/internal/src"
3336)
3437
3538// OpVarDef is an annotation for the liveness analysis, marking a place
@@ -123,9 +126,9 @@ type liveness struct {
123126 unsafePoints bitvec.BitVec
124127
125128 // An array with a bit vector for each safe point in the
126- // current Block during Liveness .epilogue. Indexed in Value
129+ // current Block during liveness .epilogue. Indexed in Value
127130 // order for that block. Additionally, for the entry block
128- // livevars[0] is the entry bitmap. Liveness .compact moves
131+ // livevars[0] is the entry bitmap. liveness .compact moves
129132 // these to stackMaps.
130133 livevars []bitvec.BitVec
131134
@@ -136,6 +139,8 @@ type liveness struct {
136139 stackMaps []bitvec.BitVec
137140
138141 cache progeffectscache
142+
143+ doClobber bool // Whether to clobber dead stack slots in this function.
139144}
140145
141146// Map maps from *ssa.Value to LivenessIndex.
@@ -387,6 +392,9 @@ func newliveness(fn *ir.Func, f *ssa.Func, vars []*ir.Name, idx map[*ir.Name]int
387392 lv .livenessMap .reset ()
388393
389394 lv .markUnsafePoints ()
395+
396+ lv .enableClobber ()
397+
390398 return lv
391399}
392400
@@ -820,6 +828,10 @@ func (lv *liveness) epilogue() {
820828 live .Or (* live , liveout )
821829 }
822830
831+ if lv .doClobber {
832+ lv .clobber (b )
833+ }
834+
823835 // The liveness maps for this block are now complete. Compact them.
824836 lv .compact (b )
825837 }
@@ -873,7 +885,7 @@ func (lv *liveness) compact(b *ssa.Block) {
873885 }
874886 for _ , v := range b .Values {
875887 hasStackMap := lv .hasStackMap (v )
876- isUnsafePoint := lv .allUnsafe || lv .unsafePoints .Get (int32 (v .ID ))
888+ isUnsafePoint := lv .allUnsafe || v . Op != ssa . OpClobber && lv .unsafePoints .Get (int32 (v .ID ))
877889 idx := objw.LivenessIndex {StackMapIndex : objw .StackMapDontCare , IsUnsafePoint : isUnsafePoint }
878890 if hasStackMap {
879891 idx .StackMapIndex = lv .stackMapSet .add (lv .livevars [pos ])
@@ -888,6 +900,153 @@ func (lv *liveness) compact(b *ssa.Block) {
888900 lv .livevars = lv .livevars [:0 ]
889901}
890902
903+ func (lv * liveness ) enableClobber () {
904+ // The clobberdead experiment inserts code to clobber pointer slots in all
905+ // the dead variables (locals and args) at every synchronous safepoint.
906+ if ! base .Flag .ClobberDead {
907+ return
908+ }
909+ if lv .fn .Pragma & ir .CgoUnsafeArgs != 0 {
910+ // C or assembly code uses the exact frame layout. Don't clobber.
911+ return
912+ }
913+ if len (lv .vars ) > 10000 || len (lv .f .Blocks ) > 10000 {
914+ // Be careful to avoid doing too much work.
915+ // Bail if >10000 variables or >10000 blocks.
916+ // Otherwise, giant functions make this experiment generate too much code.
917+ return
918+ }
919+ if lv .f .Name == "forkAndExecInChild" || lv .f .Name == "wbBufFlush" {
920+ // forkAndExecInChild calls vfork on some platforms.
921+ // The code we add here clobbers parts of the stack in the child.
922+ // When the parent resumes, it is using the same stack frame. But the
923+ // child has clobbered stack variables that the parent needs. Boom!
924+ // In particular, the sys argument gets clobbered.
925+ //
926+ // runtime.wbBufFlush must not modify its arguments. See the comments
927+ // in runtime/mwbbuf.go:wbBufFlush.
928+ return
929+ }
930+ if h := os .Getenv ("GOCLOBBERDEADHASH" ); h != "" {
931+ // Clobber only functions where the hash of the function name matches a pattern.
932+ // Useful for binary searching for a miscompiled function.
933+ hstr := ""
934+ for _ , b := range sha1 .Sum ([]byte (lv .f .Name )) {
935+ hstr += fmt .Sprintf ("%08b" , b )
936+ }
937+ if ! strings .HasSuffix (hstr , h ) {
938+ return
939+ }
940+ fmt .Printf ("\t \t \t CLOBBERDEAD %s\n " , lv .f .Name )
941+ }
942+ lv .doClobber = true
943+ }
944+
// Inserts code to clobber pointer slots in all the dead variables (locals and args)
// at every synchronous safepoint in b.
//
// The block's value schedule is rebuilt: before each value that has a stack
// map (a safe point), clobbering instructions for the variables dead at that
// point are appended. Relies on lv.livevars still holding one bitmap per safe
// point of b (i.e. must run before compact clears them).
func (lv *liveness) clobber(b *ssa.Block) {
	// Copy block's values to a temporary.
	oldSched := append([]*ssa.Value{}, b.Values...)
	b.Values = b.Values[:0]
	idx := 0 // index into lv.livevars of the next safe point's bitmap

	// Clobber pointer slots in all dead variables at entry.
	if b == lv.f.Entry {
		for len(oldSched) > 0 && len(oldSched[0].Args) == 0 {
			// Skip argless ops. We need to skip at least
			// the lowered ClosurePtr op, because it
			// really wants to be first. This will also
			// skip ops like InitMem and SP, which are ok.
			b.Values = append(b.Values, oldSched[0])
			oldSched = oldSched[1:]
		}
		// livevars[0] is the entry bitmap.
		clobber(lv, b, lv.livevars[0])
		idx++
	}

	// Copy values into schedule, adding clobbering around safepoints.
	for _, v := range oldSched {
		if !lv.hasStackMap(v) {
			// Not a safe point; keep the value as-is.
			b.Values = append(b.Values, v)
			continue
		}
		// Emit clobbers for everything dead at this safe point, then the
		// safe-point value itself.
		clobber(lv, b, lv.livevars[idx])
		b.Values = append(b.Values, v)
		idx++
	}
}
978+
979+ // clobber generates code to clobber pointer slots in all dead variables
980+ // (those not marked in live). Clobbering instructions are added to the end
981+ // of b.Values.
982+ func clobber (lv * liveness , b * ssa.Block , live bitvec.BitVec ) {
983+ for i , n := range lv .vars {
984+ if ! live .Get (int32 (i )) && ! n .Addrtaken () {
985+ // Don't clobber stack objects (address-taken). They are
986+ // tracked dynamically.
987+ clobberVar (b , n )
988+ }
989+ }
990+ }
991+
// clobberVar generates code to trash the pointers in v.
// Clobbering instructions are added to the end of b.Values.
// It delegates to clobberWalk starting at offset 0 with v's full type.
func clobberVar(b *ssa.Block, v *ir.Name) {
	clobberWalk(b, v, 0, v.Type())
}
997+
998+ // b = block to which we append instructions
999+ // v = variable
1000+ // offset = offset of (sub-portion of) variable to clobber (in bytes)
1001+ // t = type of sub-portion of v.
1002+ func clobberWalk (b * ssa.Block , v * ir.Name , offset int64 , t * types.Type ) {
1003+ if ! t .HasPointers () {
1004+ return
1005+ }
1006+ switch t .Kind () {
1007+ case types .TPTR ,
1008+ types .TUNSAFEPTR ,
1009+ types .TFUNC ,
1010+ types .TCHAN ,
1011+ types .TMAP :
1012+ clobberPtr (b , v , offset )
1013+
1014+ case types .TSTRING :
1015+ // struct { byte *str; int len; }
1016+ clobberPtr (b , v , offset )
1017+
1018+ case types .TINTER :
1019+ // struct { Itab *tab; void *data; }
1020+ // or, when isnilinter(t)==true:
1021+ // struct { Type *type; void *data; }
1022+ clobberPtr (b , v , offset )
1023+ clobberPtr (b , v , offset + int64 (types .PtrSize ))
1024+
1025+ case types .TSLICE :
1026+ // struct { byte *array; int len; int cap; }
1027+ clobberPtr (b , v , offset )
1028+
1029+ case types .TARRAY :
1030+ for i := int64 (0 ); i < t .NumElem (); i ++ {
1031+ clobberWalk (b , v , offset + i * t .Elem ().Size (), t .Elem ())
1032+ }
1033+
1034+ case types .TSTRUCT :
1035+ for _ , t1 := range t .Fields ().Slice () {
1036+ clobberWalk (b , v , offset + t1 .Offset , t1 .Type )
1037+ }
1038+
1039+ default :
1040+ base .Fatalf ("clobberWalk: unexpected type, %v" , t )
1041+ }
1042+ }
1043+
// clobberPtr generates a clobber of the pointer at offset offset in v.
// The clobber instruction is added at the end of b.
// It emits an OpClobber value with no source position (src.NoXPos), carrying
// the byte offset as AuxInt and the variable as Aux.
func clobberPtr(b *ssa.Block, v *ir.Name, offset int64) {
	b.NewValue0IA(src.NoXPos, ssa.OpClobber, types.TypeVoid, offset, v)
}
1049+
8911050func (lv * liveness ) showlive (v * ssa.Value , live bitvec.BitVec ) {
8921051 if base .Flag .Live == 0 || ir .FuncName (lv .fn ) == "init" || strings .HasPrefix (ir .FuncName (lv .fn ), "." ) {
8931052 return
0 commit comments