diff --git a/src/cmd/compile/internal/ssa/writebarrier.go b/src/cmd/compile/internal/ssa/writebarrier.go
index 9ef3667d516..ec6901f13ec 100644
--- a/src/cmd/compile/internal/ssa/writebarrier.go
+++ b/src/cmd/compile/internal/ssa/writebarrier.go
@@ -303,6 +303,15 @@ func writebarrier(f *Func) {
 	mem := stores[0].MemoryArg()
 	pos := stores[0].Pos
 
+	// If there is a nil check before the WB store, duplicate it into
+	// the two branches where the store and the WB load occur, so
+	// they are more likely to be removed by late nilcheck removal
+	// (which is block-local).
+	var nilcheck, nilcheckThen, nilcheckEnd *Value
+	if a := stores[0].Args[0]; a.Op == OpNilCheck && a.Args[1] == mem {
+		nilcheck = a
+	}
+
 	// If the source of a MoveWB is volatile (will be clobbered by a
 	// function call), we need to copy it to a temporary location, as
 	// marshaling the args of wbMove might clobber the value we're
@@ -377,6 +386,10 @@ func writebarrier(f *Func) {
 	// For each write barrier store, append write barrier code to bThen.
 	memThen := mem
 
+	if nilcheck != nil {
+		nilcheckThen = bThen.NewValue2(nilcheck.Pos, OpNilCheck, nilcheck.Type, nilcheck.Args[0], memThen)
+	}
+
 	// Note: we can issue the write barrier code in any order. In particular,
 	// it doesn't matter if they are in a different order *even if* they end
 	// up referring to overlapping memory regions. For instance if an OpStore
@@ -447,6 +460,9 @@ func writebarrier(f *Func) {
 			// take care of the vast majority of these. We could
 			// patch this up in the signal handler, or use XCHG to
 			// combine the read and the write.
+			if ptr == nilcheck {
+				ptr = nilcheckThen
+			}
 			oldVal := bThen.NewValue2(pos, OpLoad, types.Types[types.TUINTPTR], ptr, memThen)
 			// Save old value to write buffer.
 			addEntry(pos, oldVal)
@@ -459,9 +475,12 @@ func writebarrier(f *Func) {
 	// Now do the rare cases, Zeros and Moves.
 	for _, w := range stores {
 		pos := w.Pos
+		dst := w.Args[0]
+		if dst == nilcheck {
+			dst = nilcheckThen
+		}
 		switch w.Op {
 		case OpZeroWB:
-			dst := w.Args[0]
 			typ := reflectdata.TypeLinksym(w.Aux.(*types.Type))
 			// zeroWB(&typ, dst)
 			taddr := b.NewValue1A(pos, OpAddr, b.Func.Config.Types.Uintptr, typ, sb)
@@ -469,7 +488,6 @@ func writebarrier(f *Func) {
 			f.fe.Func().SetWBPos(pos)
 			nWBops--
 		case OpMoveWB:
-			dst := w.Args[0]
 			src := w.Args[1]
 			if isVolatile(src) {
 				for _, c := range volatiles {
@@ -491,24 +509,29 @@ func writebarrier(f *Func) {
 	// merge memory
 	mem = bEnd.NewValue2(pos, OpPhi, types.TypeMem, mem, memThen)
 
+	if nilcheck != nil {
+		nilcheckEnd = bEnd.NewValue2(nilcheck.Pos, OpNilCheck, nilcheck.Type, nilcheck.Args[0], mem)
+	}
+
 	// Do raw stores after merge point.
 	for _, w := range stores {
 		pos := w.Pos
+		dst := w.Args[0]
+		if dst == nilcheck {
+			dst = nilcheckEnd
+		}
 		switch w.Op {
 		case OpStoreWB:
-			ptr := w.Args[0]
 			val := w.Args[1]
 			if buildcfg.Experiment.CgoCheck2 {
 				// Issue cgo checking code.
-				mem = wbcall(pos, bEnd, cgoCheckPtrWrite, sp, mem, ptr, val)
+				mem = wbcall(pos, bEnd, cgoCheckPtrWrite, sp, mem, dst, val)
 			}
-			mem = bEnd.NewValue3A(pos, OpStore, types.TypeMem, w.Aux, ptr, val, mem)
+			mem = bEnd.NewValue3A(pos, OpStore, types.TypeMem, w.Aux, dst, val, mem)
 		case OpZeroWB:
-			dst := w.Args[0]
 			mem = bEnd.NewValue2I(pos, OpZero, types.TypeMem, w.AuxInt, dst, mem)
 			mem.Aux = w.Aux
 		case OpMoveWB:
-			dst := w.Args[0]
 			src := w.Args[1]
 			if isVolatile(src) {
 				for _, c := range volatiles {
@@ -529,9 +552,8 @@ func writebarrier(f *Func) {
 		case OpVarDef, OpVarLive:
 			mem = bEnd.NewValue1A(pos, w.Op, types.TypeMem, w.Aux, mem)
 		case OpStore:
-			ptr := w.Args[0]
 			val := w.Args[1]
-			mem = bEnd.NewValue3A(pos, OpStore, types.TypeMem, w.Aux, ptr, val, mem)
+			mem = bEnd.NewValue3A(pos, OpStore, types.TypeMem, w.Aux, dst, val, mem)
 		}
 	}
 
@@ -557,6 +579,9 @@ func writebarrier(f *Func) {
 			f.freeValue(w)
 		}
 	}
+	if nilcheck != nil && nilcheck.Uses == 0 {
+		nilcheck.reset(OpInvalid)
+	}
 
 	// put values after the store sequence into the end block
 	bEnd.Values = append(bEnd.Values, after...)
diff --git a/test/nilptr5.go b/test/nilptr5.go
index 51a302f2521..a76592abadc 100644
--- a/test/nilptr5.go
+++ b/test/nilptr5.go
@@ -30,3 +30,9 @@ func f6(p, q *T) {
 func f8(t *struct{ b [8]int }) struct{ b [8]int } {
 	return *t // ERROR "removed nil check"
 }
+
+// nil check is removed for pointer write (which involves a
+// write barrier).
+func f9(x **int, y *int) {
+	*x = y // ERROR "removed nil check"
+}
diff --git a/test/nilptr5_aix.go b/test/nilptr5_aix.go
index 3b116ca19f5..cd8ecb1e496 100644
--- a/test/nilptr5_aix.go
+++ b/test/nilptr5_aix.go
@@ -30,3 +30,9 @@ func f6(p, q *T) {
 func f8(t *[8]int) [8]int {
 	return *t // ERROR "generated nil check"
 }
+
+// On AIX, a write nil check is removed, but a read nil check
+// remains (for the write barrier).
+func f9(x **int, y *int) {
+	*x = y // ERROR "generated nil check" "removed nil check"
+}
diff --git a/test/nilptr5_wasm.go b/test/nilptr5_wasm.go
index ea380d1384f..e653ffd2870 100644
--- a/test/nilptr5_wasm.go
+++ b/test/nilptr5_wasm.go
@@ -30,3 +30,8 @@ func f6(p, q *T) {
 func f8(t *[8]int) [8]int {
 	return *t // ERROR "generated nil check"
 }
+
+// nil check is not removed on Wasm.
+func f9(x **int, y *int) {
+	*x = y // ERROR "generated nil check"
+}
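
For reviewers, a rough source-level sketch of the control-flow shape this pass gives a pointer store *x = y, and why the duplication helps. The names writeBarrierEnabled, wbBufPut, and storeWithWB are illustrative stand-ins, not the real runtime API; the pass itself operates on SSA Values and Blocks. The key point is that x is dereferenced in two different blocks (the WB load in bThen, the raw store in bEnd), and late nilcheck removal only reasons within a single block, so a copy of the nil check is placed in each block where it can be proven redundant:

	package main

	// Stand-ins for the write barrier machinery; illustrative only.
	var writeBarrierEnabled bool

	func wbBufPut(oldp, newp *int) {} // stub: record pointers for the GC

	// storeWithWB mirrors the expanded form of `*x = y`.
	func storeWithWB(x **int, y *int) {
		if writeBarrierEnabled { // bThen: write barrier branch
			old := *x        // WB load; duplicated nil check of x lives here
			wbBufPut(old, y) // save old value to the write buffer
		}
		// bEnd: merge point
		*x = y // raw store; duplicated nil check of x lives here
	}

	func main() {
		storeWithWB(new(*int), new(int))
	}

If the duplicated checks become dead (as in the f9 tests below on most platforms), the original check is also unused and is reset to OpInvalid at the end of the pass.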