go/src/reflect/benchmark_test.go
thepudds c3bb27bbc7 cmd/compile/internal/walk: use global zeroVal in interface conversions for zero values
This is a small-ish adjustment to the change earlier in our
stack in CL 649555, which started creating read-only global storage
for a composite literal used in an interface conversion and setting
the interface data pointer to point to that global storage.

In some cases, there are execution-time performance benefits to
pointing to runtime.zeroVal in particular. In reflect, pointer checks
against the runtime.zeroVal memory address are used to side-step some
work, such as in reflect.Value.Set and reflect.Value.IsZero.
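
For illustration, the shape of that fast path is roughly as follows.
This is a sketch only: zeroVal here stands in for runtime.zeroVal (a
read-only, all-zero region), and isZeroFast is a hypothetical helper,
not the actual reflect code, which compares an unexported data
pointer against the real symbol:

    package main

    import (
        "fmt"
        "unsafe"
    )

    // zeroVal stands in for runtime.zeroVal: a read-only, all-zero
    // region (1024 bytes in the real runtime, as an assumption here).
    var zeroVal [1024]byte

    // isZeroFast: if a value's data pointer already points at the
    // shared zero region, the value must be zero and no byte-by-byte
    // comparison is needed.
    func isZeroFast(data unsafe.Pointer) bool {
        return data == unsafe.Pointer(&zeroVal[0])
    }

    func main() {
        fmt.Println(isZeroFast(unsafe.Pointer(&zeroVal[0]))) // true
    }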

In this CL, we therefore dig up the zeroVal symbol and use the
machinery from earlier in our stack to make the interface data pointer
point to zeroVal when we see examples like:

    sink = S{}
or:
    s := S{}
    sink = s
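
One way to observe the effect is to compare the interface data
pointers of two different zero values (a sketch that mirrors the
runtime's empty-interface layout via a hypothetical eface type;
illustrative only, and dependent on the optimization firing):

    package main

    import (
        "fmt"
        "unsafe"
    )

    type S struct{ i1, i2 int64 }

    // eface mirrors the runtime's empty-interface layout (an
    // assumption about internals, for illustration only).
    type eface struct {
        typ, data unsafe.Pointer
    }

    var sink1, sink2 any

    func main() {
        sink1 = S{}        // interface conversion of a zero value
        sink2 = [64]byte{} // a different zero value
        p1 := (*eface)(unsafe.Pointer(&sink1)).data
        p2 := (*eface)(unsafe.Pointer(&sink2)).data
        // With this CL, both data pointers should refer to
        // runtime.zeroVal, so they compare equal despite the
        // differing types.
        fmt.Println(p1 == p2)
    }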

CL 649076 (also earlier in our stack) added most of the tests
along with debug diagnostics in convert.go to make it easier
to test this change.

We add a benchmark in reflect to show examples of the performance
benefit. The left column is our immediately prior CL 649555, and the
right is this CL. (The arrays of structs here do not seem to benefit;
we attempt to address that in our next CL.)

goos: linux
goarch: amd64
pkg: reflect
cpu: Intel(R) Xeon(R) CPU @ 2.80GHz
                                          │  cl-649555   │           new                       │
                                          │    sec/op    │   sec/op     vs base                │
Zero/IsZero/ByteArray/size=16-4              4.176n ± 0%   4.171n ± 0%        ~ (p=0.151 n=20)
Zero/IsZero/ByteArray/size=64-4              6.921n ± 0%   3.864n ± 0%  -44.16% (p=0.000 n=20)
Zero/IsZero/ByteArray/size=1024-4           21.210n ± 0%   3.878n ± 0%  -81.72% (p=0.000 n=20)
Zero/IsZero/BigStruct/size=1024-4           25.505n ± 0%   5.061n ± 0%  -80.15% (p=0.000 n=20)
Zero/IsZero/SmallStruct/size=16-4            4.188n ± 0%   4.191n ± 0%        ~ (p=0.106 n=20)
Zero/IsZero/SmallStructArray/size=64-4       8.639n ± 0%   8.636n ± 0%        ~ (p=0.973 n=20)
Zero/IsZero/SmallStructArray/size=1024-4     79.99n ± 0%   80.06n ± 0%        ~ (p=0.213 n=20)
Zero/IsZero/Time/size=24-4                   7.232n ± 0%   3.865n ± 0%  -46.56% (p=0.000 n=20)
Zero/SetZero/ByteArray/size=16-4             13.47n ± 0%   13.09n ± 0%   -2.78% (p=0.000 n=20)
Zero/SetZero/ByteArray/size=64-4             14.14n ± 0%   13.70n ± 0%   -3.15% (p=0.000 n=20)
Zero/SetZero/ByteArray/size=1024-4           24.22n ± 0%   20.18n ± 0%  -16.68% (p=0.000 n=20)
Zero/SetZero/BigStruct/size=1024-4           24.24n ± 0%   20.18n ± 0%  -16.73% (p=0.000 n=20)
Zero/SetZero/SmallStruct/size=16-4           13.45n ± 0%   13.10n ± 0%   -2.60% (p=0.000 n=20)
Zero/SetZero/SmallStructArray/size=64-4      14.12n ± 0%   13.69n ± 0%   -3.05% (p=0.000 n=20)
Zero/SetZero/SmallStructArray/size=1024-4    24.62n ± 0%   21.61n ± 0%  -12.26% (p=0.000 n=20)
Zero/SetZero/Time/size=24-4                  13.59n ± 0%   13.40n ± 0%   -1.40% (p=0.000 n=20)
geomean                                      14.06n        10.19n       -27.54%

Finally, here are results from the benchmark example from #71323.
Note, however, that almost all the benefit shown here is from our
earlier CL 649555, which is a more general-purpose change and
eliminates the allocation using a different read-only global than
this CL does.
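
For context, that benchmark is roughly of the following shape. This
is a reconstruction from the names in the tables below; the payload
type, its size, and the exact reflect operation in #71323 may differ:

    package bench

    import (
        "reflect"
        "testing"
    )

    // payload is a stand-in type; the 224 B/op below suggests the
    // real type is roughly 224 bytes.
    type payload struct{ a [28]int64 }

    var (
        sinkAny  any
        sinkBool bool
        val      = reflect.ValueOf(&payload{}).Elem()
    )

    func BenchmarkInterfaceAny(b *testing.B) {
        for i := 0; i < b.N; i++ {
            sinkAny = payload{} // the zero-value conversion this CL targets
        }
    }

    func BenchmarkReflectValue(b *testing.B) {
        for i := 0; i < b.N; i++ {
            sinkBool = val.IsZero()
        }
    }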

             │   go1.24       │               new                    │
             │     sec/op     │    sec/op     vs base                │
InterfaceAny   112.6000n ± 5%   0.8078n ± 3%  -99.28% (p=0.000 n=20)
ReflectValue      11.63n ± 2%    11.59n ± 0%        ~ (p=0.330 n=20)

             │  go1.24.out  │                 new.out                 │
             │     B/op     │    B/op     vs base                     │
InterfaceAny   224.0 ± 0%       0.0 ± 0%  -100.00% (p=0.000 n=20)
ReflectValue   0.000 ± 0%     0.000 ± 0%         ~ (p=1.000 n=20) ¹

             │  go1.24.out  │                 new.out                 │
             │  allocs/op   │ allocs/op   vs base                     │
InterfaceAny   1.000 ± 0%     0.000 ± 0%  -100.00% (p=0.000 n=20)
ReflectValue   0.000 ± 0%     0.000 ± 0%         ~ (p=1.000 n=20) ¹

Updates #71359
Updates #71323

Change-Id: I64d8cf1a7900f011d2ec59b948388aeda1150676
Reviewed-on: https://go-review.googlesource.com/c/go/+/649078
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Keith Randall <khr@golang.org>
Reviewed-by: Keith Randall <khr@google.com>
Reviewed-by: David Chase <drchase@google.com>
2025-05-21 12:24:22 -07:00

// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package reflect_test

import (
	"fmt"
	. "reflect"
	"strconv"
	"testing"
	"time"
)

var sourceAll = struct {
	Bool         Value
	String       Value
	Bytes        Value
	NamedBytes   Value
	BytesArray   Value
	SliceAny     Value
	MapStringAny Value
}{
	Bool:         ValueOf(new(bool)).Elem(),
	String:       ValueOf(new(string)).Elem(),
	Bytes:        ValueOf(new([]byte)).Elem(),
	NamedBytes:   ValueOf(new(namedBytes)).Elem(),
	BytesArray:   ValueOf(new([32]byte)).Elem(),
	SliceAny:     ValueOf(new([]any)).Elem(),
	MapStringAny: ValueOf(new(map[string]any)).Elem(),
}

var sinkAll struct {
	RawBool   bool
	RawString string
	RawBytes  []byte
	RawInt    int
}

func BenchmarkBool(b *testing.B) {
	for i := 0; i < b.N; i++ {
		sinkAll.RawBool = sourceAll.Bool.Bool()
	}
}

func BenchmarkString(b *testing.B) {
	for i := 0; i < b.N; i++ {
		sinkAll.RawString = sourceAll.String.String()
	}
}

func BenchmarkBytes(b *testing.B) {
	for i := 0; i < b.N; i++ {
		sinkAll.RawBytes = sourceAll.Bytes.Bytes()
	}
}

func BenchmarkNamedBytes(b *testing.B) {
	for i := 0; i < b.N; i++ {
		sinkAll.RawBytes = sourceAll.NamedBytes.Bytes()
	}
}

func BenchmarkBytesArray(b *testing.B) {
	for i := 0; i < b.N; i++ {
		sinkAll.RawBytes = sourceAll.BytesArray.Bytes()
	}
}

func BenchmarkSliceLen(b *testing.B) {
	for i := 0; i < b.N; i++ {
		sinkAll.RawInt = sourceAll.SliceAny.Len()
	}
}

func BenchmarkMapLen(b *testing.B) {
	for i := 0; i < b.N; i++ {
		sinkAll.RawInt = sourceAll.MapStringAny.Len()
	}
}

func BenchmarkStringLen(b *testing.B) {
	for i := 0; i < b.N; i++ {
		sinkAll.RawInt = sourceAll.String.Len()
	}
}

func BenchmarkArrayLen(b *testing.B) {
	for i := 0; i < b.N; i++ {
		sinkAll.RawInt = sourceAll.BytesArray.Len()
	}
}

func BenchmarkSliceCap(b *testing.B) {
	for i := 0; i < b.N; i++ {
		sinkAll.RawInt = sourceAll.SliceAny.Cap()
	}
}

func BenchmarkDeepEqual(b *testing.B) {
	for _, bb := range deepEqualPerfTests {
		b.Run(ValueOf(bb.x).Type().String(), func(b *testing.B) {
			b.ReportAllocs()
			for i := 0; i < b.N; i++ {
				sink = DeepEqual(bb.x, bb.y)
			}
		})
	}
}

func BenchmarkMapsDeepEqual(b *testing.B) {
	m1 := map[int]int{
		1: 1, 2: 2,
	}
	m2 := map[int]int{
		1: 1, 2: 2,
	}
	for i := 0; i < b.N; i++ {
		DeepEqual(m1, m2)
	}
}

func BenchmarkIsZero(b *testing.B) {
	type Int4 struct {
		a, b, c, d int
	}
	type Int1024 struct {
		a [1024]int
	}
	type Int512 struct {
		a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16 [16]S
	}
	s := struct {
		ArrayComparable      [4]T
		ArrayIncomparable    [4]_Complex
		StructComparable     T
		StructIncomparable   _Complex
		ArrayInt_4           [4]int
		ArrayInt_1024        [1024]int
		ArrayInt_1024_NoZero [1024]int
		Struct4Int           Int4
		ArrayStruct4Int_1024 [256]Int4
		ArrayChanInt_1024    [1024]chan int
		StructInt_512        Int512
	}{}
	s.ArrayInt_1024_NoZero[512] = 1
	source := ValueOf(s)
	for i := 0; i < source.NumField(); i++ {
		name := source.Type().Field(i).Name
		value := source.Field(i)
		b.Run(name, func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				sink = value.IsZero()
			}
		})
	}
}

func BenchmarkSetZero(b *testing.B) {
	source := ValueOf(new(struct {
		Bool      bool
		Int       int64
		Uint      uint64
		Float     float64
		Complex   complex128
		Array     [4]Value
		Chan      chan Value
		Func      func() Value
		Interface interface{ String() string }
		Map       map[string]Value
		Pointer   *Value
		Slice     []Value
		String    string
		Struct    Value
	})).Elem()
	for i := 0; i < source.NumField(); i++ {
		name := source.Type().Field(i).Name
		value := source.Field(i)
		zero := Zero(value.Type())
		b.Run(name+"/Direct", func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				value.SetZero()
			}
		})
		b.Run(name+"/CachedZero", func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				value.Set(zero)
			}
		})
		b.Run(name+"/NewZero", func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				value.Set(Zero(value.Type()))
			}
		})
	}
}

// BenchmarkZero overlaps some with BenchmarkSetZero,
// but the inputs are set up differently to exercise
// different optimizations.
func BenchmarkZero(b *testing.B) {
	type bm struct {
		name    string
		zero    Value
		nonZero Value
		size    int
	}
	type Small struct {
		A    int64
		B, C bool
	}
	type Big struct {
		A    int64
		B, C bool
		D    [1008]byte
	}
	entry := func(name string, zero any, nonZero any) bm {
		return bm{name, ValueOf(zero), ValueOf(nonZero).Elem(), int(TypeOf(zero).Size())}
	}
	nonZeroTime := func() *time.Time { t := time.Now(); return &t }
	bms := []bm{
		entry("ByteArray", [16]byte{}, &[16]byte{1}),
		entry("ByteArray", [64]byte{}, &[64]byte{1}),
		entry("ByteArray", [1024]byte{}, &[1024]byte{1}),
		entry("BigStruct", Big{}, &Big{A: 1}),
		entry("SmallStruct", Small{}, &Small{A: 1}),
		entry("SmallStructArray", [4]Small{}, &[4]Small{0: {A: 1}}),
		entry("SmallStructArray", [64]Small{}, &[64]Small{0: {A: 1}}),
		entry("Time", time.Time{}, nonZeroTime()),
	}
	for _, bm := range bms {
		b.Run(fmt.Sprintf("IsZero/%s/size=%d", bm.name, bm.size), func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				bm.zero.IsZero()
			}
		})
	}
	for _, bm := range bms {
		b.Run(fmt.Sprintf("SetZero/%s/size=%d", bm.name, bm.size), func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				bm.nonZero.Set(bm.zero)
			}
		})
	}
}

func BenchmarkSelect(b *testing.B) {
	channel := make(chan int)
	close(channel)
	var cases []SelectCase
	for i := 0; i < 8; i++ {
		cases = append(cases, SelectCase{
			Dir:  SelectRecv,
			Chan: ValueOf(channel),
		})
	}
	for _, numCases := range []int{1, 4, 8} {
		b.Run(strconv.Itoa(numCases), func(b *testing.B) {
			b.ReportAllocs()
			for i := 0; i < b.N; i++ {
				_, _, _ = Select(cases[:numCases])
			}
		})
	}
}

func BenchmarkCall(b *testing.B) {
	fv := ValueOf(func(a, b string) {})
	b.ReportAllocs()
	b.RunParallel(func(pb *testing.PB) {
		args := []Value{ValueOf("a"), ValueOf("b")}
		for pb.Next() {
			fv.Call(args)
		}
	})
}

type myint int64

func (i *myint) inc() {
	*i = *i + 1
}

func BenchmarkCallMethod(b *testing.B) {
	b.ReportAllocs()
	z := new(myint)
	v := ValueOf(z.inc)
	for i := 0; i < b.N; i++ {
		v.Call(nil)
	}
}

func BenchmarkCallArgCopy(b *testing.B) {
	byteArray := func(n int) Value {
		return Zero(ArrayOf(n, TypeOf(byte(0))))
	}
	sizes := [...]struct {
		fv  Value
		arg Value
	}{
		{ValueOf(func(a [128]byte) {}), byteArray(128)},
		{ValueOf(func(a [256]byte) {}), byteArray(256)},
		{ValueOf(func(a [1024]byte) {}), byteArray(1024)},
		{ValueOf(func(a [4096]byte) {}), byteArray(4096)},
		{ValueOf(func(a [65536]byte) {}), byteArray(65536)},
	}
	for _, size := range sizes {
		bench := func(b *testing.B) {
			args := []Value{size.arg}
			b.SetBytes(int64(size.arg.Len()))
			b.ResetTimer()
			b.RunParallel(func(pb *testing.PB) {
				for pb.Next() {
					size.fv.Call(args)
				}
			})
		}
		name := fmt.Sprintf("size=%v", size.arg.Len())
		b.Run(name, bench)
	}
}

func BenchmarkPtrTo(b *testing.B) {
	// Construct a type with a zero ptrToThis.
	type T struct{ int }
	t := SliceOf(TypeOf(T{}))
	ptrToThis := ValueOf(t).Elem().FieldByName("PtrToThis")
	if !ptrToThis.IsValid() {
		b.Skipf("%v has no ptrToThis field; was it removed from rtype?", t) // TODO fix this at top of refactoring
		// b.Fatalf("%v has no ptrToThis field; was it removed from rtype?", t)
	}
	if ptrToThis.Int() != 0 {
		b.Fatalf("%v.ptrToThis unexpectedly nonzero", t)
	}
	b.ResetTimer()
	// Now benchmark calling PointerTo on it: we'll have to hit the ptrMap cache on
	// every call.
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			PointerTo(t)
		}
	})
}

type B1 struct {
	X int
	Y int
	Z int
}

func BenchmarkFieldByName1(b *testing.B) {
	t := TypeOf(B1{})
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			t.FieldByName("Z")
		}
	})
}

func BenchmarkFieldByName2(b *testing.B) {
	t := TypeOf(S3{})
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			t.FieldByName("B")
		}
	})
}

func BenchmarkFieldByName3(b *testing.B) {
	t := TypeOf(R0{})
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			t.FieldByName("X")
		}
	})
}

type S struct {
	i1 int64
	i2 int64
}

func BenchmarkInterfaceBig(b *testing.B) {
	v := ValueOf(S{})
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			v.Interface()
		}
	})
	b.StopTimer()
}

func BenchmarkInterfaceSmall(b *testing.B) {
	v := ValueOf(int64(0))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			v.Interface()
		}
	})
}

func BenchmarkNew(b *testing.B) {
	v := TypeOf(XM{})
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			New(v)
		}
	})
}

func BenchmarkMap(b *testing.B) {
	type V *int
	type S string
	value := ValueOf((V)(nil))
	stringKeys := []string{}
	mapOfStrings := map[string]V{}
	uint64Keys := []uint64{}
	mapOfUint64s := map[uint64]V{}
	userStringKeys := []S{}
	mapOfUserStrings := map[S]V{}
	for i := 0; i < 100; i++ {
		stringKey := fmt.Sprintf("key%d", i)
		stringKeys = append(stringKeys, stringKey)
		mapOfStrings[stringKey] = nil
		uint64Key := uint64(i)
		uint64Keys = append(uint64Keys, uint64Key)
		mapOfUint64s[uint64Key] = nil
		userStringKey := S(fmt.Sprintf("key%d", i))
		userStringKeys = append(userStringKeys, userStringKey)
		mapOfUserStrings[userStringKey] = nil
	}
	tests := []struct {
		label          string
		m, keys, value Value
	}{
		{"StringKeys", ValueOf(mapOfStrings), ValueOf(stringKeys), value},
		{"Uint64Keys", ValueOf(mapOfUint64s), ValueOf(uint64Keys), value},
		{"UserStringKeys", ValueOf(mapOfUserStrings), ValueOf(userStringKeys), value},
	}
	for _, tt := range tests {
		b.Run(tt.label, func(b *testing.B) {
			b.Run("MapIndex", func(b *testing.B) {
				b.ReportAllocs()
				for i := 0; i < b.N; i++ {
					for j := tt.keys.Len() - 1; j >= 0; j-- {
						tt.m.MapIndex(tt.keys.Index(j))
					}
				}
			})
			b.Run("SetMapIndex", func(b *testing.B) {
				b.ReportAllocs()
				for i := 0; i < b.N; i++ {
					for j := tt.keys.Len() - 1; j >= 0; j-- {
						tt.m.SetMapIndex(tt.keys.Index(j), tt.value)
					}
				}
			})
		})
	}
}

func BenchmarkMapIterNext(b *testing.B) {
	m := ValueOf(map[string]int{"a": 0, "b": 1, "c": 2, "d": 3})
	it := m.MapRange()
	for i := 0; i < b.N; i++ {
		for it.Next() {
		}
		it.Reset(m)
	}
}