diff --git a/src/cmd/compile/internal/reflectdata/alg.go b/src/cmd/compile/internal/reflectdata/alg.go
index c87cd8e257a..eb46ed84aa2 100644
--- a/src/cmd/compile/internal/reflectdata/alg.go
+++ b/src/cmd/compile/internal/reflectdata/alg.go
@@ -18,35 +18,6 @@ import (
 	"cmd/internal/obj"
 )
 
-// AlgType returns the fixed-width AMEMxx variants instead of the general
-// AMEM kind when possible.
-func AlgType(t *types.Type) types.AlgKind {
-	a := types.AlgType(t)
-	if a == types.AMEM {
-		if t.Alignment() < int64(base.Ctxt.Arch.Alignment) && t.Alignment() < t.Size() {
-			// For example, we can't treat [2]int16 as an int32 if int32s require
-			// 4-byte alignment. See issue 46283.
-			return a
-		}
-		switch t.Size() {
-		case 0:
-			return types.AMEM0
-		case 1:
-			return types.AMEM8
-		case 2:
-			return types.AMEM16
-		case 4:
-			return types.AMEM32
-		case 8:
-			return types.AMEM64
-		case 16:
-			return types.AMEM128
-		}
-	}
-
-	return a
-}
-
 // genhash returns a symbol which is the closure used to compute
 // the hash of a value of type t.
 func genhash(t *types.Type) *obj.LSym {
@@ -266,7 +237,7 @@ func sysClosure(name string) *obj.LSym {
 // geneq returns a symbol which is the closure used to compute
 // equality for two objects of type t.
 func geneq(t *types.Type) *obj.LSym {
-	switch AlgType(t) {
+	switch types.AlgType(t) {
 	case types.ANOEQ, types.ANOALG:
 		// The runtime will panic if it tries to compare
 		// a type with a nil equality function.
diff --git a/src/cmd/compile/internal/reflectdata/map.go b/src/cmd/compile/internal/reflectdata/map.go
index 2b43d4af27a..30a9dcc28ca 100644
--- a/src/cmd/compile/internal/reflectdata/map.go
+++ b/src/cmd/compile/internal/reflectdata/map.go
@@ -271,10 +271,10 @@ func writeMapType(t *types.Type, lsym *obj.LSym, c rttype.Cursor) {
 	slotTyp := gtyp.Field(1).Type.Elem()
 	elemOff := slotTyp.Field(1).Offset
-	if AlgType(t.Key()) == types.AMEM64 && elemOff != 8 {
+	if types.AlgType(t.Key()) == types.AMEM && t.Key().Size() == 8 && elemOff != 8 {
 		base.Fatalf("runtime assumes elemOff for 8-byte keys is 8, got %d", elemOff)
 	}
-	if AlgType(t.Key()) == types.ASTRING && elemOff != int64(2*types.PtrSize) {
+	if types.AlgType(t.Key()) == types.ASTRING && elemOff != int64(2*types.PtrSize) {
 		base.Fatalf("runtime assumes elemOff for string keys is %d, got %d", 2*types.PtrSize, elemOff)
 	}
diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go
index 25add3d8043..08f36095a55 100644
--- a/src/cmd/compile/internal/walk/walk.go
+++ b/src/cmd/compile/internal/walk/walk.go
@@ -10,7 +10,6 @@ import (
 
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/ir"
-	"cmd/compile/internal/reflectdata"
 	"cmd/compile/internal/rttype"
 	"cmd/compile/internal/ssagen"
 	"cmd/compile/internal/typecheck"
@@ -194,7 +193,7 @@ func mapfast(t *types.Type) int {
 	if t.Elem().Size() > abi.MapMaxElemBytes {
 		return mapslow
 	}
-	switch reflectdata.AlgType(t.Key()) {
+	switch algType(t.Key()) {
 	case types.AMEM32:
 		if !t.Key().HasPointers() {
 			return mapfast32
@@ -218,6 +217,35 @@ func mapfast(t *types.Type) int {
 	return mapslow
 }
 
+// algType returns the fixed-width AMEMxx variants instead of the general
+// AMEM kind when possible.
+func algType(t *types.Type) types.AlgKind {
+	a := types.AlgType(t)
+	if a == types.AMEM {
+		if t.Alignment() < int64(base.Ctxt.Arch.Alignment) && t.Alignment() < t.Size() {
+			// For example, we can't treat [2]int16 as an int32 if int32s require
+			// 4-byte alignment. See issue 46283.
+			return a
+		}
+		switch t.Size() {
+		case 0:
+			return types.AMEM0
+		case 1:
+			return types.AMEM8
+		case 2:
+			return types.AMEM16
+		case 4:
+			return types.AMEM32
+		case 8:
+			return types.AMEM64
+		case 16:
+			return types.AMEM128
+		}
+	}
+
+	return a
+}
+
 func walkAppendArgs(n *ir.CallExpr, init *ir.Nodes) {
 	walkExprListSafe(n.Args, init)
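
Illustrative sketch (not part of the patch above): the relocated algType helper picks a fixed-width AMEMxx kind only when the type's alignment allows it to be loaded as a single scalar of that size. The standalone snippet below mirrors that classification with simplified stand-ins; memKind, its string results, and the numeric inputs are assumptions for illustration, not the compiler's real API.

package main

import "fmt"

// memKind mirrors algType's size/alignment check: a fixed-width kind is
// chosen only if the value is aligned at least as strictly as the
// architecture's scalar alignment, or its alignment already covers its size.
func memKind(size, align, archAlign int64) string {
	if align < archAlign && align < size {
		// e.g. [2]int16: size 4 but alignment 2; on a strict-alignment
		// target it cannot be compared as one int32 load (issue 46283).
		return "AMEM"
	}
	switch size {
	case 0:
		return "AMEM0"
	case 1:
		return "AMEM8"
	case 2:
		return "AMEM16"
	case 4:
		return "AMEM32"
	case 8:
		return "AMEM64"
	case 16:
		return "AMEM128"
	}
	return "AMEM"
}

func main() {
	fmt.Println(memKind(8, 8, 4)) // AMEM64: 8-byte, 8-byte-aligned key
	fmt.Println(memKind(4, 2, 4)) // AMEM: [2]int16 under 4-byte scalar alignment
}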