cmd/compile: remove the NORconst op on mips{,64}

In the mips{,64} instruction sets and their extensions there is no
NORI instruction, so the NORconst SSA op has no immediate-form
encoding. Remove the op and its rewrite rules, and express the
remaining uses (the Com lowering and the AtomicAnd8 lowering) as NOR
with an explicit zero constant instead.
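
Since NOR computes ^(arg0 | arg1), a NOR whose other operand is the
constant zero still yields the bitwise complement, which is why
(Com x) can be lowered to (NOR (MOVWconst [0]) x) in place of
(NORconst [0] x). A minimal Go sketch of that identity (for
illustration only, not part of the change):

    package main

    import "fmt"

    // nor mirrors the MIPS NOR op: ^(arg0 | arg1).
    func nor(a, b uint32) uint32 { return ^(a | b) }

    func main() {
            x := uint32(0x12345678)
            // ^(0|x) == ^x, so rewriting (NORconst [0] x) as
            // (NOR (MOVWconst [0]) x) preserves the result.
            fmt.Println(nor(0, x) == ^x) // true
    }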

Change-Id: If008442c792297d011b3d0c1e8501e62e32ab175
Reviewed-on: https://go-review.googlesource.com/c/go/+/735900
Reviewed-by: Michael Pratt <mpratt@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: abner chenc <chenguoqi@loongson.cn>
Reviewed-by: Cherry Mui <cherryyz@google.com>
Author: Xiaolin Zhao
Date: 2026-01-13 11:30:21 +08:00
Committed by: abner chenc
Parent: a0796d8af6
Commit: 514790c2b9
9 changed files with 62 additions and 199 deletions

View File

@@ -197,7 +197,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
ssa.OpMIPSANDconst,
ssa.OpMIPSORconst,
ssa.OpMIPSXORconst,
ssa.OpMIPSNORconst,
ssa.OpMIPSSLLconst,
ssa.OpMIPSSRLconst,
ssa.OpMIPSSRAconst,

View File

@@ -191,7 +191,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
ssa.OpMIPS64ANDconst,
ssa.OpMIPS64ORconst,
ssa.OpMIPS64XORconst,
ssa.OpMIPS64NORconst,
ssa.OpMIPS64SLLVconst,
ssa.OpMIPS64SRLVconst,
ssa.OpMIPS64SRAVconst,

View File

@@ -127,7 +127,7 @@
(Neg(32|16|8) ...) => (NEG ...)
(Neg(32|64)F ...) => (NEG(F|D) ...)
(Com(32|16|8) x) => (NORconst [0] x)
(Com(32|16|8) x) => (NOR (MOVWconst [0]) x)
(Sqrt ...) => (SQRTD ...)
(Sqrt32 ...) => (SQRTF ...)
@@ -382,7 +382,7 @@
(OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val)
(SLLconst <typ.UInt32> [3]
(ANDconst <typ.UInt32> [3] ptr)))
(NORconst [0] <typ.UInt32> (SLL <typ.UInt32>
(NOR (MOVWconst [0]) <typ.UInt32> (SLL <typ.UInt32>
(MOVWconst [0xff]) (SLLconst <typ.UInt32> [3]
(ANDconst <typ.UInt32> [3] ptr))))) mem)
@@ -401,7 +401,7 @@
(SLLconst <typ.UInt32> [3]
(ANDconst <typ.UInt32> [3]
(XORconst <typ.UInt32> [3] ptr))))
(NORconst [0] <typ.UInt32> (SLL <typ.UInt32>
(NOR (MOVWconst [0]) <typ.UInt32> (SLL <typ.UInt32>
(MOVWconst [0xff]) (SLLconst <typ.UInt32> [3]
(ANDconst <typ.UInt32> [3]
(XORconst <typ.UInt32> [3] ptr)))))) mem)
@@ -599,7 +599,6 @@
(AND x (MOVWconst [c])) => (ANDconst [c] x)
(OR x (MOVWconst [c])) => (ORconst [c] x)
(XOR x (MOVWconst [c])) => (XORconst [c] x)
(NOR x (MOVWconst [c])) => (NORconst [c] x)
(SLL x (MOVWconst [c])) => (SLLconst x [c&31])
(SRL x (MOVWconst [c])) => (SRLconst x [c&31])
@@ -648,7 +647,6 @@
(ORconst [0] x) => x
(ORconst [-1] _) => (MOVWconst [-1])
(XORconst [0] x) => x
(XORconst [-1] x) => (NORconst [0] x)
// generic constant folding
(ADDconst [c] (MOVWconst [d])) => (MOVWconst [int32(c+d)])
@@ -673,7 +671,6 @@
(ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x)
(XORconst [c] (MOVWconst [d])) => (MOVWconst [c^d])
(XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x)
(NORconst [c] (MOVWconst [d])) => (MOVWconst [^(c|d)])
(NEG (MOVWconst [c])) => (MOVWconst [-c])
(MOVBreg (MOVWconst [c])) => (MOVWconst [int32(int8(c))])
(MOVBUreg (MOVWconst [c])) => (MOVWconst [int32(uint8(c))])

View File

@@ -433,7 +433,7 @@
(OR <typ.UInt64> (SLLV <typ.UInt32> (ZeroExt8to32 val)
(SLLVconst <typ.UInt64> [3]
(ANDconst <typ.UInt64> [3] ptr)))
(NORconst [0] <typ.UInt64> (SLLV <typ.UInt64>
(NOR (MOVVconst [0]) <typ.UInt64> (SLLV <typ.UInt64>
(MOVVconst [0xff]) (SLLVconst <typ.UInt64> [3]
(ANDconst <typ.UInt64> [3] ptr))))) mem)
@@ -452,7 +452,7 @@
(SLLVconst <typ.UInt64> [3]
(ANDconst <typ.UInt64> [3]
(XORconst <typ.UInt64> [3] ptr))))
(NORconst [0] <typ.UInt64> (SLLV <typ.UInt64>
(NOR (MOVVconst [0]) <typ.UInt64> (SLLV <typ.UInt64>
(MOVVconst [0xff]) (SLLVconst <typ.UInt64> [3]
(ANDconst <typ.UInt64> [3]
(XORconst <typ.UInt64> [3] ptr)))))) mem)
@@ -668,7 +668,6 @@
(AND x (MOVVconst [c])) && is32Bit(c) => (ANDconst [c] x)
(OR x (MOVVconst [c])) && is32Bit(c) => (ORconst [c] x)
(XOR x (MOVVconst [c])) && is32Bit(c) => (XORconst [c] x)
(NOR x (MOVVconst [c])) && is32Bit(c) => (NORconst [c] x)
(SLLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0])
(SRLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0])
@@ -711,7 +710,6 @@
(ORconst [0] x) => x
(ORconst [-1] _) => (MOVVconst [-1])
(XORconst [0] x) => x
(XORconst [-1] x) => (NORconst [0] x)
// generic constant folding
(ADDVconst [c] (MOVVconst [d])) => (MOVVconst [c+d])
@@ -734,7 +732,6 @@
(ORconst [c] (ORconst [d] x)) && is32Bit(c|d) => (ORconst [c|d] x)
(XORconst [c] (MOVVconst [d])) => (MOVVconst [c^d])
(XORconst [c] (XORconst [d] x)) && is32Bit(c^d) => (XORconst [c^d] x)
(NORconst [c] (MOVVconst [d])) => (MOVVconst [^(c|d)])
(NEGV (MOVVconst [c])) => (MOVVconst [-c])
(MOVBreg (MOVVconst [c])) => (MOVVconst [int64(int8(c))])
(MOVBUreg (MOVVconst [c])) => (MOVVconst [int64(uint8(c))])

View File

@@ -189,7 +189,6 @@ func init() {
{name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true, typ: "UInt64"}, // arg0 ^ arg1
{name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64", typ: "UInt64"}, // arg0 ^ auxInt
{name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true}, // ^(arg0 | arg1)
{name: "NORconst", argLength: 1, reg: gp11, asm: "NOR", aux: "Int64"}, // ^(arg0 | auxInt)
{name: "NEGV", argLength: 1, reg: gp11}, // -arg0
{name: "NEGF", argLength: 1, reg: fp11, asm: "NEGF"}, // -arg0, float32

View File

@@ -173,7 +173,6 @@ func init() {
{name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true, typ: "UInt32"}, // arg0 ^ arg1
{name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int32", typ: "UInt32"}, // arg0 ^ auxInt
{name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true}, // ^(arg0 | arg1)
{name: "NORconst", argLength: 1, reg: gp11, asm: "NOR", aux: "Int32"}, // ^(arg0 | auxInt)
{name: "NEG", argLength: 1, reg: gp11}, // -arg0
{name: "NEGF", argLength: 1, reg: fp11, asm: "NEGF"}, // -arg0, float32

View File

@@ -4709,7 +4709,6 @@ const (
OpMIPSXOR
OpMIPSXORconst
OpMIPSNOR
OpMIPSNORconst
OpMIPSNEG
OpMIPSNEGF
OpMIPSNEGD
@@ -4825,7 +4824,6 @@ const (
OpMIPS64XOR
OpMIPS64XORconst
OpMIPS64NOR
OpMIPS64NORconst
OpMIPS64NEGV
OpMIPS64NEGF
OpMIPS64NEGD
@@ -72738,20 +72736,6 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "NORconst",
auxType: auxInt32,
argLen: 1,
asm: mips.ANOR,
reg: regInfo{
inputs: []inputInfo{
{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
},
outputs: []outputInfo{
{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
},
},
},
{
name: "NEG",
argLen: 1,
@@ -74276,20 +74260,6 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "NORconst",
auxType: auxInt64,
argLen: 1,
asm: mips.ANOR,
reg: regInfo{
inputs: []inputInfo{
{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
},
outputs: []outputInfo{
{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
},
},
},
{
name: "NEGV",
argLen: 1,

View File

@@ -333,10 +333,6 @@ func rewriteValueMIPS(v *Value) bool {
return rewriteValueMIPS_OpMIPSMUL(v)
case OpMIPSNEG:
return rewriteValueMIPS_OpMIPSNEG(v)
case OpMIPSNOR:
return rewriteValueMIPS_OpMIPSNOR(v)
case OpMIPSNORconst:
return rewriteValueMIPS_OpMIPSNORconst(v)
case OpMIPSOR:
return rewriteValueMIPS_OpMIPSOR(v)
case OpMIPSORconst:
@@ -654,7 +650,7 @@ func rewriteValueMIPS_OpAtomicAnd8(v *Value) bool {
typ := &b.Func.Config.Types
// match: (AtomicAnd8 ptr val mem)
// cond: !config.BigEndian
// result: (LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr) (OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] ptr))) (NORconst [0] <typ.UInt32> (SLL <typ.UInt32> (MOVWconst [0xff]) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] ptr))))) mem)
// result: (LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr) (OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] ptr))) (NOR (MOVWconst [0]) <typ.UInt32> (SLL <typ.UInt32> (MOVWconst [0xff]) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] ptr))))) mem)
for {
ptr := v_0
val := v_1
@@ -678,20 +674,21 @@ func rewriteValueMIPS_OpAtomicAnd8(v *Value) bool {
v6.AddArg(ptr)
v5.AddArg(v6)
v3.AddArg2(v4, v5)
v7 := b.NewValue0(v.Pos, OpMIPSNORconst, typ.UInt32)
v7.AuxInt = int32ToAuxInt(0)
v8 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
v9 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v9.AuxInt = int32ToAuxInt(0xff)
v8.AddArg2(v9, v5)
v7.AddArg(v8)
v7 := b.NewValue0(v.Pos, OpMIPSNOR, typ.UInt32)
v8 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v8.AuxInt = int32ToAuxInt(0)
v9 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
v10 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v10.AuxInt = int32ToAuxInt(0xff)
v9.AddArg2(v10, v5)
v7.AddArg2(v8, v9)
v2.AddArg2(v3, v7)
v.AddArg3(v0, v2, mem)
return true
}
// match: (AtomicAnd8 ptr val mem)
// cond: config.BigEndian
// result: (LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr) (OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] (XORconst <typ.UInt32> [3] ptr)))) (NORconst [0] <typ.UInt32> (SLL <typ.UInt32> (MOVWconst [0xff]) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] (XORconst <typ.UInt32> [3] ptr)))))) mem)
// result: (LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr) (OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] (XORconst <typ.UInt32> [3] ptr)))) (NOR (MOVWconst [0]) <typ.UInt32> (SLL <typ.UInt32> (MOVWconst [0xff]) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] (XORconst <typ.UInt32> [3] ptr)))))) mem)
for {
ptr := v_0
val := v_1
@@ -718,13 +715,14 @@ func rewriteValueMIPS_OpAtomicAnd8(v *Value) bool {
v6.AddArg(v7)
v5.AddArg(v6)
v3.AddArg2(v4, v5)
v8 := b.NewValue0(v.Pos, OpMIPSNORconst, typ.UInt32)
v8.AuxInt = int32ToAuxInt(0)
v9 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
v10 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v10.AuxInt = int32ToAuxInt(0xff)
v9.AddArg2(v10, v5)
v8.AddArg(v9)
v8 := b.NewValue0(v.Pos, OpMIPSNOR, typ.UInt32)
v9 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v9.AuxInt = int32ToAuxInt(0)
v10 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
v11 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v11.AuxInt = int32ToAuxInt(0xff)
v10.AddArg2(v11, v5)
v8.AddArg2(v9, v10)
v2.AddArg2(v3, v8)
v.AddArg3(v0, v2, mem)
return true
@@ -869,37 +867,46 @@ func rewriteValueMIPS_OpBitLen8(v *Value) bool {
}
func rewriteValueMIPS_OpCom16(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
// match: (Com16 x)
// result: (NORconst [0] x)
// result: (NOR (MOVWconst [0]) x)
for {
x := v_0
v.reset(OpMIPSNORconst)
v.AuxInt = int32ToAuxInt(0)
v.AddArg(x)
v.reset(OpMIPSNOR)
v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v0.AuxInt = int32ToAuxInt(0)
v.AddArg2(v0, x)
return true
}
}
func rewriteValueMIPS_OpCom32(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
// match: (Com32 x)
// result: (NORconst [0] x)
// result: (NOR (MOVWconst [0]) x)
for {
x := v_0
v.reset(OpMIPSNORconst)
v.AuxInt = int32ToAuxInt(0)
v.AddArg(x)
v.reset(OpMIPSNOR)
v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v0.AuxInt = int32ToAuxInt(0)
v.AddArg2(v0, x)
return true
}
}
func rewriteValueMIPS_OpCom8(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
// match: (Com8 x)
// result: (NORconst [0] x)
// result: (NOR (MOVWconst [0]) x)
for {
x := v_0
v.reset(OpMIPSNORconst)
v.AuxInt = int32ToAuxInt(0)
v.AddArg(x)
v.reset(OpMIPSNOR)
v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v0.AuxInt = int32ToAuxInt(0)
v.AddArg2(v0, x)
return true
}
}
@@ -4273,43 +4280,6 @@ func rewriteValueMIPS_OpMIPSNEG(v *Value) bool {
}
return false
}
func rewriteValueMIPS_OpMIPSNOR(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (NOR x (MOVWconst [c]))
// result: (NORconst [c] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
if v_1.Op != OpMIPSMOVWconst {
continue
}
c := auxIntToInt32(v_1.AuxInt)
v.reset(OpMIPSNORconst)
v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
break
}
return false
}
func rewriteValueMIPS_OpMIPSNORconst(v *Value) bool {
v_0 := v.Args[0]
// match: (NORconst [c] (MOVWconst [d]))
// result: (MOVWconst [^(c|d)])
for {
c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpMIPSMOVWconst {
break
}
d := auxIntToInt32(v_0.AuxInt)
v.reset(OpMIPSMOVWconst)
v.AuxInt = int32ToAuxInt(^(c | d))
return true
}
return false
}
func rewriteValueMIPS_OpMIPSOR(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
@@ -5046,18 +5016,6 @@ func rewriteValueMIPS_OpMIPSXORconst(v *Value) bool {
v.copyOf(x)
return true
}
// match: (XORconst [-1] x)
// result: (NORconst [0] x)
for {
if auxIntToInt32(v.AuxInt) != -1 {
break
}
x := v_0
v.reset(OpMIPSNORconst)
v.AuxInt = int32ToAuxInt(0)
v.AddArg(x)
return true
}
// match: (XORconst [c] (MOVWconst [d]))
// result: (MOVWconst [c^d])
for {

View File

@@ -370,10 +370,6 @@ func rewriteValueMIPS64(v *Value) bool {
return rewriteValueMIPS64_OpMIPS64MOVWstore(v)
case OpMIPS64NEGV:
return rewriteValueMIPS64_OpMIPS64NEGV(v)
case OpMIPS64NOR:
return rewriteValueMIPS64_OpMIPS64NOR(v)
case OpMIPS64NORconst:
return rewriteValueMIPS64_OpMIPS64NORconst(v)
case OpMIPS64OR:
return rewriteValueMIPS64_OpMIPS64OR(v)
case OpMIPS64ORconst:
@@ -719,7 +715,7 @@ func rewriteValueMIPS64_OpAtomicAnd8(v *Value) bool {
typ := &b.Func.Config.Types
// match: (AtomicAnd8 ptr val mem)
// cond: !config.BigEndian
// result: (LoweredAtomicAnd32 (AND <typ.UInt32Ptr> (MOVVconst [^3]) ptr) (OR <typ.UInt64> (SLLV <typ.UInt32> (ZeroExt8to32 val) (SLLVconst <typ.UInt64> [3] (ANDconst <typ.UInt64> [3] ptr))) (NORconst [0] <typ.UInt64> (SLLV <typ.UInt64> (MOVVconst [0xff]) (SLLVconst <typ.UInt64> [3] (ANDconst <typ.UInt64> [3] ptr))))) mem)
// result: (LoweredAtomicAnd32 (AND <typ.UInt32Ptr> (MOVVconst [^3]) ptr) (OR <typ.UInt64> (SLLV <typ.UInt32> (ZeroExt8to32 val) (SLLVconst <typ.UInt64> [3] (ANDconst <typ.UInt64> [3] ptr))) (NOR (MOVVconst [0]) <typ.UInt64> (SLLV <typ.UInt64> (MOVVconst [0xff]) (SLLVconst <typ.UInt64> [3] (ANDconst <typ.UInt64> [3] ptr))))) mem)
for {
ptr := v_0
val := v_1
@@ -743,20 +739,21 @@ func rewriteValueMIPS64_OpAtomicAnd8(v *Value) bool {
v6.AddArg(ptr)
v5.AddArg(v6)
v3.AddArg2(v4, v5)
v7 := b.NewValue0(v.Pos, OpMIPS64NORconst, typ.UInt64)
v7.AuxInt = int64ToAuxInt(0)
v8 := b.NewValue0(v.Pos, OpMIPS64SLLV, typ.UInt64)
v9 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v9.AuxInt = int64ToAuxInt(0xff)
v8.AddArg2(v9, v5)
v7.AddArg(v8)
v7 := b.NewValue0(v.Pos, OpMIPS64NOR, typ.UInt64)
v8 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v8.AuxInt = int64ToAuxInt(0)
v9 := b.NewValue0(v.Pos, OpMIPS64SLLV, typ.UInt64)
v10 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v10.AuxInt = int64ToAuxInt(0xff)
v9.AddArg2(v10, v5)
v7.AddArg2(v8, v9)
v2.AddArg2(v3, v7)
v.AddArg3(v0, v2, mem)
return true
}
// match: (AtomicAnd8 ptr val mem)
// cond: config.BigEndian
// result: (LoweredAtomicAnd32 (AND <typ.UInt32Ptr> (MOVVconst [^3]) ptr) (OR <typ.UInt64> (SLLV <typ.UInt32> (ZeroExt8to32 val) (SLLVconst <typ.UInt64> [3] (ANDconst <typ.UInt64> [3] (XORconst <typ.UInt64> [3] ptr)))) (NORconst [0] <typ.UInt64> (SLLV <typ.UInt64> (MOVVconst [0xff]) (SLLVconst <typ.UInt64> [3] (ANDconst <typ.UInt64> [3] (XORconst <typ.UInt64> [3] ptr)))))) mem)
// result: (LoweredAtomicAnd32 (AND <typ.UInt32Ptr> (MOVVconst [^3]) ptr) (OR <typ.UInt64> (SLLV <typ.UInt32> (ZeroExt8to32 val) (SLLVconst <typ.UInt64> [3] (ANDconst <typ.UInt64> [3] (XORconst <typ.UInt64> [3] ptr)))) (NOR (MOVVconst [0]) <typ.UInt64> (SLLV <typ.UInt64> (MOVVconst [0xff]) (SLLVconst <typ.UInt64> [3] (ANDconst <typ.UInt64> [3] (XORconst <typ.UInt64> [3] ptr)))))) mem)
for {
ptr := v_0
val := v_1
@@ -783,13 +780,14 @@ func rewriteValueMIPS64_OpAtomicAnd8(v *Value) bool {
v6.AddArg(v7)
v5.AddArg(v6)
v3.AddArg2(v4, v5)
v8 := b.NewValue0(v.Pos, OpMIPS64NORconst, typ.UInt64)
v8.AuxInt = int64ToAuxInt(0)
v9 := b.NewValue0(v.Pos, OpMIPS64SLLV, typ.UInt64)
v10 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v10.AuxInt = int64ToAuxInt(0xff)
v9.AddArg2(v10, v5)
v8.AddArg(v9)
v8 := b.NewValue0(v.Pos, OpMIPS64NOR, typ.UInt64)
v9 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v9.AuxInt = int64ToAuxInt(0)
v10 := b.NewValue0(v.Pos, OpMIPS64SLLV, typ.UInt64)
v11 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v11.AuxInt = int64ToAuxInt(0xff)
v10.AddArg2(v11, v5)
v8.AddArg2(v9, v10)
v2.AddArg2(v3, v8)
v.AddArg3(v0, v2, mem)
return true
@@ -4526,47 +4524,6 @@ func rewriteValueMIPS64_OpMIPS64NEGV(v *Value) bool {
}
return false
}
func rewriteValueMIPS64_OpMIPS64NOR(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (NOR x (MOVVconst [c]))
// cond: is32Bit(c)
// result: (NORconst [c] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
if v_1.Op != OpMIPS64MOVVconst {
continue
}
c := auxIntToInt64(v_1.AuxInt)
if !(is32Bit(c)) {
continue
}
v.reset(OpMIPS64NORconst)
v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
break
}
return false
}
func rewriteValueMIPS64_OpMIPS64NORconst(v *Value) bool {
v_0 := v.Args[0]
// match: (NORconst [c] (MOVVconst [d]))
// result: (MOVVconst [^(c|d)])
for {
c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpMIPS64MOVVconst {
break
}
d := auxIntToInt64(v_0.AuxInt)
v.reset(OpMIPS64MOVVconst)
v.AuxInt = int64ToAuxInt(^(c | d))
return true
}
return false
}
func rewriteValueMIPS64_OpMIPS64OR(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
@@ -5305,18 +5262,6 @@ func rewriteValueMIPS64_OpMIPS64XORconst(v *Value) bool {
v.copyOf(x)
return true
}
// match: (XORconst [-1] x)
// result: (NORconst [0] x)
for {
if auxIntToInt64(v.AuxInt) != -1 {
break
}
x := v_0
v.reset(OpMIPS64NORconst)
v.AuxInt = int64ToAuxInt(0)
v.AddArg(x)
return true
}
// match: (XORconst [c] (MOVVconst [d]))
// result: (MOVVconst [c^d])
for {