Mirror of https://github.com/golang/go.git (synced 2026-01-29 07:02:05 +03:00)

Compare commits: cf0c42c2ca ... 0f72aff835 (9 commits)

0f72aff835
03a6a20740
ffb50fb716
d8d2b90a46
64e8b238a1
a977717393
8ca47fab42
2d1f571c6b
f532f87a98
@@ -118,6 +118,7 @@ func init() {
 	regCtxt := regNamed["X26"]
 	callerSave := gpMask | fpMask | regNamed["g"]
 	r5toR6 := regNamed["X5"] | regNamed["X6"]
+	regX5 := regNamed["X5"]

 	var (
 		gpstore = regInfo{inputs: []regMask{gpspsbMask, gpspMask, 0}} // SB in first input so we can load from a global, but not in second to avoid using SB as a temporary register
@@ -142,9 +143,13 @@ func init() {
 		fpload = regInfo{inputs: []regMask{gpspsbMask, 0}, outputs: []regMask{fpMask}}
 		fp2gp  = regInfo{inputs: []regMask{fpMask, fpMask}, outputs: []regMask{gpMask}}

-		call        = regInfo{clobbers: callerSave}
-		callClosure = regInfo{inputs: []regMask{gpspMask, regCtxt, 0}, clobbers: callerSave}
-		callInter   = regInfo{inputs: []regMask{gpMask}, clobbers: callerSave}
+		call = regInfo{clobbers: callerSave}
+		// Avoid using X5 as the source register of calls. Using X5 here triggers
+		// RAS pop-then-push behavior which is not correct for function calls.
+		// Please refer to section 2.5.1 of the RISC-V ISA
+		// (https://docs.riscv.org/reference/isa/unpriv/rv32.html#rashints) for details.
+		callClosure = regInfo{inputs: []regMask{gpspMask ^ regX5, regCtxt, 0}, clobbers: callerSave}
+		callInter   = regInfo{inputs: []regMask{gpMask ^ regX5}, clobbers: callerSave}
 	)

 	RISCV64ops := []opData{
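The `^ regX5` in the new callClosure/callInter inputs removes X5 from the registers the allocator may use for the call target, since a JALR whose source is X5 is treated as a return-address-stack pop-then-push hint. A minimal sketch of that mask arithmetic, assuming regMask is a bit set with one bit per register (the bit chosen for X5 below is illustrative, not the compiler's actual register numbering):

```go
package main

import "fmt"

// regMask is a bit set of allocatable registers (illustrative only).
type regMask uint64

func main() {
	var gpMask regMask = 0x3fffffff // hypothetical: all general-purpose registers
	regX5 := regMask(1) << 5        // hypothetical bit for X5

	// callClosure/callInter exclude X5 from the allowed input registers:
	// XOR clears the bit because it is known to be set in gpMask.
	allowed := gpMask ^ regX5
	fmt.Printf("gpMask  = %b\nallowed = %b (X5 cleared)\n", gpMask, allowed)
}
```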
@@ -80293,7 +80293,7 @@ var opcodeTable = [...]opInfo{
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 33554432},   // X26
-				{0, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+				{0, 1006632930}, // SP X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
 			},
 			clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
 		},
@@ -80305,7 +80305,7 @@ var opcodeTable = [...]opInfo{
 		call: true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+				{0, 1006632928}, // X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
 			},
 			clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
 		},
@@ -752,7 +752,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
 			return
 		}

-		if hasVarSize(x.typ, nil) {
+		if check.hasVarSize(x.typ) {
 			x.mode = value
 			if check.recordTypes() {
 				check.recordBuiltinType(call.Fun, makeSig(Typ[Uintptr], x.typ))
@@ -816,7 +816,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
 		// the part of the struct which is variable-sized. This makes both the rules
 		// simpler and also permits (or at least doesn't prevent) a compiler from re-
 		// arranging struct fields if it wanted to.
-		if hasVarSize(base, nil) {
+		if check.hasVarSize(base) {
 			x.mode = value
 			if check.recordTypes() {
 				check.recordBuiltinType(call.Fun, makeSig(Typ[Uintptr], obj.Type()))
@@ -840,7 +840,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
 			return
 		}

-		if hasVarSize(x.typ, nil) {
+		if check.hasVarSize(x.typ) {
 			x.mode = value
 			if check.recordTypes() {
 				check.recordBuiltinType(call.Fun, makeSig(Typ[Uintptr], x.typ))
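The branches above handle an unsafe.Alignof/Offsetof/Sizeof operand whose size depends on a type parameter: the builtin then yields an ordinary value of type uintptr rather than a constant. A small illustrative program (not part of the change):

```go
package p

import "unsafe"

// When the operand's size depends on a type parameter, unsafe.Sizeof is not a
// compile-time constant, so the checker records it with x.mode = value.
func size[T any](x T) uintptr {
	return unsafe.Sizeof(x)
}

// With a concrete operand the same expression is a constant.
const wordSize = unsafe.Sizeof(uintptr(0))
```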
@@ -1007,37 +1007,55 @@ func sliceElem(x *operand) (Type, *typeError) {
 // hasVarSize reports if the size of type t is variable due to type parameters
 // or if the type is infinitely-sized due to a cycle for which the type has not
 // yet been checked.
-func hasVarSize(t Type, seen map[*Named]bool) (varSized bool) {
-	// Cycles are only possible through *Named types.
-	// The seen map is used to detect cycles and track
-	// the results of previously seen types.
-	if named := asNamed(t); named != nil {
-		if v, ok := seen[named]; ok {
-			return v
+func (check *Checker) hasVarSize(t Type) bool {
+	// Note: We could use Underlying here, but passing through the RHS may yield
+	// better error messages.
+	switch t := Unalias(t).(type) {
+	case *Named:
+		if t.stateHas(hasVarSize) {
+			return t.varSize
 		}
-		if seen == nil {
-			seen = make(map[*Named]bool)
-		}
-		seen[named] = true // possibly cyclic until proven otherwise
-		defer func() {
-			seen[named] = varSized // record final determination for named
-		}()
-	}
-
-	switch u := t.Underlying().(type) {
+
+		if i, ok := check.objPathIdx[t.obj]; ok {
+			cycle := check.objPath[i:]
+			check.cycleError(cycle, firstInSrc(cycle))
+			return true
+		}
+
+		check.push(t.obj)
+		defer check.pop()
+
+		varSize := check.hasVarSize(t.fromRHS)
+
+		t.mu.Lock()
+		defer t.mu.Unlock()
+
+		// Careful, t.varSize has lock-free readers. Since we might be racing
+		// another call to hasVarSize, we have to avoid overwriting t.varSize.
+		// Otherwise, the race detector will be tripped.
+		if !t.stateHas(hasVarSize) {
+			t.varSize = varSize
+			t.setState(hasVarSize)
+		}
+
+		return varSize
+
 	case *Array:
-		return hasVarSize(u.elem, seen)
+		// The array length is already computed. If it was a valid length, it
+		// is constant; else, an error was reported in the computation.
+		return check.hasVarSize(t.elem)
+
 	case *Struct:
-		for _, f := range u.fields {
-			if hasVarSize(f.typ, seen) {
+		for _, f := range t.fields {
+			if check.hasVarSize(f.typ) {
 				return true
 			}
 		}
-	case *Interface:
-		return isTypeParam(t)
-	case *Named, *Union:
-		panic("unreachable")
+
+	case *TypeParam:
+		return true
 	}

 	return false
 }
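The new *Named case detects cycles through the checker's object path (objPathIdx/objPath) and caches the result on the type. Below is a standalone sketch of that path-based cycle detection, using deliberately simplified stand-in types rather than the real types2 API:

```go
package main

import "fmt"

// named is a simplified stand-in for a declared type: its size depends on the
// type on its right-hand side (nil means a fixed-size literal type).
type named struct {
	name string
	rhs  *named
}

type checker struct {
	path    []*named
	pathIdx map[*named]int
}

// hasVarSize keeps the objects currently being sized on a path; revisiting one
// of them means the size depends on itself, i.e. there is a cycle.
func (c *checker) hasVarSize(t *named) bool {
	if t == nil {
		return false // fixed-size leaf
	}
	if i, ok := c.pathIdx[t]; ok {
		fmt.Printf("cycle starting at %s: %v\n", t.name, c.path[i:])
		return true
	}
	c.pathIdx[t] = len(c.path)
	c.path = append(c.path, t)
	defer func() {
		c.path = c.path[:len(c.path)-1]
		delete(c.pathIdx, t)
	}()
	return c.hasVarSize(t.rhs)
}

func main() {
	a := &named{name: "A"}
	b := &named{name: "B", rhs: a}
	a.rhs = b // A -> B -> A
	c := &checker{pathIdx: map[*named]int{}}
	fmt.Println(c.hasVarSize(a)) // true: A's size depends on itself
}
```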
@@ -199,6 +199,11 @@ func (check *Checker) callExpr(x *operand, call *syntax.CallExpr) exprKind {
 		}
 		T := x.typ
 		x.mode = invalid
+		// We cannot convert a value to an incomplete type; make sure it's complete.
+		if !check.isComplete(T) {
+			x.expr = call
+			return conversion
+		}
 		switch n := len(call.ArgList); n {
 		case 0:
 			check.errorf(call, WrongArgCount, "missing argument in conversion to %s", T)
@@ -319,7 +324,14 @@ func (check *Checker) callExpr(x *operand, call *syntax.CallExpr) exprKind {
 		} else {
 			x.mode = value
 		}
-		x.typ = sig.results.vars[0].typ // unpack tuple
+		typ := sig.results.vars[0].typ // unpack tuple
+		// We cannot return a value of an incomplete type; make sure it's complete.
+		if !check.isComplete(typ) {
+			x.mode = invalid
+			x.expr = call
+			return statement
+		}
+		x.typ = typ
 	default:
 		x.mode = value
 		x.typ = sig.results
@@ -784,8 +796,12 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr, wantType bool
 		goto Error
 	}

-	// Avoid crashing when checking an invalid selector in a method declaration
-	// (i.e., where def is not set):
+	// We cannot select on an incomplete type; make sure it's complete.
+	if !check.isComplete(x.typ) {
+		goto Error
+	}
+
+	// Avoid crashing when checking an invalid selector in a method declaration.
 	//
 	// type S[T any] struct{}
 	// type V = S[any]
@@ -795,14 +811,17 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr, wantType bool
 	// expecting a type expression, it is an error.
 	//
 	// See go.dev/issue/57522 for more details.
-	//
-	// TODO(rfindley): We should do better by refusing to check selectors in all cases where
-	// x.typ is incomplete.
 	if wantType {
 		check.errorf(e.Sel, NotAType, "%s is not a type", syntax.Expr(e))
 		goto Error
 	}

+	// Additionally, if x.typ is a pointer type, selecting implicitly dereferences the value, meaning
+	// its base type must also be complete.
+	if p, ok := x.typ.Underlying().(*Pointer); ok && !check.isComplete(p.base) {
+		goto Error
+	}
+
 	obj, index, indirect = lookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, sel, false)
 	if obj == nil {
 		// Don't report another error if the underlying type was invalid (go.dev/issue/49541).
@@ -103,49 +103,25 @@ func (check *Checker) directCycle(tname *TypeName, pathIdx map[*TypeName]int) {
 	}
 }

-// finiteSize returns whether a type has finite size.
-func (check *Checker) finiteSize(t Type) bool {
-	switch t := Unalias(t).(type) {
-	case *Named:
-		if t.stateHas(hasFinite) {
-			return t.finite
-		}
-
-		if i, ok := check.objPathIdx[t.obj]; ok {
+// TODO(markfreeman): Can the value cached on Named be used in validType / hasVarSize?
+
+// isComplete returns whether a type is complete (i.e. up to having an underlying type).
+// Incomplete types will panic if [Type.Underlying] is called on them.
+func (check *Checker) isComplete(t Type) bool {
+	if n, ok := Unalias(t).(*Named); ok {
+		if i, found := check.objPathIdx[n.obj]; found {
 			cycle := check.objPath[i:]
 			check.cycleError(cycle, firstInSrc(cycle))
 			return false
 		}
-		check.push(t.obj)
-		defer check.pop()
-
-		isFinite := check.finiteSize(t.fromRHS)
-
-		t.mu.Lock()
-		defer t.mu.Unlock()
-		// Careful, t.finite has lock-free readers. Since we might be racing
-		// another call to finiteSize, we have to avoid overwriting t.finite.
-		// Otherwise, the race detector will be tripped.
-		if !t.stateHas(hasFinite) {
-			t.finite = isFinite
-			t.setState(hasFinite)
-		}
-
-		return isFinite
-
-	case *Array:
-		// The array length is already computed. If it was a valid length, it
-		// is finite; else, an error was reported in the computation.
-		return check.finiteSize(t.elem)
-
-	case *Struct:
-		for _, f := range t.fields {
-			if !check.finiteSize(f.typ) {
-				return false
-			}
-		}
+		// We must walk through names because we permit certain cycles of names.
+		// Consider:
+		//
+		// type A B
+		// type B [unsafe.Sizeof(A{})]int
+		//
+		// starting at B. At the site of A{}, A has no underlying type, and so a
+		// cycle must be reported.
+		return check.isComplete(n.fromRHS)
 	}

 	return true
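The comment's example, written out as a complete (and deliberately invalid) program; this is the shape of name cycle that isComplete has to walk through and reject:

```go
package p

import "unsafe"

// Starting at B, computing the array length requires unsafe.Sizeof(A{}); at
// that point A has no underlying type yet (it is still being declared via B),
// so the checker must report an invalid recursive type instead of panicking
// in Underlying.
type A B
type B [unsafe.Sizeof(A{})]int
```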
@@ -483,20 +483,6 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl) {
 	}

 	named := check.newNamed(obj, nil, nil)
-
-	// TODO: adjust this comment (gotypesalias) as needed if we don't need allowNilRHS anymore.
-	// The RHS of a named N can be nil if, for example, N is defined as a cycle of aliases with
-	// gotypesalias=0. Consider:
-	//
-	// type D N // N.unpack() will panic
-	// type N A
-	// type A = N // N.fromRHS is not set before N.unpack(), since A does not call setDefType
-	//
-	// There is likely a better way to detect such cases, but it may not be worth the effort.
-	// Instead, we briefly permit a nil N.fromRHS while type-checking D.
-	named.allowNilRHS = true
-	defer (func() { named.allowNilRHS = false })()
-
 	if tdecl.TParamList != nil {
 		check.openScope(tdecl, "type parameters")
 		defer check.closeScope()
@@ -148,7 +148,8 @@ func (check *Checker) unary(x *operand, e *syntax.Operation) {
 		return

 	case syntax.Recv:
-		if elem := check.chanElem(x, x, true); elem != nil {
+		// We cannot receive a value with an incomplete type; make sure it's complete.
+		if elem := check.chanElem(x, x, true); elem != nil && check.isComplete(elem) {
 			x.mode = commaok
 			x.typ = elem
 			check.hasCallOrRecv = true
@@ -993,13 +994,6 @@ func (check *Checker) rawExpr(T *target, x *operand, e syntax.Expr, hint Type, a
 		check.nonGeneric(T, x)
 	}

-	// Here, x is a value, meaning it has a type. If that type is pending, then we have
-	// a cycle. As an example:
-	//
-	// type T [unsafe.Sizeof(T{})]int
-	//
-	// has a cycle T->T which is deemed valid (by decl.go), but which is in fact invalid.
-	check.pendingType(x)
 	check.record(x)

 	return kind
@@ -1034,19 +1028,6 @@ func (check *Checker) nonGeneric(T *target, x *operand) {
 	}
 }

-// If x has a pending type (i.e. its declaring object is on the object path), pendingType
-// reports an error and invalidates x.mode and x.typ.
-// Otherwise it leaves x alone.
-func (check *Checker) pendingType(x *operand) {
-	if x.mode == invalid || x.mode == novalue {
-		return
-	}
-	if !check.finiteSize(x.typ) {
-		x.mode = invalid
-		x.typ = Typ[Invalid]
-	}
-}
-
 // exprInternal contains the core of type checking of expressions.
 // Must only be called by rawExpr.
 // (See rawExpr for an explanation of the parameters.)
@@ -1140,6 +1121,10 @@ func (check *Checker) exprInternal(T *target, x *operand, e syntax.Expr, hint Ty
 		if !isValid(T) {
 			goto Error
 		}
+		// We cannot assert to an incomplete type; make sure it's complete.
+		if !check.isComplete(T) {
+			goto Error
+		}
 		check.typeAssertion(e, x, T, false)
 		x.mode = commaok
 		x.typ = T
@@ -1207,6 +1192,10 @@ func (check *Checker) exprInternal(T *target, x *operand, e syntax.Expr, hint Ty
 			}) {
 				goto Error
 			}
+			// We cannot dereference a pointer with an incomplete base type; make sure it's complete.
+			if !check.isComplete(base) {
+				goto Error
+			}
 			x.mode = variable
 			x.typ = base
 		}
@@ -47,6 +47,28 @@ func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst boo
 		return false
 	}

+	// We cannot index on an incomplete type; make sure it's complete.
+	if !check.isComplete(x.typ) {
+		x.mode = invalid
+		return false
+	}
+	switch typ := x.typ.Underlying().(type) {
+	case *Pointer:
+		// Additionally, if x.typ is a pointer to an array type, indexing implicitly dereferences the value, meaning
+		// its base type must also be complete.
+		if !check.isComplete(typ.base) {
+			x.mode = invalid
+			return false
+		}
+	case *Map:
+		// Lastly, if x.typ is a map type, indexing must produce a value of a complete type, meaning
+		// its element type must also be complete.
+		if !check.isComplete(typ.elem) {
+			x.mode = invalid
+			return false
+		}
+	}
+
 	// ordinary index expression
 	valid := false
 	length := int64(-1) // valid if >= 0
@@ -251,6 +273,14 @@ func (check *Checker) sliceExpr(x *operand, e *syntax.SliceExpr) {
 		}
 	}

+	// Note that we don't permit slice expressions where x is a type expression, so we don't check for that here.
+	// However, if x.typ is a pointer to an array type, slicing implicitly dereferences the value, meaning
+	// its base type must also be complete.
+	if p, ok := x.typ.Underlying().(*Pointer); ok && !check.isComplete(p.base) {
+		x.mode = invalid
+		return
+	}
+
 	valid := false
 	length := int64(-1) // valid if >= 0
 	switch u := cu.(type) {
@@ -143,6 +143,12 @@ func (check *Checker) compositeLit(x *operand, e *syntax.CompositeLit, hint Type
 		base = typ
 	}

+	// We cannot create a literal of an incomplete type; make sure it's complete.
+	if !check.isComplete(base) {
+		x.mode = invalid
+		return
+	}
+
 	switch u, _ := commonUnder(base, nil); utyp := u.(type) {
 	case *Struct:
 		if len(e.ElemList) == 0 {
@@ -106,9 +106,7 @@ type Named struct {
 	check *Checker  // non-nil during type-checking; nil otherwise
 	obj   *TypeName // corresponding declared object for declared types; see above for instantiated types

-	// flags indicating temporary violations of the invariants for fromRHS and underlying
-	allowNilRHS        bool // same as below, as well as briefly during checking of a type declaration
-	allowNilUnderlying bool // may be true from creation via [NewNamed] until [Named.SetUnderlying]
+	allowNilRHS bool // may be true from creation via [NewNamed] until [Named.SetUnderlying]

 	inst *instance // information for instantiated types; nil otherwise

@@ -117,7 +115,7 @@ type Named struct {
 	fromRHS    Type           // the declaration RHS this type is derived from
 	tparams    *TypeParamList // type parameters, or nil
 	underlying Type           // underlying type, or nil
-	finite     bool           // whether the type has finite size
+	varSize    bool           // whether the type has variable size

 	// methods declared for this type (not the method set of this type)
 	// Signatures are type-checked lazily.
@@ -149,10 +147,10 @@ type instance struct {
 // unpacked
 // └── hasMethods
 // └── hasUnder
-// └── hasFinite
+// └── hasVarSize
 //
 // That is, descent down the tree is mostly linear (initial through unpacked), except upon
-// reaching the leaves (hasMethods, hasUnder, and hasFinite). A type may occupy any
+// reaching the leaves (hasMethods, hasUnder, and hasVarSize). A type may occupy any
 // combination of the leaf states at once (they are independent states).
 //
 // To represent this independence, the set of active states is represented with a bit set. State
@@ -166,7 +164,7 @@ type instance struct {
 // 11000 | unpacked, which implies lazyLoaded
 // 11100 | hasMethods, which implies unpacked (which in turn implies lazyLoaded)
 // 11010 | hasUnder, which implies unpacked ...
-// 11001 | hasFinite, which implies unpacked ...
+// 11001 | hasVarSize, which implies unpacked ...
 // 11110 | both hasMethods and hasUnder which implies unpacked ...
 // ...   | (other combinations of leaf states)
 //
@@ -179,7 +177,7 @@ const (
 	unpacked   // methods might be unexpanded (for instances)
 	hasMethods // methods are all expanded (for instances)
 	hasUnder   // underlying type is available
-	hasFinite  // size finiteness is available
+	hasVarSize // varSize is available
 )

 // NewNamed returns a new named type for the given type name, underlying type, and associated methods.
@@ -192,7 +190,6 @@ func NewNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
 	n := (*Checker)(nil).newNamed(obj, underlying, methods)
 	if underlying == nil {
 		n.allowNilRHS = true
-		n.allowNilUnderlying = true
 	} else {
 		n.SetUnderlying(underlying)
 	}
@@ -309,8 +306,8 @@ func (n *Named) setState(m stateMask) {
 	if m&hasUnder != 0 {
 		assert(u)
 	}
-	// hasFinite => unpacked
-	if m&hasFinite != 0 {
+	// hasVarSize => unpacked
+	if m&hasVarSize != 0 {
 		assert(u)
 	}
 }
@@ -532,7 +529,6 @@ func (t *Named) SetUnderlying(u Type) {
 	t.setState(lazyLoaded | unpacked | hasMethods) // TODO(markfreeman): Why hasMethods?

 	t.underlying = u
-	t.allowNilUnderlying = false
 	t.setState(hasUnder)
 }

@@ -594,9 +590,7 @@ func (n *Named) Underlying() Type {
 	// and complicating things there, we just check for that special case here.
 	if n.rhs() == nil {
 		assert(n.allowNilRHS)
-		if n.allowNilUnderlying {
-			return nil
-		}
+		return nil
 	}

 	if !n.stateHas(hasUnder) { // minor performance optimization
@@ -637,9 +631,6 @@ func (n *Named) resolveUnderlying() {
 	var u Type
 	for rhs := Type(n); u == nil; {
 		switch t := rhs.(type) {
-		case nil:
-			u = Typ[Invalid]
-
 		case *Alias:
 			rhs = unalias(t)

@@ -661,8 +652,8 @@ func (n *Named) resolveUnderlying() {
 			path = append(path, t)

 			t.unpack()
-			assert(t.rhs() != nil || t.allowNilRHS)
 			rhs = t.rhs()
+			assert(rhs != nil)

 		default:
 			u = rhs // any type literal or predeclared type works
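The leaf states referenced throughout this file (hasMethods, hasUnder, and now hasVarSize) are tracked in a bit set layered on top of the linear initial-to-unpacked progression. The sketch below illustrates the idea; the constants, their values, and the single assertion are simplified stand-ins for the real stateMask machinery:

```go
package main

import "fmt"

// stateMask is an illustrative bit set of type-checking states.
type stateMask uint8

const (
	lazyLoaded stateMask = 1 << iota
	unpacked
	hasMethods
	hasUnder
	hasVarSize
)

type namedState struct{ state stateMask }

func (n *namedState) stateHas(m stateMask) bool { return n.state&m == m }

func (n *namedState) setState(m stateMask) {
	// hasVarSize => unpacked, mirroring the assertion in setState above.
	if m&hasVarSize != 0 && !n.stateHas(unpacked) {
		panic("hasVarSize set before unpacked")
	}
	n.state |= m
}

func main() {
	var n namedState
	n.setState(lazyLoaded | unpacked)
	n.setState(hasUnder)
	n.setState(hasVarSize)
	// Leaf states are independent: hasUnder and hasVarSize can both be set
	// while hasMethods is not.
	fmt.Println(n.stateHas(hasUnder), n.stateHas(hasVarSize), n.stateHas(hasMethods))
}
```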
@@ -583,7 +583,7 @@ func xcoffUpdateOuterSize(ctxt *Link, size int64, stype sym.SymKind) {
 	switch stype {
 	default:
 		Errorf("unknown XCOFF outer symbol for type %s", stype.String())
-	case sym.SRODATA, sym.SFUNCTAB, sym.SSTRING:
+	case sym.SRODATA, sym.SRODATARELRO, sym.SFUNCTAB, sym.SSTRING:
 		// Nothing to do
 	case sym.STYPE:
 		if ctxt.UseRelro() && (ctxt.BuildMode == BuildModeCArchive || ctxt.BuildMode == BuildModeCShared || ctxt.BuildMode == BuildModePIE) {
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.

-//go:build (amd64 || arm64 || ppc64 || ppc64le || riscv64) && !purego
+//go:build (amd64 || arm64 || ppc64 || ppc64le) && !purego

 package subtle

src/crypto/internal/fips140/subtle/xor_riscv64.go (new file, 18 lines)
@@ -0,0 +1,18 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build riscv64 && !purego
+
+package subtle
+
+import (
+	"crypto/internal/fips140deps/cpu"
+)
+
+//go:noescape
+func xorBytesRISCV64(dst, a, b *byte, n int, hasV bool)
+
+func xorBytes(dst, a, b *byte, n int) {
+	xorBytesRISCV64(dst, a, b, n, cpu.RISCV64HasV)
+}
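For reference, the operation the assembly below implements is plain byte-wise XOR: dst[i] = a[i] ^ b[i] for i in [0, n). A portable sketch of that reference semantics (the vector and scalar paths are only faster ways to produce the same result):

```go
package main

import "fmt"

// xorBytesRef is the reference behavior of xorBytes: XOR a and b element-wise
// into dst. Slices are assumed to have equal length here for simplicity.
func xorBytesRef(dst, a, b []byte) {
	for i := range dst {
		dst[i] = a[i] ^ b[i]
	}
}

func main() {
	a := []byte{0x0f, 0xf0, 0xaa}
	b := []byte{0xff, 0x0f, 0x55}
	dst := make([]byte, len(a))
	xorBytesRef(dst, a, b)
	fmt.Printf("% x\n", dst) // f0 ff ff
}
```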
@@ -4,10 +4,12 @@

 //go:build !purego

+#include "asm_riscv64.h"
+#include "go_asm.h"
 #include "textflag.h"

-// func xorBytes(dst, a, b *byte, n int)
-TEXT ·xorBytes(SB), NOSPLIT|NOFRAME, $0
+// func xorBytesRISCV64(dst, a, b *byte, n int, hasV bool)
+TEXT ·xorBytesRISCV64(SB), NOSPLIT|NOFRAME, $0
 	MOV	dst+0(FP), X10
 	MOV	a+8(FP), X11
 	MOV	b+16(FP), X12
@@ -16,6 +18,35 @@ TEXT ·xorBytes(SB), NOSPLIT|NOFRAME, $0
 	MOV	$32, X15
 	BLT	X13, X15, loop4_check

+#ifndef hasV
+	MOVB	hasV+32(FP), X5
+	BEQZ	X5, xorbytes_scalar
+#endif
+
+	// Use vector if not 8 byte aligned.
+	OR	X10, X11, X5
+	AND	$7, X5
+	BNEZ	X5, vector_loop
+
+	// Use scalar if 8 byte aligned and <= 64 bytes.
+	SUB	$64, X12, X6
+	BLEZ	X6, loop64_check
+
+	PCALIGN	$16
+vector_loop:
+	VSETVLI	X13, E8, M8, TU, MU, X15
+	VLE8V	(X11), V8
+	VLE8V	(X12), V16
+	VXORVV	V8, V16, V24
+	VSE8V	V24, (X10)
+	ADD	X15, X10
+	ADD	X15, X11
+	ADD	X15, X12
+	SUB	X15, X13
+	BNEZ	X13, vector_loop
+	RET
+
+xorbytes_scalar:
 	// Check alignment - if alignment differs we have to do one byte at a time.
 	AND	$7, X10, X5
 	AND	$7, X11, X6
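The OR/AND $7 sequence above ("Use vector if not 8 byte aligned") tests two addresses at once: OR the pointers and check the low three bits. A small Go illustration, with addresses shown as plain integers:

```go
package main

import "fmt"

// bothAligned8 reports whether p and q are both 8-byte aligned: the OR of the
// addresses has no low three bits set exactly when each address individually
// is a multiple of 8.
func bothAligned8(p, q uintptr) bool {
	return (p|q)&7 == 0
}

func main() {
	fmt.Println(bothAligned8(0x1000, 0x2008)) // true: both multiples of 8
	fmt.Println(bothAligned8(0x1001, 0x2008)) // false: first is misaligned
}
```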
@@ -27,6 +27,8 @@ var (
 	LOONG64HasLSX  = cpu.Loong64.HasLSX
 	LOONG64HasLASX = cpu.Loong64.HasLASX

+	RISCV64HasV = cpu.RISCV64.HasV
+
 	S390XHasAES    = cpu.S390X.HasAES
 	S390XHasAESCBC = cpu.S390X.HasAESCBC
 	S390XHasAESCTR = cpu.S390X.HasAESCTR
@@ -755,7 +755,7 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b
|
||||
return
|
||||
}
|
||||
|
||||
if hasVarSize(x.typ, nil) {
|
||||
if check.hasVarSize(x.typ) {
|
||||
x.mode = value
|
||||
if check.recordTypes() {
|
||||
check.recordBuiltinType(call.Fun, makeSig(Typ[Uintptr], x.typ))
|
||||
@@ -819,7 +819,7 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b
|
||||
// the part of the struct which is variable-sized. This makes both the rules
|
||||
// simpler and also permits (or at least doesn't prevent) a compiler from re-
|
||||
// arranging struct fields if it wanted to.
|
||||
if hasVarSize(base, nil) {
|
||||
if check.hasVarSize(base) {
|
||||
x.mode = value
|
||||
if check.recordTypes() {
|
||||
check.recordBuiltinType(call.Fun, makeSig(Typ[Uintptr], obj.Type()))
|
||||
@@ -843,7 +843,7 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b
|
||||
return
|
||||
}
|
||||
|
||||
if hasVarSize(x.typ, nil) {
|
||||
if check.hasVarSize(x.typ) {
|
||||
x.mode = value
|
||||
if check.recordTypes() {
|
||||
check.recordBuiltinType(call.Fun, makeSig(Typ[Uintptr], x.typ))
|
||||
@@ -1010,37 +1010,55 @@ func sliceElem(x *operand) (Type, *typeError) {
|
||||
// hasVarSize reports if the size of type t is variable due to type parameters
|
||||
// or if the type is infinitely-sized due to a cycle for which the type has not
|
||||
// yet been checked.
|
||||
func hasVarSize(t Type, seen map[*Named]bool) (varSized bool) {
|
||||
// Cycles are only possible through *Named types.
|
||||
// The seen map is used to detect cycles and track
|
||||
// the results of previously seen types.
|
||||
if named := asNamed(t); named != nil {
|
||||
if v, ok := seen[named]; ok {
|
||||
return v
|
||||
func (check *Checker) hasVarSize(t Type) bool {
|
||||
// Note: We could use Underlying here, but passing through the RHS may yield
|
||||
// better error messages.
|
||||
switch t := Unalias(t).(type) {
|
||||
case *Named:
|
||||
if t.stateHas(hasVarSize) {
|
||||
return t.varSize
|
||||
}
|
||||
if seen == nil {
|
||||
seen = make(map[*Named]bool)
|
||||
}
|
||||
seen[named] = true // possibly cyclic until proven otherwise
|
||||
defer func() {
|
||||
seen[named] = varSized // record final determination for named
|
||||
}()
|
||||
}
|
||||
|
||||
switch u := t.Underlying().(type) {
|
||||
if i, ok := check.objPathIdx[t.obj]; ok {
|
||||
cycle := check.objPath[i:]
|
||||
check.cycleError(cycle, firstInSrc(cycle))
|
||||
return true
|
||||
}
|
||||
|
||||
check.push(t.obj)
|
||||
defer check.pop()
|
||||
|
||||
varSize := check.hasVarSize(t.fromRHS)
|
||||
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
|
||||
// Careful, t.varSize has lock-free readers. Since we might be racing
|
||||
// another call to hasVarSize, we have to avoid overwriting t.varSize.
|
||||
// Otherwise, the race detector will be tripped.
|
||||
if !t.stateHas(hasVarSize) {
|
||||
t.varSize = varSize
|
||||
t.setState(hasVarSize)
|
||||
}
|
||||
|
||||
return varSize
|
||||
|
||||
case *Array:
|
||||
return hasVarSize(u.elem, seen)
|
||||
// The array length is already computed. If it was a valid length, it
|
||||
// is constant; else, an error was reported in the computation.
|
||||
return check.hasVarSize(t.elem)
|
||||
|
||||
case *Struct:
|
||||
for _, f := range u.fields {
|
||||
if hasVarSize(f.typ, seen) {
|
||||
for _, f := range t.fields {
|
||||
if check.hasVarSize(f.typ) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
case *Interface:
|
||||
return isTypeParam(t)
|
||||
case *Named, *Union:
|
||||
panic("unreachable")
|
||||
|
||||
case *TypeParam:
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
|
||||
@@ -201,6 +201,11 @@ func (check *Checker) callExpr(x *operand, call *ast.CallExpr) exprKind {
|
||||
}
|
||||
T := x.typ
|
||||
x.mode = invalid
|
||||
// We cannot convert a value to an incomplete type; make sure it's complete.
|
||||
if !check.isComplete(T) {
|
||||
x.expr = call
|
||||
return conversion
|
||||
}
|
||||
switch n := len(call.Args); n {
|
||||
case 0:
|
||||
check.errorf(inNode(call, call.Rparen), WrongArgCount, "missing argument in conversion to %s", T)
|
||||
@@ -321,7 +326,14 @@ func (check *Checker) callExpr(x *operand, call *ast.CallExpr) exprKind {
|
||||
} else {
|
||||
x.mode = value
|
||||
}
|
||||
x.typ = sig.results.vars[0].typ // unpack tuple
|
||||
typ := sig.results.vars[0].typ // unpack tuple
|
||||
// We cannot return a value of an incomplete type; make sure it's complete.
|
||||
if !check.isComplete(typ) {
|
||||
x.mode = invalid
|
||||
x.expr = call
|
||||
return statement
|
||||
}
|
||||
x.typ = typ
|
||||
default:
|
||||
x.mode = value
|
||||
x.typ = sig.results
|
||||
@@ -787,8 +799,12 @@ func (check *Checker) selector(x *operand, e *ast.SelectorExpr, wantType bool) {
|
||||
goto Error
|
||||
}
|
||||
|
||||
// Avoid crashing when checking an invalid selector in a method declaration
|
||||
// (i.e., where def is not set):
|
||||
// We cannot select on an incomplete type; make sure it's complete.
|
||||
if !check.isComplete(x.typ) {
|
||||
goto Error
|
||||
}
|
||||
|
||||
// Avoid crashing when checking an invalid selector in a method declaration.
|
||||
//
|
||||
// type S[T any] struct{}
|
||||
// type V = S[any]
|
||||
@@ -798,14 +814,17 @@ func (check *Checker) selector(x *operand, e *ast.SelectorExpr, wantType bool) {
|
||||
// expecting a type expression, it is an error.
|
||||
//
|
||||
// See go.dev/issue/57522 for more details.
|
||||
//
|
||||
// TODO(rfindley): We should do better by refusing to check selectors in all cases where
|
||||
// x.typ is incomplete.
|
||||
if wantType {
|
||||
check.errorf(e.Sel, NotAType, "%s is not a type", ast.Expr(e))
|
||||
goto Error
|
||||
}
|
||||
|
||||
// Additionally, if x.typ is a pointer type, selecting implicitly dereferences the value, meaning
|
||||
// its base type must also be complete.
|
||||
if p, ok := x.typ.Underlying().(*Pointer); ok && !check.isComplete(p.base) {
|
||||
goto Error
|
||||
}
|
||||
|
||||
obj, index, indirect = lookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, sel, false)
|
||||
if obj == nil {
|
||||
// Don't report another error if the underlying type was invalid (go.dev/issue/49541).
|
||||
|
||||
@@ -106,49 +106,25 @@ func (check *Checker) directCycle(tname *TypeName, pathIdx map[*TypeName]int) {
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(markfreeman): Can the value cached on Named be used in validType / hasVarSize?
|
||||
|
||||
// finiteSize returns whether a type has finite size.
|
||||
func (check *Checker) finiteSize(t Type) bool {
|
||||
switch t := Unalias(t).(type) {
|
||||
case *Named:
|
||||
if t.stateHas(hasFinite) {
|
||||
return t.finite
|
||||
}
|
||||
|
||||
if i, ok := check.objPathIdx[t.obj]; ok {
|
||||
// isComplete returns whether a type is complete (i.e. up to having an underlying type).
|
||||
// Incomplete types will panic if [Type.Underlying] is called on them.
|
||||
func (check *Checker) isComplete(t Type) bool {
|
||||
if n, ok := Unalias(t).(*Named); ok {
|
||||
if i, found := check.objPathIdx[n.obj]; found {
|
||||
cycle := check.objPath[i:]
|
||||
check.cycleError(cycle, firstInSrc(cycle))
|
||||
return false
|
||||
}
|
||||
check.push(t.obj)
|
||||
defer check.pop()
|
||||
|
||||
isFinite := check.finiteSize(t.fromRHS)
|
||||
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
// Careful, t.finite has lock-free readers. Since we might be racing
|
||||
// another call to finiteSize, we have to avoid overwriting t.finite.
|
||||
// Otherwise, the race detector will be tripped.
|
||||
if !t.stateHas(hasFinite) {
|
||||
t.finite = isFinite
|
||||
t.setState(hasFinite)
|
||||
}
|
||||
|
||||
return isFinite
|
||||
|
||||
case *Array:
|
||||
// The array length is already computed. If it was a valid length, it
|
||||
// is finite; else, an error was reported in the computation.
|
||||
return check.finiteSize(t.elem)
|
||||
|
||||
case *Struct:
|
||||
for _, f := range t.fields {
|
||||
if !check.finiteSize(f.typ) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
// We must walk through names because we permit certain cycles of names.
|
||||
// Consider:
|
||||
//
|
||||
// type A B
|
||||
// type B [unsafe.Sizeof(A{})]int
|
||||
//
|
||||
// starting at B. At the site of A{}, A has no underlying type, and so a
|
||||
// cycle must be reported.
|
||||
return check.isComplete(n.fromRHS)
|
||||
}
|
||||
|
||||
return true
|
||||
|
||||
@@ -559,19 +559,6 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *ast.TypeSpec) {
|
||||
}
|
||||
|
||||
named := check.newNamed(obj, nil, nil)
|
||||
|
||||
// The RHS of a named N can be nil if, for example, N is defined as a cycle of aliases with
|
||||
// gotypesalias=0. Consider:
|
||||
//
|
||||
// type D N // N.unpack() will panic
|
||||
// type N A
|
||||
// type A = N // N.fromRHS is not set before N.unpack(), since A does not call setDefType
|
||||
//
|
||||
// There is likely a better way to detect such cases, but it may not be worth the effort.
|
||||
// Instead, we briefly permit a nil N.fromRHS while type-checking D.
|
||||
named.allowNilRHS = true
|
||||
defer (func() { named.allowNilRHS = false })()
|
||||
|
||||
if tdecl.TypeParams != nil {
|
||||
check.openScope(tdecl, "type parameters")
|
||||
defer check.closeScope()
|
||||
|
||||
@@ -147,7 +147,8 @@ func (check *Checker) unary(x *operand, e *ast.UnaryExpr) {
|
||||
return
|
||||
|
||||
case token.ARROW:
|
||||
if elem := check.chanElem(x, x, true); elem != nil {
|
||||
// We cannot receive a value with an incomplete type; make sure it's complete.
|
||||
if elem := check.chanElem(x, x, true); elem != nil && check.isComplete(elem) {
|
||||
x.mode = commaok
|
||||
x.typ = elem
|
||||
check.hasCallOrRecv = true
|
||||
@@ -985,13 +986,6 @@ func (check *Checker) rawExpr(T *target, x *operand, e ast.Expr, hint Type, allo
|
||||
check.nonGeneric(T, x)
|
||||
}
|
||||
|
||||
// Here, x is a value, meaning it has a type. If that type is pending, then we have
|
||||
// a cycle. As an example:
|
||||
//
|
||||
// type T [unsafe.Sizeof(T{})]int
|
||||
//
|
||||
// has a cycle T->T which is deemed valid (by decl.go), but which is in fact invalid.
|
||||
check.pendingType(x)
|
||||
check.record(x)
|
||||
|
||||
return kind
|
||||
@@ -1026,19 +1020,6 @@ func (check *Checker) nonGeneric(T *target, x *operand) {
|
||||
}
|
||||
}
|
||||
|
||||
// If x has a pending type (i.e. its declaring object is on the object path), pendingType
|
||||
// reports an error and invalidates x.mode and x.typ.
|
||||
// Otherwise it leaves x alone.
|
||||
func (check *Checker) pendingType(x *operand) {
|
||||
if x.mode == invalid || x.mode == novalue {
|
||||
return
|
||||
}
|
||||
if !check.finiteSize(x.typ) {
|
||||
x.mode = invalid
|
||||
x.typ = Typ[Invalid]
|
||||
}
|
||||
}
|
||||
|
||||
// exprInternal contains the core of type checking of expressions.
|
||||
// Must only be called by rawExpr.
|
||||
// (See rawExpr for an explanation of the parameters.)
|
||||
@@ -1129,6 +1110,10 @@ func (check *Checker) exprInternal(T *target, x *operand, e ast.Expr, hint Type)
|
||||
if !isValid(T) {
|
||||
goto Error
|
||||
}
|
||||
// We cannot assert to an incomplete type; make sure it's complete.
|
||||
if !check.isComplete(T) {
|
||||
goto Error
|
||||
}
|
||||
check.typeAssertion(e, x, T, false)
|
||||
x.mode = commaok
|
||||
x.typ = T
|
||||
@@ -1161,6 +1146,10 @@ func (check *Checker) exprInternal(T *target, x *operand, e ast.Expr, hint Type)
|
||||
}) {
|
||||
goto Error
|
||||
}
|
||||
// We cannot dereference a pointer with an incomplete base type; make sure it's complete.
|
||||
if !check.isComplete(base) {
|
||||
goto Error
|
||||
}
|
||||
x.mode = variable
|
||||
x.typ = base
|
||||
}
|
||||
|
||||
@@ -48,6 +48,28 @@ func (check *Checker) indexExpr(x *operand, e *indexedExpr) (isFuncInst bool) {
|
||||
return false
|
||||
}
|
||||
|
||||
// We cannot index on an incomplete type; make sure it's complete.
|
||||
if !check.isComplete(x.typ) {
|
||||
x.mode = invalid
|
||||
return false
|
||||
}
|
||||
switch typ := x.typ.Underlying().(type) {
|
||||
case *Pointer:
|
||||
// Additionally, if x.typ is a pointer to an array type, indexing implicitly dereferences the value, meaning
|
||||
// its base type must also be complete.
|
||||
if !check.isComplete(typ.base) {
|
||||
x.mode = invalid
|
||||
return false
|
||||
}
|
||||
case *Map:
|
||||
// Lastly, if x.typ is a map type, indexing must produce a value of a complete type, meaning
|
||||
// its element type must also be complete.
|
||||
if !check.isComplete(typ.elem) {
|
||||
x.mode = invalid
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// ordinary index expression
|
||||
valid := false
|
||||
length := int64(-1) // valid if >= 0
|
||||
@@ -256,6 +278,14 @@ func (check *Checker) sliceExpr(x *operand, e *ast.SliceExpr) {
|
||||
}
|
||||
}
|
||||
|
||||
// Note that we don't permit slice expressions where x is a type expression, so we don't check for that here.
|
||||
// However, if x.typ is a pointer to an array type, slicing implicitly dereferences the value, meaning
|
||||
// its base type must also be complete.
|
||||
if p, ok := x.typ.Underlying().(*Pointer); ok && !check.isComplete(p.base) {
|
||||
x.mode = invalid
|
||||
return
|
||||
}
|
||||
|
||||
valid := false
|
||||
length := int64(-1) // valid if >= 0
|
||||
switch u := cu.(type) {
|
||||
|
||||
@@ -147,6 +147,12 @@ func (check *Checker) compositeLit(x *operand, e *ast.CompositeLit, hint Type) {
|
||||
base = typ
|
||||
}
|
||||
|
||||
// We cannot create a literal of an incomplete type; make sure it's complete.
|
||||
if !check.isComplete(base) {
|
||||
x.mode = invalid
|
||||
return
|
||||
}
|
||||
|
||||
switch u, _ := commonUnder(base, nil); utyp := u.(type) {
|
||||
case *Struct:
|
||||
if len(e.Elts) == 0 {
|
||||
|
||||
@@ -109,9 +109,7 @@ type Named struct {
|
||||
check *Checker // non-nil during type-checking; nil otherwise
|
||||
obj *TypeName // corresponding declared object for declared types; see above for instantiated types
|
||||
|
||||
// flags indicating temporary violations of the invariants for fromRHS and underlying
|
||||
allowNilRHS bool // same as below, as well as briefly during checking of a type declaration
|
||||
allowNilUnderlying bool // may be true from creation via [NewNamed] until [Named.SetUnderlying]
|
||||
allowNilRHS bool // may be true from creation via [NewNamed] until [Named.SetUnderlying]
|
||||
|
||||
inst *instance // information for instantiated types; nil otherwise
|
||||
|
||||
@@ -120,7 +118,7 @@ type Named struct {
|
||||
fromRHS Type // the declaration RHS this type is derived from
|
||||
tparams *TypeParamList // type parameters, or nil
|
||||
underlying Type // underlying type, or nil
|
||||
finite bool // whether the type has finite size
|
||||
varSize bool // whether the type has variable size
|
||||
|
||||
// methods declared for this type (not the method set of this type)
|
||||
// Signatures are type-checked lazily.
|
||||
@@ -152,10 +150,10 @@ type instance struct {
|
||||
// unpacked
|
||||
// └── hasMethods
|
||||
// └── hasUnder
|
||||
// └── hasFinite
|
||||
// └── hasVarSize
|
||||
//
|
||||
// That is, descent down the tree is mostly linear (initial through unpacked), except upon
|
||||
// reaching the leaves (hasMethods, hasUnder, and hasFinite). A type may occupy any
|
||||
// reaching the leaves (hasMethods, hasUnder, and hasVarSize). A type may occupy any
|
||||
// combination of the leaf states at once (they are independent states).
|
||||
//
|
||||
// To represent this independence, the set of active states is represented with a bit set. State
|
||||
@@ -169,7 +167,7 @@ type instance struct {
|
||||
// 11000 | unpacked, which implies lazyLoaded
|
||||
// 11100 | hasMethods, which implies unpacked (which in turn implies lazyLoaded)
|
||||
// 11010 | hasUnder, which implies unpacked ...
|
||||
// 11001 | hasFinite, which implies unpacked ...
|
||||
// 11001 | hasVarSize, which implies unpacked ...
|
||||
// 11110 | both hasMethods and hasUnder which implies unpacked ...
|
||||
// ... | (other combinations of leaf states)
|
||||
//
|
||||
@@ -182,7 +180,7 @@ const (
|
||||
unpacked // methods might be unexpanded (for instances)
|
||||
hasMethods // methods are all expanded (for instances)
|
||||
hasUnder // underlying type is available
|
||||
hasFinite // size finiteness is available
|
||||
hasVarSize // varSize is available
|
||||
)
|
||||
|
||||
// NewNamed returns a new named type for the given type name, underlying type, and associated methods.
|
||||
@@ -195,7 +193,6 @@ func NewNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
|
||||
n := (*Checker)(nil).newNamed(obj, underlying, methods)
|
||||
if underlying == nil {
|
||||
n.allowNilRHS = true
|
||||
n.allowNilUnderlying = true
|
||||
} else {
|
||||
n.SetUnderlying(underlying)
|
||||
}
|
||||
@@ -312,8 +309,8 @@ func (n *Named) setState(m stateMask) {
|
||||
if m&hasUnder != 0 {
|
||||
assert(u)
|
||||
}
|
||||
// hasFinite => unpacked
|
||||
if m&hasFinite != 0 {
|
||||
// hasVarSize => unpacked
|
||||
if m&hasVarSize != 0 {
|
||||
assert(u)
|
||||
}
|
||||
}
|
||||
@@ -535,7 +532,6 @@ func (t *Named) SetUnderlying(u Type) {
|
||||
t.setState(lazyLoaded | unpacked | hasMethods) // TODO(markfreeman): Why hasMethods?
|
||||
|
||||
t.underlying = u
|
||||
t.allowNilUnderlying = false
|
||||
t.setState(hasUnder)
|
||||
}
|
||||
|
||||
@@ -597,9 +593,7 @@ func (n *Named) Underlying() Type {
|
||||
// and complicating things there, we just check for that special case here.
|
||||
if n.rhs() == nil {
|
||||
assert(n.allowNilRHS)
|
||||
if n.allowNilUnderlying {
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if !n.stateHas(hasUnder) { // minor performance optimization
|
||||
@@ -640,9 +634,6 @@ func (n *Named) resolveUnderlying() {
|
||||
var u Type
|
||||
for rhs := Type(n); u == nil; {
|
||||
switch t := rhs.(type) {
|
||||
case nil:
|
||||
u = Typ[Invalid]
|
||||
|
||||
case *Alias:
|
||||
rhs = unalias(t)
|
||||
|
||||
@@ -664,8 +655,8 @@ func (n *Named) resolveUnderlying() {
|
||||
path = append(path, t)
|
||||
|
||||
t.unpack()
|
||||
assert(t.rhs() != nil || t.allowNilRHS)
|
||||
rhs = t.rhs()
|
||||
assert(rhs != nil)
|
||||
|
||||
default:
|
||||
u = rhs // any type literal or predeclared type works
|
||||
|
||||
(File diff suppressed because it is too large.)
@@ -63,7 +63,7 @@ func genExpanders(file *gen.File) {
 		xf := int(ob) / 8
 		log.Printf("size class %d bytes, expansion %dx", ob, xf)

-		fn := gen.NewFunc(fmt.Sprintf("expandAVX512_%d<>", xf))
+		fn := gen.NewFunc(fmt.Sprintf("·expandAVX512_%d<>", xf))
 		ptrObjBits := gen.Arg[gen.Ptr[gen.Uint8x64]](fn)

 		if xf == 1 {
src/internal/types/testdata/check/cycles6.go (vendored, 2 additions)
@@ -69,3 +69,5 @@ type T11 /* ERROR "invalid recursive type" */ [unsafe.Sizeof(new(T11)[:])]int
 type T12 /* ERROR "invalid recursive type" */ [unsafe.Sizeof(T12{}[42])]int
 // index on pointer (case 3)
 type T13 /* ERROR "invalid recursive type" */ [unsafe.Sizeof(new(T13)[42])]int
+// index on map (case 1)
+type T14 /* ERROR "invalid recursive type" */ [unsafe.Sizeof((*new(map[int]T14))[42])]int
@@ -18,4 +18,4 @@ func _[P any]() {
 	_ = unsafe.Sizeof(struct{ T[P] }{})
 }

-const _ = unsafe.Sizeof(T /* ERROR "invalid recursive type" */ [int]{})
+const _ = unsafe /* ERROR "not constant" */ .Sizeof(T /* ERROR "invalid recursive type" */ [int]{})
@@ -6,7 +6,7 @@ package p

 import "unsafe"

-type A /* ERROR "invalid recursive type" */ [unsafe.Sizeof(S{})]byte
+type A /* ERROR "invalid recursive type" */ [unsafe/* ERROR "must be constant" */.Sizeof(S{})]byte

 type S struct {
 	a A
@@ -15,7 +15,7 @@ func f() D {
 }
 type D C

-type E /* ERROR "invalid recursive type" */ [unsafe.Sizeof(g[F]())]int
+type E /* ERROR "invalid recursive type" */ [unsafe/* ERROR "must be constant" */.Sizeof(g[F]())]int
 func g[P any]() P {
 	panic(0)
 }
@@ -954,7 +954,7 @@ TEXT runtime·memhash(SB),NOSPLIT,$0-16
|
||||
MOVL p+0(FP), AX // ptr to data
|
||||
MOVL s+8(FP), BX // size
|
||||
LEAL ret+12(FP), DX
|
||||
JMP aeshashbody<>(SB)
|
||||
JMP runtime·aeshashbody<>(SB)
|
||||
noaes:
|
||||
JMP runtime·memhashFallback(SB)
|
||||
|
||||
@@ -965,14 +965,14 @@ TEXT runtime·strhash(SB),NOSPLIT,$0-12
|
||||
MOVL 4(AX), BX // length of string
|
||||
MOVL (AX), AX // string data
|
||||
LEAL ret+8(FP), DX
|
||||
JMP aeshashbody<>(SB)
|
||||
JMP runtime·aeshashbody<>(SB)
|
||||
noaes:
|
||||
JMP runtime·strhashFallback(SB)
|
||||
|
||||
// AX: data
|
||||
// BX: length
|
||||
// DX: address to put return value
|
||||
TEXT aeshashbody<>(SB),NOSPLIT,$0-0
|
||||
TEXT runtime·aeshashbody<>(SB),NOSPLIT,$0-0
|
||||
MOVL h+4(FP), X0 // 32 bits of per-table hash seed
|
||||
PINSRW $4, BX, X0 // 16 bits of length
|
||||
PSHUFHW $0, X0, X0 // replace size with its low 2 bytes repeated 4 times
|
||||
|
||||
@@ -1286,7 +1286,7 @@ TEXT runtime·memhash<ABIInternal>(SB),NOSPLIT,$0-32
|
||||
// CX = size
|
||||
CMPB runtime·useAeshash(SB), $0
|
||||
JEQ noaes
|
||||
JMP aeshashbody<>(SB)
|
||||
JMP runtime·aeshashbody<>(SB)
|
||||
noaes:
|
||||
JMP runtime·memhashFallback<ABIInternal>(SB)
|
||||
|
||||
@@ -1298,7 +1298,7 @@ TEXT runtime·strhash<ABIInternal>(SB),NOSPLIT,$0-24
|
||||
JEQ noaes
|
||||
MOVQ 8(AX), CX // length of string
|
||||
MOVQ (AX), AX // string data
|
||||
JMP aeshashbody<>(SB)
|
||||
JMP runtime·aeshashbody<>(SB)
|
||||
noaes:
|
||||
JMP runtime·strhashFallback<ABIInternal>(SB)
|
||||
|
||||
@@ -1306,7 +1306,7 @@ noaes:
|
||||
// BX: hash seed
|
||||
// CX: length
|
||||
// At return: AX = return value
|
||||
TEXT aeshashbody<>(SB),NOSPLIT,$0-0
|
||||
TEXT runtime·aeshashbody<>(SB),NOSPLIT,$0-0
|
||||
// Fill an SSE register with our seeds.
|
||||
MOVQ BX, X0 // 64 bits of per-table hash seed
|
||||
PINSRW $4, CX, X0 // 16 bits of length
|
||||
|
||||
@@ -742,7 +742,7 @@ noaes:
|
||||
TEXT runtime·memhash<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-32
|
||||
MOVB runtime·useAeshash(SB), R10
|
||||
CBZ R10, noaes
|
||||
B aeshashbody<>(SB)
|
||||
B runtime·aeshashbody<>(SB)
|
||||
noaes:
|
||||
B runtime·memhashFallback<ABIInternal>(SB)
|
||||
|
||||
@@ -751,7 +751,7 @@ TEXT runtime·strhash<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-24
|
||||
MOVB runtime·useAeshash(SB), R10
|
||||
CBZ R10, noaes
|
||||
LDP (R0), (R0, R2) // string data / length
|
||||
B aeshashbody<>(SB)
|
||||
B runtime·aeshashbody<>(SB)
|
||||
noaes:
|
||||
B runtime·strhashFallback<ABIInternal>(SB)
|
||||
|
||||
@@ -759,7 +759,7 @@ noaes:
|
||||
// R1: seed data
|
||||
// R2: length
|
||||
// At return, R0 = return value
|
||||
TEXT aeshashbody<>(SB),NOSPLIT|NOFRAME,$0
|
||||
TEXT runtime·aeshashbody<>(SB),NOSPLIT|NOFRAME,$0
|
||||
VEOR V30.B16, V30.B16, V30.B16
|
||||
VMOV R1, V30.D[0]
|
||||
VMOV R2, V30.D[1] // load length into seed
|
||||
|
||||
@@ -44,15 +44,15 @@ TEXT _rt0_riscv64_lib(SB),NOSPLIT,$224
|
||||
MOV A1, _rt0_riscv64_lib_argv<>(SB)
|
||||
|
||||
// Synchronous initialization.
|
||||
MOV $runtime·libpreinit(SB), T0
|
||||
JALR RA, T0
|
||||
MOV $runtime·libpreinit(SB), T1
|
||||
JALR RA, T1
|
||||
|
||||
// Create a new thread to do the runtime initialization and return.
|
||||
MOV _cgo_sys_thread_create(SB), T0
|
||||
BEQZ T0, nocgo
|
||||
MOV _cgo_sys_thread_create(SB), T1
|
||||
BEQZ T1, nocgo
|
||||
MOV $_rt0_riscv64_lib_go(SB), A0
|
||||
MOV $0, A1
|
||||
JALR RA, T0
|
||||
JALR RA, T1
|
||||
JMP restore
|
||||
|
||||
nocgo:
|
||||
@@ -60,8 +60,8 @@ nocgo:
|
||||
MOV $_rt0_riscv64_lib_go(SB), A1
|
||||
MOV A0, 8(X2)
|
||||
MOV A1, 16(X2)
|
||||
MOV $runtime·newosproc0(SB), T0
|
||||
JALR RA, T0
|
||||
MOV $runtime·newosproc0(SB), T1
|
||||
JALR RA, T1
|
||||
|
||||
restore:
|
||||
// Restore callee-save registers, along with X1 (LR).
|
||||
@@ -122,14 +122,14 @@ TEXT runtime·rt0_go(SB),NOSPLIT|TOPFRAME,$0
|
||||
MOV X2, (g_stack+stack_hi)(g)
|
||||
|
||||
// if there is a _cgo_init, call it using the gcc ABI.
|
||||
MOV _cgo_init(SB), T0
|
||||
BEQ T0, ZERO, nocgo
|
||||
MOV _cgo_init(SB), T2
|
||||
BEQ T2, ZERO, nocgo
|
||||
|
||||
MOV ZERO, A3 // arg 3: not used
|
||||
MOV ZERO, A2 // arg 2: not used
|
||||
MOV $setg_gcc<>(SB), A1 // arg 1: setg
|
||||
MOV g, A0 // arg 0: G
|
||||
JALR RA, T0
|
||||
JALR RA, T2
|
||||
|
||||
nocgo:
|
||||
// update stackguard after _cgo_init
|
||||
@@ -421,9 +421,9 @@ TEXT gosave_systemstack_switch<>(SB),NOSPLIT|NOFRAME,$0
|
||||
// Call fn(arg) aligned appropriately for the gcc ABI.
|
||||
// Called on a system stack, and there may be no g yet (during needm).
|
||||
TEXT ·asmcgocall_no_g(SB),NOSPLIT,$0-16
|
||||
MOV fn+0(FP), X5
|
||||
MOV fn+0(FP), X11
|
||||
MOV arg+8(FP), X10
|
||||
JALR RA, (X5)
|
||||
JALR RA, (X11)
|
||||
RET
|
||||
|
||||
// func asmcgocall(fn, arg unsafe.Pointer) int32
|
||||
@@ -431,7 +431,7 @@ TEXT ·asmcgocall_no_g(SB),NOSPLIT,$0-16
|
||||
// aligned appropriately for the gcc ABI.
|
||||
// See cgocall.go for more details.
|
||||
TEXT ·asmcgocall(SB),NOSPLIT,$0-20
|
||||
MOV fn+0(FP), X5
|
||||
MOV fn+0(FP), X11
|
||||
MOV arg+8(FP), X10
|
||||
|
||||
MOV X2, X8 // save original stack pointer
|
||||
@@ -461,7 +461,7 @@ g0:
|
||||
SUB X8, X9, X8
|
||||
MOV X8, 8(X2) // save depth in old g stack (can't just save SP, as stack might be copied during a callback)
|
||||
|
||||
JALR RA, (X5)
|
||||
JALR RA, (X11)
|
||||
|
||||
// Restore g, stack pointer. X10 is return value.
|
||||
MOV 0(X2), g
|
||||
|
||||
@@ -27,7 +27,7 @@
|
||||
// Called from instrumented code.
|
||||
TEXT runtime·raceread<ABIInternal>(SB), NOSPLIT, $0-8
|
||||
// void __tsan_read(ThreadState *thr, void *addr, void *pc);
|
||||
MOV $__tsan_read(SB), X5
|
||||
MOV $__tsan_read(SB), X23
|
||||
MOV X10, X11
|
||||
MOV X1, X12
|
||||
JMP racecalladdr<>(SB)
|
||||
@@ -40,7 +40,7 @@ TEXT runtime·RaceRead(SB), NOSPLIT, $0-8
|
||||
// func runtime·racereadpc(void *addr, void *callpc, void *pc)
|
||||
TEXT runtime·racereadpc(SB), NOSPLIT, $0-24
|
||||
// void __tsan_read_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
|
||||
MOV $__tsan_read_pc(SB), X5
|
||||
MOV $__tsan_read_pc(SB), X23
|
||||
MOV addr+0(FP), X11
|
||||
MOV callpc+8(FP), X12
|
||||
MOV pc+16(FP), X13
|
||||
@@ -50,7 +50,7 @@ TEXT runtime·racereadpc(SB), NOSPLIT, $0-24
|
||||
// Called from instrumented code.
|
||||
TEXT runtime·racewrite<ABIInternal>(SB), NOSPLIT, $0-8
|
||||
// void __tsan_write(ThreadState *thr, void *addr, void *pc);
|
||||
MOV $__tsan_write(SB), X5
|
||||
MOV $__tsan_write(SB), X23
|
||||
MOV X10, X11
|
||||
MOV X1, X12
|
||||
JMP racecalladdr<>(SB)
|
||||
@@ -63,7 +63,7 @@ TEXT runtime·RaceWrite(SB), NOSPLIT, $0-8
|
||||
// func runtime·racewritepc(void *addr, void *callpc, void *pc)
|
||||
TEXT runtime·racewritepc(SB), NOSPLIT, $0-24
|
||||
// void __tsan_write_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
|
||||
MOV $__tsan_write_pc(SB), X5
|
||||
MOV $__tsan_write_pc(SB), X23
|
||||
MOV addr+0(FP), X11
|
||||
MOV callpc+8(FP), X12
|
||||
MOV pc+16(FP), X13
|
||||
@@ -73,7 +73,7 @@ TEXT runtime·racewritepc(SB), NOSPLIT, $0-24
|
||||
// Called from instrumented code.
|
||||
TEXT runtime·racereadrange<ABIInternal>(SB), NOSPLIT, $0-16
|
||||
// void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
|
||||
MOV $__tsan_read_range(SB), X5
|
||||
MOV $__tsan_read_range(SB), X23
|
||||
MOV X11, X12
|
||||
MOV X10, X11
|
||||
MOV X1, X13
|
||||
@@ -87,7 +87,7 @@ TEXT runtime·RaceReadRange(SB), NOSPLIT, $0-16
|
||||
// func runtime·racereadrangepc1(void *addr, uintptr sz, void *pc)
|
||||
TEXT runtime·racereadrangepc1(SB), NOSPLIT, $0-24
|
||||
// void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
|
||||
MOV $__tsan_read_range(SB), X5
|
||||
MOV $__tsan_read_range(SB), X23
|
||||
MOV addr+0(FP), X11
|
||||
MOV size+8(FP), X12
|
||||
MOV pc+16(FP), X13
|
||||
@@ -101,7 +101,7 @@ TEXT runtime·racereadrangepc1(SB), NOSPLIT, $0-24
|
||||
// Called from instrumented code.
|
||||
TEXT runtime·racewriterange<ABIInternal>(SB), NOSPLIT, $0-16
|
||||
// void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
|
||||
MOV $__tsan_write_range(SB), X5
|
||||
MOV $__tsan_write_range(SB), X23
|
||||
MOV X11, X12
|
||||
MOV X10, X11
|
||||
MOV X1, X13
|
||||
@@ -115,7 +115,7 @@ TEXT runtime·RaceWriteRange(SB), NOSPLIT, $0-16
|
||||
// func runtime·racewriterangepc1(void *addr, uintptr sz, void *pc)
|
||||
TEXT runtime·racewriterangepc1(SB), NOSPLIT, $0-24
|
||||
// void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
|
||||
MOV $__tsan_write_range(SB), X5
|
||||
MOV $__tsan_write_range(SB), X23
|
||||
MOV addr+0(FP), X11
|
||||
MOV size+8(FP), X12
|
||||
MOV pc+16(FP), X13
|
||||
@@ -145,7 +145,7 @@ ret:
|
||||
// func runtime·racefuncenter(pc uintptr)
|
||||
// Called from instrumented code.
|
||||
TEXT runtime·racefuncenter<ABIInternal>(SB), NOSPLIT, $0-8
|
||||
MOV $__tsan_func_enter(SB), X5
|
||||
MOV $__tsan_func_enter(SB), X23
|
||||
MOV X10, X11
|
||||
MOV g_racectx(g), X10
|
||||
JMP racecall<>(SB)
|
||||
@@ -154,7 +154,7 @@ TEXT runtime·racefuncenter<ABIInternal>(SB), NOSPLIT, $0-8
|
||||
// X1 = caller's return address
|
||||
TEXT racefuncenter<>(SB), NOSPLIT, $0-0
|
||||
// void __tsan_func_enter(ThreadState *thr, void *pc);
|
||||
MOV $__tsan_func_enter(SB), X5
|
||||
MOV $__tsan_func_enter(SB), X23
|
||||
MOV g_racectx(g), X10
|
||||
MOV X1, X11
|
||||
JMP racecall<>(SB)
|
||||
@@ -163,7 +163,7 @@ TEXT racefuncenter<>(SB), NOSPLIT, $0-0
|
||||
// Called from instrumented code.
|
||||
TEXT runtime·racefuncexit<ABIInternal>(SB), NOSPLIT, $0-0
|
||||
// void __tsan_func_exit(ThreadState *thr);
|
||||
MOV $__tsan_func_exit(SB), X5
|
||||
MOV $__tsan_func_exit(SB), X23
|
||||
MOV g_racectx(g), X10
|
||||
JMP racecall<>(SB)
|
||||
|
||||
@@ -173,13 +173,13 @@ TEXT runtime·racefuncexit<ABIInternal>(SB), NOSPLIT, $0-0
|
||||
|
||||
TEXT sync∕atomic·LoadInt32(SB), NOSPLIT, $0-12
|
||||
GO_ARGS
|
||||
MOV $__tsan_go_atomic32_load(SB), X5
|
||||
MOV $__tsan_go_atomic32_load(SB), X23
|
||||
CALL racecallatomic<>(SB)
|
||||
RET
|
||||
|
||||
TEXT sync∕atomic·LoadInt64(SB), NOSPLIT, $0-16
|
||||
GO_ARGS
|
||||
MOV $__tsan_go_atomic64_load(SB), X5
|
||||
MOV $__tsan_go_atomic64_load(SB), X23
|
||||
CALL racecallatomic<>(SB)
|
||||
RET
|
||||
|
||||
@@ -203,13 +203,13 @@ TEXT sync∕atomic·LoadPointer(SB), NOSPLIT, $0-16
|
||||
|
||||
TEXT sync∕atomic·StoreInt32(SB), NOSPLIT, $0-12
|
||||
GO_ARGS
|
||||
MOV $__tsan_go_atomic32_store(SB), X5
|
||||
MOV $__tsan_go_atomic32_store(SB), X23
|
||||
CALL racecallatomic<>(SB)
|
||||
RET
|
||||
|
||||
TEXT sync∕atomic·StoreInt64(SB), NOSPLIT, $0-16
|
||||
GO_ARGS
|
||||
MOV $__tsan_go_atomic64_store(SB), X5
|
||||
MOV $__tsan_go_atomic64_store(SB), X23
|
||||
CALL racecallatomic<>(SB)
|
||||
RET
|
||||
|
||||
@@ -229,13 +229,13 @@ TEXT sync∕atomic·StoreUintptr(SB), NOSPLIT, $0-16
|
||||
|
||||
TEXT sync∕atomic·SwapInt32(SB), NOSPLIT, $0-20
|
||||
GO_ARGS
|
||||
MOV $__tsan_go_atomic32_exchange(SB), X5
|
||||
MOV $__tsan_go_atomic32_exchange(SB), X23
|
||||
CALL racecallatomic<>(SB)
|
||||
RET
|
||||
|
||||
TEXT sync∕atomic·SwapInt64(SB), NOSPLIT, $0-24
|
||||
GO_ARGS
|
||||
MOV $__tsan_go_atomic64_exchange(SB), X5
|
||||
MOV $__tsan_go_atomic64_exchange(SB), X23
|
||||
CALL racecallatomic<>(SB)
|
||||
RET
|
||||
|
||||
@@ -255,7 +255,7 @@ TEXT sync∕atomic·SwapUintptr(SB), NOSPLIT, $0-24

TEXT sync∕atomic·AddInt32(SB), NOSPLIT, $0-20
GO_ARGS
MOV $__tsan_go_atomic32_fetch_add(SB), X5
MOV $__tsan_go_atomic32_fetch_add(SB), X23
CALL racecallatomic<>(SB)
// TSan performed fetch_add, but Go needs add_fetch.
MOVW add+8(FP), X5
@@ -266,7 +266,7 @@ TEXT sync∕atomic·AddInt32(SB), NOSPLIT, $0-20

TEXT sync∕atomic·AddInt64(SB), NOSPLIT, $0-24
GO_ARGS
MOV $__tsan_go_atomic64_fetch_add(SB), X5
MOV $__tsan_go_atomic64_fetch_add(SB), X23
CALL racecallatomic<>(SB)
// TSan performed fetch_add, but Go needs add_fetch.
MOV add+8(FP), X5
@@ -290,13 +290,13 @@ TEXT sync∕atomic·AddUintptr(SB), NOSPLIT, $0-24
// And
TEXT sync∕atomic·AndInt32(SB), NOSPLIT, $0-20
GO_ARGS
MOV $__tsan_go_atomic32_fetch_and(SB), X5
MOV $__tsan_go_atomic32_fetch_and(SB), X23
CALL racecallatomic<>(SB)
RET

TEXT sync∕atomic·AndInt64(SB), NOSPLIT, $0-24
GO_ARGS
MOV $__tsan_go_atomic64_fetch_and(SB), X5
MOV $__tsan_go_atomic64_fetch_and(SB), X23
CALL racecallatomic<>(SB)
RET

@@ -315,13 +315,13 @@ TEXT sync∕atomic·AndUintptr(SB), NOSPLIT, $0-24
// Or
TEXT sync∕atomic·OrInt32(SB), NOSPLIT, $0-20
GO_ARGS
MOV $__tsan_go_atomic32_fetch_or(SB), X5
MOV $__tsan_go_atomic32_fetch_or(SB), X23
CALL racecallatomic<>(SB)
RET

TEXT sync∕atomic·OrInt64(SB), NOSPLIT, $0-24
GO_ARGS
MOV $__tsan_go_atomic64_fetch_or(SB), X5
MOV $__tsan_go_atomic64_fetch_or(SB), X23
CALL racecallatomic<>(SB)
RET

@@ -341,13 +341,13 @@ TEXT sync∕atomic·OrUintptr(SB), NOSPLIT, $0-24

TEXT sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT, $0-17
GO_ARGS
MOV $__tsan_go_atomic32_compare_exchange(SB), X5
MOV $__tsan_go_atomic32_compare_exchange(SB), X23
CALL racecallatomic<>(SB)
RET

TEXT sync∕atomic·CompareAndSwapInt64(SB), NOSPLIT, $0-25
GO_ARGS
MOV $__tsan_go_atomic64_compare_exchange(SB), X5
MOV $__tsan_go_atomic64_compare_exchange(SB), X23
CALL racecallatomic<>(SB)
RET

@@ -364,7 +364,7 @@ TEXT sync∕atomic·CompareAndSwapUintptr(SB), NOSPLIT, $0-25
JMP sync∕atomic·CompareAndSwapInt64(SB)

// Generic atomic operation implementation.
// X5 = addr of target function
// X23 = addr of target function
TEXT racecallatomic<>(SB), NOSPLIT, $0
// Set up these registers
// X10 = *ThreadState
@@ -398,11 +398,11 @@ racecallatomic_ignore:
// Call __tsan_go_ignore_sync_begin to ignore synchronization during the atomic op.
// An attempt to synchronize on the address would cause crash.
MOV X1, X20 // save PC
MOV X5, X21 // save target function
MOV $__tsan_go_ignore_sync_begin(SB), X5
MOV X23, X21 // save target function
MOV $__tsan_go_ignore_sync_begin(SB), X23
MOV g_racectx(g), X10 // goroutine context
CALL racecall<>(SB)
MOV X21, X5 // restore the target function
MOV X21, X23 // restore the target function
// Call the atomic function.
MOV g_racectx(g), X10 // goroutine context
MOV 8(X2), X11 // caller pc
@@ -410,7 +410,7 @@ racecallatomic_ignore:
ADD $24, X2, X13 // arguments
CALL racecall<>(SB)
// Call __tsan_go_ignore_sync_end.
MOV $__tsan_go_ignore_sync_end(SB), X5
MOV $__tsan_go_ignore_sync_end(SB), X23
MOV g_racectx(g), X10 // goroutine context
CALL racecall<>(SB)
RET
@@ -420,14 +420,14 @@ racecallatomic_ignore:
// The arguments are never heap-object-preserving pointers, so we pretend there
// are no arguments.
TEXT runtime·racecall(SB), NOSPLIT, $0-0
MOV fn+0(FP), X5
MOV fn+0(FP), X23
MOV arg0+8(FP), X10
MOV arg1+16(FP), X11
MOV arg2+24(FP), X12
MOV arg3+32(FP), X13
JMP racecall<>(SB)

// Switches SP to g0 stack and calls X5. Arguments are already set.
// Switches SP to g0 stack and calls X23. Arguments are already set.
TEXT racecall<>(SB), NOSPLIT|NOFRAME, $0-0
MOV X1, X18 // Save RA in callee save register
MOV X2, X19 // Save SP in callee save register
@@ -443,7 +443,7 @@ TEXT racecall<>(SB), NOSPLIT|NOFRAME, $0-0

MOV (g_sched+gobuf_sp)(X7), X2 // Switch to g0 stack
call:
JALR RA, (X5) // Call C function
JALR RA, (X23) // Call C function
MOV X19, X2 // Restore SP
JMP (X18) // Return to Go.

@@ -458,7 +458,7 @@ TEXT runtime·racecallbackthunk(SB), NOSPLIT|NOFRAME, $0
// can be executed on g0. Second, it is called frequently, so will
// benefit from this fast path.
BNEZ X10, rest
MOV X1, X5
MOV X1, X23
MOV g, X6
CALL runtime·load_g(SB)
MOV g_m(g), X7
@@ -466,7 +466,7 @@ TEXT runtime·racecallbackthunk(SB), NOSPLIT|NOFRAME, $0
MOV p_raceprocctx(X7), X7
MOV X7, (X11)
MOV X6, g
JMP (X5)
JMP (X23)
rest:
// Save callee-save registers (X8, X9, X18..X27, F8, F9, F18..F27),
// since Go code will not respect this.

@@ -89,8 +89,8 @@ TEXT runtime·sigfwd(SB),NOSPLIT,$0-32
MOVW sig+8(FP), X10
MOV info+16(FP), X11
MOV ctx+24(FP), X12
MOV fn+0(FP), X5
JALR X1, X5
MOV fn+0(FP), X6
JALR X1, X6
RET

TEXT runtime·sigtramp(SB),NOSPLIT|TOPFRAME,$224
@@ -127,8 +127,8 @@ TEXT runtime·sigtramp(SB),NOSPLIT|TOPFRAME,$224
MOVW X10, 8(X2)
MOV X11, 16(X2)
MOV X12, 24(X2)
MOV $runtime·sigtrampgo(SB), X5
JALR X1, X5
MOV $runtime·sigtrampgo(SB), X6
JALR X1, X6

// Restore callee-save registers.
MOV (4*8)(X2), X8
@@ -458,13 +458,13 @@ TEXT runtime·issetugid_trampoline(SB),NOSPLIT,$0
TEXT runtime·syscall(SB),NOSPLIT,$8
MOV X10, X9 // pointer to args

MOV (0*8)(X9), X5 // fn
MOV (0*8)(X9), X6 // fn
MOV (1*8)(X9), X10 // a1
MOV (2*8)(X9), X11 // a2
MOV (3*8)(X9), X12 // a3
MOV $0, X13 // vararg

JALR X1, X5
JALR X1, X6

MOV X10, (4*8)(X9) // r1
MOV X11, (5*8)(X9) // r2
@@ -502,13 +502,13 @@ ok:
TEXT runtime·syscallX(SB),NOSPLIT,$8
MOV X10, X9 // pointer to args

MOV (0*8)(X9), X5 // fn
MOV (0*8)(X9), X6 // fn
MOV (1*8)(X9), X10 // a1
MOV (2*8)(X9), X11 // a2
MOV (3*8)(X9), X12 // a3
MOV $0, X13 // vararg

JALR X1, X5
JALR X1, X6

MOV X10, (4*8)(X9) // r1
MOV X11, (5*8)(X9) // r2
@@ -548,7 +548,7 @@ ok:
TEXT runtime·syscall6(SB),NOSPLIT,$8
MOV X10, X9 // pointer to args

MOV (0*8)(X9), X5 // fn
MOV (0*8)(X9), X6 // fn
MOV (1*8)(X9), X10 // a1
MOV (2*8)(X9), X11 // a2
MOV (3*8)(X9), X12 // a3
@@ -557,7 +557,7 @@ TEXT runtime·syscall6(SB),NOSPLIT,$8
MOV (6*8)(X9), X15 // a6
MOV $0, X16 // vararg

JALR X1, X5
JALR X1, X6

MOV X10, (7*8)(X9) // r1
MOV X11, (8*8)(X9) // r2
@@ -598,7 +598,7 @@ ok:
TEXT runtime·syscall6X(SB),NOSPLIT,$8
MOV X10, X9 // pointer to args

MOV (0*8)(X9), X5 // fn
MOV (0*8)(X9), X6 // fn
MOV (1*8)(X9), X10 // a1
MOV (2*8)(X9), X11 // a2
MOV (3*8)(X9), X12 // a3
@@ -607,7 +607,7 @@ TEXT runtime·syscall6X(SB),NOSPLIT,$8
MOV (6*8)(X9), X15 // a6
MOV $0, X16 // vararg

JALR X1, X5
JALR X1, X6

MOV X10, (7*8)(X9) // r1
MOV X11, (8*8)(X9) // r2
@@ -652,7 +652,7 @@ TEXT runtime·syscall10(SB),NOSPLIT,$0

ADD $-16, X2

MOV (0*8)(X9), X5 // fn
MOV (0*8)(X9), X6 // fn
MOV (1*8)(X9), X10 // a1
MOV (2*8)(X9), X11 // a2
MOV (3*8)(X9), X12 // a3
@@ -662,7 +662,7 @@ TEXT runtime·syscall10(SB),NOSPLIT,$0
MOV (7*8)(X9), X16 // a7
MOV (8*8)(X9), X17 // a8

JALR X1, X5
JALR X1, X6

MOV X10, (11*8)(X9) // r1
MOV X11, (12*8)(X9) // r2
@@ -712,7 +712,7 @@ TEXT runtime·syscall10X(SB),NOSPLIT,$0

ADD $-16, X2

MOV (0*8)(X9), X5 // fn
MOV (0*8)(X9), X6 // fn
MOV (1*8)(X9), X10 // a1
MOV (2*8)(X9), X11 // a2
MOV (3*8)(X9), X12 // a3
@@ -722,7 +722,7 @@ TEXT runtime·syscall10X(SB),NOSPLIT,$0
MOV (7*8)(X9), X16 // a7
MOV (8*8)(X9), X17 // a8

JALR X1, X5
JALR X1, X6

MOV X10, (11*8)(X9) // r1
MOV X11, (12*8)(X9) // r2

@@ -95,7 +95,7 @@
- go: SHA256Message1
commutative: false
documentation: !string |-
// NAME does the sigma and addtion of 1 in SHA1 algorithm defined in FIPS 180-4.
// NAME does the sigma and addition of 1 in SHA1 algorithm defined in FIPS 180-4.
// x = {W0, W1, W2, W3}
// y = {W4, 0, 0, 0}
// result = {W0+σ(W1), W1+σ(W2), W2+σ(W3), W3+σ(W4)}
@@ -105,4 +105,4 @@
// NAME does the sigma and addition of 3 in SHA1 algorithm defined in FIPS 180-4.
// x = result of 2
// y = {0, 0, W14, W15}
// result = {W16, W17, W18, W19}
// result = {W16, W17, W18, W19}