mirror of https://github.com/golang/go.git
synced 2026-01-29 15:12:08 +03:00
Compare commits
159 Commits
SHA1:
752b009010 a79ea27e36 78d89b2b67 58e77ad9b9 891ac91e5c 7450117243 d8392e6997 ae5913347d
30d8550669 efb7cc4275 0bd1a2289d 140b37d659 6d229889d8 63992defa8 bf80213b12 20586c0dbe
be5b52bea6 3643147a29 3a588774a5 263c059b09 99e44c71f6 6d31b27150 f38fca30a7 b214108e72
f997dfd33a f29208030a 2fdad8af6d 01c93ad049 db74bfba18 916e6cddf1 6552f3d4ac 3960318b87
00f974eb1f 2f91c16e68 2540b1436f 7e34c4308f 491c1e7e95 caafb50c0d cc85462b3d d2cb140194
368e2a9461 8c6078adfb f6b203c828 43818206dc cf65d74bc5 ad1ec60a5b bbab863ada 7dc67e8f29
9f8b3ac8c4 8c840b10d0 6018ad99a4 ec8c526e4b f7a79cb5fc 3684d19c20 9e43850a32 8caf4bb3e7
23c943e529 4952f41180 f26fa05522 8ae493b5b8 b9f245b8d3 caacf3a09a 1e91861f67 ed817f1c40
9e933c189c 434af8537e 7b04d81cbc f9a31cda3c 64b6c48107 ef6993f327 bae01521f3 236c07c049
9465990e0e 883f062fc0 24ae2d9272 26b5783b72 2ddfc04d12 a15ef1bb0f 41d71a5afa 0b6b0a275a
cd671a1180 fc57cc31a0 9bec49cf52 3ef4f939c3 556e9c36ba b64dc5f499 cd66ca0636 d7a0626806
2c1e5b05fe bbd043ff0d b0e1d3ea26 d25a935574 e3ba569c78 8dc6ad1c61 06df3292a8 b120517ffd
0a9582163c 91a4e74b98 6385a6fb18 2d07bb86f0 745b81b6e6 13339c75b8 2977709875 2d4746f37b
2b8026f025 7c97cc7d97 cb6ea94996 45b98bfb79 bac083a584 70aa116c4a 31c5a236bc 25ec110e51
6634ce2f41 25c6dce188 4e34f2e81d d91843ff67 7437db1085 ed527ecfb2 b78e8cc145 3475e6af4c
179821c9e1 9398951479 75d8be5fb4 1755d14559 c19c4c566c e973d24261 2e6276df34 aeef93cd64
35de5f2b0e a3b092d65e 07c72a0915 041dd5ce05 a51957fb0b 363f2594aa 9b53b9b585 4a14d9c9af
9786164333 6df6e61cbb b25266c58d 1ea8d38517 b2ffc23a82 8472fcb62d b36e5555dd ed977e2f47
2fabb143d7 c9f01f0ec7 252f20b2c1 7ee7a21ef2 06a9034b60 03c7e96be9 c2de6836c1 4aeac326b5
9480b4adf9 cc0cb3020d d8117459c5 ebbff91f59 1c1c82432a b4a0665266 577e7b9bb9
@@ -60,7 +60,9 @@ pkg crypto/tls, method (*QUICConn) Close() error #44886
pkg crypto/tls, method (*QUICConn) ConnectionState() ConnectionState #44886
pkg crypto/tls, method (*QUICConn) HandleData(QUICEncryptionLevel, []uint8) error #44886
pkg crypto/tls, method (*QUICConn) NextEvent() QUICEvent #44886
pkg crypto/tls, method (*QUICConn) SendSessionTicket(bool) error #60107
pkg crypto/tls, method (*QUICConn) SendSessionTicket(QUICSessionTicketOptions) error #60107
pkg crypto/tls, type QUICSessionTicketOptions struct #60107
pkg crypto/tls, type QUICSessionTicketOptions struct, EarlyData bool #60107
pkg crypto/tls, method (*QUICConn) SetTransportParameters([]uint8) #44886
pkg crypto/tls, method (*QUICConn) Start(context.Context) error #44886
pkg crypto/tls, method (QUICEncryptionLevel) String() string #44886
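For illustration only (this sketch is not part of the API listing; the helper name is hypothetical), the QUICSessionTicketOptions form of SendSessionTicket shown above might be used like this on a server-side connection after the handshake has completed:

package quicexample

import "crypto/tls"

// sendTicket issues a session ticket that permits 0-RTT early data.
func sendTicket(conn *tls.QUICConn) error {
	return conn.SendSessionTicket(tls.QUICSessionTicketOptions{
		EarlyData: true,
	})
}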
@@ -344,8 +346,6 @@ pkg maps, func Copy[$0 interface{ ~map[$2]$3 }, $1 interface{ ~map[$2]$3 }, $2 c
pkg maps, func DeleteFunc[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0, func($1, $2) bool) #57436
pkg maps, func Equal[$0 interface{ ~map[$2]$3 }, $1 interface{ ~map[$2]$3 }, $2 comparable, $3 comparable]($0, $1) bool #57436
pkg maps, func EqualFunc[$0 interface{ ~map[$2]$3 }, $1 interface{ ~map[$2]$4 }, $2 comparable, $3 interface{}, $4 interface{}]($0, $1, func($3, $4) bool) bool #57436
pkg maps, func Keys[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0) []$1 #57436
pkg maps, func Values[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0) []$2 #57436
pkg math/big, method (*Int) Float64() (float64, Accuracy) #56984
pkg net/http, method (*ProtocolError) Is(error) bool #41198
pkg net/http, method (*ResponseController) EnableFullDuplex() error #57786
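A small usage sketch (not part of the listing) for two of the maps functions shown above:

package main

import (
	"fmt"
	"maps"
)

func main() {
	m := map[string]int{"a": 1, "b": 2}
	maps.DeleteFunc(m, func(k string, v int) bool { return v > 1 }) // removes "b"
	fmt.Println(maps.Equal(m, map[string]int{"a": 1}))              // true
}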
@@ -1 +1,2 @@
branch: master
branch: release-branch.go1.21
parent-branch: master
1264 doc/go1.21.html — file diff suppressed because it is too large
756 doc/go_spec.html
@@ -1,6 +1,6 @@
<!--{
"Title": "The Go Programming Language Specification",
"Subtitle": "Version of June 14, 2023",
"Subtitle": "Version of Aug 2, 2023",
"Path": "/ref/spec"
}-->
@@ -2511,7 +2511,7 @@ type (
<p>
A type definition creates a new, distinct type with the same
<a href="#Types">underlying type</a> and operations as the given type
<a href="#Underlying_types">underlying type</a> and operations as the given type
and binds an identifier, the <i>type name</i>, to it.
</p>
@@ -4343,7 +4343,7 @@ type parameter list type arguments after substitution
When using a generic function, type arguments may be provided explicitly,
or they may be partially or completely <a href="#Type_inference">inferred</a>
from the context in which the function is used.
Provided that they can be inferred, type arguments may be omitted entirely if the function is:
Provided that they can be inferred, type argument lists may be omitted entirely if the function is:
</p>

<ul>
@@ -4351,7 +4351,7 @@ Provided that they can be inferred, type arguments may be omitted entirely if th
<a href="#Calls">called</a> with ordinary arguments,
</li>
<li>
<a href="#Assignment_statements">assigned</a> to a variable with an explicitly declared type,
<a href="#Assignment_statements">assigned</a> to a variable with a known type
</li>
<li>
<a href="#Calls">passed as an argument</a> to another function, or
@@ -4371,7 +4371,7 @@ must be inferrable from the context in which the function is used.
// sum returns the sum (concatenation, for strings) of its arguments.
func sum[T ~int | ~float64 | ~string](x... T) T { … }

x := sum                  // illegal: sum must have a type argument (x is a variable without a declared type)
x := sum                  // illegal: the type of x is unknown
intSum := sum[int]        // intSum has type func(x... int) int
a := intSum(2, 3)         // a has value 5 of type int
b := sum[float64](2.0, 3) // b has value 5.0 of type float64
||||
@@ -4406,402 +4406,323 @@ For a generic type, all type arguments must always be provided explicitly.
|
||||
<h3 id="Type_inference">Type inference</h3>
|
||||
|
||||
<p>
|
||||
<em>NOTE: This section is not yet up-to-date for Go 1.21.</em>
|
||||
A use of a generic function may omit some or all type arguments if they can be
|
||||
<i>inferred</i> from the context within which the function is used, including
|
||||
the constraints of the function's type parameters.
|
||||
Type inference succeeds if it can infer the missing type arguments
|
||||
and <a href="#Instantiations">instantiation</a> succeeds with the
|
||||
inferred type arguments.
|
||||
Otherwise, type inference fails and the program is invalid.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Missing function type arguments may be <i>inferred</i> by a series of steps, described below.
|
||||
Each step attempts to use known information to infer additional type arguments.
|
||||
Type inference stops as soon as all type arguments are known.
|
||||
After type inference is complete, it is still necessary to substitute all type arguments
|
||||
for type parameters and verify that each type argument
|
||||
<a href="#Implementing_an_interface">implements</a> the relevant constraint;
|
||||
it is possible for an inferred type argument to fail to implement a constraint, in which
|
||||
case instantiation fails.
|
||||
Type inference uses the type relationships between pairs of types for inference:
|
||||
For instance, a function argument must be <a href="#Assignability">assignable</a>
|
||||
to its respective function parameter; this establishes a relationship between the
|
||||
type of the argument and the type of the parameter.
|
||||
If either of these two types contains type parameters, type inference looks for the
|
||||
type arguments to substitute the type parameters with such that the assignability
|
||||
relationship is satisfied.
|
||||
Similarly, type inference uses the fact that a type argument must
|
||||
<a href="#Satisfying_a_type_constraint">satisfy</a> the constraint of its respective
|
||||
type parameter.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Type inference is based on
|
||||
Each such pair of matched types corresponds to a <i>type equation</i> containing
|
||||
one or multiple type parameters, from one or possibly multiple generic functions.
|
||||
Inferring the missing type arguments means solving the resulting set of type
|
||||
equations for the respective type parameters.
|
||||
</p>
|
||||
|
||||
<p>
For example, given
</p>

<pre>
// dedup returns a copy of the argument slice with any duplicate entries removed.
func dedup[S ~[]E, E comparable](S) S { … }

type Slice []int
var s Slice
s = dedup(s)   // same as s = dedup[Slice, int](s)
</pre>
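The body of dedup is elided in the spec; a minimal runnable sketch consistent with its signature might look like this (only the signature and the final call come from the text above, the implementation is an assumption):

package main

func dedup[S ~[]E, E comparable](s S) S {
	seen := make(map[E]bool)
	out := make(S, 0, len(s))
	for _, v := range s {
		if !seen[v] {
			seen[v] = true
			out = append(out, v)
		}
	}
	return out
}

type Slice []int

func main() {
	var s Slice
	s = dedup(s) // same as s = dedup[Slice, int](s): S ➞ Slice, E ➞ int
}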
|
||||
|
||||
<p>
|
||||
the variable <code>s</code> of type <code>Slice</code> must be assignable to
|
||||
the function parameter type <code>S</code> for the program to be valid.
|
||||
To reduce complexity, type inference ignores the directionality of assignments,
|
||||
so the type relationship between <code>Slice</code> and <code>S</code> can be
|
||||
expressed via the (symmetric) type equation <code>Slice ≡<sub>A</sub> S</code>
|
||||
(or <code>S ≡<sub>A</sub> Slice</code> for that matter),
|
||||
where the <code><sub>A</sub></code> in <code>≡<sub>A</sub></code>
|
||||
indicates that the LHS and RHS types must match per assignability rules
|
||||
(see the section on <a href="#Type_unification">type unification</a> for
|
||||
details).
|
||||
Similarly, the type parameter <code>S</code> must satisfy its constraint
|
||||
<code>~[]E</code>. This can be expressed as <code>S ≡<sub>C</sub> ~[]E</code>
|
||||
where <code>X ≡<sub>C</sub> Y</code> stands for
|
||||
"<code>X</code> satisfies constraint <code>Y</code>".
|
||||
These observations lead to a set of two equations
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
Slice ≡<sub>A</sub> S (1)
|
||||
S ≡<sub>C</sub> ~[]E (2)
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
which now can be solved for the type parameters <code>S</code> and <code>E</code>.
|
||||
From (1) a compiler can infer that the type argument for <code>S</code> is <code>Slice</code>.
|
||||
Similarly, because the underlying type of <code>Slice</code> is <code>[]int</code>
|
||||
and <code>[]int</code> must match <code>[]E</code> of the constraint,
|
||||
a compiler can infer that <code>E</code> must be <code>int</code>.
|
||||
Thus, for these two equations, type inference infers
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
S ➞ Slice
|
||||
E ➞ int
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
Given a set of type equations, the type parameters to solve for are
|
||||
the type parameters of the functions that need to be instantiated
|
||||
and for which no explicit type arguments is provided.
|
||||
These type parameters are called <i>bound</i> type parameters.
|
||||
For instance, in the <code>dedup</code> example above, the type parameters
|
||||
<code>P</code> and <code>E</code> are bound to <code>dedup</code>.
|
||||
An argument to a generic function call may be a generic function itself.
|
||||
The type parameters of that function are included in the set of bound
|
||||
type parameters.
|
||||
The types of function arguments may contain type parameters from other
|
||||
functions (such as a generic function enclosing a function call).
|
||||
Those type parameters may also appear in type equations but they are
|
||||
not bound in that context.
|
||||
Type equations are always solved for the bound type parameters only.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Type inference supports calls of generic functions and assignments
|
||||
of generic functions to (explicitly function-typed) variables.
|
||||
This includes passing generic functions as arguments to other
|
||||
(possibly also generic) functions, and returning generic functions
|
||||
as results.
|
||||
Type inference operates on a set of equations specific to each of
|
||||
these cases.
|
||||
The equations are as follows (type argument lists are omitted for clarity):
|
||||
</p>
|
||||
|
||||
<ul>
|
||||
<li>
|
||||
a <a href="#Type_parameter_declarations">type parameter list</a>
|
||||
<p>
|
||||
For a function call <code>f(a<sub>0</sub>, a<sub>1</sub>, …)</code> where
|
||||
<code>f</code> or a function argument <code>a<sub>i</sub></code> is
|
||||
a generic function:
|
||||
<br>
|
||||
Each pair <code>(a<sub>i</sub>, p<sub>i</sub>)</code> of corresponding
|
||||
function arguments and parameters where <code>a<sub>i</sub></code> is not an
|
||||
<a href="#Constants">untyped constant</a> yields an equation
|
||||
<code>typeof(p<sub>i</sub>) ≡<sub>A</sub> typeof(a<sub>i</sub>)</code>.
|
||||
<br>
|
||||
If <code>a<sub>i</sub></code> is an untyped constant <code>c<sub>j</sub></code>,
|
||||
and <code>typeof(p<sub>i</sub>)</code> is a bound type parameter <code>P<sub>k</sub></code>,
|
||||
the pair <code>(c<sub>j</sub>, P<sub>k</sub>)</code> is collected separately from
|
||||
the type equations.
|
||||
</p>
|
||||
</li>
|
||||
<li>
|
||||
a substitution map <i>M</i> initialized with the known type arguments, if any
|
||||
<p>
|
||||
For an assignment <code>v = f</code> of a generic function <code>f</code> to a
|
||||
(non-generic) variable <code>v</code> of function type:
|
||||
<br>
|
||||
<code>typeof(v) ≡<sub>A</sub> typeof(f)</code>.
|
||||
</p>
|
||||
</li>
|
||||
<li>
|
||||
a (possibly empty) list of ordinary function arguments (in case of a function call only)
|
||||
<p>
|
||||
For a return statement <code>return …, f, … </code> where <code>f</code> is a
|
||||
generic function returned as a result to a (non-generic) result variable
|
||||
<code>r</code> of function type:
|
||||
<br>
|
||||
<code>typeof(r) ≡<sub>A</sub> typeof(f)</code>.
|
||||
</p>
|
||||
</li>
|
||||
</ul>
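To make the three cases above concrete, here is a small hedged example (the names apply and double are hypothetical): a generic function called with ordinary arguments, passed as an argument to another generic function, and assigned to a variable of a known function type.

package main

func apply[E any](s []E, f func(E) E) []E {
	out := make([]E, len(s))
	for i, v := range s {
		out[i] = f(v)
	}
	return out
}

func double[N ~int | ~float64](n N) N { return n + n }

func main() {
	_ = apply([]int{1, 2, 3}, double)     // call: E ➞ int, and N ➞ int for the argument double
	var v func(float64) float64 = double  // assignment to a variable of known type: N ➞ float64
	_ = v
	// A return statement returning double to a result variable of a declared
	// function type is handled the same way as the assignment case.
}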
|
||||
|
||||
<p>
|
||||
and then proceeds with the following steps:
|
||||
Additionally, each type parameter <code>P<sub>k</sub></code> and corresponding type constraint
|
||||
<code>C<sub>k</sub></code> yields the type equation
|
||||
<code>P<sub>k</sub> ≡<sub>C</sub> C<sub>k</sub></code>.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Type inference gives precedence to type information obtained from typed operands
|
||||
before considering untyped constants.
|
||||
Therefore, inference proceeds in two phases:
|
||||
</p>
|
||||
|
||||
<ol>
|
||||
<li>
|
||||
apply <a href="#Function_argument_type_inference"><i>function argument type inference</i></a>
|
||||
to all <i>typed</i> ordinary function arguments
|
||||
<p>
|
||||
The type equations are solved for the bound
|
||||
type parameters using <a href="#Type_unification">type unification</a>.
|
||||
If unification fails, type inference fails.
|
||||
</p>
|
||||
</li>
|
||||
<li>
|
||||
apply <a href="#Constraint_type_inference"><i>constraint type inference</i></a>
|
||||
</li>
|
||||
<li>
|
||||
apply function argument type inference to all <i>untyped</i> ordinary function arguments
|
||||
using the default type for each of the untyped function arguments
|
||||
</li>
|
||||
<li>
|
||||
apply constraint type inference
|
||||
<p>
|
||||
For each bound type parameter <code>P<sub>k</sub></code> for which no type argument
|
||||
has been inferred yet and for which one or more pairs
|
||||
<code>(c<sub>j</sub>, P<sub>k</sub>)</code> with that same type parameter
|
||||
were collected, determine the <a href="#Constant_expressions">constant kind</a>
|
||||
of the constants <code>c<sub>j</sub></code> in all those pairs the same way as for
|
||||
<a href="#Constant_expressions">constant expressions</a>.
|
||||
The type argument for <code>P<sub>k</sub></code> is the
|
||||
<a href="#Constants">default type</a> for the determined constant kind.
|
||||
If a constant kind cannot be determined due to conflicting constant kinds,
|
||||
type inference fails.
|
||||
</p>
|
||||
</li>
|
||||
</ol>
|
||||
|
||||
<p>
|
||||
If there are no ordinary or untyped function arguments, the respective steps are skipped.
|
||||
Constraint type inference is skipped if the previous step didn't infer any new type arguments,
|
||||
but it is run at least once if there are missing type arguments.
|
||||
If not all type arguments have been found after these two phases, type inference fails.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
The substitution map <i>M</i> is carried through all steps, and each step may add entries to <i>M</i>.
|
||||
The process stops as soon as <i>M</i> has a type argument for each type parameter or if an inference step fails.
|
||||
If an inference step fails, or if <i>M</i> is still missing type arguments after the last step, type inference fails.
|
||||
If the two phases are successful, type inference determined a type argument for each
|
||||
bound type parameter:
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
P<sub>k</sub> ➞ A<sub>k</sub>
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
A type argument <code>A<sub>k</sub></code> may be a composite type,
|
||||
containing other bound type parameters <code>P<sub>k</sub></code> as element types
|
||||
(or even be just another bound type parameter).
|
||||
In a process of repeated simplification, the bound type parameters in each type
|
||||
argument are substituted with the respective type arguments for those type
|
||||
parameters until each type argument is free of bound type parameters.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
If type arguments contain cyclic references to themselves
|
||||
through bound type parameters, simplification and thus type
|
||||
inference fails.
|
||||
Otherwise, type inference succeeds.
|
||||
</p>
|
||||
|
||||
<h4 id="Type_unification">Type unification</h4>
|
||||
|
||||
<p>
|
||||
Type inference is based on <i>type unification</i>. A single unification step
|
||||
applies to a <a href="#Type_inference">substitution map</a> and two types, either
|
||||
or both of which may be or contain type parameters. The substitution map tracks
|
||||
the known (explicitly provided or already inferred) type arguments: the map
|
||||
contains an entry <code>P</code> → <code>A</code> for each type
|
||||
parameter <code>P</code> and corresponding known type argument <code>A</code>.
|
||||
During unification, known type arguments take the place of their corresponding type
|
||||
parameters when comparing types. Unification is the process of finding substitution
|
||||
map entries that make the two types equivalent.
|
||||
Type inference solves type equations through <i>type unification</i>.
|
||||
Type unification recursively compares the LHS and RHS types of an
|
||||
equation, where either or both types may be or contain bound type parameters,
|
||||
and looks for type arguments for those type parameters such that the LHS
|
||||
and RHS match (become identical or assignment-compatible, depending on
|
||||
context).
|
||||
To that effect, type inference maintains a map of bound type parameters
|
||||
to inferred type arguments; this map is consulted and updated during type unification.
|
||||
Initially, the bound type parameters are known but the map is empty.
|
||||
During type unification, if a new type argument <code>A</code> is inferred,
|
||||
the respective mapping <code>P ➞ A</code> from type parameter to argument
|
||||
is added to the map.
|
||||
Conversely, when comparing types, a known type argument
|
||||
(a type argument for which a map entry already exists)
|
||||
takes the place of its corresponding type parameter.
|
||||
As type inference progresses, the map is populated more and more
|
||||
until all equations have been considered, or until unification fails.
|
||||
Type inference succeeds if no unification step fails and the map has
|
||||
an entry for each type parameter.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
For unification, two types that don't contain any type parameters from the current type
|
||||
parameter list are <i>equivalent</i>
|
||||
if they are identical, or if they are channel types that are identical ignoring channel
|
||||
direction, or if their underlying types are equivalent.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Unification works by comparing the structure of pairs of types: their structure
|
||||
disregarding type parameters must be identical, and types other than type parameters
|
||||
must be equivalent.
|
||||
A type parameter in one type may match any complete subtype in the other type;
|
||||
each successful match causes an entry to be added to the substitution map.
|
||||
If the structure differs, or types other than type parameters are not equivalent,
|
||||
unification fails.
|
||||
</p>
|
||||
|
||||
<!--
|
||||
TODO(gri) Somewhere we need to describe the process of adding an entry to the
|
||||
substitution map: if the entry is already present, the type argument
|
||||
values are themselves unified.
|
||||
-->
|
||||
|
||||
<p>
|
||||
For example, if <code>T1</code> and <code>T2</code> are type parameters,
|
||||
<code>[]map[int]bool</code> can be unified with any of the following:
|
||||
</pre>
|
||||
For example, given the type equation with the bound type parameter
|
||||
<code>P</code>
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
[]map[int]bool // types are identical
|
||||
T1 // adds T1 → []map[int]bool to substitution map
|
||||
[]T1 // adds T1 → map[int]bool to substitution map
|
||||
[]map[T1]T2 // adds T1 → int and T2 → bool to substitution map
|
||||
[10]struct{ elem P, list []P } ≡<sub>A</sub> [10]struct{ elem string; list []string }
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
On the other hand, <code>[]map[int]bool</code> cannot be unified with any of
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
int // int is not a slice
|
||||
struct{} // a struct is not a slice
|
||||
[]struct{} // a struct is not a map
|
||||
[]map[T1]string // map element types don't match
|
||||
</pre>
|
||||
|
||||
<p>
As an exception to this general rule, because a <a href="#Type_definitions">defined type</a>
<code>D</code> and a type literal <code>L</code> are never equivalent,
unification compares the underlying type of <code>D</code> with <code>L</code> instead.
For example, given the defined type
</p>

<pre>
type Vector []float64
</pre>

<p>
and the type literal <code>[]E</code>, unification compares <code>[]float64</code> with
<code>[]E</code> and adds an entry <code>E</code> → <code>float64</code> to
the substitution map.
</p>
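As a runnable illustration of this rule (the function first is hypothetical), a call with a Vector argument against a []E parameter infers E ➞ float64:

package main

type Vector []float64

func first[E any](s []E) E { return s[0] }

func main() {
	v := Vector{1, 2, 3}
	_ = first(v) // []E is unified with []float64, the underlying type of Vector
}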
|
||||
|
||||
<h4 id="Function_argument_type_inference">Function argument type inference</h4>
|
||||
|
||||
<!-- In this section and the section on constraint type inference we start with examples
|
||||
rather than have the examples follow the rules as is customary elsewhere in spec.
|
||||
Hopefully this helps building an intuition and makes the rules easier to follow. -->
|
||||
|
||||
<p>
|
||||
Function argument type inference infers type arguments from function arguments:
|
||||
if a function parameter is declared with a type <code>T</code> that uses
|
||||
type parameters,
|
||||
<a href="#Type_unification">unifying</a> the type of the corresponding
|
||||
function argument with <code>T</code> may infer type arguments for the type
|
||||
parameters used by <code>T</code>.
|
||||
type inference starts with an empty map.
|
||||
Unification first compares the top-level structure of the LHS and RHS
|
||||
types.
|
||||
Both are arrays of the same length; they unify if the element types unify.
|
||||
Both element types are structs; they unify if they have
|
||||
the same number of fields with the same names and if the
|
||||
field types unify.
|
||||
The type argument for <code>P</code> is not known yet (there is no map entry),
|
||||
so unifying <code>P</code> with <code>string</code> adds
|
||||
the mapping <code>P ➞ string</code> to the map.
|
||||
Unifying the types of the <code>list</code> field requires
|
||||
unifying <code>[]P</code> and <code>[]string</code> and
|
||||
thus <code>P</code> and <code>string</code>.
|
||||
Since the type argument for <code>P</code> is known at this point
|
||||
(there is a map entry for <code>P</code>), its type argument
|
||||
<code>string</code> takes the place of <code>P</code>.
|
||||
And since <code>string</code> is identical to <code>string</code>,
|
||||
this unification step succeeds as well.
|
||||
Unification of the LHS and RHS of the equation is now finished.
|
||||
Type inference succeeds because there is only one type equation,
|
||||
no unification step failed, and the map is fully populated.
|
||||
</p>
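A small program corresponding to the walk-through above (the function g is hypothetical); the call unifies the two array-of-struct types and infers the type argument for P:

package main

func g[P any](x [10]struct {
	elem P
	list []P
}) {
}

func main() {
	var a [10]struct {
		elem string
		list []string
	}
	g(a) // P ➞ string, inferred field by field as described above
}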
|
||||
|
||||
<p>
|
||||
For instance, given the generic function
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
func scale[Number ~int64|~float64|~complex128](v []Number, s Number) []Number
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
and the call
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
var vector []float64
|
||||
scaledVector := scale(vector, 42)
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
the type argument for <code>Number</code> can be inferred from the function argument
|
||||
<code>vector</code> by unifying the type of <code>vector</code> with the corresponding
|
||||
parameter type: <code>[]float64</code> and <code>[]Number</code>
|
||||
match in structure and <code>float64</code> matches with <code>Number</code>.
|
||||
This adds the entry <code>Number</code> → <code>float64</code> to the
|
||||
<a href="#Type_unification">substitution map</a>.
|
||||
Untyped arguments, such as the second function argument <code>42</code> here, are ignored
|
||||
in the first round of function argument type inference and only considered if there are
|
||||
unresolved type parameters left.
|
||||
Unification uses a combination of <i>exact</i> and <i>loose</i>
|
||||
unification depending on whether two types have to be
|
||||
<a href="#Type_identity">identical</a>,
|
||||
<a href="#Assignability">assignment-compatible</a>, or
|
||||
only structurally equal.
|
||||
The respective <a href="#Type_unification_rules">type unification rules</a>
|
||||
are spelled out in detail in the <a href="#Appendix">Appendix</a>.
|
||||
</p>
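The declaration of scale above has no body; a minimal sketch (the body is an assumption) together with the call shows the inference in action:

package main

func scale[Number ~int64 | ~float64 | ~complex128](v []Number, s Number) []Number {
	r := make([]Number, len(v))
	for i, x := range v {
		r[i] = x * s
	}
	return r
}

func main() {
	var vector []float64
	scaledVector := scale(vector, 42) // Number ➞ float64, from vector; 42 then converts to float64
	_ = scaledVector
}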
|
||||
|
||||
<p>
|
||||
Inference happens in two separate phases; each phase operates on a specific list of
|
||||
(parameter, argument) pairs:
|
||||
For an equation of the form <code>X ≡<sub>A</sub> Y</code>,
|
||||
where <code>X</code> and <code>Y</code> are types involved
|
||||
in an assignment (including parameter passing and return statements),
|
||||
the top-level type structures may unify loosely but element types
|
||||
must unify exactly, matching the rules for assignments.
|
||||
</p>
|
||||
|
||||
<ol>
|
||||
<p>
|
||||
For an equation of the form <code>P ≡<sub>C</sub> C</code>,
|
||||
where <code>P</code> is a type parameter and <code>C</code>
|
||||
its corresponding constraint, the unification rules are bit
|
||||
more complicated:
|
||||
</p>
|
||||
|
||||
<ul>
|
||||
<li>
|
||||
The list <i>Lt</i> contains all (parameter, argument) pairs where the parameter
|
||||
type uses type parameters and where the function argument is <i>typed</i>.
|
||||
If <code>C</code> has a <a href="#Core_types">core type</a>
|
||||
<code>core(C)</code>
|
||||
and <code>P</code> has a known type argument <code>A</code>,
|
||||
<code>core(C)</code> and <code>A</code> must unify loosely.
|
||||
If <code>P</code> does not have a known type argument
|
||||
and <code>C</code> contains exactly one type term <code>T</code>
|
||||
that is not an underlying (tilde) type, unification adds the
|
||||
mapping <code>P ➞ T</code> to the map.
|
||||
</li>
|
||||
<li>
|
||||
The list <i>Lu</i> contains all remaining pairs where the parameter type is a single
|
||||
type parameter. In this list, the respective function arguments are untyped.
|
||||
If <code>C</code> does not have a core type
|
||||
and <code>P</code> has a known type argument <code>A</code>,
|
||||
<code>A</code> must have all methods of <code>C</code>, if any,
|
||||
and corresponding method types must unify exactly.
|
||||
</li>
|
||||
</ol>
|
||||
</ul>
|
||||
|
||||
<p>
|
||||
Any other (parameter, argument) pair is ignored.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
By construction, the arguments of the pairs in <i>Lu</i> are <i>untyped</i> constants
|
||||
(or the untyped boolean result of a comparison). And because <a href="#Constants">default types</a>
|
||||
of untyped values are always predeclared non-composite types, they can never match against
|
||||
a composite type, so it is sufficient to only consider parameter types that are single type
|
||||
parameters.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Each list is processed in a separate phase:
|
||||
</p>
|
||||
|
||||
<ol>
|
||||
<li>
|
||||
In the first phase, the parameter and argument types of each pair in <i>Lt</i>
|
||||
are unified. If unification succeeds for a pair, it may yield new entries that
|
||||
are added to the substitution map <i>M</i>. If unification fails, type inference
|
||||
fails.
|
||||
</li>
|
||||
<li>
|
||||
The second phase considers the entries of list <i>Lu</i>. Type parameters for
|
||||
which the type argument has already been determined are ignored in this phase.
|
||||
For each remaining pair, the parameter type (which is a single type parameter) and
|
||||
the <a href="#Constants">default type</a> of the corresponding untyped argument is
|
||||
unified. If unification fails, type inference fails.
|
||||
</li>
|
||||
</ol>
|
||||
|
||||
<p>
|
||||
While unification is successful, processing of each list continues until all list elements
|
||||
are considered, even if all type arguments are inferred before the last list element has
|
||||
been processed.
|
||||
</p>
|
||||
|
||||
<p>
Example:
</p>

<pre>
func min[T ~int|~float64](x, y T) T

var x int
min(x, 2.0)    // T is int, inferred from typed argument x; 2.0 is assignable to int
min(1.0, 2.0)  // T is float64, inferred from default type for 1.0 and matches default type for 2.0
min(1.0, 2)    // illegal: default type float64 (for 1.0) doesn't match default type int (for 2)
</pre>

<p>
In the example <code>min(1.0, 2)</code>, processing the function argument <code>1.0</code>
yields the substitution map entry <code>T</code> → <code>float64</code>. Because
processing continues until all untyped arguments are considered, an error is reported. This
ensures that type inference does not depend on the order of the untyped arguments.
</p>
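A runnable version of the example (the body of min is an assumption; at package scope this declaration shadows the predeclared min of Go 1.21):

package main

func min[T ~int | ~float64](x, y T) T {
	if x < y {
		return x
	}
	return y
}

func main() {
	var x int
	_ = min(x, 2.0)   // T ➞ int; the untyped constant 2.0 is assignable to int
	_ = min(1.0, 2.0) // T ➞ float64, the common default type of the untyped constants
	// min(1.0, 2)    // illegal: default types float64 and int conflict
}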
|
||||
|
||||
<h4 id="Constraint_type_inference">Constraint type inference</h4>
|
||||
|
||||
<p>
|
||||
Constraint type inference infers type arguments by considering type constraints.
|
||||
If a type parameter <code>P</code> has a constraint with a
|
||||
<a href="#Core_types">core type</a> <code>C</code>,
|
||||
<a href="#Type_unification">unifying</a> <code>P</code> with <code>C</code>
|
||||
may infer additional type arguments, either the type argument for <code>P</code>,
|
||||
or if that is already known, possibly the type arguments for type parameters
|
||||
used in <code>C</code>.
|
||||
</p>
|
||||
|
||||
<p>
For instance, consider the type parameter list with type parameters <code>List</code> and
<code>Elem</code>:
</p>

<pre>
[List ~[]Elem, Elem any]
</pre>

<p>
Constraint type inference can deduce the type of <code>Elem</code> from the type argument
for <code>List</code> because <code>Elem</code> is a type parameter in the core type
<code>[]Elem</code> of <code>List</code>.
If the type argument is <code>Bytes</code>:
</p>

<pre>
type Bytes []byte
</pre>

<p>
unifying the underlying type of <code>Bytes</code> with the core type means
unifying <code>[]byte</code> with <code>[]Elem</code>. That unification succeeds and yields
the <a href="#Type_unification">substitution map</a> entry
<code>Elem</code> → <code>byte</code>.
Thus, in this example, constraint type inference can infer the second type argument from the
first one.
</p>
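A self-contained illustration (the function last is hypothetical) of inferring the second type argument from the first:

package main

type Bytes []byte

func last[List ~[]Elem, Elem any](l List) Elem { return l[len(l)-1] }

func main() {
	b := Bytes("hello")
	_ = last(b) // List ➞ Bytes from the argument; Elem ➞ byte via the constraint ~[]Elem
}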
|
||||
|
||||
<p>
|
||||
Using the core type of a constraint may lose some information: In the (unlikely) case that
|
||||
the constraint's type set contains a single <a href="#Type_definitions">defined type</a>
|
||||
<code>N</code>, the corresponding core type is <code>N</code>'s underlying type rather than
|
||||
<code>N</code> itself. In this case, constraint type inference may succeed but instantiation
|
||||
will fail because the inferred type is not in the type set of the constraint.
|
||||
Thus, constraint type inference uses the <i>adjusted core type</i> of
|
||||
a constraint: if the type set contains a single type, use that type; otherwise use the
|
||||
constraint's core type.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Generally, constraint type inference proceeds in two phases: Starting with a given
|
||||
substitution map <i>M</i>
|
||||
</p>
|
||||
|
||||
<ol>
|
||||
<li>
|
||||
For all type parameters with an adjusted core type, unify the type parameter with that
|
||||
type. If any unification fails, constraint type inference fails.
|
||||
</li>
|
||||
|
||||
<li>
|
||||
At this point, some entries in <i>M</i> may map type parameters to other
|
||||
type parameters or to types containing type parameters. For each entry
|
||||
<code>P</code> → <code>A</code> in <i>M</i> where <code>A</code> is or
|
||||
contains type parameters <code>Q</code> for which there exist entries
|
||||
<code>Q</code> → <code>B</code> in <i>M</i>, substitute those
|
||||
<code>Q</code> with the respective <code>B</code> in <code>A</code>.
|
||||
Stop when no further substitution is possible.
|
||||
</li>
|
||||
</ol>
|
||||
|
||||
<p>
|
||||
The result of constraint type inference is the final substitution map <i>M</i> from type
|
||||
parameters <code>P</code> to type arguments <code>A</code> where no type parameter <code>P</code>
|
||||
appears in any of the <code>A</code>.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
For instance, given the type parameter list
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
[A any, B []C, C *A]
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
and the single provided type argument <code>int</code> for type parameter <code>A</code>,
|
||||
the initial substitution map <i>M</i> contains the entry <code>A</code> → <code>int</code>.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
In the first phase, the type parameters <code>B</code> and <code>C</code> are unified
|
||||
with the core type of their respective constraints. This adds the entries
|
||||
<code>B</code> → <code>[]C</code> and <code>C</code> → <code>*A</code>
|
||||
to <i>M</i>.
|
||||
|
||||
<p>
|
||||
At this point there are two entries in <i>M</i> where the right-hand side
|
||||
is or contains type parameters for which there exists other entries in <i>M</i>:
|
||||
<code>[]C</code> and <code>*A</code>.
|
||||
In the second phase, these type parameters are replaced with their respective
|
||||
types. It doesn't matter in which order this happens. Starting with the state
|
||||
of <i>M</i> after the first phase:
|
||||
</p>
|
||||
|
||||
<p>
|
||||
<code>A</code> → <code>int</code>,
|
||||
<code>B</code> → <code>[]C</code>,
|
||||
<code>C</code> → <code>*A</code>
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Replace <code>A</code> on the right-hand side of → with <code>int</code>:
|
||||
</p>
|
||||
|
||||
<p>
|
||||
<code>A</code> → <code>int</code>,
|
||||
<code>B</code> → <code>[]C</code>,
|
||||
<code>C</code> → <code>*int</code>
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Replace <code>C</code> on the right-hand side of → with <code>*int</code>:
|
||||
</p>
|
||||
|
||||
<p>
|
||||
<code>A</code> → <code>int</code>,
|
||||
<code>B</code> → <code>[]*int</code>,
|
||||
<code>C</code> → <code>*int</code>
|
||||
</p>
|
||||
|
||||
<p>
|
||||
At this point no further substitution is possible and the map is full.
|
||||
Therefore, <code>M</code> represents the final map of type parameters
|
||||
to type arguments for the given type parameter list.
|
||||
When solving type equations from type constraints,
|
||||
solving one equation may infer additional type arguments,
|
||||
which in turn may enable solving other equations that depend
|
||||
on those type arguments.
|
||||
Type inference repeats type unification as long as new type
|
||||
arguments are inferred.
|
||||
</p>
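The [A any, B []C, C *A] example above can be written as a hypothetical function f; supplying only the type argument for A lets inference derive the remaining type arguments, as described in the worked example:

package main

// f uses the type parameter list from the example above.
func f[A any, B []C, C *A]() {}

func main() {
	f[int]() // A ➞ int given explicitly; inference derives C ➞ *int, then B ➞ []*int
}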
|
||||
|
||||
<h3 id="Operators">Operators</h3>
|
||||
@@ -5479,7 +5400,7 @@ in any of these cases:
|
||||
ignoring struct tags (see below),
|
||||
<code>x</code>'s type and <code>T</code> are not
|
||||
<a href="#Type_parameter_declarations">type parameters</a> but have
|
||||
<a href="#Type_identity">identical</a> <a href="#Types">underlying types</a>.
|
||||
<a href="#Type_identity">identical</a> <a href="#Underlying_types">underlying types</a>.
|
||||
</li>
|
||||
<li>
|
||||
ignoring struct tags (see below),
|
||||
@@ -7324,7 +7245,8 @@ clear(t) type parameter see below
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
If the argument type is a <a href="#Type_parameter_declarations">type parameter</a>,
|
||||
If the type of the argument to <code>clear</code> is a
|
||||
<a href="#Type_parameter_declarations">type parameter</a>,
|
||||
all types in its type set must be maps or slices, and <code>clear</code>
|
||||
performs the operation corresponding to the actual type argument.
|
||||
</p>
|
||||
@@ -8290,7 +8212,7 @@ of if the general conversion rules take care of this.
|
||||
<p>
|
||||
A <code>Pointer</code> is a <a href="#Pointer_types">pointer type</a> but a <code>Pointer</code>
|
||||
value may not be <a href="#Address_operators">dereferenced</a>.
|
||||
Any pointer or value of <a href="#Types">underlying type</a> <code>uintptr</code> can be
|
||||
Any pointer or value of <a href="#Underlying_types">underlying type</a> <code>uintptr</code> can be
|
||||
<a href="#Conversions">converted</a> to a type of underlying type <code>Pointer</code> and vice versa.
|
||||
The effect of converting between <code>Pointer</code> and <code>uintptr</code> is implementation-defined.
|
||||
</p>
|
||||
@@ -8438,3 +8360,145 @@ The following minimal alignment properties are guaranteed:
|
||||
<p>
|
||||
A struct or array type has size zero if it contains no fields (or elements, respectively) that have a size greater than zero. Two distinct zero-size variables may have the same address in memory.
|
||||
</p>
|
||||
|
||||
<h2 id="Appendix">Appendix</h2>
|
||||
|
||||
<h3 id="Type_unification_rules">Type unification rules</h3>
|
||||
|
||||
<p>
|
||||
The type unification rules describe if and how two types unify.
|
||||
The precise details are relevant for Go implementations,
|
||||
affect the specifics of error messages (such as whether
|
||||
a compiler reports a type inference or other error),
|
||||
and may explain why type inference fails in unusual code situations.
|
||||
But by and large these rules can be ignored when writing Go code:
|
||||
type inference is designed to mostly "work as expected",
|
||||
and the unification rules are fine-tuned accordingly.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Type unification is controlled by a <i>matching mode</i>, which may
|
||||
be <i>exact</i> or <i>loose</i>.
|
||||
As unification recursively descends a composite type structure,
|
||||
the matching mode used for elements of the type, the <i>element matching mode</i>,
|
||||
remains the same as the matching mode except when two types are unified for
|
||||
<a href="#Assignability">assignability</a> (<code>≡<sub>A</sub></code>):
|
||||
in this case, the matching mode is <i>loose</i> at the top level but
|
||||
then changes to <i>exact</i> for element types, reflecting the fact
|
||||
that types don't have to be identical to be assignable.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Two types that are not bound type parameters unify exactly if any of
|
||||
following conditions is true:
|
||||
</p>
|
||||
|
||||
<ul>
|
||||
<li>
|
||||
Both types are <a href="#Type_identity">identical</a>.
|
||||
</li>
|
||||
<li>
|
||||
Both types have identical structure and their element types
|
||||
unify exactly.
|
||||
</li>
|
||||
<li>
|
||||
Exactly one type is an <a href="#Type_inference">unbound</a>
|
||||
type parameter with a <a href="#Core_types">core type</a>,
|
||||
and that core type unifies with the other type per the
|
||||
unification rules for <code>≡<sub>A</sub></code>
|
||||
(loose unification at the top level and exact unification
|
||||
for element types).
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
<p>
|
||||
If both types are bound type parameters, they unify per the given
|
||||
matching modes if:
|
||||
</p>
|
||||
|
||||
<ul>
|
||||
<li>
|
||||
Both type parameters are identical.
|
||||
</li>
|
||||
<li>
|
||||
At most one of the type parameters has a known type argument.
|
||||
In this case, the type parameters are <i>joined</i>:
|
||||
they both stand for the same type argument.
|
||||
If neither type parameter has a known type argument yet,
|
||||
a future type argument inferred for one the type parameters
|
||||
is simultaneously inferred for both of them.
|
||||
</li>
|
||||
<li>
|
||||
Both type parameters have a known type argument
|
||||
and the type arguments unify per the given matching modes.
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
<p>
|
||||
A single bound type parameter <code>P</code> and another type <code>T</code> unify
|
||||
per the given matching modes if:
|
||||
</p>
|
||||
|
||||
<ul>
|
||||
<li>
|
||||
<code>P</code> doesn't have a known type argument.
|
||||
In this case, <code>T</code> is inferred as the type argument for <code>P</code>.
|
||||
</li>
|
||||
<li>
|
||||
<code>P</code> does have a known type argument <code>A</code>,
|
||||
<code>A</code> and <code>T</code> unify per the given matching modes,
|
||||
and one of the following conditions is true:
|
||||
<ul>
|
||||
<li>
|
||||
Both <code>A</code> and <code>T</code> are interface types:
|
||||
In this case, if both <code>A</code> and <code>T</code> are
|
||||
also <a href="#Type_definitions">defined</a> types,
|
||||
they must be <a href="#Type_identity">identical</a>.
|
||||
Otherwise, if neither of them is a defined type, they must
|
||||
have the same number of methods
|
||||
(unification of <code>A</code> and <code>T</code> already
|
||||
established that the methods match).
|
||||
</li>
|
||||
<li>
|
||||
Neither <code>A</code> nor <code>T</code> are interface types:
|
||||
In this case, if <code>T</code> is a defined type, <code>T</code>
|
||||
replaces <code>A</code> as the inferred type argument for <code>P</code>.
|
||||
</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
<p>
|
||||
Finally, two types that are not bound type parameters unify loosely
|
||||
(and per the element matching mode) if:
|
||||
</p>
|
||||
|
||||
<ul>
|
||||
<li>
|
||||
Both types unify exactly.
|
||||
</li>
|
||||
<li>
|
||||
One type is a <a href="#Type_definitions">defined type</a>,
|
||||
the other type is a type literal, but not an interface,
|
||||
and their underlying types unify per the element matching mode.
|
||||
</li>
|
||||
<li>
|
||||
Both types are interfaces (but not type parameters) with
|
||||
identical <a href="#Interface_types">type terms</a>,
|
||||
both or neither embed the predeclared type
|
||||
<a href="#Predeclared_identifiers">comparable</a>,
|
||||
corresponding method types unify per the element matching mode,
|
||||
and the method set of one of the interfaces is a subset of
|
||||
the method set of the other interface.
|
||||
</li>
|
||||
<li>
|
||||
Only one type is an interface (but not a type parameter),
|
||||
corresponding methods of the two types unify per the element matching mode,
|
||||
and the method set of the interface is a subset of
|
||||
the method set of the other type.
|
||||
</li>
|
||||
<li>
|
||||
Both types have the same structure and their element types
|
||||
unify per the element matching mode.
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
@@ -126,6 +126,27 @@ for example,
see the [runtime documentation](/pkg/runtime#hdr-Environment_Variables)
and the [go command documentation](/cmd/go#hdr-Build_and_test_caching).

### Go 1.22

Go 1.22 adds a configurable limit to control the maximum acceptable RSA key size
that can be used in TLS handshakes, controlled by the [`tlsmaxrsasize` setting](/pkg/crypto/tls#Conn.Handshake).
The default is tlsmaxrsasize=8192, limiting RSA to 8192-bit keys. To avoid
denial of service attacks, this setting and default were backported to Go
1.19.13, Go 1.20.8, and Go 1.21.1.

Go 1.22 changed how the runtime interacts with transparent huge pages on Linux.
In particular, a common default Linux kernel configuration can result in
significant memory overheads, and Go 1.22 no longer works around this default.
To work around this issue without adjusting kernel settings, transparent huge
pages can be disabled for Go memory with the
[`disablethp` setting](/pkg/runtime#hdr-Environment_Variable).
This behavior was backported to Go 1.21.1, but the setting is only available
starting with Go 1.21.6.
This setting may be removed in a future release, and users impacted by this issue
should adjust their Linux configuration according to the recommendations in the
[GC guide](/doc/gc-guide#Linux_transparent_huge_pages), or switch to a Linux
distribution that disables transparent huge pages altogether.
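As an illustration only (the values are arbitrary), such settings can be supplied at run time through the environment, e.g. `GODEBUG=tlsmaxrsasize=4096,disablethp=1`, or compiled into a program with a `//go:debug` directive in the main package, assuming the usual directive placement before the package clause:

    //go:debug tlsmaxrsasize=4096

    package main

    func main() {}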
### Go 1.21

Go 1.21 made it a run-time error to call `panic` with a nil interface value,
@@ -10,12 +10,12 @@ case "$GOWASIRUNTIME" in
|
||||
"wasmer")
|
||||
exec wasmer run --dir=/ --env PWD="$PWD" --env PATH="$PATH" ${GOWASIRUNTIMEARGS:-} "$1" -- "${@:2}"
|
||||
;;
|
||||
"wasmtime")
|
||||
exec wasmtime run --dir=/ --env PWD="$PWD" --env PATH="$PATH" --max-wasm-stack 1048576 ${GOWASIRUNTIMEARGS:-} "$1" -- "${@:2}"
|
||||
;;
|
||||
"wazero" | "")
|
||||
"wazero")
|
||||
exec wazero run -mount /:/ -env-inherit -cachedir "${TMPDIR:-/tmp}"/wazero ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}"
|
||||
;;
|
||||
"wasmtime" | "")
|
||||
exec wasmtime run --dir=/ --env PWD="$PWD" --env PATH="$PATH" --max-wasm-stack 1048576 ${GOWASIRUNTIMEARGS:-} "$1" -- "${@:2}"
|
||||
;;
|
||||
*)
|
||||
echo "Unknown Go WASI runtime specified: $GOWASIRUNTIME"
|
||||
exit 1
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package api
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
//go:build boringcrypto
|
||||
|
||||
package api
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
@@ -2,9 +2,10 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package api computes the exported API of a set of Go packages.
|
||||
// This package computes the exported API of a set of Go packages.
|
||||
// It is only a test, not a command, nor a usefully importable package.
|
||||
package api
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
40 src/cmd/asm/internal/asm/testdata/riscv64.s (vendored)
@@ -183,28 +183,28 @@ start:
|
||||
// 8.2: Load-Reserved/Store-Conditional
|
||||
LRW (X5), X6 // 2fa30214
|
||||
LRD (X5), X6 // 2fb30214
|
||||
SCW X5, (X6), X7 // af23531c
|
||||
SCD X5, (X6), X7 // af33531c
|
||||
SCW X5, (X6), X7 // af23531a
|
||||
SCD X5, (X6), X7 // af33531a
|
||||
|
||||
// 8.3: Atomic Memory Operations
|
||||
AMOSWAPW X5, (X6), X7 // af23530c
|
||||
AMOSWAPD X5, (X6), X7 // af33530c
|
||||
AMOADDW X5, (X6), X7 // af235304
|
||||
AMOADDD X5, (X6), X7 // af335304
|
||||
AMOANDW X5, (X6), X7 // af235364
|
||||
AMOANDD X5, (X6), X7 // af335364
|
||||
AMOORW X5, (X6), X7 // af235344
|
||||
AMOORD X5, (X6), X7 // af335344
|
||||
AMOXORW X5, (X6), X7 // af235324
|
||||
AMOXORD X5, (X6), X7 // af335324
|
||||
AMOMAXW X5, (X6), X7 // af2353a4
|
||||
AMOMAXD X5, (X6), X7 // af3353a4
|
||||
AMOMAXUW X5, (X6), X7 // af2353e4
|
||||
AMOMAXUD X5, (X6), X7 // af3353e4
|
||||
AMOMINW X5, (X6), X7 // af235384
|
||||
AMOMIND X5, (X6), X7 // af335384
|
||||
AMOMINUW X5, (X6), X7 // af2353c4
|
||||
AMOMINUD X5, (X6), X7 // af3353c4
|
||||
AMOSWAPW X5, (X6), X7 // af23530e
|
||||
AMOSWAPD X5, (X6), X7 // af33530e
|
||||
AMOADDW X5, (X6), X7 // af235306
|
||||
AMOADDD X5, (X6), X7 // af335306
|
||||
AMOANDW X5, (X6), X7 // af235366
|
||||
AMOANDD X5, (X6), X7 // af335366
|
||||
AMOORW X5, (X6), X7 // af235346
|
||||
AMOORD X5, (X6), X7 // af335346
|
||||
AMOXORW X5, (X6), X7 // af235326
|
||||
AMOXORD X5, (X6), X7 // af335326
|
||||
AMOMAXW X5, (X6), X7 // af2353a6
|
||||
AMOMAXD X5, (X6), X7 // af3353a6
|
||||
AMOMAXUW X5, (X6), X7 // af2353e6
|
||||
AMOMAXUD X5, (X6), X7 // af3353e6
|
||||
AMOMINW X5, (X6), X7 // af235386
|
||||
AMOMIND X5, (X6), X7 // af335386
|
||||
AMOMINUW X5, (X6), X7 // af2353c6
|
||||
AMOMINUD X5, (X6), X7 // af3353c6
|
||||
|
||||
// 10.1: Base Counters and Timers
|
||||
RDCYCLE X5 // f32200c0
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
package cgotest
|
||||
|
||||
/*
|
||||
#cgo LDFLAGS: -lm
|
||||
#cgo !darwin LDFLAGS: -lm
|
||||
#include <math.h>
|
||||
*/
|
||||
import "C"
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
package issue8756
|
||||
|
||||
/*
|
||||
#cgo LDFLAGS: -lm
|
||||
#cgo !darwin LDFLAGS: -lm
|
||||
#include <math.h>
|
||||
*/
|
||||
import "C"
|
||||
|
||||
@@ -23,7 +23,7 @@ package cgotest
|
||||
#include <unistd.h>
|
||||
#include <sys/stat.h>
|
||||
#include <errno.h>
|
||||
#cgo LDFLAGS: -lm
|
||||
#cgo !darwin LDFLAGS: -lm
|
||||
|
||||
#ifndef WIN32
|
||||
#include <pthread.h>
|
||||
|
||||
@@ -1365,3 +1365,35 @@ func TestDeepStack(t *testing.T) {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSharedObject(t *testing.T) {
|
||||
// Test that we can put a Go c-archive into a C shared object.
|
||||
globalSkip(t)
|
||||
testenv.MustHaveGoBuild(t)
|
||||
testenv.MustHaveCGO(t)
|
||||
testenv.MustHaveBuildMode(t, "c-archive")
|
||||
|
||||
t.Parallel()
|
||||
|
||||
if !testWork {
|
||||
defer func() {
|
||||
os.Remove("libgo_s.a")
|
||||
os.Remove("libgo_s.h")
|
||||
os.Remove("libgo_s.so")
|
||||
}()
|
||||
}
|
||||
|
||||
cmd := exec.Command("go", "build", "-buildmode=c-archive", "-o", "libgo_s.a", "./libgo")
|
||||
out, err := cmd.CombinedOutput()
|
||||
t.Logf("%v\n%s", cmd.Args, out)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ccArgs := append(cc, "-shared", "-o", "libgo_s.so", "libgo_s.a")
|
||||
out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput()
|
||||
t.Logf("%v\n%s", ccArgs, out)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -111,6 +111,7 @@ func TestReportsTypeErrors(t *testing.T) {
|
||||
for _, file := range []string{
|
||||
"err1.go",
|
||||
"err2.go",
|
||||
"err5.go",
|
||||
"issue11097a.go",
|
||||
"issue11097b.go",
|
||||
"issue18452.go",
|
||||
|
||||
10 src/cmd/cgo/internal/testerrors/testdata/err5.go (vendored, normal file)
@@ -0,0 +1,10 @@
|
||||
// Copyright 2023 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
//line /tmp/_cgo_.go:1
|
||||
//go:cgo_dynamic_linker "/elf/interp" // ERROR HERE: only allowed in cgo-generated code
|
||||
|
||||
func main() {}
|
||||
@@ -389,9 +389,18 @@ func TestForkExec(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestGeneric(t *testing.T) {
|
||||
func TestSymbolNameMangle(t *testing.T) {
|
||||
// Issue 58800: generic function name may contain weird characters
|
||||
// that confuse the external linker.
|
||||
// Issue 62098: the name mangling code doesn't handle some string
|
||||
// symbols correctly.
|
||||
globalSkip(t)
|
||||
goCmd(t, "build", "-buildmode=plugin", "-o", "generic.so", "./generic/plugin.go")
|
||||
goCmd(t, "build", "-buildmode=plugin", "-o", "mangle.so", "./mangle/plugin.go")
|
||||
}
|
||||
|
||||
func TestIssue62430(t *testing.T) {
|
||||
globalSkip(t)
|
||||
goCmd(t, "build", "-buildmode=plugin", "-o", "issue62430.so", "./issue62430/plugin.go")
|
||||
goCmd(t, "build", "-o", "issue62430.exe", "./issue62430/main.go")
|
||||
run(t, "./issue62430.exe")
|
||||
}
|
||||
|
||||
35 src/cmd/cgo/internal/testplugin/testdata/issue62430/main.go (vendored, normal file)
@@ -0,0 +1,35 @@
|
||||
// Copyright 2023 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Issue 62430: a program that uses plugins may appear
|
||||
// to have no references to an initialized global map variable defined
|
||||
// in some stdlib package (ex: unicode), however there
|
||||
// may be references to that map var from a plugin that
|
||||
// gets loaded.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"plugin"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
func main() {
|
||||
p, err := plugin.Open("issue62430.so")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
s, err := p.Lookup("F")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
f := s.(func(string) *unicode.RangeTable)
|
||||
if f("C") == nil {
|
||||
panic("unicode.Categories not properly initialized")
|
||||
} else {
|
||||
fmt.Println("unicode.Categories properly initialized")
|
||||
}
|
||||
}
|
||||
11 src/cmd/cgo/internal/testplugin/testdata/issue62430/plugin.go (vendored, normal file)
@@ -0,0 +1,11 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"unicode"
|
||||
)
|
||||
|
||||
func F(s string) *unicode.RangeTable {
|
||||
return unicode.Categories[s]
|
||||
}
|
||||
|
||||
func main() {}
|
||||
@@ -2,21 +2,37 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Instantiated function name may contain weird characters
// that confuse the external linker, so it needs to be
// mangled.
// Test cases for symbol name mangling.

package main

//go:noinline
func F[T any]() {}
import (
	"fmt"
	"strings"
)

// Issue 58800:
// Instantiated function name may contain weird characters
// that confuse the external linker, so it needs to be
// mangled.
type S struct {
	X int `parser:"|@@)"`
}

//go:noinline
func F[T any]() {}

func P() {
	F[S]()
}

// Issue 62098: the name mangling code doesn't handle some string
// symbols correctly.
func G(id string) error {
	if strings.ContainsAny(id, "&$@;/:+,?\\{^}%`]\">[~<#|") {
		return fmt.Errorf("invalid")
	}
	return nil
}

func main() {}
@@ -16,8 +16,10 @@ import (
	"encoding/json"
	"errors"
	"fmt"
	"internal/testenv"
	"os"
	"os/exec"
	"os/user"
	"path/filepath"
	"regexp"
	"strconv"
@@ -266,12 +268,28 @@ func compilerSupportsLocation() bool {
|
||||
case "gcc":
|
||||
return compiler.major >= 10
|
||||
case "clang":
|
||||
// TODO(65606): The clang toolchain on the LUCI builders is not built against
|
||||
// zlib, the ASAN runtime can't actually symbolize its own stack trace. Once
|
||||
// this is resolved, one way or another, switch this back to 'true'. We still
|
||||
// have coverage from the 'gcc' case above.
|
||||
if inLUCIBuild() {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// inLUCIBuild returns true if we're currently executing in a LUCI build.
|
||||
func inLUCIBuild() bool {
|
||||
u, err := user.Current()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return testenv.Builder() != "" && u.Username == "swarming"
|
||||
}
|
||||
|
||||
// compilerRequiredTsanVersion reports whether the compiler is the version required by Tsan.
|
||||
// Only restrictions for ppc64le are known; otherwise return true.
|
||||
func compilerRequiredTsanVersion(goos, goarch string) bool {
|
||||
|
||||
@@ -186,7 +186,7 @@ func (e *escape) callCommon(ks []hole, call ir.Node, init *ir.Nodes, wrapper *ir
|
||||
argument(e.discardHole(), &call.X)
|
||||
argument(e.discardHole(), &call.Y)
|
||||
|
||||
case ir.ODELETE, ir.OMAX, ir.OMIN, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
|
||||
case ir.ODELETE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
|
||||
call := call.(*ir.CallExpr)
|
||||
fixRecoverCall(call)
|
||||
for i := range call.Args {
|
||||
@@ -194,6 +194,14 @@ func (e *escape) callCommon(ks []hole, call ir.Node, init *ir.Nodes, wrapper *ir
|
||||
}
|
||||
argumentRType(&call.RType)
|
||||
|
||||
case ir.OMIN, ir.OMAX:
|
||||
call := call.(*ir.CallExpr)
|
||||
fixRecoverCall(call)
|
||||
for i := range call.Args {
|
||||
argument(ks[0], &call.Args[i])
|
||||
}
|
||||
argumentRType(&call.RType)
|
||||
|
||||
case ir.OLEN, ir.OCAP, ir.OREAL, ir.OIMAG, ir.OCLOSE, ir.OCLEAR:
|
||||
call := call.(*ir.UnaryExpr)
|
||||
argument(e.discardHole(), &call.X)
|
||||
|
||||
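The new OMIN/OMAX case above routes each argument into the call's result hole (ks[0]) instead of discarding it, because min and max return one of their operands. A minimal sketch of the user-level consequence, using only the Go 1.21 built-ins (my own illustration, not code from this change; the function name is made up):

package main

var sink string

//go:noinline
func keepLonger(a, b string) {
	// max returns one of its operands, so whatever a and b refer to can
	// reach the result. Because the result is stored in a global here,
	// escape analysis must let the arguments flow to sink; discarding
	// them, as the old shared case did, would miss that flow.
	sink = max(a, b)
}

func main() {
	keepLonger("hello", "world!")
}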
@@ -333,8 +333,14 @@ func (p *noder) pragma(pos syntax.Pos, blankLine bool, text string, old syntax.P
|
||||
// contain cgo directives, and for security reasons
|
||||
// (primarily misuse of linker flags), other files are not.
|
||||
// See golang.org/issue/23672.
|
||||
// Note that cmd/go ignores files whose names start with underscore,
|
||||
// so the only _cgo_ files we will see from cmd/go are generated by cgo.
|
||||
// It's easy to bypass this check by calling the compiler directly;
|
||||
// we only protect against uses by cmd/go.
|
||||
func isCgoGeneratedFile(pos syntax.Pos) bool {
|
||||
return strings.HasPrefix(filepath.Base(trimFilename(pos.Base())), "_cgo_")
|
||||
// We need the absolute file, independent of //line directives,
|
||||
// so we call pos.Base().Pos().
|
||||
return strings.HasPrefix(filepath.Base(trimFilename(pos.Base().Pos().Base())), "_cgo_")
|
||||
}
|
||||
|
||||
// safeArg reports whether arg is a "safe" command-line argument,
|
||||
|
||||
@@ -1552,33 +1552,27 @@
|
||||
(GreaterEqualU (FlagConstant [fc])) => (MOVDconst [b2i(fc.uge())])
|
||||
|
||||
// absorb InvertFlags into boolean values
|
||||
(Equal (InvertFlags x)) => (Equal x)
|
||||
(NotEqual (InvertFlags x)) => (NotEqual x)
|
||||
(LessThan (InvertFlags x)) => (GreaterThan x)
|
||||
(LessThanU (InvertFlags x)) => (GreaterThanU x)
|
||||
(GreaterThan (InvertFlags x)) => (LessThan x)
|
||||
(GreaterThanU (InvertFlags x)) => (LessThanU x)
|
||||
(LessEqual (InvertFlags x)) => (GreaterEqual x)
|
||||
(LessEqualU (InvertFlags x)) => (GreaterEqualU x)
|
||||
(GreaterEqual (InvertFlags x)) => (LessEqual x)
|
||||
(GreaterEqualU (InvertFlags x)) => (LessEqualU x)
|
||||
(LessThanF (InvertFlags x)) => (GreaterThanF x)
|
||||
(LessEqualF (InvertFlags x)) => (GreaterEqualF x)
|
||||
(GreaterThanF (InvertFlags x)) => (LessThanF x)
|
||||
(GreaterEqualF (InvertFlags x)) => (LessEqualF x)
|
||||
(Equal (InvertFlags x)) => (Equal x)
|
||||
(NotEqual (InvertFlags x)) => (NotEqual x)
|
||||
(LessThan (InvertFlags x)) => (GreaterThan x)
|
||||
(LessThanU (InvertFlags x)) => (GreaterThanU x)
|
||||
(GreaterThan (InvertFlags x)) => (LessThan x)
|
||||
(GreaterThanU (InvertFlags x)) => (LessThanU x)
|
||||
(LessEqual (InvertFlags x)) => (GreaterEqual x)
|
||||
(LessEqualU (InvertFlags x)) => (GreaterEqualU x)
|
||||
(GreaterEqual (InvertFlags x)) => (LessEqual x)
|
||||
(GreaterEqualU (InvertFlags x)) => (LessEqualU x)
|
||||
(LessThanF (InvertFlags x)) => (GreaterThanF x)
|
||||
(LessEqualF (InvertFlags x)) => (GreaterEqualF x)
|
||||
(GreaterThanF (InvertFlags x)) => (LessThanF x)
|
||||
(GreaterEqualF (InvertFlags x)) => (LessEqualF x)
|
||||
(LessThanNoov (InvertFlags x)) => (BIC (GreaterEqualNoov <typ.Bool> x) (Equal <typ.Bool> x))
|
||||
(GreaterEqualNoov (InvertFlags x)) => (OR (LessThanNoov <typ.Bool> x) (Equal <typ.Bool> x))
|
||||
|
||||
// Boolean-generating instructions (NOTE: NOT all boolean Values) always
|
||||
// zero upper bit of the register; no need to zero-extend
|
||||
(MOVBUreg x:((Equal|NotEqual|LessThan|LessThanU|LessThanF|LessEqual|LessEqualU|LessEqualF|GreaterThan|GreaterThanU|GreaterThanF|GreaterEqual|GreaterEqualU|GreaterEqualF) _)) => (MOVDreg x)
|
||||
|
||||
// omit unsign extension
|
||||
(MOVWUreg x) && zeroUpper32Bits(x, 3) => x
|
||||
|
||||
// omit sign extension
|
||||
(MOVWreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffff80000000) == 0 => (ANDconst <t> x [c])
|
||||
(MOVHreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffffffff8000) == 0 => (ANDconst <t> x [c])
|
||||
(MOVBreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffffffffff80) == 0 => (ANDconst <t> x [c])
|
||||
|
||||
// absorb flag constants into conditional instructions
|
||||
(CSEL [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
|
||||
(CSEL [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => y
|
||||
|
||||
@@ -13,7 +13,6 @@ import "strings"
|
||||
// - *const instructions may use a constant larger than the instruction can encode.
|
||||
// In this case the assembler expands to multiple instructions and uses tmp
|
||||
// register (R27).
|
||||
// - All 32-bit Ops will zero the upper 32 bits of the destination register.
|
||||
|
||||
// Suffixes encode the bit width of various instructions.
|
||||
// D (double word) = 64 bit
|
||||
|
||||
@@ -981,7 +981,7 @@
|
||||
(ConstNil <typ.Uintptr>)
|
||||
(ConstNil <typ.BytePtr>))
|
||||
|
||||
(NilCheck (GetG mem) mem) => mem
|
||||
(NilCheck ptr:(GetG mem) mem) => ptr
|
||||
|
||||
(If (Not cond) yes no) => (If cond no yes)
|
||||
(If (ConstBool [c]) yes no) && c => (First yes no)
|
||||
@@ -2055,19 +2055,19 @@
|
||||
&& isSameCall(call.Aux, "runtime.newobject")
|
||||
=> mem
|
||||
|
||||
(NilCheck (SelectN [0] call:(StaticLECall _ _)) _)
|
||||
(NilCheck ptr:(SelectN [0] call:(StaticLECall _ _)) _)
|
||||
&& isSameCall(call.Aux, "runtime.newobject")
|
||||
&& warnRule(fe.Debug_checknil(), v, "removed nil check")
|
||||
=> (Invalid)
|
||||
=> ptr
|
||||
|
||||
(NilCheck (OffPtr (SelectN [0] call:(StaticLECall _ _))) _)
|
||||
(NilCheck ptr:(OffPtr (SelectN [0] call:(StaticLECall _ _))) _)
|
||||
&& isSameCall(call.Aux, "runtime.newobject")
|
||||
&& warnRule(fe.Debug_checknil(), v, "removed nil check")
|
||||
=> (Invalid)
|
||||
=> ptr
|
||||
|
||||
// Addresses of globals are always non-nil.
|
||||
(NilCheck (Addr {_} (SB)) _) => (Invalid)
|
||||
(NilCheck (Convert (Addr {_} (SB)) _) _) => (Invalid)
|
||||
(NilCheck ptr:(Addr {_} (SB)) _) => ptr
|
||||
(NilCheck ptr:(Convert (Addr {_} (SB)) _) _) => ptr
|
||||
|
||||
// for late-expanded calls, recognize memequal applied to a single constant byte
|
||||
// Support is limited by 1, 2, 4, 8 byte sizes
|
||||
|
||||
@@ -471,7 +471,7 @@ var genericOps = []opData{
|
||||
{name: "IsNonNil", argLength: 1, typ: "Bool"}, // arg0 != nil
|
||||
{name: "IsInBounds", argLength: 2, typ: "Bool"}, // 0 <= arg0 < arg1. arg1 is guaranteed >= 0.
|
||||
{name: "IsSliceInBounds", argLength: 2, typ: "Bool"}, // 0 <= arg0 <= arg1. arg1 is guaranteed >= 0.
|
||||
{name: "NilCheck", argLength: 2, typ: "Void"}, // arg0=ptr, arg1=mem. Panics if arg0 is nil. Returns void.
|
||||
{name: "NilCheck", argLength: 2, nilCheck: true}, // arg0=ptr, arg1=mem. Panics if arg0 is nil. Returns the ptr unmodified.
|
||||
|
||||
// Pseudo-ops
|
||||
{name: "GetG", argLength: 1, zeroWidth: true}, // runtime.getg() (read g pointer). arg0=mem
|
||||
|
||||
@@ -317,7 +317,28 @@ func checkFunc(f *Func) {
|
||||
if !v.Aux.(*ir.Name).Type().HasPointers() {
|
||||
f.Fatalf("vardef must have pointer type %s", v.Aux.(*ir.Name).Type().String())
|
||||
}
|
||||
|
||||
case OpNilCheck:
|
||||
// nil checks have pointer type before scheduling, and
|
||||
// void type after scheduling.
|
||||
if f.scheduled {
|
||||
if v.Uses != 0 {
|
||||
f.Fatalf("nilcheck must have 0 uses %s", v.Uses)
|
||||
}
|
||||
if !v.Type.IsVoid() {
|
||||
f.Fatalf("nilcheck must have void type %s", v.Type.String())
|
||||
}
|
||||
} else {
|
||||
if !v.Type.IsPtrShaped() && !v.Type.IsUintptr() {
|
||||
f.Fatalf("nilcheck must have pointer type %s", v.Type.String())
|
||||
}
|
||||
}
|
||||
if !v.Args[0].Type.IsPtrShaped() && !v.Args[0].Type.IsUintptr() {
|
||||
f.Fatalf("nilcheck must have argument of pointer type %s", v.Args[0].Type.String())
|
||||
}
|
||||
if !v.Args[1].Type.IsMemory() {
|
||||
f.Fatalf("bad arg 1 type to %s: want mem, have %s",
|
||||
v.Op, v.Args[1].Type.String())
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: check for cycles in values
|
||||
|
||||
@@ -110,16 +110,15 @@ func liveValues(f *Func, reachable []bool) (live []bool, liveOrderStmts []*Value
|
||||
}
|
||||
}
|
||||
for _, v := range b.Values {
|
||||
if (opcodeTable[v.Op].call || opcodeTable[v.Op].hasSideEffects) && !live[v.ID] {
|
||||
if (opcodeTable[v.Op].call || opcodeTable[v.Op].hasSideEffects || opcodeTable[v.Op].nilCheck) && !live[v.ID] {
|
||||
live[v.ID] = true
|
||||
q = append(q, v)
|
||||
if v.Pos.IsStmt() != src.PosNotStmt {
|
||||
liveOrderStmts = append(liveOrderStmts, v)
|
||||
}
|
||||
}
|
||||
if v.Type.IsVoid() && !live[v.ID] {
|
||||
// The only Void ops are nil checks and inline marks. We must keep these.
|
||||
if v.Op == OpInlMark && !liveInlIdx[int(v.AuxInt)] {
|
||||
if v.Op == OpInlMark {
|
||||
if !liveInlIdx[int(v.AuxInt)] {
|
||||
// We don't need marks for bodies that
|
||||
// have been completely optimized away.
|
||||
// TODO: save marks only for bodies which
|
||||
|
||||
@@ -73,9 +73,9 @@ func dse(f *Func) {
|
||||
}
|
||||
|
||||
// Walk backwards looking for dead stores. Keep track of shadowed addresses.
|
||||
// A "shadowed address" is a pointer and a size describing a memory region that
|
||||
// is known to be written. We keep track of shadowed addresses in the shadowed
|
||||
// map, mapping the ID of the address to the size of the shadowed region.
|
||||
// A "shadowed address" is a pointer, offset, and size describing a memory region that
|
||||
// is known to be written. We keep track of shadowed addresses in the shadowed map,
|
||||
// mapping the ID of the address to a shadowRange where future writes will happen.
|
||||
// Since we're walking backwards, writes to a shadowed region are useless,
|
||||
// as they will be immediately overwritten.
|
||||
shadowed.clear()
|
||||
@@ -88,13 +88,20 @@ func dse(f *Func) {
|
||||
shadowed.clear()
|
||||
}
|
||||
if v.Op == OpStore || v.Op == OpZero {
|
||||
ptr := v.Args[0]
|
||||
var off int64
|
||||
for ptr.Op == OpOffPtr { // Walk to base pointer
|
||||
off += ptr.AuxInt
|
||||
ptr = ptr.Args[0]
|
||||
}
|
||||
var sz int64
|
||||
if v.Op == OpStore {
|
||||
sz = v.Aux.(*types.Type).Size()
|
||||
} else { // OpZero
|
||||
sz = v.AuxInt
|
||||
}
|
||||
if shadowedSize := int64(shadowed.get(v.Args[0].ID)); shadowedSize != -1 && shadowedSize >= sz {
|
||||
sr := shadowRange(shadowed.get(ptr.ID))
|
||||
if sr.contains(off, off+sz) {
|
||||
// Modify the store/zero into a copy of the memory state,
|
||||
// effectively eliding the store operation.
|
||||
if v.Op == OpStore {
|
||||
@@ -108,10 +115,8 @@ func dse(f *Func) {
|
||||
v.AuxInt = 0
|
||||
v.Op = OpCopy
|
||||
} else {
|
||||
if sz > 0x7fffffff { // work around sparseMap's int32 value type
|
||||
sz = 0x7fffffff
|
||||
}
|
||||
shadowed.set(v.Args[0].ID, int32(sz))
|
||||
// Extend shadowed region.
|
||||
shadowed.set(ptr.ID, int32(sr.merge(off, off+sz)))
|
||||
}
|
||||
}
|
||||
// walk to previous store
|
||||
@@ -131,6 +136,49 @@ func dse(f *Func) {
|
||||
}
|
||||
}
|
||||
|
||||
// A shadowRange encodes a set of byte offsets [lo():hi()] from
|
||||
// a given pointer that will be written to later in the block.
|
||||
// A zero shadowRange encodes an empty shadowed range (and so
|
||||
// does a -1 shadowRange, which is what sparsemap.get returns
|
||||
// on a failed lookup).
|
||||
type shadowRange int32
|
||||
|
||||
func (sr shadowRange) lo() int64 {
|
||||
return int64(sr & 0xffff)
|
||||
}
|
||||
func (sr shadowRange) hi() int64 {
|
||||
return int64((sr >> 16) & 0xffff)
|
||||
}
|
||||
|
||||
// contains reports whether [lo:hi] is completely within sr.
|
||||
func (sr shadowRange) contains(lo, hi int64) bool {
|
||||
return lo >= sr.lo() && hi <= sr.hi()
|
||||
}
|
||||
|
||||
// merge returns the union of sr and [lo:hi].
|
||||
// merge is allowed to return something smaller than the union.
|
||||
func (sr shadowRange) merge(lo, hi int64) shadowRange {
|
||||
if lo < 0 || hi > 0xffff {
|
||||
// Ignore offsets that are too large or small.
|
||||
return sr
|
||||
}
|
||||
if sr.lo() == sr.hi() {
|
||||
// Old range is empty - use new one.
|
||||
return shadowRange(lo + hi<<16)
|
||||
}
|
||||
if hi < sr.lo() || lo > sr.hi() {
|
||||
// The two regions don't overlap or abut, so we would
|
||||
// have to keep track of multiple disjoint ranges.
|
||||
// Because we can only keep one, keep the larger one.
|
||||
if sr.hi()-sr.lo() >= hi-lo {
|
||||
return sr
|
||||
}
|
||||
return shadowRange(lo + hi<<16)
|
||||
}
|
||||
// Regions overlap or abut - compute the union.
|
||||
return shadowRange(min(lo, sr.lo()) + max(hi, sr.hi())<<16)
|
||||
}
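To make the lo/hi packing concrete, here is a small self-contained sketch of the same encoding (offsets in the low and high 16 bits of an int32); it mirrors shadowRange but is my own illustration, not compiler code, and the names rng/mk are invented:

package main

import "fmt"

// rng packs a byte range [lo, hi) into an int32: lo in bits 0-15,
// hi in bits 16-31, the same scheme shadowRange uses.
type rng int32

func (r rng) lo() int64 { return int64(r & 0xffff) }
func (r rng) hi() int64 { return int64((r >> 16) & 0xffff) }

func mk(lo, hi int64) rng { return rng(lo + hi<<16) }

func main() {
	r := mk(8, 24)              // a 16-byte shadowed region at offsets [8, 24)
	fmt.Println(r.lo(), r.hi()) // 8 24
	// An earlier store that writes only within [8, 24) is shadowed (dead):
	lo, hi := int64(8), int64(24)
	fmt.Println(lo >= r.lo() && hi <= r.hi()) // true
}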
|
||||
|
||||
// elimDeadAutosGeneric deletes autos that are never accessed. To achieve this
|
||||
// we track the operations that the address of each auto reaches and if it only
|
||||
// reaches stores then we delete all the stores. The other operations will then
|
||||
@@ -201,7 +249,7 @@ func elimDeadAutosGeneric(f *Func) {
|
||||
}
|
||||
|
||||
if v.Uses == 0 && v.Op != OpNilCheck && !v.Op.IsCall() && !v.Op.HasSideEffects() || len(args) == 0 {
|
||||
// Nil check has no use, but we need to keep it.
|
||||
// We need to keep nil checks even if they have no use.
|
||||
// Also keep calls and values that have side effects.
|
||||
return
|
||||
}
|
||||
|
||||
@@ -855,7 +855,7 @@ func storeOneArg(x *expandState, pos src.XPos, b *Block, locs []*LocalSlot, suff
|
||||
// storeOneLoad creates a decomposed (one step) load that is then stored.
|
||||
func storeOneLoad(x *expandState, pos src.XPos, b *Block, source, mem *Value, t *types.Type, offArg, offStore int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
|
||||
from := x.offsetFrom(source.Block, source.Args[0], offArg, types.NewPtr(t))
|
||||
w := source.Block.NewValue2(source.Pos, OpLoad, t, from, mem)
|
||||
w := b.NewValue2(source.Pos, OpLoad, t, from, mem)
|
||||
return x.storeArgOrLoad(pos, b, w, mem, t, offStore, loadRegOffset, storeRc)
|
||||
}
|
||||
|
||||
@@ -962,7 +962,7 @@ func (x *expandState) storeArgOrLoad(pos src.XPos, b *Block, source, mem *Value,
|
||||
eltRO := x.regWidth(elt)
|
||||
source.Type = t
|
||||
for i := int64(0); i < t.NumElem(); i++ {
|
||||
sel := source.Block.NewValue1I(pos, OpArraySelect, elt, i, source)
|
||||
sel := b.NewValue1I(pos, OpArraySelect, elt, i, source)
|
||||
mem = x.storeArgOrLoad(pos, b, sel, mem, elt, storeOffset+i*elt.Size(), loadRegOffset, storeRc.at(t, 0))
|
||||
loadRegOffset += eltRO
|
||||
pos = pos.WithNotStmt()
|
||||
@@ -997,7 +997,7 @@ func (x *expandState) storeArgOrLoad(pos src.XPos, b *Block, source, mem *Value,
|
||||
source.Type = t
|
||||
for i := 0; i < t.NumFields(); i++ {
|
||||
fld := t.Field(i)
|
||||
sel := source.Block.NewValue1I(pos, OpStructSelect, fld.Type, int64(i), source)
|
||||
sel := b.NewValue1I(pos, OpStructSelect, fld.Type, int64(i), source)
|
||||
mem = x.storeArgOrLoad(pos, b, sel, mem, fld.Type, storeOffset+fld.Offset, loadRegOffset, storeRc.next(fld.Type))
|
||||
loadRegOffset += x.regWidth(fld.Type)
|
||||
pos = pos.WithNotStmt()
|
||||
@@ -1009,48 +1009,48 @@ func (x *expandState) storeArgOrLoad(pos src.XPos, b *Block, source, mem *Value,
|
||||
break
|
||||
}
|
||||
tHi, tLo := x.intPairTypes(t.Kind())
|
||||
sel := source.Block.NewValue1(pos, OpInt64Hi, tHi, source)
|
||||
sel := b.NewValue1(pos, OpInt64Hi, tHi, source)
|
||||
mem = x.storeArgOrLoad(pos, b, sel, mem, tHi, storeOffset+x.hiOffset, loadRegOffset+x.hiRo, storeRc.plus(x.hiRo))
|
||||
pos = pos.WithNotStmt()
|
||||
sel = source.Block.NewValue1(pos, OpInt64Lo, tLo, source)
|
||||
sel = b.NewValue1(pos, OpInt64Lo, tLo, source)
|
||||
return x.storeArgOrLoad(pos, b, sel, mem, tLo, storeOffset+x.lowOffset, loadRegOffset+x.loRo, storeRc.plus(x.hiRo))
|
||||
|
||||
case types.TINTER:
|
||||
sel := source.Block.NewValue1(pos, OpITab, x.typs.BytePtr, source)
|
||||
sel := b.NewValue1(pos, OpITab, x.typs.BytePtr, source)
|
||||
mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.BytePtr, storeOffset, loadRegOffset, storeRc.next(x.typs.BytePtr))
|
||||
pos = pos.WithNotStmt()
|
||||
sel = source.Block.NewValue1(pos, OpIData, x.typs.BytePtr, source)
|
||||
sel = b.NewValue1(pos, OpIData, x.typs.BytePtr, source)
|
||||
return x.storeArgOrLoad(pos, b, sel, mem, x.typs.BytePtr, storeOffset+x.ptrSize, loadRegOffset+RO_iface_data, storeRc)
|
||||
|
||||
case types.TSTRING:
|
||||
sel := source.Block.NewValue1(pos, OpStringPtr, x.typs.BytePtr, source)
|
||||
sel := b.NewValue1(pos, OpStringPtr, x.typs.BytePtr, source)
|
||||
mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.BytePtr, storeOffset, loadRegOffset, storeRc.next(x.typs.BytePtr))
|
||||
pos = pos.WithNotStmt()
|
||||
sel = source.Block.NewValue1(pos, OpStringLen, x.typs.Int, source)
|
||||
sel = b.NewValue1(pos, OpStringLen, x.typs.Int, source)
|
||||
return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Int, storeOffset+x.ptrSize, loadRegOffset+RO_string_len, storeRc)
|
||||
|
||||
case types.TSLICE:
|
||||
et := types.NewPtr(t.Elem())
|
||||
sel := source.Block.NewValue1(pos, OpSlicePtr, et, source)
|
||||
sel := b.NewValue1(pos, OpSlicePtr, et, source)
|
||||
mem = x.storeArgOrLoad(pos, b, sel, mem, et, storeOffset, loadRegOffset, storeRc.next(et))
|
||||
pos = pos.WithNotStmt()
|
||||
sel = source.Block.NewValue1(pos, OpSliceLen, x.typs.Int, source)
|
||||
sel = b.NewValue1(pos, OpSliceLen, x.typs.Int, source)
|
||||
mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.Int, storeOffset+x.ptrSize, loadRegOffset+RO_slice_len, storeRc.next(x.typs.Int))
|
||||
sel = source.Block.NewValue1(pos, OpSliceCap, x.typs.Int, source)
|
||||
sel = b.NewValue1(pos, OpSliceCap, x.typs.Int, source)
|
||||
return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Int, storeOffset+2*x.ptrSize, loadRegOffset+RO_slice_cap, storeRc)
|
||||
|
||||
case types.TCOMPLEX64:
|
||||
sel := source.Block.NewValue1(pos, OpComplexReal, x.typs.Float32, source)
|
||||
sel := b.NewValue1(pos, OpComplexReal, x.typs.Float32, source)
|
||||
mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float32, storeOffset, loadRegOffset, storeRc.next(x.typs.Float32))
|
||||
pos = pos.WithNotStmt()
|
||||
sel = source.Block.NewValue1(pos, OpComplexImag, x.typs.Float32, source)
|
||||
sel = b.NewValue1(pos, OpComplexImag, x.typs.Float32, source)
|
||||
return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float32, storeOffset+4, loadRegOffset+RO_complex_imag, storeRc)
|
||||
|
||||
case types.TCOMPLEX128:
|
||||
sel := source.Block.NewValue1(pos, OpComplexReal, x.typs.Float64, source)
|
||||
sel := b.NewValue1(pos, OpComplexReal, x.typs.Float64, source)
|
||||
mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float64, storeOffset, loadRegOffset, storeRc.next(x.typs.Float64))
|
||||
pos = pos.WithNotStmt()
|
||||
sel = source.Block.NewValue1(pos, OpComplexImag, x.typs.Float64, source)
|
||||
sel = b.NewValue1(pos, OpComplexImag, x.typs.Float64, source)
|
||||
return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float64, storeOffset+8, loadRegOffset+RO_complex_imag, storeRc)
|
||||
}
|
||||
|
||||
@@ -1113,6 +1113,9 @@ func (x *expandState) rewriteArgs(v *Value, firstArg int) {
|
||||
}
|
||||
}
|
||||
}
|
||||
if x.debug > 1 {
|
||||
x.Printf("...storeArg %s, %v, %d\n", a.LongString(), aType, aOffset)
|
||||
}
|
||||
// "Dereference" of addressed (probably not-SSA-eligible) value becomes Move
|
||||
// TODO(register args) this will be more complicated with registers in the picture.
|
||||
mem = x.rewriteDereference(v.Block, sp, a, mem, aOffset, aux.SizeOfArg(auxI), aType, v.Pos)
|
||||
|
||||
@@ -169,7 +169,7 @@ func fuseBlockIf(b *Block) bool {
|
||||
// There may be false positives.
|
||||
func isEmpty(b *Block) bool {
|
||||
for _, v := range b.Values {
|
||||
if v.Uses > 0 || v.Op.IsCall() || v.Op.HasSideEffects() || v.Type.IsVoid() {
|
||||
if v.Uses > 0 || v.Op.IsCall() || v.Op.HasSideEffects() || v.Type.IsVoid() || opcodeTable[v.Op].nilCheck {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
@@ -254,7 +254,7 @@ func TestFuseSideEffects(t *testing.T) {
|
||||
Valu("p", OpArg, c.config.Types.IntPtr, 0, nil),
|
||||
If("c1", "z0", "exit")),
|
||||
Bloc("z0",
|
||||
Valu("nilcheck", OpNilCheck, types.TypeVoid, 0, nil, "p", "mem"),
|
||||
Valu("nilcheck", OpNilCheck, c.config.Types.IntPtr, 0, nil, "p", "mem"),
|
||||
Goto("exit")),
|
||||
Bloc("exit",
|
||||
Exit("mem"),
|
||||
|
||||
@@ -127,6 +127,13 @@ func findIndVar(f *Func) []indVar {
|
||||
less = false
|
||||
}
|
||||
|
||||
if ind.Block != b {
|
||||
// TODO: Could be extended to include disjointed loop headers.
|
||||
// I don't think this is causing missed optimizations in real world code often.
|
||||
// See https://go.dev/issue/63955
|
||||
continue
|
||||
}
|
||||
|
||||
// Expect the increment to be a nonzero constant.
|
||||
if !inc.isGenericIntConst() {
|
||||
continue
|
||||
|
||||
@@ -41,6 +41,7 @@ func memcombineLoads(f *Func) {
|
||||
}
|
||||
}
|
||||
for _, b := range f.Blocks {
|
||||
order = order[:0]
|
||||
for _, v := range b.Values {
|
||||
if v.Op != OpOr16 && v.Op != OpOr32 && v.Op != OpOr64 {
|
||||
continue
|
||||
@@ -312,8 +313,8 @@ func combineLoads(root *Value, n int64) bool {
|
||||
if isLittleEndian && shift0 != 0 {
|
||||
v = leftShift(loadBlock, pos, v, shift0)
|
||||
}
|
||||
if isBigEndian && shift0-(n-1)*8 != 0 {
|
||||
v = leftShift(loadBlock, pos, v, shift0-(n-1)*8)
|
||||
if isBigEndian && shift0-(n-1)*size*8 != 0 {
|
||||
v = leftShift(loadBlock, pos, v, shift0-(n-1)*size*8)
|
||||
}
|
||||
|
||||
// Install with (Copy v).
|
||||
@@ -587,14 +588,14 @@ func combineStores(root *Value, n int64) bool {
|
||||
isLittleEndian := true
|
||||
shift0 := shift(a[0].store, shiftBase)
|
||||
for i := int64(1); i < n; i++ {
|
||||
if shift(a[i].store, shiftBase) != shift0+i*8 {
|
||||
if shift(a[i].store, shiftBase) != shift0+i*size*8 {
|
||||
isLittleEndian = false
|
||||
break
|
||||
}
|
||||
}
|
||||
isBigEndian := true
|
||||
for i := int64(1); i < n; i++ {
|
||||
if shift(a[i].store, shiftBase) != shift0-i*8 {
|
||||
if shift(a[i].store, shiftBase) != shift0-i*size*8 {
|
||||
isBigEndian = false
|
||||
break
|
||||
}
|
||||
@@ -617,8 +618,8 @@ func combineStores(root *Value, n int64) bool {
|
||||
if isLittleEndian && shift0 != 0 {
|
||||
sv = rightShift(root.Block, root.Pos, sv, shift0)
|
||||
}
|
||||
if isBigEndian && shift0-(n-1)*8 != 0 {
|
||||
sv = rightShift(root.Block, root.Pos, sv, shift0-(n-1)*8)
|
||||
if isBigEndian && shift0-(n-1)*size*8 != 0 {
|
||||
sv = rightShift(root.Block, root.Pos, sv, shift0-(n-1)*size*8)
|
||||
}
|
||||
if sv.Type.Size() > size*n {
|
||||
sv = truncate(root.Block, root.Pos, sv, sv.Type.Size(), size*n)
|
||||
|
||||
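The shift fix above matters once the elements being combined are wider than one byte: the element at byte offset i must land at bit offset i*size*8, whereas the old i*8 formula implicitly assumed size==1. A worked illustration of the arithmetic (my own example, in the spirit of the TestIssue64468 cases added below, not compiler code):

package main

import "fmt"

func main() {
	// Two 16-bit halves written at byte offsets 0 and 2 of a 32-bit word.
	var a, b uint16 = 0x1111, 0x2222
	const size = 2 // element size in bytes
	// Little-endian combining: element i contributes bits shifted by i*size*8.
	combined := uint32(a)<<(0*size*8) | uint32(b)<<(1*size*8)
	fmt.Printf("%#x\n", combined) // 0x22221111
	// With the old i*8 formula, b would have landed at bit 8 instead of bit 16.
}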
@@ -38,11 +38,14 @@ func nilcheckelim(f *Func) {
|
||||
work := make([]bp, 0, 256)
|
||||
work = append(work, bp{block: f.Entry})
|
||||
|
||||
// map from value ID to bool indicating if value is known to be non-nil
|
||||
// in the current dominator path being walked. This slice is updated by
|
||||
// map from value ID to known non-nil version of that value ID
|
||||
// (in the current dominator path being walked). This slice is updated by
|
||||
// walkStates to maintain the known non-nil values.
|
||||
nonNilValues := f.Cache.allocBoolSlice(f.NumValues())
|
||||
defer f.Cache.freeBoolSlice(nonNilValues)
|
||||
// If there is extrinsic information about non-nil-ness, this map
|
||||
// points a value to itself. If a value is known non-nil because we
|
||||
// already did a nil check on it, it points to the nil check operation.
|
||||
nonNilValues := f.Cache.allocValueSlice(f.NumValues())
|
||||
defer f.Cache.freeValueSlice(nonNilValues)
|
||||
|
||||
// make an initial pass identifying any non-nil values
|
||||
for _, b := range f.Blocks {
|
||||
@@ -54,7 +57,7 @@ func nilcheckelim(f *Func) {
|
||||
// We assume that SlicePtr is non-nil because we do a bounds check
|
||||
// before the slice access (and all cap>0 slices have a non-nil ptr). See #30366.
|
||||
if v.Op == OpAddr || v.Op == OpLocalAddr || v.Op == OpAddPtr || v.Op == OpOffPtr || v.Op == OpAdd32 || v.Op == OpAdd64 || v.Op == OpSub32 || v.Op == OpSub64 || v.Op == OpSlicePtr {
|
||||
nonNilValues[v.ID] = true
|
||||
nonNilValues[v.ID] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -68,16 +71,16 @@ func nilcheckelim(f *Func) {
|
||||
if v.Op == OpPhi {
|
||||
argsNonNil := true
|
||||
for _, a := range v.Args {
|
||||
if !nonNilValues[a.ID] {
|
||||
if nonNilValues[a.ID] == nil {
|
||||
argsNonNil = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if argsNonNil {
|
||||
if !nonNilValues[v.ID] {
|
||||
if nonNilValues[v.ID] == nil {
|
||||
changed = true
|
||||
}
|
||||
nonNilValues[v.ID] = true
|
||||
nonNilValues[v.ID] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -103,8 +106,8 @@ func nilcheckelim(f *Func) {
|
||||
if len(b.Preds) == 1 {
|
||||
p := b.Preds[0].b
|
||||
if p.Kind == BlockIf && p.Controls[0].Op == OpIsNonNil && p.Succs[0].b == b {
|
||||
if ptr := p.Controls[0].Args[0]; !nonNilValues[ptr.ID] {
|
||||
nonNilValues[ptr.ID] = true
|
||||
if ptr := p.Controls[0].Args[0]; nonNilValues[ptr.ID] == nil {
|
||||
nonNilValues[ptr.ID] = ptr
|
||||
work = append(work, bp{op: ClearPtr, ptr: ptr})
|
||||
}
|
||||
}
|
||||
@@ -117,14 +120,11 @@ func nilcheckelim(f *Func) {
|
||||
pendingLines.clear()
|
||||
|
||||
// Next, process values in the block.
|
||||
i := 0
|
||||
for _, v := range b.Values {
|
||||
b.Values[i] = v
|
||||
i++
|
||||
switch v.Op {
|
||||
case OpIsNonNil:
|
||||
ptr := v.Args[0]
|
||||
if nonNilValues[ptr.ID] {
|
||||
if nonNilValues[ptr.ID] != nil {
|
||||
if v.Pos.IsStmt() == src.PosIsStmt { // Boolean true is a terrible statement boundary.
|
||||
pendingLines.add(v.Pos)
|
||||
v.Pos = v.Pos.WithNotStmt()
|
||||
@@ -135,7 +135,7 @@ func nilcheckelim(f *Func) {
|
||||
}
|
||||
case OpNilCheck:
|
||||
ptr := v.Args[0]
|
||||
if nonNilValues[ptr.ID] {
|
||||
if nilCheck := nonNilValues[ptr.ID]; nilCheck != nil {
|
||||
// This is a redundant implicit nil check.
|
||||
// Logging in the style of the former compiler -- and omit line 1,
|
||||
// which is usually in generated code.
|
||||
@@ -145,14 +145,13 @@ func nilcheckelim(f *Func) {
|
||||
if v.Pos.IsStmt() == src.PosIsStmt { // About to lose a statement boundary
|
||||
pendingLines.add(v.Pos)
|
||||
}
|
||||
v.reset(OpUnknown)
|
||||
f.freeValue(v)
|
||||
i--
|
||||
v.Op = OpCopy
|
||||
v.SetArgs1(nilCheck)
|
||||
continue
|
||||
}
|
||||
// Record the fact that we know ptr is non nil, and remember to
|
||||
// undo that information when this dominator subtree is done.
|
||||
nonNilValues[ptr.ID] = true
|
||||
nonNilValues[ptr.ID] = v
|
||||
work = append(work, bp{op: ClearPtr, ptr: ptr})
|
||||
fallthrough // a non-eliminated nil check might be a good place for a statement boundary.
|
||||
default:
|
||||
@@ -163,7 +162,7 @@ func nilcheckelim(f *Func) {
|
||||
}
|
||||
}
|
||||
// This reduces the lost statement count in "go" by 5 (out of 500 total).
|
||||
for j := 0; j < i; j++ { // is this an ordering problem?
|
||||
for j := range b.Values { // is this an ordering problem?
|
||||
v := b.Values[j]
|
||||
if v.Pos.IsStmt() != src.PosNotStmt && !isPoorStatementOp(v.Op) && pendingLines.contains(v.Pos) {
|
||||
v.Pos = v.Pos.WithIsStmt()
|
||||
@@ -174,7 +173,6 @@ func nilcheckelim(f *Func) {
|
||||
b.Pos = b.Pos.WithIsStmt()
|
||||
pendingLines.remove(b.Pos)
|
||||
}
|
||||
b.truncateValues(i)
|
||||
|
||||
// Add all dominated blocks to the work list.
|
||||
for w := sdom[node.block.ID].child; w != nil; w = sdom[w.ID].sibling {
|
||||
@@ -182,7 +180,7 @@ func nilcheckelim(f *Func) {
|
||||
}
|
||||
|
||||
case ClearPtr:
|
||||
nonNilValues[node.ptr.ID] = false
|
||||
nonNilValues[node.ptr.ID] = nil
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
@@ -39373,9 +39373,10 @@ var opcodeTable = [...]opInfo{
|
||||
generic: true,
|
||||
},
|
||||
{
|
||||
name: "NilCheck",
|
||||
argLen: 2,
|
||||
generic: true,
|
||||
name: "NilCheck",
|
||||
argLen: 2,
|
||||
nilCheck: true,
|
||||
generic: true,
|
||||
},
|
||||
{
|
||||
name: "GetG",
|
||||
|
||||
@@ -859,6 +859,9 @@ func disjoint(p1 *Value, n1 int64, p2 *Value, n2 int64) bool {
|
||||
offset += base.AuxInt
|
||||
base = base.Args[0]
|
||||
}
|
||||
if opcodeTable[base.Op].nilCheck {
|
||||
base = base.Args[0]
|
||||
}
|
||||
return base, offset
|
||||
}
|
||||
p1, off1 := baseAndOffset(p1)
|
||||
@@ -1183,6 +1186,12 @@ func min(x, y int64) int64 {
|
||||
}
|
||||
return y
|
||||
}
|
||||
func max(x, y int64) int64 {
|
||||
if x > y {
|
||||
return x
|
||||
}
|
||||
return y
|
||||
}
|
||||
|
||||
func isConstZero(v *Value) bool {
|
||||
switch v.Op {
|
||||
@@ -1281,10 +1290,6 @@ func zeroUpper32Bits(x *Value, depth int) bool {
|
||||
OpAMD64SHRL, OpAMD64SHRLconst, OpAMD64SARL, OpAMD64SARLconst,
|
||||
OpAMD64SHLL, OpAMD64SHLLconst:
|
||||
return true
|
||||
case OpARM64REV16W, OpARM64REVW, OpARM64RBITW, OpARM64CLZW, OpARM64EXTRWconst,
|
||||
OpARM64MULW, OpARM64MNEGW, OpARM64UDIVW, OpARM64DIVW, OpARM64UMODW,
|
||||
OpARM64MADDW, OpARM64MSUBW, OpARM64RORW, OpARM64RORWconst:
|
||||
return true
|
||||
case OpArg:
|
||||
return x.Type.Size() == 4
|
||||
case OpPhi, OpSelect0, OpSelect1:
|
||||
|
||||
@@ -154,6 +154,8 @@ func rewriteValueARM64(v *Value) bool {
|
||||
return rewriteValueARM64_OpARM64GreaterEqual(v)
|
||||
case OpARM64GreaterEqualF:
|
||||
return rewriteValueARM64_OpARM64GreaterEqualF(v)
|
||||
case OpARM64GreaterEqualNoov:
|
||||
return rewriteValueARM64_OpARM64GreaterEqualNoov(v)
|
||||
case OpARM64GreaterEqualU:
|
||||
return rewriteValueARM64_OpARM64GreaterEqualU(v)
|
||||
case OpARM64GreaterThan:
|
||||
@@ -174,6 +176,8 @@ func rewriteValueARM64(v *Value) bool {
|
||||
return rewriteValueARM64_OpARM64LessThan(v)
|
||||
case OpARM64LessThanF:
|
||||
return rewriteValueARM64_OpARM64LessThanF(v)
|
||||
case OpARM64LessThanNoov:
|
||||
return rewriteValueARM64_OpARM64LessThanNoov(v)
|
||||
case OpARM64LessThanU:
|
||||
return rewriteValueARM64_OpARM64LessThanU(v)
|
||||
case OpARM64MADD:
|
||||
@@ -5953,6 +5957,27 @@ func rewriteValueARM64_OpARM64GreaterEqualF(v *Value) bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
func rewriteValueARM64_OpARM64GreaterEqualNoov(v *Value) bool {
|
||||
v_0 := v.Args[0]
|
||||
b := v.Block
|
||||
typ := &b.Func.Config.Types
|
||||
// match: (GreaterEqualNoov (InvertFlags x))
|
||||
// result: (OR (LessThanNoov <typ.Bool> x) (Equal <typ.Bool> x))
|
||||
for {
|
||||
if v_0.Op != OpARM64InvertFlags {
|
||||
break
|
||||
}
|
||||
x := v_0.Args[0]
|
||||
v.reset(OpARM64OR)
|
||||
v0 := b.NewValue0(v.Pos, OpARM64LessThanNoov, typ.Bool)
|
||||
v0.AddArg(x)
|
||||
v1 := b.NewValue0(v.Pos, OpARM64Equal, typ.Bool)
|
||||
v1.AddArg(x)
|
||||
v.AddArg2(v0, v1)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
func rewriteValueARM64_OpARM64GreaterEqualU(v *Value) bool {
|
||||
v_0 := v.Args[0]
|
||||
// match: (GreaterEqualU (FlagConstant [fc]))
|
||||
@@ -6667,6 +6692,27 @@ func rewriteValueARM64_OpARM64LessThanF(v *Value) bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
func rewriteValueARM64_OpARM64LessThanNoov(v *Value) bool {
|
||||
v_0 := v.Args[0]
|
||||
b := v.Block
|
||||
typ := &b.Func.Config.Types
|
||||
// match: (LessThanNoov (InvertFlags x))
|
||||
// result: (BIC (GreaterEqualNoov <typ.Bool> x) (Equal <typ.Bool> x))
|
||||
for {
|
||||
if v_0.Op != OpARM64InvertFlags {
|
||||
break
|
||||
}
|
||||
x := v_0.Args[0]
|
||||
v.reset(OpARM64BIC)
|
||||
v0 := b.NewValue0(v.Pos, OpARM64GreaterEqualNoov, typ.Bool)
|
||||
v0.AddArg(x)
|
||||
v1 := b.NewValue0(v.Pos, OpARM64Equal, typ.Bool)
|
||||
v1.AddArg(x)
|
||||
v.AddArg2(v0, v1)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
func rewriteValueARM64_OpARM64LessThanU(v *Value) bool {
|
||||
v_0 := v.Args[0]
|
||||
// match: (LessThanU (FlagConstant [fc]))
|
||||
@@ -8668,25 +8714,6 @@ func rewriteValueARM64_OpARM64MOVBreg(v *Value) bool {
|
||||
v.AuxInt = int64ToAuxInt(int64(int8(c)))
|
||||
return true
|
||||
}
|
||||
// match: (MOVBreg <t> (ANDconst x [c]))
|
||||
// cond: uint64(c) & uint64(0xffffffffffffff80) == 0
|
||||
// result: (ANDconst <t> x [c])
|
||||
for {
|
||||
t := v.Type
|
||||
if v_0.Op != OpARM64ANDconst {
|
||||
break
|
||||
}
|
||||
c := auxIntToInt64(v_0.AuxInt)
|
||||
x := v_0.Args[0]
|
||||
if !(uint64(c)&uint64(0xffffffffffffff80) == 0) {
|
||||
break
|
||||
}
|
||||
v.reset(OpARM64ANDconst)
|
||||
v.Type = t
|
||||
v.AuxInt = int64ToAuxInt(c)
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (MOVBreg (SLLconst [lc] x))
|
||||
// cond: lc < 8
|
||||
// result: (SBFIZ [armBFAuxInt(lc, 8-lc)] x)
|
||||
@@ -10765,25 +10792,6 @@ func rewriteValueARM64_OpARM64MOVHreg(v *Value) bool {
|
||||
v.AuxInt = int64ToAuxInt(int64(int16(c)))
|
||||
return true
|
||||
}
|
||||
// match: (MOVHreg <t> (ANDconst x [c]))
|
||||
// cond: uint64(c) & uint64(0xffffffffffff8000) == 0
|
||||
// result: (ANDconst <t> x [c])
|
||||
for {
|
||||
t := v.Type
|
||||
if v_0.Op != OpARM64ANDconst {
|
||||
break
|
||||
}
|
||||
c := auxIntToInt64(v_0.AuxInt)
|
||||
x := v_0.Args[0]
|
||||
if !(uint64(c)&uint64(0xffffffffffff8000) == 0) {
|
||||
break
|
||||
}
|
||||
v.reset(OpARM64ANDconst)
|
||||
v.Type = t
|
||||
v.AuxInt = int64ToAuxInt(c)
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (MOVHreg (SLLconst [lc] x))
|
||||
// cond: lc < 16
|
||||
// result: (SBFIZ [armBFAuxInt(lc, 16-lc)] x)
|
||||
@@ -11943,17 +11951,6 @@ func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool {
|
||||
v.AuxInt = int64ToAuxInt(int64(uint32(c)))
|
||||
return true
|
||||
}
|
||||
// match: (MOVWUreg x)
|
||||
// cond: zeroUpper32Bits(x, 3)
|
||||
// result: x
|
||||
for {
|
||||
x := v_0
|
||||
if !(zeroUpper32Bits(x, 3)) {
|
||||
break
|
||||
}
|
||||
v.copyOf(x)
|
||||
return true
|
||||
}
|
||||
// match: (MOVWUreg (SLLconst [lc] x))
|
||||
// cond: lc >= 32
|
||||
// result: (MOVDconst [0])
|
||||
@@ -12458,25 +12455,6 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool {
|
||||
v.AuxInt = int64ToAuxInt(int64(int32(c)))
|
||||
return true
|
||||
}
|
||||
// match: (MOVWreg <t> (ANDconst x [c]))
|
||||
// cond: uint64(c) & uint64(0xffffffff80000000) == 0
|
||||
// result: (ANDconst <t> x [c])
|
||||
for {
|
||||
t := v.Type
|
||||
if v_0.Op != OpARM64ANDconst {
|
||||
break
|
||||
}
|
||||
c := auxIntToInt64(v_0.AuxInt)
|
||||
x := v_0.Args[0]
|
||||
if !(uint64(c)&uint64(0xffffffff80000000) == 0) {
|
||||
break
|
||||
}
|
||||
v.reset(OpARM64ANDconst)
|
||||
v.Type = t
|
||||
v.AuxInt = int64ToAuxInt(c)
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (MOVWreg (SLLconst [lc] x))
|
||||
// cond: lc < 32
|
||||
// result: (SBFIZ [armBFAuxInt(lc, 32-lc)] x)
|
||||
|
||||
@@ -18967,79 +18967,84 @@ func rewriteValuegeneric_OpNilCheck(v *Value) bool {
|
||||
v_0 := v.Args[0]
|
||||
b := v.Block
|
||||
fe := b.Func.fe
|
||||
// match: (NilCheck (GetG mem) mem)
|
||||
// result: mem
|
||||
// match: (NilCheck ptr:(GetG mem) mem)
|
||||
// result: ptr
|
||||
for {
|
||||
if v_0.Op != OpGetG {
|
||||
ptr := v_0
|
||||
if ptr.Op != OpGetG {
|
||||
break
|
||||
}
|
||||
mem := v_0.Args[0]
|
||||
mem := ptr.Args[0]
|
||||
if mem != v_1 {
|
||||
break
|
||||
}
|
||||
v.copyOf(mem)
|
||||
v.copyOf(ptr)
|
||||
return true
|
||||
}
|
||||
// match: (NilCheck (SelectN [0] call:(StaticLECall _ _)) _)
|
||||
// match: (NilCheck ptr:(SelectN [0] call:(StaticLECall _ _)) _)
|
||||
// cond: isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")
|
||||
// result: (Invalid)
|
||||
// result: ptr
|
||||
for {
|
||||
if v_0.Op != OpSelectN || auxIntToInt64(v_0.AuxInt) != 0 {
|
||||
ptr := v_0
|
||||
if ptr.Op != OpSelectN || auxIntToInt64(ptr.AuxInt) != 0 {
|
||||
break
|
||||
}
|
||||
call := v_0.Args[0]
|
||||
call := ptr.Args[0]
|
||||
if call.Op != OpStaticLECall || len(call.Args) != 2 || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")) {
|
||||
break
|
||||
}
|
||||
v.reset(OpInvalid)
|
||||
v.copyOf(ptr)
|
||||
return true
|
||||
}
|
||||
// match: (NilCheck (OffPtr (SelectN [0] call:(StaticLECall _ _))) _)
|
||||
// match: (NilCheck ptr:(OffPtr (SelectN [0] call:(StaticLECall _ _))) _)
|
||||
// cond: isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")
|
||||
// result: (Invalid)
|
||||
// result: ptr
|
||||
for {
|
||||
if v_0.Op != OpOffPtr {
|
||||
ptr := v_0
|
||||
if ptr.Op != OpOffPtr {
|
||||
break
|
||||
}
|
||||
v_0_0 := v_0.Args[0]
|
||||
if v_0_0.Op != OpSelectN || auxIntToInt64(v_0_0.AuxInt) != 0 {
|
||||
ptr_0 := ptr.Args[0]
|
||||
if ptr_0.Op != OpSelectN || auxIntToInt64(ptr_0.AuxInt) != 0 {
|
||||
break
|
||||
}
|
||||
call := v_0_0.Args[0]
|
||||
call := ptr_0.Args[0]
|
||||
if call.Op != OpStaticLECall || len(call.Args) != 2 || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")) {
|
||||
break
|
||||
}
|
||||
v.reset(OpInvalid)
|
||||
v.copyOf(ptr)
|
||||
return true
|
||||
}
|
||||
// match: (NilCheck (Addr {_} (SB)) _)
|
||||
// result: (Invalid)
|
||||
// match: (NilCheck ptr:(Addr {_} (SB)) _)
|
||||
// result: ptr
|
||||
for {
|
||||
if v_0.Op != OpAddr {
|
||||
ptr := v_0
|
||||
if ptr.Op != OpAddr {
|
||||
break
|
||||
}
|
||||
v_0_0 := v_0.Args[0]
|
||||
if v_0_0.Op != OpSB {
|
||||
ptr_0 := ptr.Args[0]
|
||||
if ptr_0.Op != OpSB {
|
||||
break
|
||||
}
|
||||
v.reset(OpInvalid)
|
||||
v.copyOf(ptr)
|
||||
return true
|
||||
}
|
||||
// match: (NilCheck (Convert (Addr {_} (SB)) _) _)
|
||||
// result: (Invalid)
|
||||
// match: (NilCheck ptr:(Convert (Addr {_} (SB)) _) _)
|
||||
// result: ptr
|
||||
for {
|
||||
if v_0.Op != OpConvert {
|
||||
ptr := v_0
|
||||
if ptr.Op != OpConvert {
|
||||
break
|
||||
}
|
||||
v_0_0 := v_0.Args[0]
|
||||
if v_0_0.Op != OpAddr {
|
||||
ptr_0 := ptr.Args[0]
|
||||
if ptr_0.Op != OpAddr {
|
||||
break
|
||||
}
|
||||
v_0_0_0 := v_0_0.Args[0]
|
||||
if v_0_0_0.Op != OpSB {
|
||||
ptr_0_0 := ptr_0.Args[0]
|
||||
if ptr_0_0.Op != OpSB {
|
||||
break
|
||||
}
|
||||
v.reset(OpInvalid)
|
||||
v.copyOf(ptr)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
|
||||
@@ -312,14 +312,21 @@ func schedule(f *Func) {
|
||||
}
|
||||
|
||||
// Remove SPanchored now that we've scheduled.
|
||||
// Also unlink nil checks now that ordering is assured
|
||||
// between the nil check and the uses of the nil-checked pointer.
|
||||
for _, b := range f.Blocks {
|
||||
for _, v := range b.Values {
|
||||
for i, a := range v.Args {
|
||||
if a.Op == OpSPanchored {
|
||||
if a.Op == OpSPanchored || opcodeTable[a.Op].nilCheck {
|
||||
v.SetArg(i, a.Args[0])
|
||||
}
|
||||
}
|
||||
}
|
||||
for i, c := range b.ControlValues() {
|
||||
if c.Op == OpSPanchored || opcodeTable[c.Op].nilCheck {
|
||||
b.ReplaceControl(i, c.Args[0])
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, b := range f.Blocks {
|
||||
i := 0
|
||||
@@ -332,6 +339,15 @@ func schedule(f *Func) {
|
||||
v.resetArgs()
|
||||
f.freeValue(v)
|
||||
} else {
|
||||
if opcodeTable[v.Op].nilCheck {
|
||||
if v.Uses != 0 {
|
||||
base.Fatalf("nilcheck still has %d uses", v.Uses)
|
||||
}
|
||||
// We can't delete the nil check, but we mark
|
||||
// it as having void type so regalloc won't
|
||||
// try to allocate a register for it.
|
||||
v.Type = types.TypeVoid
|
||||
}
|
||||
b.Values[i] = v
|
||||
i++
|
||||
}
|
||||
|
||||
@@ -552,7 +552,11 @@ func (v *Value) LackingPos() bool {
|
||||
// if its use count drops to 0.
|
||||
func (v *Value) removeable() bool {
|
||||
if v.Type.IsVoid() {
|
||||
// Void ops, like nil pointer checks, must stay.
|
||||
// Void ops (inline marks), must stay.
|
||||
return false
|
||||
}
|
||||
if opcodeTable[v.Op].nilCheck {
|
||||
// Nil pointer checks must stay.
|
||||
return false
|
||||
}
|
||||
if v.Type.IsMemory() {
|
||||
|
||||
@@ -1991,7 +1991,8 @@ func (s *state) stmt(n ir.Node) {
|
||||
case ir.OCHECKNIL:
|
||||
n := n.(*ir.UnaryExpr)
|
||||
p := s.expr(n.X)
|
||||
s.nilCheck(p)
|
||||
_ = s.nilCheck(p)
|
||||
// TODO: check that throwing away the nilcheck result is ok.
|
||||
|
||||
case ir.OINLMARK:
|
||||
n := n.(*ir.InlineMarkStmt)
|
||||
@@ -5621,18 +5622,20 @@ func (s *state) exprPtr(n ir.Node, bounded bool, lineno src.XPos) *ssa.Value {
|
||||
}
|
||||
return p
|
||||
}
|
||||
s.nilCheck(p)
|
||||
p = s.nilCheck(p)
|
||||
return p
|
||||
}
|
||||
|
||||
// nilCheck generates nil pointer checking code.
|
||||
// Used only for automatically inserted nil checks,
|
||||
// not for user code like 'x != nil'.
|
||||
func (s *state) nilCheck(ptr *ssa.Value) {
|
||||
// Returns a "definitely not nil" copy of x to ensure proper ordering
|
||||
// of the uses of the post-nilcheck pointer.
|
||||
func (s *state) nilCheck(ptr *ssa.Value) *ssa.Value {
|
||||
if base.Debug.DisableNil != 0 || s.curfn.NilCheckDisabled() {
|
||||
return
|
||||
return ptr
|
||||
}
|
||||
s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem())
|
||||
return s.newValue2(ssa.OpNilCheck, ptr.Type, ptr, s.mem())
|
||||
}
|
||||
|
||||
// boundsCheck generates bounds checking code. Checks if 0 <= idx <[=] len, branches to exit if not.
|
||||
@@ -5984,8 +5987,8 @@ func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value)
|
||||
if !t.Elem().IsArray() {
|
||||
s.Fatalf("bad ptr to array in slice %v\n", t)
|
||||
}
|
||||
s.nilCheck(v)
|
||||
ptr = s.newValue1(ssa.OpCopy, types.NewPtr(t.Elem().Elem()), v)
|
||||
nv := s.nilCheck(v)
|
||||
ptr = s.newValue1(ssa.OpCopy, types.NewPtr(t.Elem().Elem()), nv)
|
||||
len = s.constInt(types.Types[types.TINT], t.Elem().NumElem())
|
||||
cap = len
|
||||
default:
|
||||
@@ -7083,8 +7086,21 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
|
||||
// for an empty block this will be used for its control
|
||||
// instruction. We won't use the actual liveness map on a
|
||||
// control instruction. Just mark it something that is
|
||||
// preemptible, unless this function is "all unsafe".
|
||||
s.pp.NextLive = objw.LivenessIndex{StackMapIndex: -1, IsUnsafePoint: liveness.IsUnsafe(f)}
|
||||
// preemptible, unless this function is "all unsafe", or
|
||||
// the empty block is in a write barrier.
|
||||
unsafe := liveness.IsUnsafe(f)
|
||||
if b.Kind == ssa.BlockPlain {
|
||||
// Empty blocks that are part of write barriers need
|
||||
// to have their control instructions marked unsafe.
|
||||
c := b.Succs[0].Block()
|
||||
for _, v := range c.Values {
|
||||
if v.Op == ssa.OpWBend {
|
||||
unsafe = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
s.pp.NextLive = objw.LivenessIndex{StackMapIndex: -1, IsUnsafePoint: unsafe}
|
||||
|
||||
if idx, ok := argLiveBlockMap[b.ID]; ok && idx != argLiveIdx {
|
||||
argLiveIdx = idx
|
||||
|
||||
@@ -71,3 +71,129 @@ func readUint32be(b []byte) uint64 {
|
||||
//go:noinline
|
||||
func nop() {
|
||||
}
|
||||
|
||||
type T32 struct {
|
||||
a, b uint32
|
||||
}
|
||||
|
||||
//go:noinline
|
||||
func (t *T32) bigEndianLoad() uint64 {
|
||||
return uint64(t.a)<<32 | uint64(t.b)
|
||||
}
|
||||
|
||||
//go:noinline
|
||||
func (t *T32) littleEndianLoad() uint64 {
|
||||
return uint64(t.a) | (uint64(t.b) << 32)
|
||||
}
|
||||
|
||||
//go:noinline
|
||||
func (t *T32) bigEndianStore(x uint64) {
|
||||
t.a = uint32(x >> 32)
|
||||
t.b = uint32(x)
|
||||
}
|
||||
|
||||
//go:noinline
|
||||
func (t *T32) littleEndianStore(x uint64) {
|
||||
t.a = uint32(x)
|
||||
t.b = uint32(x >> 32)
|
||||
}
|
||||
|
||||
type T16 struct {
|
||||
a, b uint16
|
||||
}
|
||||
|
||||
//go:noinline
|
||||
func (t *T16) bigEndianLoad() uint32 {
|
||||
return uint32(t.a)<<16 | uint32(t.b)
|
||||
}
|
||||
|
||||
//go:noinline
|
||||
func (t *T16) littleEndianLoad() uint32 {
|
||||
return uint32(t.a) | (uint32(t.b) << 16)
|
||||
}
|
||||
|
||||
//go:noinline
|
||||
func (t *T16) bigEndianStore(x uint32) {
|
||||
t.a = uint16(x >> 16)
|
||||
t.b = uint16(x)
|
||||
}
|
||||
|
||||
//go:noinline
|
||||
func (t *T16) littleEndianStore(x uint32) {
|
||||
t.a = uint16(x)
|
||||
t.b = uint16(x >> 16)
|
||||
}
|
||||
|
||||
type T8 struct {
|
||||
a, b uint8
|
||||
}
|
||||
|
||||
//go:noinline
|
||||
func (t *T8) bigEndianLoad() uint16 {
|
||||
return uint16(t.a)<<8 | uint16(t.b)
|
||||
}
|
||||
|
||||
//go:noinline
|
||||
func (t *T8) littleEndianLoad() uint16 {
|
||||
return uint16(t.a) | (uint16(t.b) << 8)
|
||||
}
|
||||
|
||||
//go:noinline
|
||||
func (t *T8) bigEndianStore(x uint16) {
|
||||
t.a = uint8(x >> 8)
|
||||
t.b = uint8(x)
|
||||
}
|
||||
|
||||
//go:noinline
|
||||
func (t *T8) littleEndianStore(x uint16) {
|
||||
t.a = uint8(x)
|
||||
t.b = uint8(x >> 8)
|
||||
}
|
||||
|
||||
func TestIssue64468(t *testing.T) {
|
||||
t32 := T32{1, 2}
|
||||
if got, want := t32.bigEndianLoad(), uint64(1<<32+2); got != want {
|
||||
t.Errorf("T32.bigEndianLoad got %x want %x\n", got, want)
|
||||
}
|
||||
if got, want := t32.littleEndianLoad(), uint64(1+2<<32); got != want {
|
||||
t.Errorf("T32.littleEndianLoad got %x want %x\n", got, want)
|
||||
}
|
||||
t16 := T16{1, 2}
|
||||
if got, want := t16.bigEndianLoad(), uint32(1<<16+2); got != want {
|
||||
t.Errorf("T16.bigEndianLoad got %x want %x\n", got, want)
|
||||
}
|
||||
if got, want := t16.littleEndianLoad(), uint32(1+2<<16); got != want {
|
||||
t.Errorf("T16.littleEndianLoad got %x want %x\n", got, want)
|
||||
}
|
||||
t8 := T8{1, 2}
|
||||
if got, want := t8.bigEndianLoad(), uint16(1<<8+2); got != want {
|
||||
t.Errorf("T8.bigEndianLoad got %x want %x\n", got, want)
|
||||
}
|
||||
if got, want := t8.littleEndianLoad(), uint16(1+2<<8); got != want {
|
||||
t.Errorf("T8.littleEndianLoad got %x want %x\n", got, want)
|
||||
}
|
||||
t32.bigEndianStore(1<<32 + 2)
|
||||
if got, want := t32, (T32{1, 2}); got != want {
|
||||
t.Errorf("T32.bigEndianStore got %x want %x\n", got, want)
|
||||
}
|
||||
t32.littleEndianStore(1<<32 + 2)
|
||||
if got, want := t32, (T32{2, 1}); got != want {
|
||||
t.Errorf("T32.littleEndianStore got %x want %x\n", got, want)
|
||||
}
|
||||
t16.bigEndianStore(1<<16 + 2)
|
||||
if got, want := t16, (T16{1, 2}); got != want {
|
||||
t.Errorf("T16.bigEndianStore got %x want %x\n", got, want)
|
||||
}
|
||||
t16.littleEndianStore(1<<16 + 2)
|
||||
if got, want := t16, (T16{2, 1}); got != want {
|
||||
t.Errorf("T16.littleEndianStore got %x want %x\n", got, want)
|
||||
}
|
||||
t8.bigEndianStore(1<<8 + 2)
|
||||
if got, want := t8, (T8{1, 2}); got != want {
|
||||
t.Errorf("T8.bigEndianStore got %x want %x\n", got, want)
|
||||
}
|
||||
t8.littleEndianStore(1<<8 + 2)
|
||||
if got, want := t8, (T8{2, 1}); got != want {
|
||||
t.Errorf("T8.littleEndianStore got %x want %x\n", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -94,40 +94,24 @@ func ClosureType(clo *ir.ClosureExpr) *types.Type {
|
||||
// and has one float64 argument and no results,
|
||||
// the generated code looks like:
|
||||
//
|
||||
// clos = &struct{.F uintptr; i *int; s *string}{func.1, &i, &s}
|
||||
// clos = &struct{F uintptr; X0 *int; X1 *string}{func.1, &i, &s}
|
||||
//
|
||||
// The use of the struct provides type information to the garbage
|
||||
// collector so that it can walk the closure. We could use (in this case)
|
||||
// [3]unsafe.Pointer instead, but that would leave the gc in the dark.
|
||||
// The information appears in the binary in the form of type descriptors;
|
||||
// the struct is unnamed so that closures in multiple packages with the
|
||||
// same struct type can share the descriptor.
|
||||
// collector so that it can walk the closure. We could use (in this
|
||||
// case) [3]unsafe.Pointer instead, but that would leave the gc in
|
||||
// the dark. The information appears in the binary in the form of
|
||||
// type descriptors; the struct is unnamed and uses exported field
|
||||
// names so that closures in multiple packages with the same struct
|
||||
// type can share the descriptor.
|
||||
|
||||
// Make sure the .F field is in the same package as the rest of the
|
||||
// fields. This deals with closures in instantiated functions, which are
|
||||
// compiled as if from the source package of the generic function.
|
||||
var pkg *types.Pkg
|
||||
if len(clo.Func.ClosureVars) == 0 {
|
||||
pkg = types.LocalPkg
|
||||
} else {
|
||||
for _, v := range clo.Func.ClosureVars {
|
||||
if pkg == nil {
|
||||
pkg = v.Sym().Pkg
|
||||
} else if pkg != v.Sym().Pkg {
|
||||
base.Fatalf("Closure variables from multiple packages: %+v", clo)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fields := []*types.Field{
|
||||
types.NewField(base.Pos, pkg.Lookup(".F"), types.Types[types.TUINTPTR]),
|
||||
}
|
||||
for _, v := range clo.Func.ClosureVars {
|
||||
fields := make([]*types.Field, 1+len(clo.Func.ClosureVars))
|
||||
fields[0] = types.NewField(base.AutogeneratedPos, types.LocalPkg.Lookup("F"), types.Types[types.TUINTPTR])
|
||||
for i, v := range clo.Func.ClosureVars {
|
||||
typ := v.Type()
|
||||
if !v.Byval() {
|
||||
typ = types.NewPtr(typ)
|
||||
}
|
||||
fields = append(fields, types.NewField(base.Pos, v.Sym(), typ))
|
||||
fields[1+i] = types.NewField(base.AutogeneratedPos, types.LocalPkg.LookupNum("X", i), typ)
|
||||
}
|
||||
typ := types.NewStruct(fields)
|
||||
typ.SetNoalg(true)
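For context, the change above gives the closure environment struct exported, position-based field names (F, X0, X1, ...) generated with LookupNum, so identical closure layouts from different packages can share one type descriptor. Roughly, for a closure like the one below, the environment is now described by a struct of that shape; this is a hedged sketch of ordinary user code, and the struct shown in the comment is only conceptual:

package main

import "fmt"

func main() {
	i := 42
	s := "hello"
	// The closure captures i and s, so its environment is conceptually
	// described by a struct such as
	//     struct { F uintptr; X0 *int; X1 *string }
	// where F is the code pointer and X0/X1 refer to the captured variables.
	f := func(x float64) {
		i++
		fmt.Println(x, i, s)
	}
	f(1.5)
}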
|
||||
|
||||
@@ -642,9 +642,6 @@ func fldconv(b *bytes.Buffer, f *Field, verb rune, mode fmtMode, visited map[*Ty
|
||||
name = fmt.Sprint(f.Nname)
|
||||
} else if verb == 'L' {
|
||||
name = s.Name
|
||||
if name == ".F" {
|
||||
name = "F" // Hack for toolstash -cmp.
|
||||
}
|
||||
if !IsExported(name) && mode != fmtTypeIDName {
|
||||
name = sconv(s, 0, mode) // qualify non-exported names (used on structs, not on funarg)
|
||||
}
|
||||
|
||||
@@ -110,11 +110,11 @@ type Config struct {
|
||||
// type checker will initialize this field with a newly created context.
|
||||
Context *Context
|
||||
|
||||
// GoVersion describes the accepted Go language version. The string
|
||||
// must follow the format "go%d.%d" (e.g. "go1.12") or ist must be
|
||||
// empty; an empty string disables Go language version checks.
|
||||
// If the format is invalid, invoking the type checker will cause a
|
||||
// panic.
|
||||
// GoVersion describes the accepted Go language version. The string must
|
||||
// start with a prefix of the form "go%d.%d" (e.g. "go1.20", "go1.21rc1", or
|
||||
// "go1.21.0") or it must be empty; an empty string disables Go language
|
||||
// version checks. If the format is invalid, invoking the type checker will
|
||||
// result in an error.
|
||||
GoVersion string
|
||||
|
||||
// If IgnoreFuncBodies is set, function bodies are not
|
||||
|
||||
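The same GoVersion field exists on the exported go/types.Config, so the documented format (a prefix such as "go1.20", "go1.21rc1", or "go1.21.0", or empty to disable version checks) can be exercised outside the compiler. A small hedged example using go/types rather than the internal types2 package:

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	const src = `package p; func f[T any](x T) T { return x }`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	// Generics need at least go1.18; with GoVersion set to "go1.17" the
	// checker reports a language-version error for the type parameter.
	conf := types.Config{GoVersion: "go1.17", Importer: importer.Default()}
	_, err = conf.Check("p", fset, []*ast.File{f}, nil)
	fmt.Println(err)
}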
@@ -2070,6 +2070,29 @@ func TestIdenticalUnions(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestIssue61737(t *testing.T) {
|
||||
// This test verifies that it is possible to construct invalid interfaces
|
||||
// containing duplicate methods using the go/types API.
|
||||
//
|
||||
// It must be possible for importers to construct such invalid interfaces.
|
||||
// Previously, this panicked.
|
||||
|
||||
sig1 := NewSignatureType(nil, nil, nil, NewTuple(NewParam(nopos, nil, "", Typ[Int])), nil, false)
|
||||
sig2 := NewSignatureType(nil, nil, nil, NewTuple(NewParam(nopos, nil, "", Typ[String])), nil, false)
|
||||
|
||||
methods := []*Func{
|
||||
NewFunc(nopos, nil, "M", sig1),
|
||||
NewFunc(nopos, nil, "M", sig2),
|
||||
}
|
||||
|
||||
embeddedMethods := []*Func{
|
||||
NewFunc(nopos, nil, "M", sig2),
|
||||
}
|
||||
embedded := NewInterfaceType(embeddedMethods, nil)
|
||||
iface := NewInterfaceType(methods, []Type{embedded})
|
||||
iface.NumMethods() // unlike go/types, there is no Complete() method, so we complete implicitly
|
||||
}
|
||||
|
||||
func TestIssue15305(t *testing.T) {
|
||||
const src = "package p; func f() int16; var _ = f(undef)"
|
||||
f := mustParse(src)
|
||||
|
||||
@@ -576,6 +576,11 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
|
||||
// If nargs == 1, make sure x.mode is either a value or a constant.
|
||||
if x.mode != constant_ {
|
||||
x.mode = value
|
||||
// A value must not be untyped.
|
||||
check.assignment(x, &emptyInterface, "argument to "+bin.name)
|
||||
if x.mode == invalid {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Use the final type computed above for all arguments.
|
||||
|
||||
@@ -569,6 +569,14 @@ func (check *Checker) arguments(call *syntax.CallExpr, sig *Signature, targs []T
|
||||
for i, arg := range args {
|
||||
// generic arguments cannot have a defined (*Named) type - no need for underlying type below
|
||||
if asig, _ := arg.typ.(*Signature); asig != nil && asig.TypeParams().Len() > 0 {
|
||||
// The argument type is a generic function signature. This type is
|
||||
// pointer-identical with (it's copied from) the type of the generic
|
||||
// function argument and thus the function object.
|
||||
// Before we change the type (type parameter renaming, below), make
|
||||
// a clone of it as otherwise we implicitly modify the object's type
|
||||
// (go.dev/issues/63260).
|
||||
clone := *asig
|
||||
asig = &clone
|
||||
// Rename type parameters for cases like f(g, g); this gives each
|
||||
// generic function argument a unique type identity (go.dev/issues/59956).
|
||||
// TODO(gri) Consider only doing this if a function argument appears
|
||||
@@ -610,20 +618,17 @@ func (check *Checker) arguments(call *syntax.CallExpr, sig *Signature, targs []T
|
||||
return // error already reported
|
||||
}
|
||||
|
||||
// compute result signature: instantiate if needed
|
||||
rsig = sig
|
||||
// update result signature: instantiate if needed
|
||||
if n > 0 {
|
||||
rsig = check.instantiateSignature(call.Pos(), call.Fun, sig, targs[:n], xlist)
|
||||
}
|
||||
|
||||
// Optimization: Only if the callee's parameter list was adjusted do we need to
|
||||
// compute it from the adjusted list; otherwise we can simply use the result
|
||||
// signature's parameter list. We only need the n type parameters and arguments
|
||||
// of the callee.
|
||||
if n > 0 && adjusted {
|
||||
sigParams = check.subst(call.Pos(), sigParams, makeSubstMap(tparams[:n], targs[:n]), nil, check.context()).(*Tuple)
|
||||
} else {
|
||||
sigParams = rsig.params
|
||||
// If the callee's parameter list was adjusted we need to update (instantiate)
|
||||
// it separately. Otherwise we can simply use the result signature's parameter
|
||||
// list.
|
||||
if adjusted {
|
||||
sigParams = check.subst(call.Pos(), sigParams, makeSubstMap(tparams[:n], targs[:n]), nil, check.context()).(*Tuple)
|
||||
} else {
|
||||
sigParams = rsig.params
|
||||
}
|
||||
}
|
||||
|
||||
// compute argument signatures: instantiate if needed
|
||||
|
||||
@@ -96,7 +96,7 @@ func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type,
|
||||
// Unify parameter and argument types for generic parameters with typed arguments
|
||||
// and collect the indices of generic parameters with untyped arguments.
|
||||
// Terminology: generic parameter = function parameter with a type-parameterized type
|
||||
u := newUnifier(tparams, targs)
|
||||
u := newUnifier(tparams, targs, check.allowVersion(check.pkg, pos, go1_21))
|
||||
|
||||
errorf := func(kind string, tpar, targ Type, arg *operand) {
|
||||
// provide a better error message if we can
|
||||
|
||||
@@ -900,3 +900,84 @@ func _cgoCheckResult(interface{})
|
||||
*boolFieldAddr(cfg, "go115UsesCgo") = true
|
||||
})
|
||||
}
|
||||
|
||||
func TestIssue61931(t *testing.T) {
|
||||
const src = `
|
||||
package p
|
||||
|
||||
func A(func(any), ...any) {}
|
||||
func B[T any](T) {}
|
||||
|
||||
func _() {
|
||||
A(B, nil // syntax error: missing ',' before newline in argument list
|
||||
}
|
||||
`
|
||||
f, err := syntax.Parse(syntax.NewFileBase(pkgName(src)), strings.NewReader(src), func(error) {}, nil, 0)
|
||||
if err == nil {
|
||||
t.Fatal("expected syntax error")
|
||||
}
|
||||
|
||||
var conf Config
|
||||
conf.Check(f.PkgName.Value, []*syntax.File{f}, nil) // must not panic
|
||||
}
|
||||
|
||||
func TestIssue63260(t *testing.T) {
|
||||
const src = `
|
||||
package p
|
||||
|
||||
func _() {
|
||||
use(f[*string])
|
||||
}
|
||||
|
||||
func use(func()) {}
|
||||
|
||||
func f[I *T, T any]() {
|
||||
var v T
|
||||
_ = v
|
||||
}`
|
||||
|
||||
info := Info{
|
||||
Defs: make(map[*syntax.Name]Object),
|
||||
}
|
||||
pkg := mustTypecheck(src, nil, &info)
|
||||
|
||||
// get type parameter T in signature of f
|
||||
T := pkg.Scope().Lookup("f").Type().(*Signature).TypeParams().At(1)
|
||||
if T.Obj().Name() != "T" {
|
||||
t.Fatalf("got type parameter %s, want T", T)
|
||||
}
|
||||
|
||||
// get type of variable v in body of f
|
||||
var v Object
|
||||
for name, obj := range info.Defs {
|
||||
if name.Value == "v" {
|
||||
v = obj
|
||||
break
|
||||
}
|
||||
}
|
||||
if v == nil {
|
||||
t.Fatal("variable v not found")
|
||||
}
|
||||
|
||||
// type of v and T must be pointer-identical
|
||||
if v.Type() != T {
|
||||
t.Fatalf("types of v and T are not pointer-identical: %p != %p", v.Type().(*TypeParam), T)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIssue64759(t *testing.T) {
|
||||
const src = `
|
||||
//go:build go1.18
|
||||
package p
|
||||
|
||||
func f[S ~[]E, E any](S) {}
|
||||
|
||||
func _() {
|
||||
f([]string{})
|
||||
}
|
||||
`
|
||||
// Per the go:build directive, the source must typecheck
|
||||
// even though the (module) Go version is set to go1.17.
|
||||
conf := Config{GoVersion: "go1.17"}
|
||||
mustTypecheck(src, &conf, nil)
|
||||
}
|
||||
|
||||
@@ -169,6 +169,7 @@ func (subst *subster) typ(typ Type) Type {
|
||||
if mcopied || ecopied {
|
||||
iface := subst.check.newInterface()
|
||||
iface.embeddeds = embeddeds
|
||||
iface.embedPos = t.embedPos
|
||||
iface.implicit = t.implicit
|
||||
iface.complete = t.complete
|
||||
// If we've changed the interface type, we may need to replace its
|
||||
|
||||
@@ -6,7 +6,6 @@ package types2
|
||||
|
||||
import (
|
||||
"cmd/compile/internal/syntax"
|
||||
"fmt"
|
||||
. "internal/types/errors"
|
||||
"sort"
|
||||
"strings"
|
||||
@@ -212,7 +211,6 @@ func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_
|
||||
// we can get rid of the mpos map below and simply use the cloned method's
|
||||
// position.
|
||||
|
||||
var todo []*Func
|
||||
var seen objset
|
||||
var allMethods []*Func
|
||||
mpos := make(map[*Func]syntax.Pos) // method specification or method embedding position, for good error messages
|
||||
@@ -222,36 +220,30 @@ func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_
|
||||
allMethods = append(allMethods, m)
|
||||
mpos[m] = pos
|
||||
case explicit:
|
||||
if check == nil {
|
||||
panic(fmt.Sprintf("%s: duplicate method %s", m.pos, m.name))
|
||||
if check != nil {
|
||||
var err error_
|
||||
err.code = DuplicateDecl
|
||||
err.errorf(pos, "duplicate method %s", m.name)
|
||||
err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
|
||||
check.report(&err)
|
||||
}
|
||||
// check != nil
|
||||
var err error_
|
||||
err.code = DuplicateDecl
|
||||
err.errorf(pos, "duplicate method %s", m.name)
|
||||
err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
|
||||
check.report(&err)
|
||||
default:
|
||||
// We have a duplicate method name in an embedded (not explicitly declared) method.
|
||||
// Check method signatures after all types are computed (go.dev/issue/33656).
|
||||
// If we're pre-go1.14 (overlapping embeddings are not permitted), report that
|
||||
// error here as well (even though we could do it eagerly) because it's the same
|
||||
// error message.
|
||||
if check == nil {
|
||||
// check method signatures after all locally embedded interfaces are computed
|
||||
todo = append(todo, m, other.(*Func))
|
||||
break
|
||||
if check != nil {
|
||||
check.later(func() {
|
||||
if !check.allowVersion(m.pkg, pos, go1_14) || !Identical(m.typ, other.Type()) {
|
||||
var err error_
|
||||
err.code = DuplicateDecl
|
||||
err.errorf(pos, "duplicate method %s", m.name)
|
||||
err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
|
||||
check.report(&err)
|
||||
}
|
||||
}).describef(pos, "duplicate method check for %s", m.name)
|
||||
}
|
||||
// check != nil
|
||||
check.later(func() {
|
||||
if !check.allowVersion(m.pkg, pos, go1_14) || !Identical(m.typ, other.Type()) {
|
||||
var err error_
|
||||
err.code = DuplicateDecl
|
||||
err.errorf(pos, "duplicate method %s", m.name)
|
||||
err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
|
||||
check.report(&err)
|
||||
}
|
||||
}).describef(pos, "duplicate method check for %s", m.name)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -263,9 +255,8 @@ func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_
|
||||
allTerms := allTermlist
|
||||
allComparable := false
|
||||
for i, typ := range ityp.embeddeds {
|
||||
// The embedding position is nil for imported interfaces
|
||||
// and also for interface copies after substitution (but
|
||||
// in that case we don't need to report errors again).
|
||||
// The embedding position is nil for imported interfaces.
|
||||
// We don't need to do version checks in those cases.
|
||||
var pos syntax.Pos // embedding position
|
||||
if ityp.embedPos != nil {
|
||||
pos = (*ityp.embedPos)[i]
|
||||
@@ -278,7 +269,7 @@ func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_
|
||||
assert(!isTypeParam(typ))
|
||||
tset := computeInterfaceTypeSet(check, pos, u)
|
||||
// If typ is local, an error was already reported where typ is specified/defined.
|
||||
if check != nil && check.isImportedConstraint(typ) && !check.verifyVersionf(pos, go1_18, "embedding constraint interface %s", typ) {
|
||||
if pos.IsKnown() && check != nil && check.isImportedConstraint(typ) && !check.verifyVersionf(pos, go1_18, "embedding constraint interface %s", typ) {
|
||||
continue
|
||||
}
|
||||
comparable = tset.comparable
|
||||
@@ -287,7 +278,7 @@ func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_
|
||||
}
|
||||
terms = tset.terms
|
||||
case *Union:
|
||||
if check != nil && !check.verifyVersionf(pos, go1_18, "embedding interface element %s", u) {
|
||||
if pos.IsKnown() && check != nil && !check.verifyVersionf(pos, go1_18, "embedding interface element %s", u) {
|
||||
continue
|
||||
}
|
||||
tset := computeUnionTypeSet(check, unionSets, pos, u)
|
||||
@@ -301,7 +292,7 @@ func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_
|
||||
if u == Typ[Invalid] {
|
||||
continue
|
||||
}
|
||||
if check != nil && !check.verifyVersionf(pos, go1_18, "embedding non-interface type %s", typ) {
|
||||
if pos.IsKnown() && check != nil && !check.verifyVersionf(pos, go1_18, "embedding non-interface type %s", typ) {
|
||||
continue
|
||||
}
|
||||
terms = termlist{{false, typ}}
|
||||
@@ -312,16 +303,6 @@ func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_
|
||||
// separately. Here we only need to intersect the term lists and comparable bits.
|
||||
allTerms, allComparable = intersectTermLists(allTerms, allComparable, terms, comparable)
|
||||
}
|
||||
ityp.embedPos = nil // not needed anymore (errors have been reported)
|
||||
|
||||
// process todo's (this only happens if check == nil)
|
||||
for i := 0; i < len(todo); i += 2 {
|
||||
m := todo[i]
|
||||
other := todo[i+1]
|
||||
if !Identical(m.typ, other.typ) {
|
||||
panic(fmt.Sprintf("%s: duplicate method %s", m.pos, m.name))
|
||||
}
|
||||
}
|
||||
|
||||
ityp.tset.comparable = allComparable
|
||||
if len(allMethods) != 0 {
|
||||
|
||||
@@ -53,11 +53,6 @@ const (
|
||||
// the core types, if any, of non-local (unbound) type parameters.
|
||||
enableCoreTypeUnification = true
|
||||
|
||||
// If enableInterfaceInference is set, type inference uses
|
||||
// shared methods for improved type inference involving
|
||||
// interfaces.
|
||||
enableInterfaceInference = true
|
||||
|
||||
// If traceInference is set, unification will print a trace of its operation.
|
||||
// Interpretation of trace:
|
||||
// x ≡ y attempt to unify types x and y
|
||||
@@ -81,15 +76,16 @@ type unifier struct {
|
||||
// that inferring the type for a given type parameter P will
|
||||
// automatically infer the same type for all other parameters
|
||||
// unified (joined) with P.
|
||||
handles map[*TypeParam]*Type
|
||||
depth int // recursion depth during unification
|
||||
handles map[*TypeParam]*Type
|
||||
depth int // recursion depth during unification
|
||||
enableInterfaceInference bool // use shared methods for better inference
|
||||
}
|
||||
|
||||
// newUnifier returns a new unifier initialized with the given type parameter
|
||||
// and corresponding type argument lists. The type argument list may be shorter
|
||||
// than the type parameter list, and it may contain nil types. Matching type
|
||||
// parameters and arguments must have the same index.
|
||||
func newUnifier(tparams []*TypeParam, targs []Type) *unifier {
|
||||
func newUnifier(tparams []*TypeParam, targs []Type, enableInterfaceInference bool) *unifier {
|
||||
assert(len(tparams) >= len(targs))
|
||||
handles := make(map[*TypeParam]*Type, len(tparams))
|
||||
// Allocate all handles up-front: in a correct program, all type parameters
|
||||
@@ -103,7 +99,7 @@ func newUnifier(tparams []*TypeParam, targs []Type) *unifier {
|
||||
}
|
||||
handles[x] = &t
|
||||
}
|
||||
return &unifier{handles, 0}
|
||||
return &unifier{handles, 0, enableInterfaceInference}
|
||||
}
|
||||
|
||||
// unifyMode controls the behavior of the unifier.
|
||||
@@ -339,7 +335,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
|
||||
// we will fail at function instantiation or argument assignment time.
|
||||
//
|
||||
// If we have at least one defined type, there is one in y.
|
||||
if ny, _ := y.(*Named); mode&exact == 0 && ny != nil && isTypeLit(x) && !(enableInterfaceInference && IsInterface(x)) {
|
||||
if ny, _ := y.(*Named); mode&exact == 0 && ny != nil && isTypeLit(x) && !(u.enableInterfaceInference && IsInterface(x)) {
|
||||
if traceInference {
|
||||
u.tracef("%s ≡ under %s", x, ny)
|
||||
}
|
||||
@@ -405,18 +401,40 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
|
||||
// Therefore, we must fail unification (go.dev/issue/60933).
|
||||
return false
|
||||
}
|
||||
// If y is a defined type, make sure we record that type
|
||||
// for type parameter x, which may have until now only
|
||||
// recorded an underlying type (go.dev/issue/43056).
|
||||
// Either both types are interfaces, or neither type is.
|
||||
// If both are interfaces, they have the same methods.
|
||||
// If we have inexact unification and one of x or y is a defined type, select the
|
||||
// defined type. This ensures that in a series of types, all matching against the
|
||||
// same type parameter, we infer a defined type if there is one, independent of
|
||||
// order. Type inference or assignment may fail, which is ok.
|
||||
// Selecting a defined type, if any, ensures that we don't lose the type name;
|
||||
// and since we have inexact unification, a value of equally named or matching
|
||||
// undefined type remains assignable (go.dev/issue/43056).
|
||||
//
|
||||
// Note: Changing the recorded type for a type parameter to
|
||||
// a defined type is only ok when unification is inexact.
|
||||
// But in exact unification, if we have a match, x and y must
|
||||
// be identical, so changing the recorded type for x is a no-op.
|
||||
if yn {
|
||||
u.set(px, y)
|
||||
// Similarly, if we have inexact unification and there are no defined types but
|
||||
// channel types, select a directed channel, if any. This ensures that in a series
|
||||
// of unnamed types, all matching against the same type parameter, we infer the
|
||||
// directed channel if there is one, independent of order.
|
||||
// Selecting a directional channel, if any, ensures that a value of another
|
||||
// inexactly unifying channel type remains assignable (go.dev/issue/62157).
|
||||
//
|
||||
// If we have multiple defined channel types, they are either identical or we
|
||||
// have assignment conflicts, so we can ignore directionality in this case.
|
||||
//
|
||||
// If we have defined and literal channel types, a defined type wins to avoid
|
||||
// order dependencies.
|
||||
if mode&exact == 0 {
|
||||
switch {
|
||||
case xn:
|
||||
// x is a defined type: nothing to do.
|
||||
case yn:
|
||||
// x is not a defined type and y is a defined type: select y.
|
||||
u.set(px, y)
|
||||
default:
|
||||
// Neither x nor y are defined types.
|
||||
if yc, _ := under(y).(*Chan); yc != nil && yc.dir != SendRecv {
|
||||
// y is a directed channel type: select y.
|
||||
u.set(px, y)
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
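The new inexact-unification rules above are order-independence rules: prefer a defined type, and among undefined channel types prefer a directed one. A hypothetical program that depends on the directed-channel rule (go.dev/issue/62157):

package main

func f[T any](_, _ T) {}

func main() {
	ch := make(chan int)
	var recv <-chan int = ch

	// With the rule above, both calls infer T = <-chan int regardless of
	// argument order: chan int is assignable to <-chan int, not vice versa.
	f(recv, ch)
	f(ch, recv)
}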
@@ -437,12 +455,12 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
|
||||
emode |= exact
|
||||
}
|
||||
|
||||
// If EnableInterfaceInference is set and we don't require exact unification,
|
||||
// If u.EnableInterfaceInference is set and we don't require exact unification,
|
||||
// if both types are interfaces, one interface must have a subset of the
|
||||
// methods of the other and corresponding method signatures must unify.
|
||||
// If only one type is an interface, all its methods must be present in the
|
||||
// other type and corresponding method signatures must unify.
|
||||
if enableInterfaceInference && mode&exact == 0 {
|
||||
if u.enableInterfaceInference && mode&exact == 0 {
|
||||
// One or both interfaces may be defined types.
|
||||
// Look under the name, but not under type parameters (go.dev/issue/60564).
|
||||
xi := asInterface(x)
|
||||
@@ -505,7 +523,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
|
||||
}
|
||||
// All xmethods must exist in ymethods and corresponding signatures must unify.
|
||||
for _, xm := range xmethods {
|
||||
if ym := ymap[xm.Id()]; ym == nil || !u.nify(xm.typ, ym.typ, emode, p) {
|
||||
if ym := ymap[xm.Id()]; ym == nil || !u.nify(xm.typ, ym.typ, exact, p) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
@@ -526,7 +544,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
|
||||
xmethods := xi.typeSet().methods
|
||||
for _, xm := range xmethods {
|
||||
obj, _, _ := LookupFieldOrMethod(y, false, xm.pkg, xm.name)
|
||||
if ym, _ := obj.(*Func); ym == nil || !u.nify(xm.typ, ym.typ, emode, p) {
|
||||
if ym, _ := obj.(*Func); ym == nil || !u.nify(xm.typ, ym.typ, exact, p) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
@@ -632,7 +650,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
|
||||
}
|
||||
|
||||
case *Interface:
|
||||
assert(!enableInterfaceInference || mode&exact != 0) // handled before this switch
|
||||
assert(!u.enableInterfaceInference || mode&exact != 0) // handled before this switch
|
||||
|
||||
// Two interface types unify if they have the same set of methods with
|
||||
// the same names, and corresponding function types unify.
|
||||
@@ -685,7 +703,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
|
||||
}
|
||||
for i, f := range a {
|
||||
g := b[i]
|
||||
if f.Id() != g.Id() || !u.nify(f.typ, g.typ, emode, q) {
|
||||
if f.Id() != g.Id() || !u.nify(f.typ, g.typ, exact, q) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
@@ -255,7 +255,10 @@ func walkLenCap(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
|
||||
return mkcall("countrunes", n.Type(), init, typecheck.Conv(n.X.(*ir.ConvExpr).X, types.Types[types.TSTRING]))
|
||||
}
|
||||
if isByteCount(n) {
|
||||
_, len := backingArrayPtrLen(cheapExpr(n.X.(*ir.ConvExpr).X, init))
|
||||
conv := n.X.(*ir.ConvExpr)
|
||||
walkStmtList(conv.Init())
|
||||
init.Append(ir.TakeInit(conv)...)
|
||||
_, len := backingArrayPtrLen(cheapExpr(conv.X, init))
|
||||
return len
|
||||
}
|
||||
|
||||
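The isByteCount change above preserves the conversion's init statements while still avoiding the materialized byte slice. From the user's side, the pattern it optimizes is simply (hypothetical example):

package main

import "fmt"

func main() {
	s := "héllo"
	// The compiler may rewrite len([]byte(s)) to read s's length directly,
	// skipping the []byte allocation and copy.
	fmt.Println(len([]byte(s)), len(s)) // 6 6
}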
|
||||
@@ -278,8 +278,10 @@ func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) {
|
||||
} else {
|
||||
ptr.SetType(n.Type().Elem().PtrTo())
|
||||
}
|
||||
ptr.SetTypecheck(1)
|
||||
length = ir.NewUnaryExpr(base.Pos, ir.OLEN, n)
|
||||
length.SetType(types.Types[types.TINT])
|
||||
length.SetTypecheck(1)
|
||||
return ptr, length
|
||||
}
|
||||
|
||||
|
||||
30 src/cmd/dist/test.go vendored
@@ -91,6 +91,29 @@ type work struct {
|
||||
end chan bool
|
||||
}
|
||||
|
||||
// printSkip prints a skip message for all of work.
|
||||
func (w *work) printSkip(t *tester, msg string) {
|
||||
if t.json {
|
||||
type event struct {
|
||||
Time time.Time
|
||||
Action string
|
||||
Package string
|
||||
Output string `json:",omitempty"`
|
||||
}
|
||||
enc := json.NewEncoder(&w.out)
|
||||
ev := event{Time: time.Now(), Package: w.dt.name, Action: "start"}
|
||||
enc.Encode(ev)
|
||||
ev.Action = "output"
|
||||
ev.Output = msg
|
||||
enc.Encode(ev)
|
||||
ev.Action = "skip"
|
||||
ev.Output = ""
|
||||
enc.Encode(ev)
|
||||
return
|
||||
}
|
||||
fmt.Fprintln(&w.out, msg)
|
||||
}
|
||||
|
||||
// A distTest is a test run by dist test.
|
||||
// Each test has a unique name and belongs to a group (heading)
|
||||
type distTest struct {
|
||||
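In -json mode, printSkip above emits a well-formed test2json event sequence for the skipped work item instead of a bare text line. The three encoded events would look roughly like this (timestamps and package name are illustrative, not captured from a real run):

{"Time":"2023-10-01T12:00:00Z","Action":"start","Package":"go_test:cmd/go"}
{"Time":"2023-10-01T12:00:00Z","Action":"output","Package":"go_test:cmd/go","Output":"skipped due to earlier error"}
{"Time":"2023-10-01T12:00:00Z","Action":"skip","Package":"go_test:cmd/go"}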
@@ -405,6 +428,9 @@ func (opts *goTest) buildArgs(t *tester) (build, run, pkgs, testFlags []string,
|
||||
if opts.timeout != 0 {
|
||||
d := opts.timeout * time.Duration(t.timeoutScale)
|
||||
run = append(run, "-timeout="+d.String())
|
||||
} else if t.timeoutScale != 1 {
|
||||
const goTestDefaultTimeout = 10 * time.Minute // Default value of go test -timeout flag.
|
||||
run = append(run, "-timeout="+(goTestDefaultTimeout*time.Duration(t.timeoutScale)).String())
|
||||
}
|
||||
if opts.short || t.short {
|
||||
run = append(run, "-short")
|
||||
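The new else-branch above matters on slow builders: when a dist test does not set its own timeout, the go test default is still multiplied by timeoutScale instead of being left at 10 minutes. A tiny sketch of the arithmetic (the timeoutScale value is hypothetical):

package main

import (
	"fmt"
	"time"
)

func main() {
	const goTestDefaultTimeout = 10 * time.Minute // default of go test -timeout
	timeoutScale := 4                             // e.g. a slow builder
	d := goTestDefaultTimeout * time.Duration(timeoutScale)
	fmt.Println("-timeout=" + d.String()) // -timeout=40m0s
}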
@@ -1235,7 +1261,7 @@ func (t *tester) runPending(nextTest *distTest) {
|
||||
go func(w *work) {
|
||||
if !<-w.start {
|
||||
timelog("skip", w.dt.name)
|
||||
w.out.WriteString("skipped due to earlier error\n")
|
||||
w.printSkip(t, "skipped due to earlier error")
|
||||
} else {
|
||||
timelog("start", w.dt.name)
|
||||
w.err = w.cmd.Run()
|
||||
@@ -1246,7 +1272,7 @@ func (t *tester) runPending(nextTest *distTest) {
|
||||
if isUnsupportedVMASize(w) {
|
||||
timelog("skip", w.dt.name)
|
||||
w.out.Reset()
|
||||
w.out.WriteString("skipped due to unsupported VMA\n")
|
||||
w.printSkip(t, "skipped due to unsupported VMA")
|
||||
w.err = nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -44,7 +44,7 @@ type fileInfo struct {
|
||||
func (i fileInfo) Name() string { return path.Base(i.f.Name) }
|
||||
func (i fileInfo) ModTime() time.Time { return i.f.Time }
|
||||
func (i fileInfo) Mode() fs.FileMode { return i.f.Mode }
|
||||
func (i fileInfo) IsDir() bool { return false }
|
||||
func (i fileInfo) IsDir() bool { return i.f.Mode&fs.ModeDir != 0 }
|
||||
func (i fileInfo) Size() int64 { return i.f.Size }
|
||||
func (i fileInfo) Sys() any { return nil }
|
||||
|
||||
|
||||
@@ -329,8 +329,47 @@ func writeTgz(name string, a *Archive) {
|
||||
|
||||
zw := check(gzip.NewWriterLevel(out, gzip.BestCompression))
|
||||
tw := tar.NewWriter(zw)
|
||||
|
||||
// Find the mode and mtime to use for directory entries,
|
||||
// based on the mode and mtime of the first file we see.
|
||||
// We know that modes and mtimes are uniform across the archive.
|
||||
var dirMode fs.FileMode
|
||||
var mtime time.Time
|
||||
for _, f := range a.Files {
|
||||
dirMode = fs.ModeDir | f.Mode | (f.Mode&0444)>>2 // copy r bits down to x bits
|
||||
mtime = f.Time
|
||||
break
|
||||
}
|
||||
|
||||
// mkdirAll ensures that the tar file contains directory
|
||||
// entries for dir and all its parents. Some programs reading
|
||||
// these tar files expect that. See go.dev/issue/61862.
|
||||
haveDir := map[string]bool{".": true}
|
||||
var mkdirAll func(string)
|
||||
mkdirAll = func(dir string) {
|
||||
if dir == "/" {
|
||||
panic("mkdirAll /")
|
||||
}
|
||||
if haveDir[dir] {
|
||||
return
|
||||
}
|
||||
haveDir[dir] = true
|
||||
mkdirAll(path.Dir(dir))
|
||||
df := &File{
|
||||
Name: dir + "/",
|
||||
Time: mtime,
|
||||
Mode: dirMode,
|
||||
}
|
||||
h := check(tar.FileInfoHeader(df.Info(), ""))
|
||||
h.Name = dir + "/"
|
||||
if err := tw.WriteHeader(h); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, f = range a.Files {
|
||||
h := check(tar.FileInfoHeader(f.Info(), ""))
|
||||
mkdirAll(path.Dir(f.Name))
|
||||
h.Name = f.Name
|
||||
if err := tw.WriteHeader(h); err != nil {
|
||||
panic(err)
|
||||
|
||||
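mkdirAll above writes explicit directory headers so extractors that expect them (go.dev/issue/61862) see every parent of every file. The same idea as a stand-alone sketch with archive/tar (the file list and modes are made up):

package main

import (
	"archive/tar"
	"os"
	"path"
	"time"
)

func main() {
	check := func(err error) {
		if err != nil {
			panic(err)
		}
	}
	tw := tar.NewWriter(os.Stdout)
	defer tw.Close()

	haveDir := map[string]bool{".": true}
	var mkdirAll func(string)
	mkdirAll = func(dir string) {
		if haveDir[dir] {
			return
		}
		haveDir[dir] = true
		mkdirAll(path.Dir(dir)) // write parents before children
		check(tw.WriteHeader(&tar.Header{
			Name:     dir + "/",
			Typeflag: tar.TypeDir,
			Mode:     0o755,
			ModTime:  time.Now(),
		}))
	}

	for _, name := range []string{"go/bin/gofmt", "go/src/fmt/print.go"} {
		mkdirAll(path.Dir(name))
		check(tw.WriteHeader(&tar.Header{Name: name, Mode: 0o644, ModTime: time.Now()}))
	}
}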
@@ -490,25 +490,43 @@ func findGOROOT(env string) string {
|
||||
// depend on the executable's location.
|
||||
return def
|
||||
}
|
||||
|
||||
// canonical returns a directory path that represents
|
||||
// the same directory as dir,
|
||||
// preferring the spelling in def if the two are the same.
|
||||
canonical := func(dir string) string {
|
||||
if isSameDir(def, dir) {
|
||||
return def
|
||||
}
|
||||
return dir
|
||||
}
|
||||
|
||||
exe, err := os.Executable()
|
||||
if err == nil {
|
||||
exe, err = filepath.Abs(exe)
|
||||
if err == nil {
|
||||
// cmd/go may be installed in GOROOT/bin or GOROOT/bin/GOOS_GOARCH,
|
||||
// depending on whether it was cross-compiled with a different
|
||||
// GOHOSTOS (see https://go.dev/issue/62119). Try both.
|
||||
if dir := filepath.Join(exe, "../.."); isGOROOT(dir) {
|
||||
// If def (runtime.GOROOT()) and dir are the same
|
||||
// directory, prefer the spelling used in def.
|
||||
if isSameDir(def, dir) {
|
||||
return def
|
||||
}
|
||||
return dir
|
||||
return canonical(dir)
|
||||
}
|
||||
if dir := filepath.Join(exe, "../../.."); isGOROOT(dir) {
|
||||
return canonical(dir)
|
||||
}
|
||||
|
||||
// Depending on what was passed on the command line, it is possible
|
||||
// that os.Executable is a symlink (like /usr/local/bin/go) referring
|
||||
// to a binary installed in a real GOROOT elsewhere
|
||||
// (like /usr/lib/go/bin/go).
|
||||
// Try to find that GOROOT by resolving the symlinks.
|
||||
exe, err = filepath.EvalSymlinks(exe)
|
||||
if err == nil {
|
||||
if dir := filepath.Join(exe, "../.."); isGOROOT(dir) {
|
||||
if isSameDir(def, dir) {
|
||||
return def
|
||||
}
|
||||
return dir
|
||||
return canonical(dir)
|
||||
}
|
||||
if dir := filepath.Join(exe, "../../.."); isGOROOT(dir) {
|
||||
return canonical(dir)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -181,6 +181,8 @@ func init() {
|
||||
}
|
||||
|
||||
func runGenerate(ctx context.Context, cmd *base.Command, args []string) {
|
||||
modload.InitWorkfile()
|
||||
|
||||
if generateRunFlag != "" {
|
||||
var err error
|
||||
generateRunRE, err = regexp.Compile(generateRunFlag)
|
||||
|
||||
@@ -22,6 +22,13 @@ import (
|
||||
// FromToolchain("go1.2.3-bigcorp") == "1.2.3"
|
||||
// FromToolchain("invalid") == ""
|
||||
func FromToolchain(name string) string {
|
||||
if strings.ContainsAny(name, "\\/") {
|
||||
// The suffix must not include a path separator, since that would cause
|
||||
// exec.LookPath to resolve it from a relative directory instead of from
|
||||
// $PATH.
|
||||
return ""
|
||||
}
|
||||
|
||||
var v string
|
||||
if strings.HasPrefix(name, "go") {
|
||||
v = name[2:]
|
||||
|
||||
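The early return above exists because of how exec.LookPath treats names that contain a path separator: they are resolved directly, relative to the current directory, and $PATH is never consulted, so a separator in a toolchain suffix would change where the toolchain binary is looked up. The LookPath behavior itself, in isolation:

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// A bare name is searched along $PATH.
	fmt.Println(exec.LookPath("go"))

	// A name containing a separator is tried directly, relative to the
	// current directory; $PATH is ignored.
	fmt.Println(exec.LookPath("./go"))
}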
@@ -959,7 +959,10 @@ func collectDepsErrors(p *load.Package) {
|
||||
if len(stkj) != 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
return p.DepsErrors[i].Err.Error() < p.DepsErrors[j].Err.Error()
|
||||
} else if len(stkj) == 0 {
|
||||
return false
|
||||
}
|
||||
pathi, pathj := stki[len(stki)-1], stkj[len(stkj)-1]
|
||||
return pathi < pathj
|
||||
|
||||
@@ -473,6 +473,7 @@ func recompileForTest(pmain, preal, ptest, pxtest *Package) *PackageError {
|
||||
p.Target = ""
|
||||
p.Internal.BuildInfo = nil
|
||||
p.Internal.ForceLibrary = true
|
||||
p.Internal.PGOProfile = preal.Internal.PGOProfile
|
||||
}
|
||||
|
||||
// Update p.Internal.Imports to use test copies.
|
||||
@@ -496,6 +497,11 @@ func recompileForTest(pmain, preal, ptest, pxtest *Package) *PackageError {
|
||||
if p.Name == "main" && p != pmain && p != ptest {
|
||||
split()
|
||||
}
|
||||
// Split and attach PGO information to test dependencies if preal
|
||||
// is built with PGO.
|
||||
if preal.Internal.PGOProfile != "" && p.Internal.PGOProfile == "" {
|
||||
split()
|
||||
}
|
||||
}
|
||||
|
||||
// Do search to find cycle.
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"errors"
|
||||
"os"
|
||||
"runtime"
|
||||
"sync"
|
||||
|
||||
"cmd/go/internal/base"
|
||||
"cmd/go/internal/cfg"
|
||||
@@ -17,6 +18,7 @@ import (
|
||||
"cmd/go/internal/modfetch"
|
||||
"cmd/go/internal/modfetch/codehost"
|
||||
"cmd/go/internal/modload"
|
||||
"cmd/go/internal/toolchain"
|
||||
|
||||
"golang.org/x/mod/module"
|
||||
)
|
||||
@@ -153,7 +155,10 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) {
|
||||
// 'go mod graph', and similar commands.
|
||||
_, err := modload.LoadModGraph(ctx, "")
|
||||
if err != nil {
|
||||
base.Fatal(err)
|
||||
// TODO(#64008): call base.Fatalf instead of toolchain.SwitchOrFatal
|
||||
// here, since we can only reach this point with an outdated toolchain
|
||||
// if the go.mod file is inconsistent.
|
||||
toolchain.SwitchOrFatal(ctx, err)
|
||||
}
|
||||
|
||||
for _, m := range modFile.Require {
|
||||
@@ -194,8 +199,26 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) {
|
||||
// from the resulting TooNewError), all before we try the actual full download
|
||||
// of each module.
|
||||
//
|
||||
// For now, we just let it fail: the user can explicitly set GOTOOLCHAIN
|
||||
// and retry if they want to.
|
||||
// For now, we go ahead and try all the downloads and collect the errors, and
|
||||
// if any download failed due to a TooNewError, we switch toolchains and try
|
||||
// again. Any downloads that already succeeded will still be in cache.
|
||||
// That won't give optimal concurrency (we'll do two batches of concurrent
|
||||
// downloads instead of all in one batch), and it might add a little overhead
|
||||
// to look up the downloads from the first batch in the module cache when
|
||||
// we see them again in the second batch. On the other hand, it's way simpler
|
||||
// to implement, and not really any more expensive if the user is requesting
|
||||
// no explicit arguments (their go.mod file should already list an appropriate
|
||||
// toolchain version) or only one module (as is used by the Go Module Proxy).
|
||||
|
||||
if infosErr != nil {
|
||||
var sw toolchain.Switcher
|
||||
sw.Error(infosErr)
|
||||
if sw.NeedSwitch() {
|
||||
sw.Switch(ctx)
|
||||
}
|
||||
// Otherwise, wait to report infosErr after we have downloaded
|
||||
// what we can.
|
||||
}
|
||||
|
||||
if !haveExplicitArgs && modload.WorkFilePath() == "" {
|
||||
// 'go mod download' is sometimes run without arguments to pre-populate the
|
||||
@@ -205,7 +228,7 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) {
|
||||
// (golang.org/issue/45332). We do still fix inconsistencies in go.mod
|
||||
// though.
|
||||
//
|
||||
// TODO(#45551): In the future, report an error if go.mod or go.sum need to
|
||||
// TODO(#64008): In the future, report an error if go.mod or go.sum need to
|
||||
// be updated after loading the build list. This may require setting
|
||||
// the mode to "mod" or "readonly" depending on haveExplicitArgs.
|
||||
if err := modload.WriteGoMod(ctx, modload.WriteOpts{}); err != nil {
|
||||
@@ -213,6 +236,7 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) {
|
||||
}
|
||||
}
|
||||
|
||||
var downloadErrs sync.Map
|
||||
for _, info := range infos {
|
||||
if info.Replace != nil {
|
||||
info = info.Replace
|
||||
@@ -239,7 +263,11 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) {
|
||||
}
|
||||
sem <- token{}
|
||||
go func() {
|
||||
DownloadModule(ctx, m)
|
||||
err := DownloadModule(ctx, m)
|
||||
if err != nil {
|
||||
downloadErrs.Store(m, err)
|
||||
m.Error = err.Error()
|
||||
}
|
||||
<-sem
|
||||
}()
|
||||
}
|
||||
@@ -249,6 +277,39 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) {
|
||||
sem <- token{}
|
||||
}
|
||||
|
||||
// If there were explicit arguments
|
||||
// (like 'go mod download golang.org/x/tools@latest'),
|
||||
// check whether we need to upgrade the toolchain in order to download them.
|
||||
//
|
||||
// (If invoked without arguments, we expect the module graph to already
|
||||
// be tidy and the go.mod file to declare a 'go' version that satisfies
|
||||
// transitive requirements. If that invariant holds, then we should have
|
||||
// already upgraded when we loaded the module graph, and should not need
|
||||
// an additional check here. See https://go.dev/issue/45551.)
|
||||
//
|
||||
// We also allow upgrades if in a workspace because in workspace mode
|
||||
// with no arguments we download the module pattern "all",
|
||||
// which may include dependencies that are normally pruned out
|
||||
// of the individual modules in the workspace.
|
||||
if haveExplicitArgs || modload.WorkFilePath() != "" {
|
||||
var sw toolchain.Switcher
|
||||
// Add errors to the Switcher in deterministic order so that they will be
|
||||
// logged deterministically.
|
||||
for _, m := range mods {
|
||||
if erri, ok := downloadErrs.Load(m); ok {
|
||||
sw.Error(erri.(error))
|
||||
}
|
||||
}
|
||||
// Only call sw.Switch if it will actually switch.
|
||||
// Otherwise, we may want to write the errors as JSON
|
||||
// (instead of using base.Error as sw.Switch would),
|
||||
// and we may also have other errors to report from the
|
||||
// initial infos returned by ListModules.
|
||||
if sw.NeedSwitch() {
|
||||
sw.Switch(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
if *downloadJSON {
|
||||
for _, m := range mods {
|
||||
b, err := json.MarshalIndent(m, "", "\t")
|
||||
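The change above collects per-module download failures in a sync.Map keyed by module, then replays them in the original slice order so the Switcher logs deterministically. The collect-then-report pattern in isolation (the work items and the failure are made up):

package main

import (
	"fmt"
	"sync"
)

func main() {
	items := []string{"a", "b", "c"}
	var errs sync.Map // item -> error
	var wg sync.WaitGroup
	for _, it := range items {
		it := it
		wg.Add(1)
		go func() {
			defer wg.Done()
			if it == "b" { // pretend this download failed
				errs.Store(it, fmt.Errorf("download %s: boom", it))
			}
		}()
	}
	wg.Wait()
	// Report in input order, not map iteration order.
	for _, it := range items {
		if e, ok := errs.Load(it); ok {
			fmt.Println(e.(error))
		}
	}
}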
@@ -302,34 +363,27 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) {
|
||||
|
||||
// DownloadModule runs 'go mod download' for m.Path@m.Version,
|
||||
// leaving the results (including any error) in m itself.
|
||||
func DownloadModule(ctx context.Context, m *ModuleJSON) {
|
||||
func DownloadModule(ctx context.Context, m *ModuleJSON) error {
|
||||
var err error
|
||||
_, file, err := modfetch.InfoFile(ctx, m.Path, m.Version)
|
||||
if err != nil {
|
||||
m.Error = err.Error()
|
||||
return
|
||||
return err
|
||||
}
|
||||
m.Info = file
|
||||
m.GoMod, err = modfetch.GoModFile(ctx, m.Path, m.Version)
|
||||
if err != nil {
|
||||
m.Error = err.Error()
|
||||
return
|
||||
return err
|
||||
}
|
||||
m.GoModSum, err = modfetch.GoModSum(ctx, m.Path, m.Version)
|
||||
if err != nil {
|
||||
m.Error = err.Error()
|
||||
return
|
||||
return err
|
||||
}
|
||||
mod := module.Version{Path: m.Path, Version: m.Version}
|
||||
m.Zip, err = modfetch.DownloadZip(ctx, mod)
|
||||
if err != nil {
|
||||
m.Error = err.Error()
|
||||
return
|
||||
return err
|
||||
}
|
||||
m.Sum = modfetch.Sum(ctx, mod)
|
||||
m.Dir, err = modfetch.Download(ctx, mod)
|
||||
if err != nil {
|
||||
m.Error = err.Error()
|
||||
return
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -61,7 +61,7 @@ func runVerify(ctx context.Context, cmd *base.Command, args []string) {
|
||||
if err != nil {
|
||||
base.Fatal(err)
|
||||
}
|
||||
mods := mg.BuildList()[modload.MainModules.Len():]
|
||||
mods := mg.BuildList()
|
||||
// Use a slice of result channels, so that the output is deterministic.
|
||||
errsChans := make([]<-chan []error, len(mods))
|
||||
|
||||
@@ -94,6 +94,9 @@ func verifyMod(ctx context.Context, mod module.Version) []error {
|
||||
// "go" and "toolchain" have no disk footprint; nothing to verify.
|
||||
return nil
|
||||
}
|
||||
if modload.MainModules.Contains(mod.Path) {
|
||||
return nil
|
||||
}
|
||||
var errs []error
|
||||
zip, zipErr := modfetch.CachePath(ctx, mod, "zip")
|
||||
if zipErr == nil {
|
||||
|
||||
@@ -110,7 +110,13 @@ func ListModules(ctx context.Context, args []string, mode ListMode, reuseFile st
|
||||
|
||||
if err == nil {
|
||||
requirements = rs
|
||||
if !ExplicitWriteGoMod {
|
||||
// TODO(#61605): The extra ListU clause fixes a problem with Go 1.21rc3
|
||||
// where "go mod tidy" and "go list -m -u all" fight over whether the go.sum
|
||||
// should be considered up-to-date. The fix for now is to always treat the
|
||||
// go.sum as up-to-date during list -m -u. Probably the right fix is more targeted,
|
||||
// but in general list -u is looking up other checksums in the checksum database
|
||||
// that won't be necessary later, so it makes sense not to write the go.sum back out.
|
||||
if !ExplicitWriteGoMod && mode&ListU == 0 {
|
||||
err = commitRequirements(ctx, WriteOpts{})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1363,65 +1363,87 @@ func (r *runTestActor) Act(b *work.Builder, ctx context.Context, a *work.Action)
|
||||
ctx, cancel := context.WithTimeout(ctx, testKillTimeout)
|
||||
defer cancel()
|
||||
|
||||
cmd := exec.CommandContext(ctx, args[0], args[1:]...)
|
||||
cmd.Dir = a.Package.Dir
|
||||
|
||||
env := slices.Clip(cfg.OrigEnv)
|
||||
env = base.AppendPATH(env)
|
||||
env = base.AppendPWD(env, cmd.Dir)
|
||||
cmd.Env = env
|
||||
if addToEnv != "" {
|
||||
cmd.Env = append(cmd.Env, addToEnv)
|
||||
}
|
||||
|
||||
cmd.Stdout = stdout
|
||||
cmd.Stderr = stdout
|
||||
|
||||
// If there are any local SWIG dependencies, we want to load
|
||||
// the shared library from the build directory.
|
||||
if a.Package.UsesSwig() {
|
||||
env := cmd.Env
|
||||
found := false
|
||||
prefix := "LD_LIBRARY_PATH="
|
||||
for i, v := range env {
|
||||
if strings.HasPrefix(v, prefix) {
|
||||
env[i] = v + ":."
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
env = append(env, "LD_LIBRARY_PATH=.")
|
||||
}
|
||||
cmd.Env = env
|
||||
}
|
||||
// Now we're ready to actually run the command.
|
||||
//
|
||||
// If the -o flag is set, or if at some point we change cmd/go to start
|
||||
// copying test executables into the build cache, we may run into spurious
|
||||
// ETXTBSY errors on Unix platforms (see https://go.dev/issue/22315).
|
||||
//
|
||||
// Since we know what causes those, and we know that they should resolve
|
||||
// quickly (the ETXTBSY error will resolve as soon as the subprocess
|
||||
// holding the descriptor open reaches its 'exec' call), we retry them
|
||||
// in a loop.
|
||||
|
||||
var (
|
||||
cmd *exec.Cmd
|
||||
t0 time.Time
|
||||
cancelKilled = false
|
||||
cancelSignaled = false
|
||||
)
|
||||
cmd.Cancel = func() error {
|
||||
if base.SignalTrace == nil {
|
||||
err := cmd.Process.Kill()
|
||||
for {
|
||||
cmd = exec.CommandContext(ctx, args[0], args[1:]...)
|
||||
cmd.Dir = a.Package.Dir
|
||||
|
||||
env := slices.Clip(cfg.OrigEnv)
|
||||
env = base.AppendPATH(env)
|
||||
env = base.AppendPWD(env, cmd.Dir)
|
||||
cmd.Env = env
|
||||
if addToEnv != "" {
|
||||
cmd.Env = append(cmd.Env, addToEnv)
|
||||
}
|
||||
|
||||
cmd.Stdout = stdout
|
||||
cmd.Stderr = stdout
|
||||
|
||||
// If there are any local SWIG dependencies, we want to load
|
||||
// the shared library from the build directory.
|
||||
if a.Package.UsesSwig() {
|
||||
env := cmd.Env
|
||||
found := false
|
||||
prefix := "LD_LIBRARY_PATH="
|
||||
for i, v := range env {
|
||||
if strings.HasPrefix(v, prefix) {
|
||||
env[i] = v + ":."
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
env = append(env, "LD_LIBRARY_PATH=.")
|
||||
}
|
||||
cmd.Env = env
|
||||
}
|
||||
|
||||
cmd.Cancel = func() error {
|
||||
if base.SignalTrace == nil {
|
||||
err := cmd.Process.Kill()
|
||||
if err == nil {
|
||||
cancelKilled = true
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Send a quit signal in the hope that the program will print
|
||||
// a stack trace and exit.
|
||||
err := cmd.Process.Signal(base.SignalTrace)
|
||||
if err == nil {
|
||||
cancelKilled = true
|
||||
cancelSignaled = true
|
||||
}
|
||||
return err
|
||||
}
|
||||
cmd.WaitDelay = testWaitDelay
|
||||
|
||||
// Send a quit signal in the hope that the program will print
|
||||
// a stack trace and exit.
|
||||
err := cmd.Process.Signal(base.SignalTrace)
|
||||
if err == nil {
|
||||
cancelSignaled = true
|
||||
base.StartSigHandlers()
|
||||
t0 = time.Now()
|
||||
err = cmd.Run()
|
||||
|
||||
if !isETXTBSY(err) {
|
||||
// We didn't hit the race in #22315, so there is no reason to retry the
|
||||
// command.
|
||||
break
|
||||
}
|
||||
return err
|
||||
}
|
||||
cmd.WaitDelay = testWaitDelay
|
||||
|
||||
base.StartSigHandlers()
|
||||
t0 := time.Now()
|
||||
err = cmd.Run()
|
||||
out := buf.Bytes()
|
||||
a.TestOutput = &buf
|
||||
t := fmt.Sprintf("%.3fs", time.Since(t0).Seconds())
|
||||
|
||||
12 src/cmd/go/internal/test/test_nonunix.go Normal file
@@ -0,0 +1,12 @@
|
||||
// Copyright 2023 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !unix
|
||||
|
||||
package test
|
||||
|
||||
func isETXTBSY(err error) bool {
|
||||
// syscall.ETXTBSY is only meaningful on Unix platforms.
|
||||
return false
|
||||
}
|
||||
16 src/cmd/go/internal/test/test_unix.go Normal file
@@ -0,0 +1,16 @@
|
||||
// Copyright 2023 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build unix
|
||||
|
||||
package test
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func isETXTBSY(err error) bool {
|
||||
return errors.Is(err, syscall.ETXTBSY)
|
||||
}
|
||||
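test_unix.go above reduces the check to errors.Is, which finds syscall.ETXTBSY anywhere in the wrapped error chain returned by os/exec. A hedged sketch of the retry loop it enables, simplified from the runTestActor change (names are mine, not cmd/go's):

//go:build unix

package retry

import (
	"errors"
	"os/exec"
	"syscall"
)

// runWithRetry rebuilds and re-runs the command while the only failure is
// the transient ETXTBSY race (go.dev/issue/22315). An exec.Cmd cannot be
// reused after Run, hence the constructor argument.
func runWithRetry(newCmd func() *exec.Cmd) error {
	for {
		err := newCmd().Run()
		if !errors.Is(err, syscall.ETXTBSY) {
			return err
		}
	}
}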
@@ -61,7 +61,7 @@ func init() {
|
||||
cf.String("run", "", "")
|
||||
cf.Bool("short", false, "")
|
||||
cf.String("skip", "", "")
|
||||
cf.DurationVar(&testTimeout, "timeout", 10*time.Minute, "")
|
||||
cf.DurationVar(&testTimeout, "timeout", 10*time.Minute, "") // known to cmd/dist
|
||||
cf.String("fuzztime", "", "")
|
||||
cf.String("fuzzminimizetime", "", "")
|
||||
cf.StringVar(&testTrace, "trace", "", "")
|
||||
|
||||
@@ -8,6 +8,7 @@ package toolchain
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"io/fs"
|
||||
@@ -25,6 +26,7 @@ import (
|
||||
"cmd/go/internal/modfetch"
|
||||
"cmd/go/internal/modload"
|
||||
"cmd/go/internal/run"
|
||||
"cmd/go/internal/work"
|
||||
|
||||
"golang.org/x/mod/module"
|
||||
)
|
||||
@@ -485,74 +487,132 @@ func goInstallVersion() bool {
|
||||
// Note: We assume there are no flags between 'go' and 'install' or 'run'.
|
||||
// During testing there are some debugging flags that are accepted
|
||||
// in that position, but in production go binaries there are not.
|
||||
if len(os.Args) < 3 || (os.Args[1] != "install" && os.Args[1] != "run") {
|
||||
if len(os.Args) < 3 {
|
||||
return false
|
||||
}
|
||||
|
||||
// Check for pkg@version.
|
||||
var arg string
|
||||
var cmdFlags *flag.FlagSet
|
||||
switch os.Args[1] {
|
||||
default:
|
||||
// Command doesn't support a pkg@version as the main module.
|
||||
return false
|
||||
case "install":
|
||||
// We would like to let 'go install -newflag pkg@version' work even
|
||||
// across a toolchain switch. To make that work, assume the pkg@version
|
||||
// is the last argument and skip the flag parsing.
|
||||
arg = os.Args[len(os.Args)-1]
|
||||
cmdFlags = &work.CmdInstall.Flag
|
||||
case "run":
|
||||
// For run, the pkg@version can be anywhere on the command line,
|
||||
// because it is preceded by run flags and followed by arguments to the
|
||||
// program being run. To handle that precisely, we have to interpret the
|
||||
// flags a little bit, to know whether each flag takes an optional argument.
|
||||
// We can still allow unknown flags as long as they have an explicit =value.
|
||||
args := os.Args[2:]
|
||||
for i := 0; i < len(args); i++ {
|
||||
a := args[i]
|
||||
if !strings.HasPrefix(a, "-") {
|
||||
arg = a
|
||||
break
|
||||
}
|
||||
if a == "-" {
|
||||
// non-flag but also non-pkg@version
|
||||
cmdFlags = &run.CmdRun.Flag
|
||||
}
|
||||
|
||||
// The modcacherw flag is unique in that it affects how we fetch the
|
||||
// requested module to even figure out what toolchain it needs.
|
||||
// We need to actually set it before we check the toolchain version.
|
||||
// (See https://go.dev/issue/64282.)
|
||||
modcacherwFlag := cmdFlags.Lookup("modcacherw")
|
||||
if modcacherwFlag == nil {
|
||||
base.Fatalf("internal error: modcacherw flag not registered for command")
|
||||
}
|
||||
modcacherwVal, ok := modcacherwFlag.Value.(interface {
|
||||
IsBoolFlag() bool
|
||||
flag.Value
|
||||
})
|
||||
if !ok || !modcacherwVal.IsBoolFlag() {
|
||||
base.Fatalf("internal error: modcacherw is not a boolean flag")
|
||||
}
|
||||
|
||||
// Make a best effort to parse the command's args to find the pkg@version
|
||||
// argument and the -modcacherw flag.
|
||||
var (
|
||||
pkgArg string
|
||||
modcacherwSeen bool
|
||||
)
|
||||
for args := os.Args[2:]; len(args) > 0; {
|
||||
a := args[0]
|
||||
args = args[1:]
|
||||
if a == "--" {
|
||||
if len(args) == 0 {
|
||||
return false
|
||||
}
|
||||
if a == "--" {
|
||||
if i+1 >= len(args) {
|
||||
return false
|
||||
pkgArg = args[0]
|
||||
break
|
||||
}
|
||||
|
||||
a, ok := strings.CutPrefix(a, "-")
|
||||
if !ok {
|
||||
// Not a flag argument. Must be a package.
|
||||
pkgArg = a
|
||||
break
|
||||
}
|
||||
a = strings.TrimPrefix(a, "-") // Treat --flag as -flag.
|
||||
|
||||
name, val, hasEq := strings.Cut(a, "=")
|
||||
|
||||
if name == "modcacherw" {
|
||||
if !hasEq {
|
||||
val = "true"
|
||||
}
|
||||
if err := modcacherwVal.Set(val); err != nil {
|
||||
return false
|
||||
}
|
||||
modcacherwSeen = true
|
||||
continue
|
||||
}
|
||||
|
||||
if hasEq {
|
||||
// Already has a value; don't bother parsing it.
|
||||
continue
|
||||
}
|
||||
|
||||
f := run.CmdRun.Flag.Lookup(a)
|
||||
if f == nil {
|
||||
// We don't know whether this flag is a boolean.
|
||||
if os.Args[1] == "run" {
|
||||
// We don't know where to find the pkg@version argument.
|
||||
// For run, the pkg@version can be anywhere on the command line,
|
||||
// because it is preceded by run flags and followed by arguments to the
|
||||
// program being run. Since we don't know whether this flag takes
|
||||
// an argument, we can't reliably identify the end of the run flags.
|
||||
// Just give up and let the user clarify using the "=" form.
|
||||
return false
|
||||
}
|
||||
|
||||
// We would like to let 'go install -newflag pkg@version' work even
|
||||
// across a toolchain switch. To make that work, assume by default that
|
||||
// the pkg@version is the last argument and skip the remaining args unless
|
||||
// we spot a plausible "-modcacherw" flag.
|
||||
for len(args) > 0 {
|
||||
a := args[0]
|
||||
name, _, _ := strings.Cut(a, "=")
|
||||
if name == "-modcacherw" || name == "--modcacherw" {
|
||||
break
|
||||
}
|
||||
arg = args[i+1]
|
||||
break
|
||||
if len(args) == 1 && !strings.HasPrefix(a, "-") {
|
||||
pkgArg = a
|
||||
}
|
||||
args = args[1:]
|
||||
}
|
||||
a = strings.TrimPrefix(a, "-")
|
||||
a = strings.TrimPrefix(a, "-")
|
||||
if strings.HasPrefix(a, "-") {
|
||||
// non-flag but also non-pkg@version
|
||||
return false
|
||||
}
|
||||
if strings.Contains(a, "=") {
|
||||
// already has value
|
||||
continue
|
||||
}
|
||||
f := run.CmdRun.Flag.Lookup(a)
|
||||
if f == nil {
|
||||
// Unknown flag. Give up. The command is going to fail in flag parsing.
|
||||
return false
|
||||
}
|
||||
if bf, ok := f.Value.(interface{ IsBoolFlag() bool }); ok && bf.IsBoolFlag() {
|
||||
// Does not take value.
|
||||
continue
|
||||
}
|
||||
i++ // Does take a value; skip it.
|
||||
continue
|
||||
}
|
||||
|
||||
if bf, ok := f.Value.(interface{ IsBoolFlag() bool }); !ok || !bf.IsBoolFlag() {
|
||||
// The next arg is the value for this flag. Skip it.
|
||||
args = args[1:]
|
||||
continue
|
||||
}
|
||||
}
|
||||
if !strings.Contains(arg, "@") || build.IsLocalImport(arg) || filepath.IsAbs(arg) {
|
||||
|
||||
if !strings.Contains(pkgArg, "@") || build.IsLocalImport(pkgArg) || filepath.IsAbs(pkgArg) {
|
||||
return false
|
||||
}
|
||||
path, version, _ := strings.Cut(arg, "@")
|
||||
path, version, _ := strings.Cut(pkgArg, "@")
|
||||
if path == "" || version == "" || gover.IsToolchain(path) {
|
||||
return false
|
||||
}
|
||||
|
||||
if !modcacherwSeen && base.InGOFLAGS("-modcacherw") {
|
||||
fs := flag.NewFlagSet("goInstallVersion", flag.ExitOnError)
|
||||
fs.Var(modcacherwVal, "modcacherw", modcacherwFlag.Usage)
|
||||
base.SetFromGOFLAGS(fs)
|
||||
}
|
||||
|
||||
// It would be correct to simply return true here, bypassing use
|
||||
// of the current go.mod or go.work, and let "go run" or "go install"
|
||||
// do the rest, including a toolchain switch.
|
||||
|
||||
@@ -1204,18 +1204,31 @@ func repoRootFromVCSPaths(importPath string, security web.SecurityMode, vcsPaths
|
||||
var ok bool
|
||||
repoURL, ok = interceptVCSTest(repo, vcs, security)
|
||||
if !ok {
|
||||
scheme := vcs.Scheme[0] // default to first scheme
|
||||
if vcs.PingCmd != "" {
|
||||
// If we know how to test schemes, scan to find one.
|
||||
scheme, err := func() (string, error) {
|
||||
for _, s := range vcs.Scheme {
|
||||
if security == web.SecureOnly && !vcs.isSecureScheme(s) {
|
||||
continue
|
||||
}
|
||||
if vcs.Ping(s, repo) == nil {
|
||||
scheme = s
|
||||
break
|
||||
|
||||
// If we know how to ping URL schemes for this VCS,
|
||||
// check that this repo works.
|
||||
// Otherwise, default to the first scheme
|
||||
// that meets the requested security level.
|
||||
if vcs.PingCmd == "" {
|
||||
return s, nil
|
||||
}
|
||||
if err := vcs.Ping(s, repo); err == nil {
|
||||
return s, nil
|
||||
}
|
||||
}
|
||||
securityFrag := ""
|
||||
if security == web.SecureOnly {
|
||||
securityFrag = "secure "
|
||||
}
|
||||
return "", fmt.Errorf("no %sprotocol found for repository", securityFrag)
|
||||
}()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
repoURL = scheme + "://" + repo
|
||||
}
|
||||
|
||||
@@ -212,16 +212,22 @@ func get(security SecurityMode, url *urlpkg.URL) (*Response, error) {
|
||||
}
|
||||
}
|
||||
|
||||
if res == nil || res.Body == nil {
|
||||
if err != nil {
|
||||
// Per the docs for [net/http.Client.Do], “On error, any Response can be
|
||||
// ignored. A non-nil Response with a non-nil error only occurs when
|
||||
// CheckRedirect fails, and even then the returned Response.Body is
|
||||
// already closed.”
|
||||
release()
|
||||
} else {
|
||||
body := res.Body
|
||||
res.Body = hookCloser{
|
||||
ReadCloser: body,
|
||||
afterClose: release,
|
||||
}
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// “If the returned error is nil, the Response will contain a non-nil Body
|
||||
// which the user is expected to close.”
|
||||
body := res.Body
|
||||
res.Body = hookCloser{
|
||||
ReadCloser: body,
|
||||
afterClose: release,
|
||||
}
|
||||
return url, res, err
|
||||
}
|
||||
|
||||
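The rewritten get above only wraps the body when the request actually produced one; on error the connection slot is released immediately, since net/http documents that any returned body is already closed in that case. The wrapper itself is a small ReadCloser decorator; a stand-alone version (the type mirrors the hookCloser in the diff, the usage in main is made up):

package main

import (
	"fmt"
	"io"
	"strings"
)

// hookCloser runs afterClose once the underlying body is closed,
// e.g. to release a limiting semaphore slot.
type hookCloser struct {
	io.ReadCloser
	afterClose func()
}

func (c hookCloser) Close() error {
	err := c.ReadCloser.Close()
	c.afterClose()
	return err
}

func main() {
	body := hookCloser{
		ReadCloser: io.NopCloser(strings.NewReader("hello")),
		afterClose: func() { fmt.Println("released") },
	}
	io.Copy(io.Discard, body)
	body.Close() // prints "released"
}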
|
||||
@@ -141,6 +141,12 @@ var validCompilerFlagsWithNextArg = []string{
|
||||
"-x",
|
||||
}
|
||||
|
||||
var invalidLinkerFlags = []*lazyregexp.Regexp{
|
||||
// On macOS this means the linker loads and executes the next argument.
|
||||
// Have to exclude separately because -lfoo is allowed in general.
|
||||
re(`-lto_library`),
|
||||
}
|
||||
|
||||
var validLinkerFlags = []*lazyregexp.Regexp{
|
||||
re(`-F([^@\-].*)`),
|
||||
re(`-l([^@\-].*)`),
|
||||
@@ -231,12 +237,12 @@ var validLinkerFlagsWithNextArg = []string{
|
||||
|
||||
func checkCompilerFlags(name, source string, list []string) error {
|
||||
checkOverrides := true
|
||||
return checkFlags(name, source, list, validCompilerFlags, validCompilerFlagsWithNextArg, checkOverrides)
|
||||
return checkFlags(name, source, list, nil, validCompilerFlags, validCompilerFlagsWithNextArg, checkOverrides)
|
||||
}
|
||||
|
||||
func checkLinkerFlags(name, source string, list []string) error {
|
||||
checkOverrides := true
|
||||
return checkFlags(name, source, list, validLinkerFlags, validLinkerFlagsWithNextArg, checkOverrides)
|
||||
return checkFlags(name, source, list, invalidLinkerFlags, validLinkerFlags, validLinkerFlagsWithNextArg, checkOverrides)
|
||||
}
|
||||
|
||||
// checkCompilerFlagsForInternalLink returns an error if 'list'
|
||||
@@ -245,7 +251,7 @@ func checkLinkerFlags(name, source string, list []string) error {
|
||||
// external linker).
|
||||
func checkCompilerFlagsForInternalLink(name, source string, list []string) error {
|
||||
checkOverrides := false
|
||||
if err := checkFlags(name, source, list, validCompilerFlags, validCompilerFlagsWithNextArg, checkOverrides); err != nil {
|
||||
if err := checkFlags(name, source, list, nil, validCompilerFlags, validCompilerFlagsWithNextArg, checkOverrides); err != nil {
|
||||
return err
|
||||
}
|
||||
// Currently the only flag on the allow list that causes problems
|
||||
@@ -258,7 +264,7 @@ func checkCompilerFlagsForInternalLink(name, source string, list []string) error
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkFlags(name, source string, list []string, valid []*lazyregexp.Regexp, validNext []string, checkOverrides bool) error {
|
||||
func checkFlags(name, source string, list []string, invalid, valid []*lazyregexp.Regexp, validNext []string, checkOverrides bool) error {
|
||||
// Let users override rules with $CGO_CFLAGS_ALLOW, $CGO_CFLAGS_DISALLOW, etc.
|
||||
var (
|
||||
allow *regexp.Regexp
|
||||
@@ -290,6 +296,11 @@ Args:
|
||||
if allow != nil && allow.FindString(arg) == arg {
|
||||
continue Args
|
||||
}
|
||||
for _, re := range invalid {
|
||||
if re.FindString(arg) == arg { // must be complete match
|
||||
goto Bad
|
||||
}
|
||||
}
|
||||
for _, re := range valid {
|
||||
if re.FindString(arg) == arg { // must be complete match
|
||||
continue Args
|
||||
|
||||
@@ -175,7 +175,11 @@ func main() {
|
||||
if used > 0 {
|
||||
helpArg += " " + strings.Join(args[:used], " ")
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "go %s: unknown command\nRun 'go help%s' for usage.\n", cfg.CmdName, helpArg)
|
||||
cmdName := cfg.CmdName
|
||||
if cmdName == "" {
|
||||
cmdName = args[0]
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "go %s: unknown command\nRun 'go help%s' for usage.\n", cmdName, helpArg)
|
||||
base.SetExitStatus(2)
|
||||
base.Exit()
|
||||
}
|
||||
|
||||
@@ -45,6 +45,12 @@ stderr 'compile.*-pgoprofile=.*b(/|\\\\)default\.pgo.*b(/|\\\\)b_test\.go'
|
||||
stderr 'compile.*-pgoprofile=.*b(/|\\\\)default\.pgo.*dep(/|\\\\)dep\.go'
|
||||
! stderr 'compile.*-pgoprofile=.*nopgo(/|\\\\)nopgo_test\.go'
|
||||
|
||||
# test-only dependencies also have profiles attached
|
||||
stderr 'compile.*-pgoprofile=.*a(/|\\\\)default\.pgo.*testdep(/|\\\\)testdep\.go'
|
||||
stderr 'compile.*-pgoprofile=.*b(/|\\\\)default\.pgo.*testdep(/|\\\\)testdep\.go'
|
||||
stderr 'compile.*-pgoprofile=.*a(/|\\\\)default\.pgo.*testdep2(/|\\\\)testdep2\.go'
|
||||
stderr 'compile.*-pgoprofile=.*b(/|\\\\)default\.pgo.*testdep2(/|\\\\)testdep2\.go'
|
||||
|
||||
# go list -deps prints packages built multiple times.
|
||||
go list -pgo=auto -deps ./a ./b ./nopgo
|
||||
stdout 'test/dep \[test/a\]'
|
||||
@@ -66,6 +72,7 @@ func main() {}
|
||||
-- a/a_test.go --
|
||||
package main
|
||||
import "testing"
|
||||
import _ "test/testdep"
|
||||
func TestA(*testing.T) {}
|
||||
-- a/default.pgo --
|
||||
-- b/b.go --
|
||||
@@ -76,6 +83,7 @@ func main() {}
|
||||
-- b/b_test.go --
|
||||
package main
|
||||
import "testing"
|
||||
import _ "test/testdep"
|
||||
func TestB(*testing.T) {}
|
||||
-- b/default.pgo --
|
||||
-- nopgo/nopgo.go --
|
||||
@@ -94,3 +102,8 @@ import _ "test/dep3"
|
||||
package dep2
|
||||
-- dep3/dep3.go --
|
||||
package dep3
|
||||
-- testdep/testdep.go --
|
||||
package testdep
|
||||
import _ "test/testdep2"
|
||||
-- testdep2/testdep2.go --
|
||||
package testdep2
|
||||
|
||||
17 src/cmd/go/testdata/script/darwin_lto_library_ldflag.txt vendored Normal file
@@ -0,0 +1,17 @@
|
||||
[!GOOS:darwin] skip
|
||||
[!cgo] skip
|
||||
|
||||
! go build
|
||||
stderr 'invalid flag in #cgo LDFLAGS: -lto_library'
|
||||
|
||||
-- go.mod --
|
||||
module ldflag
|
||||
|
||||
-- main.go --
|
||||
package main
|
||||
|
||||
// #cgo CFLAGS: -flto
|
||||
// #cgo LDFLAGS: -lto_library bad.dylib
|
||||
import "C"
|
||||
|
||||
func main() {}
|
||||
27 src/cmd/go/testdata/script/generate_workspace.txt vendored Normal file
@@ -0,0 +1,27 @@
|
||||
# This is a regression test for Issue #56098: Go generate
|
||||
# wasn't initializing workspace mode
|
||||
|
||||
[short] skip
|
||||
|
||||
go generate ./mod
|
||||
cmp ./mod/got.txt want.txt
|
||||
|
||||
-- go.work --
|
||||
go 1.21
|
||||
|
||||
use ./mod
|
||||
-- mod/go.mod --
|
||||
module example.com/mod
|
||||
-- mod/gen.go --
|
||||
//go:generate go run gen.go got.txt
|
||||
|
||||
package main
|
||||
|
||||
import "os"
|
||||
|
||||
func main() {
|
||||
outfile := os.Args[1]
|
||||
os.WriteFile(outfile, []byte("Hello World!\n"), 0644)
|
||||
}
|
||||
-- want.txt --
|
||||
Hello World!
|
||||
2 src/cmd/go/testdata/script/go_badcmd.txt vendored Normal file
@@ -0,0 +1,2 @@
|
||||
! go asdf
|
||||
stderr '^go asdf: unknown command'
|
||||
@@ -1,4 +1,5 @@
|
||||
[compiler:gccgo] skip
|
||||
[short] skip 'builds and links another cmd/go'
|
||||
|
||||
mkdir $WORK/new/bin
|
||||
|
||||
@@ -9,15 +10,18 @@ mkdir $WORK/new/bin
|
||||
# new cmd/go is built.
|
||||
env GOROOT_FINAL=
|
||||
|
||||
# $GOROOT/bin/go is whatever the user has already installed
|
||||
# (using make.bash or similar). We can't make assumptions about what
|
||||
# options it may have been built with, such as -trimpath or GOROOT_FINAL.
|
||||
# Instead, we build a fresh copy of the binary with known settings.
|
||||
go build -o $WORK/new/bin/go$GOEXE cmd/go &
|
||||
go build -o $WORK/bin/check$GOEXE check.go &
|
||||
go build -trimpath -o $WORK/bin/check$GOEXE check.go &
|
||||
wait
|
||||
|
||||
env TESTGOROOT=$GOROOT
|
||||
env GOROOT=
|
||||
|
||||
# Relocated Executable
|
||||
# cp $TESTGOROOT/bin/go$GOEXE $WORK/new/bin/go$GOEXE
|
||||
exec $WORK/bin/check$GOEXE $WORK/new/bin/go$GOEXE $TESTGOROOT
|
||||
|
||||
# Relocated Tree:
|
||||
|
||||
91 src/cmd/go/testdata/script/goroot_executable_trimpath.txt vendored Normal file
@@ -0,0 +1,91 @@
|
||||
# Regression test for https://go.dev/issue/62119:
|
||||
# A 'go' command cross-compiled with a different GOHOSTOS
|
||||
# should be able to locate its GOROOT using os.Executable.
|
||||
#
|
||||
# (This also tests a 'go' command built with -trimpath
|
||||
# that is not cross-compiled, since we need to build that
|
||||
# configuration for the test anyway.)
|
||||
|
||||
[short] skip 'builds and links another cmd/go'
|
||||
|
||||
mkdir $WORK/new/bin
|
||||
mkdir $WORK/new/bin/${GOOS}_${GOARCH}
|
||||
|
||||
# In this test, we are specifically checking the logic for deriving
|
||||
# the value of GOROOT from os.Executable when runtime.GOROOT is
|
||||
# trimmed away.
|
||||
# GOROOT_FINAL changes the default behavior of runtime.GOROOT,
|
||||
# so we explicitly clear it to remove it as a confounding variable.
|
||||
env GOROOT_FINAL=
|
||||
|
||||
# $GOROOT/bin/go is whatever the user has already installed
|
||||
# (using make.bash or similar). We can't make assumptions about what
|
||||
# options it may have been built with, such as -trimpath or GOROOT_FINAL.
|
||||
# Instead, we build a fresh copy of the binary with known settings.
|
||||
go build -trimpath -o $WORK/new/bin/go$GOEXE cmd/go &
|
||||
go build -trimpath -o $WORK/bin/check$GOEXE check.go &
|
||||
wait
|
||||
|
||||
env TESTGOROOT=$GOROOT
|
||||
env GOROOT=
|
||||
|
||||
# Relocated Executable
|
||||
# Since we built with -trimpath and the binary isn't installed in a
|
||||
# normal-looking GOROOT, this command should fail.
|
||||
|
||||
! exec $WORK/new/bin/go$GOEXE env GOROOT
|
||||
stderr '^go: cannot find GOROOT directory: ''go'' binary is trimmed and GOROOT is not set$'
|
||||
|
||||
# Cross-compiled binaries in cmd are installed to a ${GOOS}_${GOARCH} subdirectory,
|
||||
# so we also want to try a copy there.
|
||||
# (Note that the script engine's 'exec' engine already works around
|
||||
# https://go.dev/issue/22315, so we don't have to do that explicitly in the
|
||||
# 'check' program we use later.)
|
||||
cp $WORK/new/bin/go$GOEXE $WORK/new/bin/${GOOS}_${GOARCH}/go$GOEXE
|
||||
! exec $WORK/new/bin/${GOOS}_${GOARCH}/go$GOEXE env GOROOT
|
||||
stderr '^go: cannot find GOROOT directory: ''go'' binary is trimmed and GOROOT is not set$'
|
||||
|
||||
# Relocated Tree:
|
||||
# If the binary is sitting in a bin dir next to ../pkg/tool, that counts as a GOROOT,
|
||||
# so it should find the new tree.
|
||||
mkdir $WORK/new/pkg/tool
|
||||
exec $WORK/bin/check$GOEXE $WORK/new/bin/go$GOEXE $WORK/new
|
||||
exec $WORK/bin/check$GOEXE $WORK/new/bin/${GOOS}_${GOARCH}/go$GOEXE $WORK/new
|
||||
|
||||
-- check.go --
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func main() {
|
||||
exe := os.Args[1]
|
||||
want := os.Args[2]
|
||||
cmd := exec.Command(exe, "env", "GOROOT")
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%s env GOROOT: %v, %s\n", exe, err, out)
|
||||
os.Exit(1)
|
||||
}
|
||||
goroot, err := filepath.EvalSymlinks(strings.TrimSpace(string(out)))
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
want, err = filepath.EvalSymlinks(want)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
if !strings.EqualFold(goroot, want) {
|
||||
fmt.Fprintf(os.Stderr, "go env GOROOT:\nhave %s\nwant %s\n", goroot, want)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "go env GOROOT: %s\n", goroot)
|
||||
|
||||
}
|
||||
@@ -3,25 +3,18 @@ env TESTGO_VERSION_SWITCH=switch

# If the main module's go.mod file lists a version lower than the version
# required by its dependencies, the commands that fetch and diagnose the module
# graph (such as 'go mod download' and 'go mod graph') should fail explicitly:
# graph (such as 'go mod graph' and 'go mod verify') should fail explicitly:
# they can't interpret the graph themselves, and they aren't allowed to update
# the go.mod file to record a specific, stable toolchain version that can.

! go mod download rsc.io/future@v1.0.0
stderr '^go: rsc.io/future@v1.0.0 requires go >= 1.999 \(running go 1.21.0\)'

! go mod download rsc.io/future
stderr '^go: rsc.io/future@v1.0.0 requires go >= 1.999 \(running go 1.21.0\)'

! go mod download
stderr '^go: rsc.io/future@v1.0.0: module rsc.io/future@v1.0.0 requires go >= 1.999 \(running go 1.21.0\)'

! go mod verify
stderr '^go: rsc.io/future@v1.0.0: module rsc.io/future@v1.0.0 requires go >= 1.999 \(running go 1.21.0\)'

! go mod graph
stderr '^go: rsc.io/future@v1.0.0: module rsc.io/future@v1.0.0 requires go >= 1.999 \(running go 1.21.0\)'

# TODO(#64008): 'go mod download' without arguments should fail too.


# 'go get' should update the main module's go.mod file to a version compatible with the
# go version required for rsc.io/future, not fail.
@@ -33,8 +26,6 @@ stderr '^go: added toolchain go1.999testmod$'

# Now, the various 'go mod' subcommands should succeed.

go mod download rsc.io/future@v1.0.0
go mod download rsc.io/future
go mod download

go mod verify

@@ -8,11 +8,12 @@ env TESTGO_VERSION=go1.21pre3
# Compile a fake toolchain to put in the path under various names.
env GOTOOLCHAIN=
mkdir $WORK/bin
[!GOOS:plan9] env PATH=$WORK/bin${:}$PATH
[GOOS:plan9] env path=$WORK/bin${:}$path
go build -o $WORK/bin/ ./fakego.go # adds .exe extension implicitly on Windows
cp $WORK/bin/fakego$GOEXE $WORK/bin/go1.50.0$GOEXE

[!GOOS:plan9] env PATH=$WORK/bin
[GOOS:plan9] env path=$WORK/bin

go version
stdout go1.21pre3

src/cmd/go/testdata/script/install_modcacherw_issue64282.txt (45 lines, vendored, new file)
@@ -0,0 +1,45 @@
# Regression test for https://go.dev/issue/64282.
#
# 'go install' and 'go run' with pkg@version arguments should make
# a best effort to parse flags relevant to downloading modules
# (currently only -modcacherw) before actually downloading the module
# to identify which toolchain version to use.
#
# However, the best-effort flag parsing should not interfere with
# actual flag parsing if we don't switch toolchains. In particular,
# unrecognized flags should still be diagnosed after the module for
# the requested package has been downloaded and checked for toolchain
# upgrades.


! go install -cake=delicious -modcacherw example.com/printversion@v0.1.0
stderr '^flag provided but not defined: -cake$'
# Because the -modcacherw flag was set, we should be able to modify the contents
# of a directory within the module cache.
cp $WORK/extraneous.txt $GOPATH/pkg/mod/example.com/printversion@v0.1.0/extraneous_file.go
go clean -modcache


! go install -unknownflag -tags -modcacherw example.com/printversion@v0.1.0
stderr '^flag provided but not defined: -unknownflag$'
cp $WORK/extraneous.txt $GOPATH/pkg/mod/example.com/printversion@v0.1.0/extraneous_file.go
go clean -modcache


# Also try it with a 'go install' that succeeds.
# (But skip in short mode, because linking a binary is expensive.)
[!short] go install -modcacherw example.com/printversion@v0.1.0
[!short] cp $WORK/extraneous.txt $GOPATH/pkg/mod/example.com/printversion@v0.1.0/extraneous_file.go
[!short] go clean -modcache


# The flag should also be applied if given in GOFLAGS
# instead of on the command line.
env GOFLAGS=-modcacherw
! go install -cake=delicious example.com/printversion@v0.1.0
stderr '^flag provided but not defined: -cake$'
cp $WORK/extraneous.txt $GOPATH/pkg/mod/example.com/printversion@v0.1.0/extraneous_file.go


-- $WORK/extraneous.txt --
This is not a Go source file.

src/cmd/go/testdata/script/list_issue_59905.txt (26 lines, vendored)
@@ -1,8 +1,13 @@
# Expect no panic
go list -f '{{if .DepsErrors}}{{.DepsErrors}}{{end}}' -export -e -deps
cmpenv stdout wanterr
cmpenv stdout wanterr_59905

-- wanterr --
# Expect no panic (Issue 61816)
cp level1b_61816.txt level1b/pkg.go
go list -f '{{if .DepsErrors}}{{.DepsErrors}}{{end}}' -export -e -deps
cmpenv stdout wanterr_61816

-- wanterr_59905 --
[# test/main/level1a
level1a${/}pkg.go:5:2: level2x redeclared in this block
level1a${/}pkg.go:4:2: other declaration of level2x
@@ -14,6 +19,23 @@ level1b${/}pkg.go:5:2: level2x redeclared in this block
level1b${/}pkg.go:5:2: "test/main/level1b/level2y" imported as level2x and not used
level1b${/}pkg.go:8:39: undefined: level2y
]
-- wanterr_61816 --
[level1b${/}pkg.go:4:2: package foo is not in std ($GOROOT${/}src${/}foo)]
[# test/main/level1a
level1a${/}pkg.go:5:2: level2x redeclared in this block
level1a${/}pkg.go:4:2: other declaration of level2x
level1a${/}pkg.go:5:2: "test/main/level1a/level2y" imported as level2x and not used
level1a${/}pkg.go:8:39: undefined: level2y
level1b${/}pkg.go:4:2: package foo is not in std ($GOROOT${/}src${/}foo)]
-- level1b_61816.txt --
package level1b

import (
	"foo"
)

func Print() { println(level2x.Value, level2y.Value) }

-- go.mod --
module test/main

src/cmd/go/testdata/script/mod_download_exec_toolchain.txt (107 lines, vendored, new file)
@@ -0,0 +1,107 @@
env TESTGO_VERSION=go1.21
env TESTGO_VERSION_SWITCH=switch

# First, test 'go mod download' outside of a module.
#
# There is no go.mod file into which we can record the selected toolchain,
# so unfortunately these version switches won't be as reproducible as other
# go commands, but that's still preferable to failing entirely or downloading
# a module zip that we don't understand.

# GOTOOLCHAIN=auto should run the newer toolchain
env GOTOOLCHAIN=auto
go mod download rsc.io/needgo121@latest rsc.io/needgo122@latest rsc.io/needgo123@latest rsc.io/needall@latest
stderr '^go: rsc.io/needall@v0.0.1 requires go >= 1.23; switching to go1.23.9$'
! stderr '\(running'

# GOTOOLCHAIN=min+auto should run the newer toolchain
env GOTOOLCHAIN=go1.21+auto
go mod download rsc.io/needgo121@latest rsc.io/needgo122@latest rsc.io/needgo123@latest rsc.io/needall@latest
stderr '^go: rsc.io/needall@v0.0.1 requires go >= 1.23; switching to go1.23.9$'
! stderr '\(running'

# GOTOOLCHAIN=go1.21 should NOT run the newer toolchain
env GOTOOLCHAIN=go1.21
! go mod download rsc.io/needgo121@latest rsc.io/needgo122@latest rsc.io/needgo123@latest rsc.io/needall@latest
! stderr switching
stderr 'rsc.io/needgo122@v0.0.1 requires go >= 1.22'
stderr 'rsc.io/needgo123@v0.0.1 requires go >= 1.23'
stderr 'rsc.io/needall@v0.0.1 requires go >= 1.23'
stderr 'requires go >= 1.23'
! stderr 'requires go >= 1.21' # that's us!


# JSON output should be emitted exactly once,
# and non-JSON output should go to stderr instead of stdout.
env GOTOOLCHAIN=auto
go mod download -json rsc.io/needgo121@latest rsc.io/needgo122@latest rsc.io/needgo123@latest rsc.io/needall@latest
stderr '^go: rsc.io/needall@v0.0.1 requires go >= 1.23; switching to go1.23.9$'
! stderr '\(running'
stdout -count=1 '"Path": "rsc.io/needgo121",'
stdout -count=1 '"Path": "rsc.io/needgo122",'
stdout -count=1 '"Path": "rsc.io/needgo123",'
stdout -count=1 '"Path": "rsc.io/needall",'

# GOTOOLCHAIN=go1.21 should write the errors in the JSON Error fields, not to stderr.
env GOTOOLCHAIN=go1.21
! go mod download -json rsc.io/needgo121@latest rsc.io/needgo122@latest rsc.io/needgo123@latest rsc.io/needall@latest
! stderr switching
stdout -count=1 '"Error": "rsc.io/needgo122@v0.0.1 requires go .*= 1.22 \(running go 1.21; GOTOOLCHAIN=go1.21\)"'
stdout -count=1 '"Error": "rsc.io/needgo123@v0.0.1 requires go .*= 1.23 \(running go 1.21; GOTOOLCHAIN=go1.21\)"'
stdout -count=1 '"Error": "rsc.io/needall@v0.0.1 requires go .*= 1.23 \(running go 1.21; GOTOOLCHAIN=go1.21\)"'
! stdout '"Error": "rsc.io/needgo121' # We can handle this one.
! stderr .


# Within a module, 'go mod download' of explicit versions should upgrade if
# needed to perform the download, but should not change the main module's
# toolchain version (because the downloaded modules are still not required by
# the main module).

cd example
cp go.mod go.mod.orig

env GOTOOLCHAIN=auto
go mod download rsc.io/needgo121@latest rsc.io/needgo122@latest rsc.io/needgo123@latest rsc.io/needall@latest
stderr '^go: rsc.io/needall@v0.0.1 requires go >= 1.23; switching to go1.23.9$'
! stderr '\(running'
cmp go.mod go.mod.orig


# However, 'go mod download' without arguments should fix up the
# 'go' and 'toolchain' lines to be consistent with the existing
# requirements in the module graph.

go mod edit -require=rsc.io/needall@v0.0.1
cp go.mod go.mod.121

# If an upgrade is needed, GOTOOLCHAIN=go1.21 should cause
# the command to fail without changing go.mod.

env GOTOOLCHAIN=go1.21
! go mod download
stderr 'rsc.io/needall@v0.0.1 requires go >= 1.23'
! stderr switching
cmp go.mod go.mod.121

# If an upgrade is needed, GOTOOLCHAIN=auto should perform
# the upgrade and record the resulting toolchain version.

env GOTOOLCHAIN=auto
go mod download
stderr '^go: module rsc.io/needall@v0.0.1 requires go >= 1.23; switching to go1.23.9$'
cmp go.mod go.mod.final


-- example/go.mod --
module example

go 1.21
-- example/go.mod.final --
module example

go 1.23

toolchain go1.23.9

require rsc.io/needall v0.0.1

@@ -1,6 +1,7 @@
env TESTGO_VERSION=go1.21
env GOTOOLCHAIN=local
! go mod download rsc.io/future@v1.0.0
stderr '^go: rsc.io/future@v1.0.0 requires go >= 1.999 \(running go 1.21\)$'
stderr '^go: rsc.io/future@v1.0.0 requires go >= 1.999 \(running go 1.21; GOTOOLCHAIN=local\)$'

-- go.mod --
module m

src/cmd/go/testdata/script/mod_get_insecure_redirect.txt (19 lines, vendored, new file)
@@ -0,0 +1,19 @@
# golang.org/issue/29591: 'go get' was following plain-HTTP redirects even without -insecure (now replaced by GOINSECURE).
# golang.org/issue/61877: 'go get' would panic in case of an insecure redirect in module mode

[!git] skip

env GOPRIVATE=vcs-test.golang.org

! go get -d vcs-test.golang.org/insecure/go/insecure
stderr 'redirected .* to insecure URL'

[short] stop 'builds a git repo'

env GOINSECURE=vcs-test.golang.org/insecure/go/insecure
go get -d vcs-test.golang.org/insecure/go/insecure

-- go.mod --
module example
go 1.21

src/cmd/go/testdata/script/mod_insecure_issue63845.txt (28 lines, vendored, new file)
@@ -0,0 +1,28 @@
# Regression test for https://go.dev/issue/63845:
# If 'git ls-remote' fails for all secure protocols,
# we should fail instead of falling back to an arbitrary protocol.
#
# Note that this test does not use the local vcweb test server
# (vcs-test.golang.org), because the hook for redirecting to that
# server bypasses the "ping to determine protocol" logic
# in cmd/go/internal/vcs.

[!net:golang.org] skip
[!git] skip
[short] skip 'tries to access a nonexistent external Git repo'

env GOPRIVATE=golang.org
env CURLOPT_TIMEOUT_MS=100
env GIT_SSH_COMMAND=false

! go get -x golang.org/nonexist.git@latest
stderr '^git ls-remote https://golang.org/nonexist$'
stderr '^git ls-remote git\+ssh://golang.org/nonexist'
stderr '^git ls-remote ssh://golang.org/nonexist$'
! stderr 'git://'
stderr '^go: golang.org/nonexist.git@latest: no secure protocol found for repository$'

-- go.mod --
module example

go 1.19

src/cmd/go/testdata/script/mod_toolchain_slash.txt (32 lines, vendored, new file)
@@ -0,0 +1,32 @@
[!exec:/bin/sh] skip

chmod 0777 go1.999999-/run.sh
chmod 0777 run.sh

! go list all
! stdout 'RAN SCRIPT'

cd subdir
! go list all
! stdout 'RAN SCRIPT'

-- go.mod --
module exploit

go 1.21
toolchain go1.999999-/run.sh
-- go1.999999-/run.sh --
#!/bin/sh
printf 'RAN SCRIPT\n'
exit 1
-- run.sh --
#!/bin/sh
printf 'RAN SCRIPT\n'
exit 1
-- subdir/go.mod --
module exploit

go 1.21
toolchain go1.999999-/../../run.sh
-- subdir/go1.999999-/README.txt --
heh heh heh

src/cmd/go/testdata/script/mod_verify_work.txt (24 lines, vendored, new file)
@@ -0,0 +1,24 @@
# Regression test for Issue #62663: we would filter out the toolchain and
# main modules from the build list incorrectly, leading to the workspace
# modules being checked for correct sums. Specifically this would happen when
# the module name sorted after the virtual 'go' version module name because
# it could not get chopped off when we removed the MainModules.Len() modules
# at the beginning of the build list and we would remove the go module instead.

go mod verify

-- go.work --
go 1.21

use (
	./a
	./b
)
-- a/go.mod --
module hexample.com/a // important for test that module name sorts after 'go'

go 1.21
-- b/go.mod --
module hexample.com/b // important for test that module name sorts after 'go'

go 1.21

@@ -2,6 +2,10 @@

# Verify test -c can output multiple executables to a directory.

# This test also serves as a regression test for https://go.dev/issue/62221:
# prior to the fix for that issue, it occasionally failed with ETXTBSY when
# run on Unix platforms.

go test -c -o $WORK/some/nonexisting/directory/ ./pkg/...
exists -exec $WORK/some/nonexisting/directory/pkg1.test$GOEXE
exists -exec $WORK/some/nonexisting/directory/pkg2.test$GOEXE
@@ -43,4 +47,4 @@ package pkg1
package pkg2

-- anotherpkg/pkg1/pkg1_test.go --
package pkg1
package pkg1

@@ -14,6 +14,10 @@ go build -ldflags='-linkmode=internal'
exec ./abitest
stdout success

go build -buildmode=pie -o abitest.pie -ldflags='-linkmode=internal'
exec ./abitest.pie
stdout success

-- go.mod --
module abitest

@@ -826,21 +826,24 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
p.To = obj.Addr{}
if c.cursym.Func().Text.Mark&LEAF != 0 {
	if c.autosize != 0 {
		// Restore frame pointer.
		// ADD $framesize-8, RSP, R29
		p.As = AADD
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(c.autosize) - 8
		p.Reg = REGSP
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REGFP

		// Pop stack frame.
		// ADD $framesize, RSP, RSP
		p = obj.Appendp(p, c.newprog)
		p.As = AADD
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(c.autosize)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REGSP
		p.Spadj = -c.autosize

		// Frame pointer.
		p = obj.Appendp(p, c.newprog)
		p.As = ASUB
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 8
		p.Reg = REGSP
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REGFP
	}
} else {
	aoffset := c.autosize

@@ -36,8 +36,25 @@ import (
	"cmd/internal/sys"
	"internal/abi"
	"log"
	"strings"
)

// Is this a symbol which should never have a TOC prologue generated?
// These are special functions which should not have a TOC regeneration
// prologue.
func isNOTOCfunc(name string) bool {
	switch {
	case name == "runtime.duffzero":
		return true
	case name == "runtime.duffcopy":
		return true
	case strings.HasPrefix(name, "runtime.elf_"):
		return true
	default:
		return false
	}
}

func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
	p.From.Class = 0
	p.To.Class = 0

@@ -643,7 +660,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {

	q = p

	if NeedTOCpointer(c.ctxt) && c.cursym.Name != "runtime.duffzero" && c.cursym.Name != "runtime.duffcopy" {
	if NeedTOCpointer(c.ctxt) && !isNOTOCfunc(c.cursym.Name) {
		// When compiling Go into PIC, without PCrel support, all functions must start
		// with instructions to load the TOC pointer into r2:
		//

@@ -2067,17 +2067,22 @@ func instructionsForProg(p *obj.Prog) []*instruction {
	return instructionsForStore(p, ins.as, p.To.Reg)

case ALRW, ALRD:
	// Set aq to use acquire access ordering, which matches Go's memory requirements.
	// Set aq to use acquire access ordering
	ins.funct7 = 2
	ins.rs1, ins.rs2 = uint32(p.From.Reg), REG_ZERO

case AADDI, AANDI, AORI, AXORI:
	inss = instructionsForOpImmediate(p, ins.as, p.Reg)

case ASCW, ASCD, AAMOSWAPW, AAMOSWAPD, AAMOADDW, AAMOADDD, AAMOANDW, AAMOANDD, AAMOORW, AAMOORD,
case ASCW, ASCD:
	// Set release access ordering
	ins.funct7 = 1
	ins.rd, ins.rs1, ins.rs2 = uint32(p.RegTo2), uint32(p.To.Reg), uint32(p.From.Reg)

case AAMOSWAPW, AAMOSWAPD, AAMOADDW, AAMOADDD, AAMOANDW, AAMOANDD, AAMOORW, AAMOORD,
	AAMOXORW, AAMOXORD, AAMOMINW, AAMOMIND, AAMOMINUW, AAMOMINUD, AAMOMAXW, AAMOMAXD, AAMOMAXUW, AAMOMAXUD:
	// Set aq to use acquire access ordering, which matches Go's memory requirements.
	ins.funct7 = 2
	// Set aqrl to use acquire & release access ordering
	ins.funct7 = 3
	ins.rd, ins.rs1, ins.rs2 = uint32(p.RegTo2), uint32(p.To.Reg), uint32(p.From.Reg)

case AECALL, AEBREAK, ARDCYCLE, ARDTIME, ARDINSTRET:

@@ -446,7 +446,7 @@ func machoreloc1(arch *sys.Arch, out *ld.OutBuf, ldr *loader.Loader, s loader.Sy
rs := r.Xsym
rt := r.Type

if ldr.SymType(rs) == sym.SHOSTOBJ || rt == objabi.R_PCREL || rt == objabi.R_GOTPCREL || rt == objabi.R_CALL {
if rt == objabi.R_PCREL || rt == objabi.R_GOTPCREL || rt == objabi.R_CALL || ldr.SymType(rs) == sym.SHOSTOBJ || ldr.SymType(s) == sym.SINITARR {
	if ldr.SymDynid(rs) < 0 {
		ldr.Errorf(s, "reloc %d (%s) to non-macho symbol %s type=%d (%s)", rt, sym.RelocName(arch, rt), ldr.SymName(rs), ldr.SymType(rs), ldr.SymType(rs))
		return false