mirror of
https://github.com/golang/go.git
synced 2026-01-30 07:32:05 +03:00
Compare commits
116 Commits
dev.inline
...
go1.21.2
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
26b5783b72 | ||
|
|
2ddfc04d12 | ||
|
|
a15ef1bb0f | ||
|
|
41d71a5afa | ||
|
|
0b6b0a275a | ||
|
|
cd671a1180 | ||
|
|
fc57cc31a0 | ||
|
|
9bec49cf52 | ||
|
|
3ef4f939c3 | ||
|
|
556e9c36ba | ||
|
|
b64dc5f499 | ||
|
|
cd66ca0636 | ||
|
|
d7a0626806 | ||
|
|
2c1e5b05fe | ||
|
|
bbd043ff0d | ||
|
|
b0e1d3ea26 | ||
|
|
d25a935574 | ||
|
|
e3ba569c78 | ||
|
|
8dc6ad1c61 | ||
|
|
06df3292a8 | ||
|
|
b120517ffd | ||
|
|
0a9582163c | ||
|
|
91a4e74b98 | ||
|
|
6385a6fb18 | ||
|
|
2d07bb86f0 | ||
|
|
745b81b6e6 | ||
|
|
13339c75b8 | ||
|
|
2977709875 | ||
|
|
2d4746f37b | ||
|
|
2b8026f025 | ||
|
|
7c97cc7d97 | ||
|
|
cb6ea94996 | ||
|
|
45b98bfb79 | ||
|
|
bac083a584 | ||
|
|
70aa116c4a | ||
|
|
31c5a236bc | ||
|
|
25ec110e51 | ||
|
|
6634ce2f41 | ||
|
|
25c6dce188 | ||
|
|
4e34f2e81d | ||
|
|
d91843ff67 | ||
|
|
7437db1085 | ||
|
|
ed527ecfb2 | ||
|
|
b78e8cc145 | ||
|
|
3475e6af4c | ||
|
|
179821c9e1 | ||
|
|
9398951479 | ||
|
|
75d8be5fb4 | ||
|
|
1755d14559 | ||
|
|
c19c4c566c | ||
|
|
e973d24261 | ||
|
|
2e6276df34 | ||
|
|
aeef93cd64 | ||
|
|
35de5f2b0e | ||
|
|
a3b092d65e | ||
|
|
07c72a0915 | ||
|
|
041dd5ce05 | ||
|
|
a51957fb0b | ||
|
|
363f2594aa | ||
|
|
9b53b9b585 | ||
|
|
4a14d9c9af | ||
|
|
9786164333 | ||
|
|
6df6e61cbb | ||
|
|
b25266c58d | ||
|
|
1ea8d38517 | ||
|
|
b2ffc23a82 | ||
|
|
8472fcb62d | ||
|
|
b36e5555dd | ||
|
|
ed977e2f47 | ||
|
|
2fabb143d7 | ||
|
|
c9f01f0ec7 | ||
|
|
252f20b2c1 | ||
|
|
7ee7a21ef2 | ||
|
|
06a9034b60 | ||
|
|
03c7e96be9 | ||
|
|
c2de6836c1 | ||
|
|
2639a17f14 | ||
|
|
28ca813a13 | ||
|
|
d983be9cb5 | ||
|
|
9b33543339 | ||
|
|
229cde5149 | ||
|
|
c3458e35f4 | ||
|
|
fe5af1532a | ||
|
|
847d40d699 | ||
|
|
4aeac326b5 | ||
|
|
c30faf9c54 | ||
|
|
089e37a931 | ||
|
|
9480b4adf9 | ||
|
|
b88bd917b8 | ||
|
|
d4f0d896a6 | ||
|
|
4a0f51696e | ||
|
|
49d42128fd | ||
|
|
cc0cb3020d | ||
|
|
6244b1946b | ||
|
|
b4872ea187 | ||
|
|
87350393e6 | ||
|
|
230e549142 | ||
|
|
5e4000ad7f | ||
|
|
af8f94e3c5 | ||
|
|
3eaee3d5dd | ||
|
|
167c8b73bf | ||
|
|
cb7a091d72 | ||
|
|
651869716a | ||
|
|
3f8b04bfb5 | ||
|
|
4c58d6bf52 | ||
|
|
6a063b01b0 | ||
|
|
07ede7a543 | ||
|
|
7dc62f3bda | ||
|
|
0b65b02ba5 | ||
|
|
c4db811e44 | ||
|
|
5c15498609 | ||
|
|
d8117459c5 | ||
|
|
ebbff91f59 | ||
|
|
1c1c82432a | ||
|
|
b4a0665266 | ||
|
|
577e7b9bb9 |
@@ -60,7 +60,9 @@ pkg crypto/tls, method (*QUICConn) Close() error #44886
|
||||
pkg crypto/tls, method (*QUICConn) ConnectionState() ConnectionState #44886
|
||||
pkg crypto/tls, method (*QUICConn) HandleData(QUICEncryptionLevel, []uint8) error #44886
|
||||
pkg crypto/tls, method (*QUICConn) NextEvent() QUICEvent #44886
|
||||
pkg crypto/tls, method (*QUICConn) SendSessionTicket(bool) error #60107
|
||||
pkg crypto/tls, method (*QUICConn) SendSessionTicket(QUICSessionTicketOptions) error #60107
|
||||
pkg crypto/tls, type QUICSessionTicketOptions struct #60107
|
||||
pkg crypto/tls, type QUICSessionTicketOptions struct, EarlyData bool #60107
|
||||
pkg crypto/tls, method (*QUICConn) SetTransportParameters([]uint8) #44886
|
||||
pkg crypto/tls, method (*QUICConn) Start(context.Context) error #44886
|
||||
pkg crypto/tls, method (QUICEncryptionLevel) String() string #44886
|
||||
@@ -219,18 +221,18 @@ pkg log/slog, func Any(string, interface{}) Attr #56345
|
||||
pkg log/slog, func AnyValue(interface{}) Value #56345
|
||||
pkg log/slog, func Bool(string, bool) Attr #56345
|
||||
pkg log/slog, func BoolValue(bool) Value #56345
|
||||
pkg log/slog, func DebugCtx(context.Context, string, ...interface{}) #56345
|
||||
pkg log/slog, func DebugContext(context.Context, string, ...interface{}) #61200
|
||||
pkg log/slog, func Debug(string, ...interface{}) #56345
|
||||
pkg log/slog, func Default() *Logger #56345
|
||||
pkg log/slog, func Duration(string, time.Duration) Attr #56345
|
||||
pkg log/slog, func DurationValue(time.Duration) Value #56345
|
||||
pkg log/slog, func ErrorCtx(context.Context, string, ...interface{}) #56345
|
||||
pkg log/slog, func ErrorContext(context.Context, string, ...interface{}) #61200
|
||||
pkg log/slog, func Error(string, ...interface{}) #56345
|
||||
pkg log/slog, func Float64(string, float64) Attr #56345
|
||||
pkg log/slog, func Float64Value(float64) Value #56345
|
||||
pkg log/slog, func Group(string, ...interface{}) Attr #59204
|
||||
pkg log/slog, func GroupValue(...Attr) Value #56345
|
||||
pkg log/slog, func InfoCtx(context.Context, string, ...interface{}) #56345
|
||||
pkg log/slog, func InfoContext(context.Context, string, ...interface{}) #61200
|
||||
pkg log/slog, func Info(string, ...interface{}) #56345
|
||||
pkg log/slog, func Int64(string, int64) Attr #56345
|
||||
pkg log/slog, func Int64Value(int64) Value #56345
|
||||
@@ -250,7 +252,7 @@ pkg log/slog, func Time(string, time.Time) Attr #56345
|
||||
pkg log/slog, func TimeValue(time.Time) Value #56345
|
||||
pkg log/slog, func Uint64(string, uint64) Attr #56345
|
||||
pkg log/slog, func Uint64Value(uint64) Value #56345
|
||||
pkg log/slog, func WarnCtx(context.Context, string, ...interface{}) #56345
|
||||
pkg log/slog, func WarnContext(context.Context, string, ...interface{}) #61200
|
||||
pkg log/slog, func Warn(string, ...interface{}) #56345
|
||||
pkg log/slog, func With(...interface{}) *Logger #56345
|
||||
pkg log/slog, method (Attr) Equal(Attr) bool #56345
|
||||
@@ -271,17 +273,17 @@ pkg log/slog, method (*LevelVar) MarshalText() ([]uint8, error) #56345
|
||||
pkg log/slog, method (*LevelVar) Set(Level) #56345
|
||||
pkg log/slog, method (*LevelVar) String() string #56345
|
||||
pkg log/slog, method (*LevelVar) UnmarshalText([]uint8) error #56345
|
||||
pkg log/slog, method (*Logger) DebugCtx(context.Context, string, ...interface{}) #56345
|
||||
pkg log/slog, method (*Logger) DebugContext(context.Context, string, ...interface{}) #61200
|
||||
pkg log/slog, method (*Logger) Debug(string, ...interface{}) #56345
|
||||
pkg log/slog, method (*Logger) Enabled(context.Context, Level) bool #56345
|
||||
pkg log/slog, method (*Logger) ErrorCtx(context.Context, string, ...interface{}) #56345
|
||||
pkg log/slog, method (*Logger) ErrorContext(context.Context, string, ...interface{}) #61200
|
||||
pkg log/slog, method (*Logger) Error(string, ...interface{}) #56345
|
||||
pkg log/slog, method (*Logger) Handler() Handler #56345
|
||||
pkg log/slog, method (*Logger) InfoCtx(context.Context, string, ...interface{}) #56345
|
||||
pkg log/slog, method (*Logger) InfoContext(context.Context, string, ...interface{}) #61200
|
||||
pkg log/slog, method (*Logger) Info(string, ...interface{}) #56345
|
||||
pkg log/slog, method (*Logger) LogAttrs(context.Context, Level, string, ...Attr) #56345
|
||||
pkg log/slog, method (*Logger) Log(context.Context, Level, string, ...interface{}) #56345
|
||||
pkg log/slog, method (*Logger) WarnCtx(context.Context, string, ...interface{}) #56345
|
||||
pkg log/slog, method (*Logger) WarnContext(context.Context, string, ...interface{}) #61200
|
||||
pkg log/slog, method (*Logger) Warn(string, ...interface{}) #56345
|
||||
pkg log/slog, method (*Logger) WithGroup(string) *Logger #56345
|
||||
pkg log/slog, method (*Logger) With(...interface{}) *Logger #56345
|
||||
@@ -344,8 +346,6 @@ pkg maps, func Copy[$0 interface{ ~map[$2]$3 }, $1 interface{ ~map[$2]$3 }, $2 c
|
||||
pkg maps, func DeleteFunc[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0, func($1, $2) bool) #57436
|
||||
pkg maps, func Equal[$0 interface{ ~map[$2]$3 }, $1 interface{ ~map[$2]$3 }, $2 comparable, $3 comparable]($0, $1) bool #57436
|
||||
pkg maps, func EqualFunc[$0 interface{ ~map[$2]$3 }, $1 interface{ ~map[$2]$4 }, $2 comparable, $3 interface{}, $4 interface{}]($0, $1, func($3, $4) bool) bool #57436
|
||||
pkg maps, func Keys[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0) []$1 #57436
|
||||
pkg maps, func Values[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0) []$2 #57436
|
||||
pkg math/big, method (*Int) Float64() (float64, Accuracy) #56984
|
||||
pkg net/http, method (*ProtocolError) Is(error) bool #41198
|
||||
pkg net/http, method (*ResponseController) EnableFullDuplex() error #57786
|
||||
|
||||
@@ -1 +1,2 @@
|
||||
branch: master
|
||||
branch: release-branch.go1.21
|
||||
parent-branch: master
|
||||
|
||||
1246
doc/go1.21.html
1246
doc/go1.21.html
File diff suppressed because it is too large
Load Diff
756
doc/go_spec.html
756
doc/go_spec.html
@@ -1,6 +1,6 @@
|
||||
<!--{
|
||||
"Title": "The Go Programming Language Specification",
|
||||
"Subtitle": "Version of June 14, 2023",
|
||||
"Subtitle": "Version of Aug 2, 2023",
|
||||
"Path": "/ref/spec"
|
||||
}-->
|
||||
|
||||
@@ -2511,7 +2511,7 @@ type (
|
||||
|
||||
<p>
|
||||
A type definition creates a new, distinct type with the same
|
||||
<a href="#Types">underlying type</a> and operations as the given type
|
||||
<a href="#Underlying_types">underlying type</a> and operations as the given type
|
||||
and binds an identifier, the <i>type name</i>, to it.
|
||||
</p>
|
||||
|
||||
@@ -4343,7 +4343,7 @@ type parameter list type arguments after substitution
|
||||
When using a generic function, type arguments may be provided explicitly,
|
||||
or they may be partially or completely <a href="#Type_inference">inferred</a>
|
||||
from the context in which the function is used.
|
||||
Provided that they can be inferred, type arguments may be omitted entirely if the function is:
|
||||
Provided that they can be inferred, type argument lists may be omitted entirely if the function is:
|
||||
</p>
|
||||
|
||||
<ul>
|
||||
@@ -4351,7 +4351,7 @@ Provided that they can be inferred, type arguments may be omitted entirely if th
|
||||
<a href="#Calls">called</a> with ordinary arguments,
|
||||
</li>
|
||||
<li>
|
||||
<a href="#Assignment_statements">assigned</a> to a variable with an explicitly declared type,
|
||||
<a href="#Assignment_statements">assigned</a> to a variable with a known type
|
||||
</li>
|
||||
<li>
|
||||
<a href="#Calls">passed as an argument</a> to another function, or
|
||||
@@ -4371,7 +4371,7 @@ must be inferrable from the context in which the function is used.
|
||||
// sum returns the sum (concatenation, for strings) of its arguments.
|
||||
func sum[T ~int | ~float64 | ~string](x... T) T { … }
|
||||
|
||||
x := sum // illegal: sum must have a type argument (x is a variable without a declared type)
|
||||
x := sum // illegal: the type of x is unknown
|
||||
intSum := sum[int] // intSum has type func(x... int) int
|
||||
a := intSum(2, 3) // a has value 5 of type int
|
||||
b := sum[float64](2.0, 3) // b has value 5.0 of type float64
|
||||
@@ -4406,402 +4406,323 @@ For a generic type, all type arguments must always be provided explicitly.
|
||||
<h3 id="Type_inference">Type inference</h3>
|
||||
|
||||
<p>
|
||||
<em>NOTE: This section is not yet up-to-date for Go 1.21.</em>
|
||||
A use of a generic function may omit some or all type arguments if they can be
|
||||
<i>inferred</i> from the context within which the function is used, including
|
||||
the constraints of the function's type parameters.
|
||||
Type inference succeeds if it can infer the missing type arguments
|
||||
and <a href="#Instantiations">instantiation</a> succeeds with the
|
||||
inferred type arguments.
|
||||
Otherwise, type inference fails and the program is invalid.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Missing function type arguments may be <i>inferred</i> by a series of steps, described below.
|
||||
Each step attempts to use known information to infer additional type arguments.
|
||||
Type inference stops as soon as all type arguments are known.
|
||||
After type inference is complete, it is still necessary to substitute all type arguments
|
||||
for type parameters and verify that each type argument
|
||||
<a href="#Implementing_an_interface">implements</a> the relevant constraint;
|
||||
it is possible for an inferred type argument to fail to implement a constraint, in which
|
||||
case instantiation fails.
|
||||
Type inference uses the type relationships between pairs of types for inference:
|
||||
For instance, a function argument must be <a href="#Assignability">assignable</a>
|
||||
to its respective function parameter; this establishes a relationship between the
|
||||
type of the argument and the type of the parameter.
|
||||
If either of these two types contains type parameters, type inference looks for the
|
||||
type arguments to substitute the type parameters with such that the assignability
|
||||
relationship is satisfied.
|
||||
Similarly, type inference uses the fact that a type argument must
|
||||
<a href="#Satisfying_a_type_constraint">satisfy</a> the constraint of its respective
|
||||
type parameter.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Type inference is based on
|
||||
Each such pair of matched types corresponds to a <i>type equation</i> containing
|
||||
one or multiple type parameters, from one or possibly multiple generic functions.
|
||||
Inferring the missing type arguments means solving the resulting set of type
|
||||
equations for the respective type parameters.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
For example, given
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
// dedup returns a copy of the argument slice with any duplicate entries removed.
|
||||
func dedup[S ~[]E, E comparable](S) S { … }
|
||||
|
||||
type Slice []int
|
||||
var s Slice
|
||||
s = dedup(s) // same as s = dedup[Slice, int](s)
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
the variable <code>s</code> of type <code>Slice</code> must be assignable to
|
||||
the function parameter type <code>S</code> for the program to be valid.
|
||||
To reduce complexity, type inference ignores the directionality of assignments,
|
||||
so the type relationship between <code>Slice</code> and <code>S</code> can be
|
||||
expressed via the (symmetric) type equation <code>Slice ≡<sub>A</sub> S</code>
|
||||
(or <code>S ≡<sub>A</sub> Slice</code> for that matter),
|
||||
where the <code><sub>A</sub></code> in <code>≡<sub>A</sub></code>
|
||||
indicates that the LHS and RHS types must match per assignability rules
|
||||
(see the section on <a href="#Type_unification">type unification</a> for
|
||||
details).
|
||||
Similarly, the type parameter <code>S</code> must satisfy its constraint
|
||||
<code>~[]E</code>. This can be expressed as <code>S ≡<sub>C</sub> ~[]E</code>
|
||||
where <code>X ≡<sub>C</sub> Y</code> stands for
|
||||
"<code>X</code> satisfies constraint <code>Y</code>".
|
||||
These observations lead to a set of two equations
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
Slice ≡<sub>A</sub> S (1)
|
||||
S ≡<sub>C</sub> ~[]E (2)
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
which now can be solved for the type parameters <code>S</code> and <code>E</code>.
|
||||
From (1) a compiler can infer that the type argument for <code>S</code> is <code>Slice</code>.
|
||||
Similarly, because the underlying type of <code>Slice</code> is <code>[]int</code>
|
||||
and <code>[]int</code> must match <code>[]E</code> of the constraint,
|
||||
a compiler can infer that <code>E</code> must be <code>int</code>.
|
||||
Thus, for these two equations, type inference infers
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
S ➞ Slice
|
||||
E ➞ int
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
Given a set of type equations, the type parameters to solve for are
|
||||
the type parameters of the functions that need to be instantiated
|
||||
and for which no explicit type arguments are provided.
|
||||
These type parameters are called <i>bound</i> type parameters.
|
||||
For instance, in the <code>dedup</code> example above, the type parameters
|
||||
<code>S</code> and <code>E</code> are bound to <code>dedup</code>.
|
||||
An argument to a generic function call may be a generic function itself.
|
||||
The type parameters of that function are included in the set of bound
|
||||
type parameters.
|
||||
The types of function arguments may contain type parameters from other
|
||||
functions (such as a generic function enclosing a function call).
|
||||
Those type parameters may also appear in type equations but they are
|
||||
not bound in that context.
|
||||
Type equations are always solved for the bound type parameters only.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Type inference supports calls of generic functions and assignments
|
||||
of generic functions to (explicitly function-typed) variables.
|
||||
This includes passing generic functions as arguments to other
|
||||
(possibly also generic) functions, and returning generic functions
|
||||
as results.
|
||||
Type inference operates on a set of equations specific to each of
|
||||
these cases.
|
||||
The equations are as follows (type argument lists are omitted for clarity):
|
||||
</p>
|
||||
|
||||
<ul>
|
||||
<li>
|
||||
a <a href="#Type_parameter_declarations">type parameter list</a>
|
||||
<p>
|
||||
For a function call <code>f(a<sub>0</sub>, a<sub>1</sub>, …)</code> where
|
||||
<code>f</code> or a function argument <code>a<sub>i</sub></code> is
|
||||
a generic function:
|
||||
<br>
|
||||
Each pair <code>(a<sub>i</sub>, p<sub>i</sub>)</code> of corresponding
|
||||
function arguments and parameters where <code>a<sub>i</sub></code> is not an
|
||||
<a href="#Constants">untyped constant</a> yields an equation
|
||||
<code>typeof(p<sub>i</sub>) ≡<sub>A</sub> typeof(a<sub>i</sub>)</code>.
|
||||
<br>
|
||||
If <code>a<sub>i</sub></code> is an untyped constant <code>c<sub>j</sub></code>,
|
||||
and <code>typeof(p<sub>i</sub>)</code> is a bound type parameter <code>P<sub>k</sub></code>,
|
||||
the pair <code>(c<sub>j</sub>, P<sub>k</sub>)</code> is collected separately from
|
||||
the type equations.
|
||||
</p>
|
||||
</li>
|
||||
<li>
|
||||
a substitution map <i>M</i> initialized with the known type arguments, if any
|
||||
<p>
|
||||
For an assignment <code>v = f</code> of a generic function <code>f</code> to a
|
||||
(non-generic) variable <code>v</code> of function type:
|
||||
<br>
|
||||
<code>typeof(v) ≡<sub>A</sub> typeof(f)</code>.
|
||||
</p>
|
||||
</li>
|
||||
<li>
|
||||
a (possibly empty) list of ordinary function arguments (in case of a function call only)
|
||||
<p>
|
||||
For a return statement <code>return …, f, … </code> where <code>f</code> is a
|
||||
generic function returned as a result to a (non-generic) result variable
|
||||
<code>r</code> of function type:
|
||||
<br>
|
||||
<code>typeof(r) ≡<sub>A</sub> typeof(f)</code>.
|
||||
</p>
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
<p>
|
||||
and then proceeds with the following steps:
|
||||
Additionally, each type parameter <code>P<sub>k</sub></code> and corresponding type constraint
|
||||
<code>C<sub>k</sub></code> yields the type equation
|
||||
<code>P<sub>k</sub> ≡<sub>C</sub> C<sub>k</sub></code>.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Type inference gives precedence to type information obtained from typed operands
|
||||
before considering untyped constants.
|
||||
Therefore, inference proceeds in two phases:
|
||||
</p>
|
||||
|
||||
<ol>
|
||||
<li>
|
||||
apply <a href="#Function_argument_type_inference"><i>function argument type inference</i></a>
|
||||
to all <i>typed</i> ordinary function arguments
|
||||
<p>
|
||||
The type equations are solved for the bound
|
||||
type parameters using <a href="#Type_unification">type unification</a>.
|
||||
If unification fails, type inference fails.
|
||||
</p>
|
||||
</li>
|
||||
<li>
|
||||
apply <a href="#Constraint_type_inference"><i>constraint type inference</i></a>
|
||||
</li>
|
||||
<li>
|
||||
apply function argument type inference to all <i>untyped</i> ordinary function arguments
|
||||
using the default type for each of the untyped function arguments
|
||||
</li>
|
||||
<li>
|
||||
apply constraint type inference
|
||||
<p>
|
||||
For each bound type parameter <code>P<sub>k</sub></code> for which no type argument
|
||||
has been inferred yet and for which one or more pairs
|
||||
<code>(c<sub>j</sub>, P<sub>k</sub>)</code> with that same type parameter
|
||||
were collected, determine the <a href="#Constant_expressions">constant kind</a>
|
||||
of the constants <code>c<sub>j</sub></code> in all those pairs the same way as for
|
||||
<a href="#Constant_expressions">constant expressions</a>.
|
||||
The type argument for <code>P<sub>k</sub></code> is the
|
||||
<a href="#Constants">default type</a> for the determined constant kind.
|
||||
If a constant kind cannot be determined due to conflicting constant kinds,
|
||||
type inference fails.
|
||||
</p>
|
||||
</li>
|
||||
</ol>
|
||||
|
||||
<p>
|
||||
If there are no ordinary or untyped function arguments, the respective steps are skipped.
|
||||
Constraint type inference is skipped if the previous step didn't infer any new type arguments,
|
||||
but it is run at least once if there are missing type arguments.
|
||||
If not all type arguments have been found after these two phases, type inference fails.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
The substitution map <i>M</i> is carried through all steps, and each step may add entries to <i>M</i>.
|
||||
The process stops as soon as <i>M</i> has a type argument for each type parameter or if an inference step fails.
|
||||
If an inference step fails, or if <i>M</i> is still missing type arguments after the last step, type inference fails.
|
||||
If the two phases are successful, type inference determined a type argument for each
|
||||
bound type parameter:
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
P<sub>k</sub> ➞ A<sub>k</sub>
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
A type argument <code>A<sub>k</sub></code> may be a composite type,
|
||||
containing other bound type parameters <code>P<sub>k</sub></code> as element types
|
||||
(or even be just another bound type parameter).
|
||||
In a process of repeated simplification, the bound type parameters in each type
|
||||
argument are substituted with the respective type arguments for those type
|
||||
parameters until each type argument is free of bound type parameters.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
If type arguments contain cyclic references to themselves
|
||||
through bound type parameters, simplification and thus type
|
||||
inference fails.
|
||||
Otherwise, type inference succeeds.
|
||||
</p>
|
||||
|
||||
<h4 id="Type_unification">Type unification</h4>
|
||||
|
||||
<p>
|
||||
Type inference is based on <i>type unification</i>. A single unification step
|
||||
applies to a <a href="#Type_inference">substitution map</a> and two types, either
|
||||
or both of which may be or contain type parameters. The substitution map tracks
|
||||
the known (explicitly provided or already inferred) type arguments: the map
|
||||
contains an entry <code>P</code> → <code>A</code> for each type
|
||||
parameter <code>P</code> and corresponding known type argument <code>A</code>.
|
||||
During unification, known type arguments take the place of their corresponding type
|
||||
parameters when comparing types. Unification is the process of finding substitution
|
||||
map entries that make the two types equivalent.
|
||||
Type inference solves type equations through <i>type unification</i>.
|
||||
Type unification recursively compares the LHS and RHS types of an
|
||||
equation, where either or both types may be or contain bound type parameters,
|
||||
and looks for type arguments for those type parameters such that the LHS
|
||||
and RHS match (become identical or assignment-compatible, depending on
|
||||
context).
|
||||
To that effect, type inference maintains a map of bound type parameters
|
||||
to inferred type arguments; this map is consulted and updated during type unification.
|
||||
Initially, the bound type parameters are known but the map is empty.
|
||||
During type unification, if a new type argument <code>A</code> is inferred,
|
||||
the respective mapping <code>P ➞ A</code> from type parameter to argument
|
||||
is added to the map.
|
||||
Conversely, when comparing types, a known type argument
|
||||
(a type argument for which a map entry already exists)
|
||||
takes the place of its corresponding type parameter.
|
||||
As type inference progresses, the map is populated more and more
|
||||
until all equations have been considered, or until unification fails.
|
||||
Type inference succeeds if no unification step fails and the map has
|
||||
an entry for each type parameter.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
For unification, two types that don't contain any type parameters from the current type
|
||||
parameter list are <i>equivalent</i>
|
||||
if they are identical, or if they are channel types that are identical ignoring channel
|
||||
direction, or if their underlying types are equivalent.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Unification works by comparing the structure of pairs of types: their structure
|
||||
disregarding type parameters must be identical, and types other than type parameters
|
||||
must be equivalent.
|
||||
A type parameter in one type may match any complete subtype in the other type;
|
||||
each successful match causes an entry to be added to the substitution map.
|
||||
If the structure differs, or types other than type parameters are not equivalent,
|
||||
unification fails.
|
||||
</p>
|
||||
|
||||
<!--
|
||||
TODO(gri) Somewhere we need to describe the process of adding an entry to the
|
||||
substitution map: if the entry is already present, the type argument
|
||||
values are themselves unified.
|
||||
-->
|
||||
|
||||
<p>
|
||||
For example, if <code>T1</code> and <code>T2</code> are type parameters,
|
||||
<code>[]map[int]bool</code> can be unified with any of the following:
|
||||
</pre>
|
||||
For example, given the type equation with the bound type parameter
|
||||
<code>P</code>
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
[]map[int]bool // types are identical
|
||||
T1 // adds T1 → []map[int]bool to substitution map
|
||||
[]T1 // adds T1 → map[int]bool to substitution map
|
||||
[]map[T1]T2 // adds T1 → int and T2 → bool to substitution map
|
||||
[10]struct{ elem P; list []P } ≡<sub>A</sub> [10]struct{ elem string; list []string }
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
On the other hand, <code>[]map[int]bool</code> cannot be unified with any of
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
int // int is not a slice
|
||||
struct{} // a struct is not a slice
|
||||
[]struct{} // a struct is not a map
|
||||
[]map[T1]string // map element types don't match
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
As an exception to this general rule, because a <a href="#Type_definitions">defined type</a>
|
||||
<code>D</code> and a type literal <code>L</code> are never equivalent,
|
||||
unification compares the underlying type of <code>D</code> with <code>L</code> instead.
|
||||
For example, given the defined type
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
type Vector []float64
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
and the type literal <code>[]E</code>, unification compares <code>[]float64</code> with
|
||||
<code>[]E</code> and adds an entry <code>E</code> → <code>float64</code> to
|
||||
the substitution map.
|
||||
</p>
|
||||
|
||||
<h4 id="Function_argument_type_inference">Function argument type inference</h4>
|
||||
|
||||
<!-- In this section and the section on constraint type inference we start with examples
|
||||
rather than have the examples follow the rules as is customary elsewhere in spec.
|
||||
Hopefully this helps building an intuition and makes the rules easier to follow. -->
|
||||
|
||||
<p>
|
||||
Function argument type inference infers type arguments from function arguments:
|
||||
if a function parameter is declared with a type <code>T</code> that uses
|
||||
type parameters,
|
||||
<a href="#Type_unification">unifying</a> the type of the corresponding
|
||||
function argument with <code>T</code> may infer type arguments for the type
|
||||
parameters used by <code>T</code>.
|
||||
type inference starts with an empty map.
|
||||
Unification first compares the top-level structure of the LHS and RHS
|
||||
types.
|
||||
Both are arrays of the same length; they unify if the element types unify.
|
||||
Both element types are structs; they unify if they have
|
||||
the same number of fields with the same names and if the
|
||||
field types unify.
|
||||
The type argument for <code>P</code> is not known yet (there is no map entry),
|
||||
so unifying <code>P</code> with <code>string</code> adds
|
||||
the mapping <code>P ➞ string</code> to the map.
|
||||
Unifying the types of the <code>list</code> field requires
|
||||
unifying <code>[]P</code> and <code>[]string</code> and
|
||||
thus <code>P</code> and <code>string</code>.
|
||||
Since the type argument for <code>P</code> is known at this point
|
||||
(there is a map entry for <code>P</code>), its type argument
|
||||
<code>string</code> takes the place of <code>P</code>.
|
||||
And since <code>string</code> is identical to <code>string</code>,
|
||||
this unification step succeeds as well.
|
||||
Unification of the LHS and RHS of the equation is now finished.
|
||||
Type inference succeeds because there is only one type equation,
|
||||
no unification step failed, and the map is fully populated.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
For instance, given the generic function
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
func scale[Number ~int64|~float64|~complex128](v []Number, s Number) []Number
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
and the call
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
var vector []float64
|
||||
scaledVector := scale(vector, 42)
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
the type argument for <code>Number</code> can be inferred from the function argument
|
||||
<code>vector</code> by unifying the type of <code>vector</code> with the corresponding
|
||||
parameter type: <code>[]float64</code> and <code>[]Number</code>
|
||||
match in structure and <code>float64</code> matches with <code>Number</code>.
|
||||
This adds the entry <code>Number</code> → <code>float64</code> to the
|
||||
<a href="#Type_unification">substitution map</a>.
|
||||
Untyped arguments, such as the second function argument <code>42</code> here, are ignored
|
||||
in the first round of function argument type inference and only considered if there are
|
||||
unresolved type parameters left.
|
||||
Unification uses a combination of <i>exact</i> and <i>loose</i>
|
||||
unification depending on whether two types have to be
|
||||
<a href="#Type_identity">identical</a>,
|
||||
<a href="#Assignability">assignment-compatible</a>, or
|
||||
only structurally equal.
|
||||
The respective <a href="#Type_unification_rules">type unification rules</a>
|
||||
are spelled out in detail in the <a href="#Appendix">Appendix</a>.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Inference happens in two separate phases; each phase operates on a specific list of
|
||||
(parameter, argument) pairs:
|
||||
For an equation of the form <code>X ≡<sub>A</sub> Y</code>,
|
||||
where <code>X</code> and <code>Y</code> are types involved
|
||||
in an assignment (including parameter passing and return statements),
|
||||
the top-level type structures may unify loosely but element types
|
||||
must unify exactly, matching the rules for assignments.
|
||||
</p>
|
||||
|
||||
<ol>
|
||||
<p>
|
||||
For an equation of the form <code>P ≡<sub>C</sub> C</code>,
|
||||
where <code>P</code> is a type parameter and <code>C</code>
|
||||
its corresponding constraint, the unification rules are a bit
|
||||
more complicated:
|
||||
</p>
|
||||
|
||||
<ul>
|
||||
<li>
|
||||
The list <i>Lt</i> contains all (parameter, argument) pairs where the parameter
|
||||
type uses type parameters and where the function argument is <i>typed</i>.
|
||||
If <code>C</code> has a <a href="#Core_types">core type</a>
|
||||
<code>core(C)</code>
|
||||
and <code>P</code> has a known type argument <code>A</code>,
|
||||
<code>core(C)</code> and <code>A</code> must unify loosely.
|
||||
If <code>P</code> does not have a known type argument
|
||||
and <code>C</code> contains exactly one type term <code>T</code>
|
||||
that is not an underlying (tilde) type, unification adds the
|
||||
mapping <code>P ➞ T</code> to the map.
|
||||
</li>
|
||||
<li>
|
||||
The list <i>Lu</i> contains all remaining pairs where the parameter type is a single
|
||||
type parameter. In this list, the respective function arguments are untyped.
|
||||
If <code>C</code> does not have a core type
|
||||
and <code>P</code> has a known type argument <code>A</code>,
|
||||
<code>A</code> must have all methods of <code>C</code>, if any,
|
||||
and corresponding method types must unify exactly.
|
||||
</li>
|
||||
</ol>
|
||||
</ul>
|
||||
|
||||
<p>
|
||||
Any other (parameter, argument) pair is ignored.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
By construction, the arguments of the pairs in <i>Lu</i> are <i>untyped</i> constants
|
||||
(or the untyped boolean result of a comparison). And because <a href="#Constants">default types</a>
|
||||
of untyped values are always predeclared non-composite types, they can never match against
|
||||
a composite type, so it is sufficient to only consider parameter types that are single type
|
||||
parameters.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Each list is processed in a separate phase:
|
||||
</p>
|
||||
|
||||
<ol>
|
||||
<li>
|
||||
In the first phase, the parameter and argument types of each pair in <i>Lt</i>
|
||||
are unified. If unification succeeds for a pair, it may yield new entries that
|
||||
are added to the substitution map <i>M</i>. If unification fails, type inference
|
||||
fails.
|
||||
</li>
|
||||
<li>
|
||||
The second phase considers the entries of list <i>Lu</i>. Type parameters for
|
||||
which the type argument has already been determined are ignored in this phase.
|
||||
For each remaining pair, the parameter type (which is a single type parameter) and
|
||||
the <a href="#Constants">default type</a> of the corresponding untyped argument is
|
||||
unified. If unification fails, type inference fails.
|
||||
</li>
|
||||
</ol>
|
||||
|
||||
<p>
|
||||
While unification is successful, processing of each list continues until all list elements
|
||||
are considered, even if all type arguments are inferred before the last list element has
|
||||
been processed.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Example:
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
func min[T ~int|~float64](x, y T) T
|
||||
|
||||
var x int
|
||||
min(x, 2.0) // T is int, inferred from typed argument x; 2.0 is assignable to int
|
||||
min(1.0, 2.0) // T is float64, inferred from default type for 1.0 and matches default type for 2.0
|
||||
min(1.0, 2) // illegal: default type float64 (for 1.0) doesn't match default type int (for 2)
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
In the example <code>min(1.0, 2)</code>, processing the function argument <code>1.0</code>
|
||||
yields the substitution map entry <code>T</code> → <code>float64</code>. Because
|
||||
processing continues until all untyped arguments are considered, an error is reported. This
|
||||
ensures that type inference does not depend on the order of the untyped arguments.
|
||||
</p>
|
||||
|
||||
<h4 id="Constraint_type_inference">Constraint type inference</h4>
|
||||
|
||||
<p>
|
||||
Constraint type inference infers type arguments by considering type constraints.
|
||||
If a type parameter <code>P</code> has a constraint with a
|
||||
<a href="#Core_types">core type</a> <code>C</code>,
|
||||
<a href="#Type_unification">unifying</a> <code>P</code> with <code>C</code>
|
||||
may infer additional type arguments, either the type argument for <code>P</code>,
|
||||
or if that is already known, possibly the type arguments for type parameters
|
||||
used in <code>C</code>.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
For instance, consider the type parameter list with type parameters <code>List</code> and
|
||||
<code>Elem</code>:
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
[List ~[]Elem, Elem any]
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
Constraint type inference can deduce the type of <code>Elem</code> from the type argument
|
||||
for <code>List</code> because <code>Elem</code> is a type parameter in the core type
|
||||
<code>[]Elem</code> of <code>List</code>.
|
||||
If the type argument is <code>Bytes</code>:
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
type Bytes []byte
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
unifying the underlying type of <code>Bytes</code> with the core type means
|
||||
unifying <code>[]byte</code> with <code>[]Elem</code>. That unification succeeds and yields
|
||||
the <a href="#Type_unification">substitution map</a> entry
|
||||
<code>Elem</code> → <code>byte</code>.
|
||||
Thus, in this example, constraint type inference can infer the second type argument from the
|
||||
first one.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Using the core type of a constraint may lose some information: In the (unlikely) case that
|
||||
the constraint's type set contains a single <a href="#Type_definitions">defined type</a>
|
||||
<code>N</code>, the corresponding core type is <code>N</code>'s underlying type rather than
|
||||
<code>N</code> itself. In this case, constraint type inference may succeed but instantiation
|
||||
will fail because the inferred type is not in the type set of the constraint.
|
||||
Thus, constraint type inference uses the <i>adjusted core type</i> of
|
||||
a constraint: if the type set contains a single type, use that type; otherwise use the
|
||||
constraint's core type.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Generally, constraint type inference proceeds in two phases: Starting with a given
|
||||
substitution map <i>M</i>
|
||||
</p>
|
||||
|
||||
<ol>
|
||||
<li>
|
||||
For all type parameters with an adjusted core type, unify the type parameter with that
|
||||
type. If any unification fails, constraint type inference fails.
|
||||
</li>
|
||||
|
||||
<li>
|
||||
At this point, some entries in <i>M</i> may map type parameters to other
|
||||
type parameters or to types containing type parameters. For each entry
|
||||
<code>P</code> → <code>A</code> in <i>M</i> where <code>A</code> is or
|
||||
contains type parameters <code>Q</code> for which there exist entries
|
||||
<code>Q</code> → <code>B</code> in <i>M</i>, substitute those
|
||||
<code>Q</code> with the respective <code>B</code> in <code>A</code>.
|
||||
Stop when no further substitution is possible.
|
||||
</li>
|
||||
</ol>
|
||||
|
||||
<p>
|
||||
The result of constraint type inference is the final substitution map <i>M</i> from type
|
||||
parameters <code>P</code> to type arguments <code>A</code> where no type parameter <code>P</code>
|
||||
appears in any of the <code>A</code>.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
For instance, given the type parameter list
|
||||
</p>
|
||||
|
||||
<pre>
|
||||
[A any, B []C, C *A]
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
and the single provided type argument <code>int</code> for type parameter <code>A</code>,
|
||||
the initial substitution map <i>M</i> contains the entry <code>A</code> → <code>int</code>.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
In the first phase, the type parameters <code>B</code> and <code>C</code> are unified
|
||||
with the core type of their respective constraints. This adds the entries
|
||||
<code>B</code> → <code>[]C</code> and <code>C</code> → <code>*A</code>
|
||||
to <i>M</i>.
|
||||
|
||||
<p>
|
||||
At this point there are two entries in <i>M</i> where the right-hand side
|
||||
is or contains type parameters for which there exists other entries in <i>M</i>:
|
||||
<code>[]C</code> and <code>*A</code>.
|
||||
In the second phase, these type parameters are replaced with their respective
|
||||
types. It doesn't matter in which order this happens. Starting with the state
|
||||
of <i>M</i> after the first phase:
|
||||
</p>
|
||||
|
||||
<p>
|
||||
<code>A</code> → <code>int</code>,
|
||||
<code>B</code> → <code>[]C</code>,
|
||||
<code>C</code> → <code>*A</code>
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Replace <code>A</code> on the right-hand side of → with <code>int</code>:
|
||||
</p>
|
||||
|
||||
<p>
|
||||
<code>A</code> → <code>int</code>,
|
||||
<code>B</code> → <code>[]C</code>,
|
||||
<code>C</code> → <code>*int</code>
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Replace <code>C</code> on the right-hand side of → with <code>*int</code>:
|
||||
</p>
|
||||
|
||||
<p>
|
||||
<code>A</code> → <code>int</code>,
|
||||
<code>B</code> → <code>[]*int</code>,
|
||||
<code>C</code> → <code>*int</code>
|
||||
</p>
|
||||
|
||||
<p>
|
||||
At this point no further substitution is possible and the map is full.
|
||||
Therefore, <code>M</code> represents the final map of type parameters
|
||||
to type arguments for the given type parameter list.
|
||||
When solving type equations from type constraints,
|
||||
solving one equation may infer additional type arguments,
|
||||
which in turn may enable solving other equations that depend
|
||||
on those type arguments.
|
||||
Type inference repeats type unification as long as new type
|
||||
arguments are inferred.
|
||||
</p>
|
||||
|
||||
<h3 id="Operators">Operators</h3>
|
||||
@@ -5479,7 +5400,7 @@ in any of these cases:
|
||||
ignoring struct tags (see below),
|
||||
<code>x</code>'s type and <code>T</code> are not
|
||||
<a href="#Type_parameter_declarations">type parameters</a> but have
|
||||
<a href="#Type_identity">identical</a> <a href="#Types">underlying types</a>.
|
||||
<a href="#Type_identity">identical</a> <a href="#Underlying_types">underlying types</a>.
|
||||
</li>
|
||||
<li>
|
||||
ignoring struct tags (see below),
|
||||
@@ -7324,7 +7245,8 @@ clear(t) type parameter see below
|
||||
</pre>
|
||||
|
||||
<p>
|
||||
If the argument type is a <a href="#Type_parameter_declarations">type parameter</a>,
|
||||
If the type of the argument to <code>clear</code> is a
|
||||
<a href="#Type_parameter_declarations">type parameter</a>,
|
||||
all types in its type set must be maps or slices, and <code>clear</code>
|
||||
performs the operation corresponding to the actual type argument.
|
||||
</p>
|
||||
@@ -8290,7 +8212,7 @@ of if the general conversion rules take care of this.
|
||||
<p>
|
||||
A <code>Pointer</code> is a <a href="#Pointer_types">pointer type</a> but a <code>Pointer</code>
|
||||
value may not be <a href="#Address_operators">dereferenced</a>.
|
||||
Any pointer or value of <a href="#Types">underlying type</a> <code>uintptr</code> can be
|
||||
Any pointer or value of <a href="#Underlying_types">underlying type</a> <code>uintptr</code> can be
|
||||
<a href="#Conversions">converted</a> to a type of underlying type <code>Pointer</code> and vice versa.
|
||||
The effect of converting between <code>Pointer</code> and <code>uintptr</code> is implementation-defined.
|
||||
</p>
|
||||
@@ -8438,3 +8360,145 @@ The following minimal alignment properties are guaranteed:
|
||||
<p>
|
||||
A struct or array type has size zero if it contains no fields (or elements, respectively) that have a size greater than zero. Two distinct zero-size variables may have the same address in memory.
|
||||
</p>
|
||||
|
||||
<h2 id="Appendix">Appendix</h2>
|
||||
|
||||
<h3 id="Type_unification_rules">Type unification rules</h3>
|
||||
|
||||
<p>
|
||||
The type unification rules describe if and how two types unify.
|
||||
The precise details are relevant for Go implementations,
|
||||
affect the specifics of error messages (such as whether
|
||||
a compiler reports a type inference or other error),
|
||||
and may explain why type inference fails in unusual code situations.
|
||||
But by and large these rules can be ignored when writing Go code:
|
||||
type inference is designed to mostly "work as expected",
|
||||
and the unification rules are fine-tuned accordingly.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Type unification is controlled by a <i>matching mode</i>, which may
|
||||
be <i>exact</i> or <i>loose</i>.
|
||||
As unification recursively descends a composite type structure,
|
||||
the matching mode used for elements of the type, the <i>element matching mode</i>,
|
||||
remains the same as the matching mode except when two types are unified for
|
||||
<a href="#Assignability">assignability</a> (<code>≡<sub>A</sub></code>):
|
||||
in this case, the matching mode is <i>loose</i> at the top level but
|
||||
then changes to <i>exact</i> for element types, reflecting the fact
|
||||
that types don't have to be identical to be assignable.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Two types that are not bound type parameters unify exactly if any of
|
||||
following conditions is true:
|
||||
</p>
|
||||
|
||||
<ul>
|
||||
<li>
|
||||
Both types are <a href="#Type_identity">identical</a>.
|
||||
</li>
|
||||
<li>
|
||||
Both types have identical structure and their element types
|
||||
unify exactly.
|
||||
</li>
|
||||
<li>
|
||||
Exactly one type is an <a href="#Type_inference">unbound</a>
|
||||
type parameter with a <a href="#Core_types">core type</a>,
|
||||
and that core type unifies with the other type per the
|
||||
unification rules for <code>≡<sub>A</sub></code>
|
||||
(loose unification at the top level and exact unification
|
||||
for element types).
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
<p>
|
||||
If both types are bound type parameters, they unify per the given
|
||||
matching modes if:
|
||||
</p>
|
||||
|
||||
<ul>
|
||||
<li>
|
||||
Both type parameters are identical.
|
||||
</li>
|
||||
<li>
|
||||
At most one of the type parameters has a known type argument.
|
||||
In this case, the type parameters are <i>joined</i>:
|
||||
they both stand for the same type argument.
|
||||
If neither type parameter has a known type argument yet,
|
||||
a future type argument inferred for one the type parameters
|
||||
is simultaneously inferred for both of them.
|
||||
</li>
|
||||
<li>
|
||||
Both type parameters have a known type argument
|
||||
and the type arguments unify per the given matching modes.
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
<p>
|
||||
A single bound type parameter <code>P</code> and another type <code>T</code> unify
|
||||
per the given matching modes if:
|
||||
</p>
|
||||
|
||||
<ul>
|
||||
<li>
|
||||
<code>P</code> doesn't have a known type argument.
|
||||
In this case, <code>T</code> is inferred as the type argument for <code>P</code>.
|
||||
</li>
|
||||
<li>
|
||||
<code>P</code> does have a known type argument <code>A</code>,
|
||||
<code>A</code> and <code>T</code> unify per the given matching modes,
|
||||
and one of the following conditions is true:
|
||||
<ul>
|
||||
<li>
|
||||
Both <code>A</code> and <code>T</code> are interface types:
|
||||
In this case, if both <code>A</code> and <code>T</code> are
|
||||
also <a href="#Type_definitions">defined</a> types,
|
||||
they must be <a href="#Type_identity">identical</a>.
|
||||
Otherwise, if neither of them is a defined type, they must
|
||||
have the same number of methods
|
||||
(unification of <code>A</code> and <code>T</code> already
|
||||
established that the methods match).
|
||||
</li>
|
||||
<li>
|
||||
Neither <code>A</code> nor <code>T</code> are interface types:
|
||||
In this case, if <code>T</code> is a defined type, <code>T</code>
|
||||
replaces <code>A</code> as the inferred type argument for <code>P</code>.
|
||||
</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
<p>
|
||||
Finally, two types that are not bound type parameters unify loosely
|
||||
(and per the element matching mode) if:
|
||||
</p>
|
||||
|
||||
<ul>
|
||||
<li>
|
||||
Both types unify exactly.
|
||||
</li>
|
||||
<li>
|
||||
One type is a <a href="#Type_definitions">defined type</a>,
|
||||
the other type is a type literal, but not an interface,
|
||||
and their underlying types unify per the element matching mode.
|
||||
</li>
|
||||
<li>
|
||||
Both types are interfaces (but not type parameters) with
|
||||
identical <a href="#Interface_types">type terms</a>,
|
||||
both or neither embed the predeclared type
|
||||
<a href="#Predeclared_identifiers">comparable</a>,
|
||||
corresponding method types unify per the element matching mode,
|
||||
and the method set of one of the interfaces is a subset of
|
||||
the method set of the other interface.
|
||||
</li>
|
||||
<li>
|
||||
Only one type is an interface (but not a type parameter),
|
||||
corresponding methods of the two types unify per the element matching mode,
|
||||
and the method set of the interface is a subset of
|
||||
the method set of the other type.
|
||||
</li>
|
||||
<li>
|
||||
Both types have the same structure and their element types
|
||||
unify per the element matching mode.
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
@@ -126,6 +126,14 @@ for example,
|
||||
see the [runtime documentation](/pkg/runtime#hdr-Environment_Variables)
|
||||
and the [go command documentation](/cmd/go#hdr-Build_and_test_caching).
|
||||
|
||||
### Go 1.22
|
||||
|
||||
Go 1.22 adds a configurable limit to control the maximum acceptable RSA key size
|
||||
that can be used in TLS handshakes, controlled by the [`tlsmaxrsasize`setting](/pkg/crypto/tls#Conn.Handshake).
|
||||
The default is tlsmaxrsasize=8192, limiting RSA to 8192-bit keys. To avoid
|
||||
denial of service attacks, this setting and default was backported to Go
|
||||
1.19.13, Go 1.20.8, and Go 1.21.1.
|
||||
|
||||
### Go 1.21
|
||||
|
||||
Go 1.21 made it a run-time error to call `panic` with a nil interface value,
|
||||
@@ -142,6 +150,10 @@ forms, controlled by the
|
||||
respectively.
|
||||
This behavior was backported to Go 1.19.8+ and Go 1.20.3+.
|
||||
|
||||
Go 1.21 adds the support of Multipath TCP but it is only used if the application
|
||||
explicitly asked for it. This behavior can be controlled by the
|
||||
[`multipathtcp` setting](/pkg/net#Dialer.SetMultipathTCP).
|
||||
|
||||
There is no plan to remove any of these settings.
|
||||
|
||||
### Go 1.20
|
||||
|
||||
@@ -10,12 +10,12 @@ case "$GOWASIRUNTIME" in
|
||||
"wasmer")
|
||||
exec wasmer run --dir=/ --env PWD="$PWD" --env PATH="$PATH" ${GOWASIRUNTIMEARGS:-} "$1" -- "${@:2}"
|
||||
;;
|
||||
"wasmtime")
|
||||
exec wasmtime run --dir=/ --env PWD="$PWD" --env PATH="$PATH" --max-wasm-stack 1048576 ${GOWASIRUNTIMEARGS:-} "$1" -- "${@:2}"
|
||||
;;
|
||||
"wazero" | "")
|
||||
"wazero")
|
||||
exec wazero run -mount /:/ -env-inherit -cachedir "${TMPDIR:-/tmp}"/wazero ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}"
|
||||
;;
|
||||
"wasmtime" | "")
|
||||
exec wasmtime run --dir=/ --env PWD="$PWD" --env PATH="$PATH" --max-wasm-stack 1048576 ${GOWASIRUNTIMEARGS:-} "$1" -- "${@:2}"
|
||||
;;
|
||||
*)
|
||||
echo "Unknown Go WASI runtime specified: $GOWASIRUNTIME"
|
||||
exit 1
|
||||
|
||||
@@ -38,7 +38,7 @@ The vendor directory may be updated with 'go mod vendor'.
|
||||
A typical sequence might be:
|
||||
|
||||
cd src
|
||||
go get golang.org/x/net@latest
|
||||
go get golang.org/x/net@master
|
||||
go mod tidy
|
||||
go mod vendor
|
||||
|
||||
|
||||
@@ -7,7 +7,6 @@ package bytes_test
|
||||
import (
|
||||
. "bytes"
|
||||
"fmt"
|
||||
"internal/testenv"
|
||||
"testing"
|
||||
)
|
||||
|
||||
@@ -73,7 +72,7 @@ func TestCompareBytes(t *testing.T) {
|
||||
}
|
||||
lengths = append(lengths, 256, 512, 1024, 1333, 4095, 4096, 4097)
|
||||
|
||||
if !testing.Short() || testenv.Builder() != "" {
|
||||
if !testing.Short() {
|
||||
lengths = append(lengths, 65535, 65536, 65537, 99999)
|
||||
}
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package api
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
//go:build boringcrypto
|
||||
|
||||
package api
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
@@ -2,9 +2,10 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package api computes the exported API of a set of Go packages.
|
||||
// This package computes the exported API of a set of Go packages.
|
||||
// It is only a test, not a command, nor a usefully importable package.
|
||||
package api
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
40
src/cmd/asm/internal/asm/testdata/riscv64.s
vendored
40
src/cmd/asm/internal/asm/testdata/riscv64.s
vendored
@@ -183,28 +183,28 @@ start:
|
||||
// 8.2: Load-Reserved/Store-Conditional
|
||||
LRW (X5), X6 // 2fa30214
|
||||
LRD (X5), X6 // 2fb30214
|
||||
SCW X5, (X6), X7 // af23531c
|
||||
SCD X5, (X6), X7 // af33531c
|
||||
SCW X5, (X6), X7 // af23531a
|
||||
SCD X5, (X6), X7 // af33531a
|
||||
|
||||
// 8.3: Atomic Memory Operations
|
||||
AMOSWAPW X5, (X6), X7 // af23530c
|
||||
AMOSWAPD X5, (X6), X7 // af33530c
|
||||
AMOADDW X5, (X6), X7 // af235304
|
||||
AMOADDD X5, (X6), X7 // af335304
|
||||
AMOANDW X5, (X6), X7 // af235364
|
||||
AMOANDD X5, (X6), X7 // af335364
|
||||
AMOORW X5, (X6), X7 // af235344
|
||||
AMOORD X5, (X6), X7 // af335344
|
||||
AMOXORW X5, (X6), X7 // af235324
|
||||
AMOXORD X5, (X6), X7 // af335324
|
||||
AMOMAXW X5, (X6), X7 // af2353a4
|
||||
AMOMAXD X5, (X6), X7 // af3353a4
|
||||
AMOMAXUW X5, (X6), X7 // af2353e4
|
||||
AMOMAXUD X5, (X6), X7 // af3353e4
|
||||
AMOMINW X5, (X6), X7 // af235384
|
||||
AMOMIND X5, (X6), X7 // af335384
|
||||
AMOMINUW X5, (X6), X7 // af2353c4
|
||||
AMOMINUD X5, (X6), X7 // af3353c4
|
||||
AMOSWAPW X5, (X6), X7 // af23530e
|
||||
AMOSWAPD X5, (X6), X7 // af33530e
|
||||
AMOADDW X5, (X6), X7 // af235306
|
||||
AMOADDD X5, (X6), X7 // af335306
|
||||
AMOANDW X5, (X6), X7 // af235366
|
||||
AMOANDD X5, (X6), X7 // af335366
|
||||
AMOORW X5, (X6), X7 // af235346
|
||||
AMOORD X5, (X6), X7 // af335346
|
||||
AMOXORW X5, (X6), X7 // af235326
|
||||
AMOXORD X5, (X6), X7 // af335326
|
||||
AMOMAXW X5, (X6), X7 // af2353a6
|
||||
AMOMAXD X5, (X6), X7 // af3353a6
|
||||
AMOMAXUW X5, (X6), X7 // af2353e6
|
||||
AMOMAXUD X5, (X6), X7 // af3353e6
|
||||
AMOMINW X5, (X6), X7 // af235386
|
||||
AMOMIND X5, (X6), X7 // af335386
|
||||
AMOMINUW X5, (X6), X7 // af2353c6
|
||||
AMOMINUD X5, (X6), X7 // af3353c6
|
||||
|
||||
// 10.1: Base Counters and Timers
|
||||
RDCYCLE X5 // f32200c0
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
package cgotest
|
||||
|
||||
/*
|
||||
#cgo LDFLAGS: -lm
|
||||
#cgo !darwin LDFLAGS: -lm
|
||||
#include <math.h>
|
||||
*/
|
||||
import "C"
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
package issue8756
|
||||
|
||||
/*
|
||||
#cgo LDFLAGS: -lm
|
||||
#cgo !darwin LDFLAGS: -lm
|
||||
#include <math.h>
|
||||
*/
|
||||
import "C"
|
||||
|
||||
@@ -23,7 +23,7 @@ package cgotest
|
||||
#include <unistd.h>
|
||||
#include <sys/stat.h>
|
||||
#include <errno.h>
|
||||
#cgo LDFLAGS: -lm
|
||||
#cgo !darwin LDFLAGS: -lm
|
||||
|
||||
#ifndef WIN32
|
||||
#include <pthread.h>
|
||||
|
||||
@@ -111,6 +111,7 @@ func TestReportsTypeErrors(t *testing.T) {
|
||||
for _, file := range []string{
|
||||
"err1.go",
|
||||
"err2.go",
|
||||
"err5.go",
|
||||
"issue11097a.go",
|
||||
"issue11097b.go",
|
||||
"issue18452.go",
|
||||
|
||||
10
src/cmd/cgo/internal/testerrors/testdata/err5.go
vendored
Normal file
10
src/cmd/cgo/internal/testerrors/testdata/err5.go
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
// Copyright 2023 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
//line /tmp/_cgo_.go:1
|
||||
//go:cgo_dynamic_linker "/elf/interp" // ERROR HERE: only allowed in cgo-generated code
|
||||
|
||||
func main() {}
|
||||
@@ -389,9 +389,18 @@ func TestForkExec(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestGeneric(t *testing.T) {
|
||||
func TestSymbolNameMangle(t *testing.T) {
|
||||
// Issue 58800: generic function name may contain weird characters
|
||||
// that confuse the external linker.
|
||||
// Issue 62098: the name mangling code doesn't handle some string
|
||||
// symbols correctly.
|
||||
globalSkip(t)
|
||||
goCmd(t, "build", "-buildmode=plugin", "-o", "generic.so", "./generic/plugin.go")
|
||||
goCmd(t, "build", "-buildmode=plugin", "-o", "mangle.so", "./mangle/plugin.go")
|
||||
}
|
||||
|
||||
func TestIssue62430(t *testing.T) {
|
||||
globalSkip(t)
|
||||
goCmd(t, "build", "-buildmode=plugin", "-o", "issue62430.so", "./issue62430/plugin.go")
|
||||
goCmd(t, "build", "-o", "issue62430.exe", "./issue62430/main.go")
|
||||
run(t, "./issue62430.exe")
|
||||
}
|
||||
|
||||
35
src/cmd/cgo/internal/testplugin/testdata/issue62430/main.go
vendored
Normal file
35
src/cmd/cgo/internal/testplugin/testdata/issue62430/main.go
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
// Copyright 2023 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Issue 62430: a program that uses plugins may appear
|
||||
// to have no references to an initialized global map variable defined
|
||||
// in some stdlib package (ex: unicode), however there
|
||||
// may be references to that map var from a plugin that
|
||||
// gets loaded.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"plugin"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
func main() {
|
||||
p, err := plugin.Open("issue62430.so")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
s, err := p.Lookup("F")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
f := s.(func(string) *unicode.RangeTable)
|
||||
if f("C") == nil {
|
||||
panic("unicode.Categories not properly initialized")
|
||||
} else {
|
||||
fmt.Println("unicode.Categories properly initialized")
|
||||
}
|
||||
}
|
||||
11
src/cmd/cgo/internal/testplugin/testdata/issue62430/plugin.go
vendored
Normal file
11
src/cmd/cgo/internal/testplugin/testdata/issue62430/plugin.go
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"unicode"
|
||||
)
|
||||
|
||||
func F(s string) *unicode.RangeTable {
|
||||
return unicode.Categories[s]
|
||||
}
|
||||
|
||||
func main() {}
|
||||
@@ -2,21 +2,37 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Instantiated function name may contain weird characters
|
||||
// that confuse the external linker, so it needs to be
|
||||
// mangled.
|
||||
// Test cases for symbol name mangling.
|
||||
|
||||
package main
|
||||
|
||||
//go:noinline
|
||||
func F[T any]() {}
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Issue 58800:
|
||||
// Instantiated function name may contain weird characters
|
||||
// that confuse the external linker, so it needs to be
|
||||
// mangled.
|
||||
type S struct {
|
||||
X int `parser:"|@@)"`
|
||||
}
|
||||
|
||||
//go:noinline
|
||||
func F[T any]() {}
|
||||
|
||||
func P() {
|
||||
F[S]()
|
||||
}
|
||||
|
||||
// Issue 62098: the name mangling code doesn't handle some string
|
||||
// symbols correctly.
|
||||
func G(id string) error {
|
||||
if strings.ContainsAny(id, "&$@;/:+,?\\{^}%`]\">[~<#|") {
|
||||
return fmt.Errorf("invalid")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func main() {}
|
||||
@@ -333,8 +333,14 @@ func (p *noder) pragma(pos syntax.Pos, blankLine bool, text string, old syntax.P
|
||||
// contain cgo directives, and for security reasons
|
||||
// (primarily misuse of linker flags), other files are not.
|
||||
// See golang.org/issue/23672.
|
||||
// Note that cmd/go ignores files whose names start with underscore,
|
||||
// so the only _cgo_ files we will see from cmd/go are generated by cgo.
|
||||
// It's easy to bypass this check by calling the compiler directly;
|
||||
// we only protect against uses by cmd/go.
|
||||
func isCgoGeneratedFile(pos syntax.Pos) bool {
|
||||
return strings.HasPrefix(filepath.Base(trimFilename(pos.Base())), "_cgo_")
|
||||
// We need the absolute file, independent of //line directives,
|
||||
// so we call pos.Base().Pos().
|
||||
return strings.HasPrefix(filepath.Base(trimFilename(pos.Base().Pos().Base())), "_cgo_")
|
||||
}
|
||||
|
||||
// safeArg reports whether arg is a "safe" command-line argument,
|
||||
|
||||
@@ -1552,33 +1552,27 @@
|
||||
(GreaterEqualU (FlagConstant [fc])) => (MOVDconst [b2i(fc.uge())])
|
||||
|
||||
// absorb InvertFlags into boolean values
|
||||
(Equal (InvertFlags x)) => (Equal x)
|
||||
(NotEqual (InvertFlags x)) => (NotEqual x)
|
||||
(LessThan (InvertFlags x)) => (GreaterThan x)
|
||||
(LessThanU (InvertFlags x)) => (GreaterThanU x)
|
||||
(GreaterThan (InvertFlags x)) => (LessThan x)
|
||||
(GreaterThanU (InvertFlags x)) => (LessThanU x)
|
||||
(LessEqual (InvertFlags x)) => (GreaterEqual x)
|
||||
(LessEqualU (InvertFlags x)) => (GreaterEqualU x)
|
||||
(GreaterEqual (InvertFlags x)) => (LessEqual x)
|
||||
(GreaterEqualU (InvertFlags x)) => (LessEqualU x)
|
||||
(LessThanF (InvertFlags x)) => (GreaterThanF x)
|
||||
(LessEqualF (InvertFlags x)) => (GreaterEqualF x)
|
||||
(GreaterThanF (InvertFlags x)) => (LessThanF x)
|
||||
(GreaterEqualF (InvertFlags x)) => (LessEqualF x)
|
||||
(Equal (InvertFlags x)) => (Equal x)
|
||||
(NotEqual (InvertFlags x)) => (NotEqual x)
|
||||
(LessThan (InvertFlags x)) => (GreaterThan x)
|
||||
(LessThanU (InvertFlags x)) => (GreaterThanU x)
|
||||
(GreaterThan (InvertFlags x)) => (LessThan x)
|
||||
(GreaterThanU (InvertFlags x)) => (LessThanU x)
|
||||
(LessEqual (InvertFlags x)) => (GreaterEqual x)
|
||||
(LessEqualU (InvertFlags x)) => (GreaterEqualU x)
|
||||
(GreaterEqual (InvertFlags x)) => (LessEqual x)
|
||||
(GreaterEqualU (InvertFlags x)) => (LessEqualU x)
|
||||
(LessThanF (InvertFlags x)) => (GreaterThanF x)
|
||||
(LessEqualF (InvertFlags x)) => (GreaterEqualF x)
|
||||
(GreaterThanF (InvertFlags x)) => (LessThanF x)
|
||||
(GreaterEqualF (InvertFlags x)) => (LessEqualF x)
|
||||
(LessThanNoov (InvertFlags x)) => (BIC (GreaterEqualNoov <typ.Bool> x) (Equal <typ.Bool> x))
|
||||
(GreaterEqualNoov (InvertFlags x)) => (OR (LessThanNoov <typ.Bool> x) (Equal <typ.Bool> x))
|
||||
|
||||
// Boolean-generating instructions (NOTE: NOT all boolean Values) always
|
||||
// zero upper bit of the register; no need to zero-extend
|
||||
(MOVBUreg x:((Equal|NotEqual|LessThan|LessThanU|LessThanF|LessEqual|LessEqualU|LessEqualF|GreaterThan|GreaterThanU|GreaterThanF|GreaterEqual|GreaterEqualU|GreaterEqualF) _)) => (MOVDreg x)
|
||||
|
||||
// omit unsign extension
|
||||
(MOVWUreg x) && zeroUpper32Bits(x, 3) => x
|
||||
|
||||
// omit sign extension
|
||||
(MOVWreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffff80000000) == 0 => (ANDconst <t> x [c])
|
||||
(MOVHreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffffffff8000) == 0 => (ANDconst <t> x [c])
|
||||
(MOVBreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffffffffff80) == 0 => (ANDconst <t> x [c])
|
||||
|
||||
// absorb flag constants into conditional instructions
|
||||
(CSEL [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
|
||||
(CSEL [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => y
|
||||
|
||||
@@ -13,7 +13,6 @@ import "strings"
|
||||
// - *const instructions may use a constant larger than the instruction can encode.
|
||||
// In this case the assembler expands to multiple instructions and uses tmp
|
||||
// register (R27).
|
||||
// - All 32-bit Ops will zero the upper 32 bits of the destination register.
|
||||
|
||||
// Suffixes encode the bit width of various instructions.
|
||||
// D (double word) = 64 bit
|
||||
|
||||
@@ -588,16 +588,16 @@
|
||||
|
||||
// small and of zero-extend => either zero-extend or small and
|
||||
(Select0 (ANDCCconst [c] y:(MOVBZreg _))) && c&0xFF == 0xFF => y
|
||||
(Select0 (ANDCCconst [0xFF] y:(MOVBreg _))) => y
|
||||
(Select0 (ANDCCconst [0xFF] (MOVBreg x))) => (MOVBZreg x)
|
||||
(Select0 (ANDCCconst [c] y:(MOVHZreg _))) && c&0xFFFF == 0xFFFF => y
|
||||
(Select0 (ANDCCconst [0xFFFF] y:(MOVHreg _))) => y
|
||||
(Select0 (ANDCCconst [0xFFFF] (MOVHreg x))) => (MOVHZreg x)
|
||||
|
||||
(AND (MOVDconst [c]) y:(MOVWZreg _)) && c&0xFFFFFFFF == 0xFFFFFFFF => y
|
||||
(AND (MOVDconst [0xFFFFFFFF]) y:(MOVWreg x)) => (MOVWZreg x)
|
||||
// normal case
|
||||
(Select0 (ANDCCconst [c] (MOV(B|BZ)reg x))) => (Select0 (ANDCCconst [c&0xFF] x))
|
||||
(Select0 (ANDCCconst [c] (MOV(H|HZ)reg x))) => (Select0 (ANDCCconst [c&0xFFFF] x))
|
||||
(Select0 (ANDCCconst [c] (MOV(W|WZ)reg x))) => (Select0 (ANDCCconst [c&0xFFFFFFFF] x))
|
||||
(Select0 (ANDCCconst [c] (MOVBZreg x))) => (Select0 (ANDCCconst [c&0xFF] x))
|
||||
(Select0 (ANDCCconst [c] (MOVHZreg x))) => (Select0 (ANDCCconst [c&0xFFFF] x))
|
||||
(Select0 (ANDCCconst [c] (MOVWZreg x))) => (Select0 (ANDCCconst [c&0xFFFFFFFF] x))
|
||||
|
||||
// Eliminate unnecessary sign/zero extend following right shift
|
||||
(MOV(B|H|W)Zreg (SRWconst [c] (MOVBZreg x))) => (SRWconst [c] (MOVBZreg x))
|
||||
|
||||
@@ -855,7 +855,7 @@ func storeOneArg(x *expandState, pos src.XPos, b *Block, locs []*LocalSlot, suff
|
||||
// storeOneLoad creates a decomposed (one step) load that is then stored.
|
||||
func storeOneLoad(x *expandState, pos src.XPos, b *Block, source, mem *Value, t *types.Type, offArg, offStore int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
|
||||
from := x.offsetFrom(source.Block, source.Args[0], offArg, types.NewPtr(t))
|
||||
w := source.Block.NewValue2(source.Pos, OpLoad, t, from, mem)
|
||||
w := b.NewValue2(source.Pos, OpLoad, t, from, mem)
|
||||
return x.storeArgOrLoad(pos, b, w, mem, t, offStore, loadRegOffset, storeRc)
|
||||
}
|
||||
|
||||
@@ -962,7 +962,7 @@ func (x *expandState) storeArgOrLoad(pos src.XPos, b *Block, source, mem *Value,
|
||||
eltRO := x.regWidth(elt)
|
||||
source.Type = t
|
||||
for i := int64(0); i < t.NumElem(); i++ {
|
||||
sel := source.Block.NewValue1I(pos, OpArraySelect, elt, i, source)
|
||||
sel := b.NewValue1I(pos, OpArraySelect, elt, i, source)
|
||||
mem = x.storeArgOrLoad(pos, b, sel, mem, elt, storeOffset+i*elt.Size(), loadRegOffset, storeRc.at(t, 0))
|
||||
loadRegOffset += eltRO
|
||||
pos = pos.WithNotStmt()
|
||||
@@ -997,7 +997,7 @@ func (x *expandState) storeArgOrLoad(pos src.XPos, b *Block, source, mem *Value,
|
||||
source.Type = t
|
||||
for i := 0; i < t.NumFields(); i++ {
|
||||
fld := t.Field(i)
|
||||
sel := source.Block.NewValue1I(pos, OpStructSelect, fld.Type, int64(i), source)
|
||||
sel := b.NewValue1I(pos, OpStructSelect, fld.Type, int64(i), source)
|
||||
mem = x.storeArgOrLoad(pos, b, sel, mem, fld.Type, storeOffset+fld.Offset, loadRegOffset, storeRc.next(fld.Type))
|
||||
loadRegOffset += x.regWidth(fld.Type)
|
||||
pos = pos.WithNotStmt()
|
||||
@@ -1009,48 +1009,48 @@ func (x *expandState) storeArgOrLoad(pos src.XPos, b *Block, source, mem *Value,
|
||||
break
|
||||
}
|
||||
tHi, tLo := x.intPairTypes(t.Kind())
|
||||
sel := source.Block.NewValue1(pos, OpInt64Hi, tHi, source)
|
||||
sel := b.NewValue1(pos, OpInt64Hi, tHi, source)
|
||||
mem = x.storeArgOrLoad(pos, b, sel, mem, tHi, storeOffset+x.hiOffset, loadRegOffset+x.hiRo, storeRc.plus(x.hiRo))
|
||||
pos = pos.WithNotStmt()
|
||||
sel = source.Block.NewValue1(pos, OpInt64Lo, tLo, source)
|
||||
sel = b.NewValue1(pos, OpInt64Lo, tLo, source)
|
||||
return x.storeArgOrLoad(pos, b, sel, mem, tLo, storeOffset+x.lowOffset, loadRegOffset+x.loRo, storeRc.plus(x.hiRo))
|
||||
|
||||
case types.TINTER:
|
||||
sel := source.Block.NewValue1(pos, OpITab, x.typs.BytePtr, source)
|
||||
sel := b.NewValue1(pos, OpITab, x.typs.BytePtr, source)
|
||||
mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.BytePtr, storeOffset, loadRegOffset, storeRc.next(x.typs.BytePtr))
|
||||
pos = pos.WithNotStmt()
|
||||
sel = source.Block.NewValue1(pos, OpIData, x.typs.BytePtr, source)
|
||||
sel = b.NewValue1(pos, OpIData, x.typs.BytePtr, source)
|
||||
return x.storeArgOrLoad(pos, b, sel, mem, x.typs.BytePtr, storeOffset+x.ptrSize, loadRegOffset+RO_iface_data, storeRc)
|
||||
|
||||
case types.TSTRING:
|
||||
sel := source.Block.NewValue1(pos, OpStringPtr, x.typs.BytePtr, source)
|
||||
sel := b.NewValue1(pos, OpStringPtr, x.typs.BytePtr, source)
|
||||
mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.BytePtr, storeOffset, loadRegOffset, storeRc.next(x.typs.BytePtr))
|
||||
pos = pos.WithNotStmt()
|
||||
sel = source.Block.NewValue1(pos, OpStringLen, x.typs.Int, source)
|
||||
sel = b.NewValue1(pos, OpStringLen, x.typs.Int, source)
|
||||
return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Int, storeOffset+x.ptrSize, loadRegOffset+RO_string_len, storeRc)
|
||||
|
||||
case types.TSLICE:
|
||||
et := types.NewPtr(t.Elem())
|
||||
sel := source.Block.NewValue1(pos, OpSlicePtr, et, source)
|
||||
sel := b.NewValue1(pos, OpSlicePtr, et, source)
|
||||
mem = x.storeArgOrLoad(pos, b, sel, mem, et, storeOffset, loadRegOffset, storeRc.next(et))
|
||||
pos = pos.WithNotStmt()
|
||||
sel = source.Block.NewValue1(pos, OpSliceLen, x.typs.Int, source)
|
||||
sel = b.NewValue1(pos, OpSliceLen, x.typs.Int, source)
|
||||
mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.Int, storeOffset+x.ptrSize, loadRegOffset+RO_slice_len, storeRc.next(x.typs.Int))
|
||||
sel = source.Block.NewValue1(pos, OpSliceCap, x.typs.Int, source)
|
||||
sel = b.NewValue1(pos, OpSliceCap, x.typs.Int, source)
|
||||
return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Int, storeOffset+2*x.ptrSize, loadRegOffset+RO_slice_cap, storeRc)
|
||||
|
||||
case types.TCOMPLEX64:
|
||||
sel := source.Block.NewValue1(pos, OpComplexReal, x.typs.Float32, source)
|
||||
sel := b.NewValue1(pos, OpComplexReal, x.typs.Float32, source)
|
||||
mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float32, storeOffset, loadRegOffset, storeRc.next(x.typs.Float32))
|
||||
pos = pos.WithNotStmt()
|
||||
sel = source.Block.NewValue1(pos, OpComplexImag, x.typs.Float32, source)
|
||||
sel = b.NewValue1(pos, OpComplexImag, x.typs.Float32, source)
|
||||
return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float32, storeOffset+4, loadRegOffset+RO_complex_imag, storeRc)
|
||||
|
||||
case types.TCOMPLEX128:
|
||||
sel := source.Block.NewValue1(pos, OpComplexReal, x.typs.Float64, source)
|
||||
sel := b.NewValue1(pos, OpComplexReal, x.typs.Float64, source)
|
||||
mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float64, storeOffset, loadRegOffset, storeRc.next(x.typs.Float64))
|
||||
pos = pos.WithNotStmt()
|
||||
sel = source.Block.NewValue1(pos, OpComplexImag, x.typs.Float64, source)
|
||||
sel = b.NewValue1(pos, OpComplexImag, x.typs.Float64, source)
|
||||
return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float64, storeOffset+8, loadRegOffset+RO_complex_imag, storeRc)
|
||||
}
|
||||
|
||||
@@ -1113,6 +1113,9 @@ func (x *expandState) rewriteArgs(v *Value, firstArg int) {
|
||||
}
|
||||
}
|
||||
}
|
||||
if x.debug > 1 {
|
||||
x.Printf("...storeArg %s, %v, %d\n", a.LongString(), aType, aOffset)
|
||||
}
|
||||
// "Dereference" of addressed (probably not-SSA-eligible) value becomes Move
|
||||
// TODO(register args) this will be more complicated with registers in the picture.
|
||||
mem = x.rewriteDereference(v.Block, sp, a, mem, aOffset, aux.SizeOfArg(auxI), aType, v.Pos)
|
||||
|
||||
@@ -41,6 +41,7 @@ func memcombineLoads(f *Func) {
|
||||
}
|
||||
}
|
||||
for _, b := range f.Blocks {
|
||||
order = order[:0]
|
||||
for _, v := range b.Values {
|
||||
if v.Op != OpOr16 && v.Op != OpOr32 && v.Op != OpOr64 {
|
||||
continue
|
||||
|
||||
@@ -1281,10 +1281,6 @@ func zeroUpper32Bits(x *Value, depth int) bool {
|
||||
OpAMD64SHRL, OpAMD64SHRLconst, OpAMD64SARL, OpAMD64SARLconst,
|
||||
OpAMD64SHLL, OpAMD64SHLLconst:
|
||||
return true
|
||||
case OpARM64REV16W, OpARM64REVW, OpARM64RBITW, OpARM64CLZW, OpARM64EXTRWconst,
|
||||
OpARM64MULW, OpARM64MNEGW, OpARM64UDIVW, OpARM64DIVW, OpARM64UMODW,
|
||||
OpARM64MADDW, OpARM64MSUBW, OpARM64RORW, OpARM64RORWconst:
|
||||
return true
|
||||
case OpArg:
|
||||
return x.Type.Size() == 4
|
||||
case OpPhi, OpSelect0, OpSelect1:
|
||||
|
||||
@@ -154,6 +154,8 @@ func rewriteValueARM64(v *Value) bool {
|
||||
return rewriteValueARM64_OpARM64GreaterEqual(v)
|
||||
case OpARM64GreaterEqualF:
|
||||
return rewriteValueARM64_OpARM64GreaterEqualF(v)
|
||||
case OpARM64GreaterEqualNoov:
|
||||
return rewriteValueARM64_OpARM64GreaterEqualNoov(v)
|
||||
case OpARM64GreaterEqualU:
|
||||
return rewriteValueARM64_OpARM64GreaterEqualU(v)
|
||||
case OpARM64GreaterThan:
|
||||
@@ -174,6 +176,8 @@ func rewriteValueARM64(v *Value) bool {
|
||||
return rewriteValueARM64_OpARM64LessThan(v)
|
||||
case OpARM64LessThanF:
|
||||
return rewriteValueARM64_OpARM64LessThanF(v)
|
||||
case OpARM64LessThanNoov:
|
||||
return rewriteValueARM64_OpARM64LessThanNoov(v)
|
||||
case OpARM64LessThanU:
|
||||
return rewriteValueARM64_OpARM64LessThanU(v)
|
||||
case OpARM64MADD:
|
||||
@@ -5953,6 +5957,27 @@ func rewriteValueARM64_OpARM64GreaterEqualF(v *Value) bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
func rewriteValueARM64_OpARM64GreaterEqualNoov(v *Value) bool {
|
||||
v_0 := v.Args[0]
|
||||
b := v.Block
|
||||
typ := &b.Func.Config.Types
|
||||
// match: (GreaterEqualNoov (InvertFlags x))
|
||||
// result: (OR (LessThanNoov <typ.Bool> x) (Equal <typ.Bool> x))
|
||||
for {
|
||||
if v_0.Op != OpARM64InvertFlags {
|
||||
break
|
||||
}
|
||||
x := v_0.Args[0]
|
||||
v.reset(OpARM64OR)
|
||||
v0 := b.NewValue0(v.Pos, OpARM64LessThanNoov, typ.Bool)
|
||||
v0.AddArg(x)
|
||||
v1 := b.NewValue0(v.Pos, OpARM64Equal, typ.Bool)
|
||||
v1.AddArg(x)
|
||||
v.AddArg2(v0, v1)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
func rewriteValueARM64_OpARM64GreaterEqualU(v *Value) bool {
|
||||
v_0 := v.Args[0]
|
||||
// match: (GreaterEqualU (FlagConstant [fc]))
|
||||
@@ -6667,6 +6692,27 @@ func rewriteValueARM64_OpARM64LessThanF(v *Value) bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
func rewriteValueARM64_OpARM64LessThanNoov(v *Value) bool {
|
||||
v_0 := v.Args[0]
|
||||
b := v.Block
|
||||
typ := &b.Func.Config.Types
|
||||
// match: (LessThanNoov (InvertFlags x))
|
||||
// result: (BIC (GreaterEqualNoov <typ.Bool> x) (Equal <typ.Bool> x))
|
||||
for {
|
||||
if v_0.Op != OpARM64InvertFlags {
|
||||
break
|
||||
}
|
||||
x := v_0.Args[0]
|
||||
v.reset(OpARM64BIC)
|
||||
v0 := b.NewValue0(v.Pos, OpARM64GreaterEqualNoov, typ.Bool)
|
||||
v0.AddArg(x)
|
||||
v1 := b.NewValue0(v.Pos, OpARM64Equal, typ.Bool)
|
||||
v1.AddArg(x)
|
||||
v.AddArg2(v0, v1)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
func rewriteValueARM64_OpARM64LessThanU(v *Value) bool {
|
||||
v_0 := v.Args[0]
|
||||
// match: (LessThanU (FlagConstant [fc]))
|
||||
@@ -8668,25 +8714,6 @@ func rewriteValueARM64_OpARM64MOVBreg(v *Value) bool {
|
||||
v.AuxInt = int64ToAuxInt(int64(int8(c)))
|
||||
return true
|
||||
}
|
||||
// match: (MOVBreg <t> (ANDconst x [c]))
|
||||
// cond: uint64(c) & uint64(0xffffffffffffff80) == 0
|
||||
// result: (ANDconst <t> x [c])
|
||||
for {
|
||||
t := v.Type
|
||||
if v_0.Op != OpARM64ANDconst {
|
||||
break
|
||||
}
|
||||
c := auxIntToInt64(v_0.AuxInt)
|
||||
x := v_0.Args[0]
|
||||
if !(uint64(c)&uint64(0xffffffffffffff80) == 0) {
|
||||
break
|
||||
}
|
||||
v.reset(OpARM64ANDconst)
|
||||
v.Type = t
|
||||
v.AuxInt = int64ToAuxInt(c)
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (MOVBreg (SLLconst [lc] x))
|
||||
// cond: lc < 8
|
||||
// result: (SBFIZ [armBFAuxInt(lc, 8-lc)] x)
|
||||
@@ -10765,25 +10792,6 @@ func rewriteValueARM64_OpARM64MOVHreg(v *Value) bool {
|
||||
v.AuxInt = int64ToAuxInt(int64(int16(c)))
|
||||
return true
|
||||
}
|
||||
// match: (MOVHreg <t> (ANDconst x [c]))
|
||||
// cond: uint64(c) & uint64(0xffffffffffff8000) == 0
|
||||
// result: (ANDconst <t> x [c])
|
||||
for {
|
||||
t := v.Type
|
||||
if v_0.Op != OpARM64ANDconst {
|
||||
break
|
||||
}
|
||||
c := auxIntToInt64(v_0.AuxInt)
|
||||
x := v_0.Args[0]
|
||||
if !(uint64(c)&uint64(0xffffffffffff8000) == 0) {
|
||||
break
|
||||
}
|
||||
v.reset(OpARM64ANDconst)
|
||||
v.Type = t
|
||||
v.AuxInt = int64ToAuxInt(c)
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (MOVHreg (SLLconst [lc] x))
|
||||
// cond: lc < 16
|
||||
// result: (SBFIZ [armBFAuxInt(lc, 16-lc)] x)
|
||||
@@ -11943,17 +11951,6 @@ func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool {
|
||||
v.AuxInt = int64ToAuxInt(int64(uint32(c)))
|
||||
return true
|
||||
}
|
||||
// match: (MOVWUreg x)
|
||||
// cond: zeroUpper32Bits(x, 3)
|
||||
// result: x
|
||||
for {
|
||||
x := v_0
|
||||
if !(zeroUpper32Bits(x, 3)) {
|
||||
break
|
||||
}
|
||||
v.copyOf(x)
|
||||
return true
|
||||
}
|
||||
// match: (MOVWUreg (SLLconst [lc] x))
|
||||
// cond: lc >= 32
|
||||
// result: (MOVDconst [0])
|
||||
@@ -12458,25 +12455,6 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool {
|
||||
v.AuxInt = int64ToAuxInt(int64(int32(c)))
|
||||
return true
|
||||
}
|
||||
// match: (MOVWreg <t> (ANDconst x [c]))
|
||||
// cond: uint64(c) & uint64(0xffffffff80000000) == 0
|
||||
// result: (ANDconst <t> x [c])
|
||||
for {
|
||||
t := v.Type
|
||||
if v_0.Op != OpARM64ANDconst {
|
||||
break
|
||||
}
|
||||
c := auxIntToInt64(v_0.AuxInt)
|
||||
x := v_0.Args[0]
|
||||
if !(uint64(c)&uint64(0xffffffff80000000) == 0) {
|
||||
break
|
||||
}
|
||||
v.reset(OpARM64ANDconst)
|
||||
v.Type = t
|
||||
v.AuxInt = int64ToAuxInt(c)
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (MOVWreg (SLLconst [lc] x))
|
||||
// cond: lc < 32
|
||||
// result: (SBFIZ [armBFAuxInt(lc, 32-lc)] x)
|
||||
|
||||
@@ -14410,17 +14410,19 @@ func rewriteValuePPC64_OpSelect0(v *Value) bool {
|
||||
v.copyOf(y)
|
||||
return true
|
||||
}
|
||||
// match: (Select0 (ANDCCconst [0xFF] y:(MOVBreg _)))
|
||||
// result: y
|
||||
// match: (Select0 (ANDCCconst [0xFF] (MOVBreg x)))
|
||||
// result: (MOVBZreg x)
|
||||
for {
|
||||
if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 0xFF {
|
||||
break
|
||||
}
|
||||
y := v_0.Args[0]
|
||||
if y.Op != OpPPC64MOVBreg {
|
||||
v_0_0 := v_0.Args[0]
|
||||
if v_0_0.Op != OpPPC64MOVBreg {
|
||||
break
|
||||
}
|
||||
v.copyOf(y)
|
||||
x := v_0_0.Args[0]
|
||||
v.reset(OpPPC64MOVBZreg)
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (Select0 (ANDCCconst [c] y:(MOVHZreg _)))
|
||||
@@ -14438,36 +14440,19 @@ func rewriteValuePPC64_OpSelect0(v *Value) bool {
|
||||
v.copyOf(y)
|
||||
return true
|
||||
}
|
||||
// match: (Select0 (ANDCCconst [0xFFFF] y:(MOVHreg _)))
|
||||
// result: y
|
||||
// match: (Select0 (ANDCCconst [0xFFFF] (MOVHreg x)))
|
||||
// result: (MOVHZreg x)
|
||||
for {
|
||||
if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 0xFFFF {
|
||||
break
|
||||
}
|
||||
y := v_0.Args[0]
|
||||
if y.Op != OpPPC64MOVHreg {
|
||||
break
|
||||
}
|
||||
v.copyOf(y)
|
||||
return true
|
||||
}
|
||||
// match: (Select0 (ANDCCconst [c] (MOVBreg x)))
|
||||
// result: (Select0 (ANDCCconst [c&0xFF] x))
|
||||
for {
|
||||
if v_0.Op != OpPPC64ANDCCconst {
|
||||
break
|
||||
}
|
||||
c := auxIntToInt64(v_0.AuxInt)
|
||||
v_0_0 := v_0.Args[0]
|
||||
if v_0_0.Op != OpPPC64MOVBreg {
|
||||
if v_0_0.Op != OpPPC64MOVHreg {
|
||||
break
|
||||
}
|
||||
x := v_0_0.Args[0]
|
||||
v.reset(OpSelect0)
|
||||
v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
|
||||
v0.AuxInt = int64ToAuxInt(c & 0xFF)
|
||||
v0.AddArg(x)
|
||||
v.AddArg(v0)
|
||||
v.reset(OpPPC64MOVHZreg)
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (Select0 (ANDCCconst [c] (MOVBZreg x)))
|
||||
@@ -14489,25 +14474,6 @@ func rewriteValuePPC64_OpSelect0(v *Value) bool {
|
||||
v.AddArg(v0)
|
||||
return true
|
||||
}
|
||||
// match: (Select0 (ANDCCconst [c] (MOVHreg x)))
|
||||
// result: (Select0 (ANDCCconst [c&0xFFFF] x))
|
||||
for {
|
||||
if v_0.Op != OpPPC64ANDCCconst {
|
||||
break
|
||||
}
|
||||
c := auxIntToInt64(v_0.AuxInt)
|
||||
v_0_0 := v_0.Args[0]
|
||||
if v_0_0.Op != OpPPC64MOVHreg {
|
||||
break
|
||||
}
|
||||
x := v_0_0.Args[0]
|
||||
v.reset(OpSelect0)
|
||||
v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
|
||||
v0.AuxInt = int64ToAuxInt(c & 0xFFFF)
|
||||
v0.AddArg(x)
|
||||
v.AddArg(v0)
|
||||
return true
|
||||
}
|
||||
// match: (Select0 (ANDCCconst [c] (MOVHZreg x)))
|
||||
// result: (Select0 (ANDCCconst [c&0xFFFF] x))
|
||||
for {
|
||||
@@ -14527,25 +14493,6 @@ func rewriteValuePPC64_OpSelect0(v *Value) bool {
|
||||
v.AddArg(v0)
|
||||
return true
|
||||
}
|
||||
// match: (Select0 (ANDCCconst [c] (MOVWreg x)))
|
||||
// result: (Select0 (ANDCCconst [c&0xFFFFFFFF] x))
|
||||
for {
|
||||
if v_0.Op != OpPPC64ANDCCconst {
|
||||
break
|
||||
}
|
||||
c := auxIntToInt64(v_0.AuxInt)
|
||||
v_0_0 := v_0.Args[0]
|
||||
if v_0_0.Op != OpPPC64MOVWreg {
|
||||
break
|
||||
}
|
||||
x := v_0_0.Args[0]
|
||||
v.reset(OpSelect0)
|
||||
v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
|
||||
v0.AuxInt = int64ToAuxInt(c & 0xFFFFFFFF)
|
||||
v0.AddArg(x)
|
||||
v.AddArg(v0)
|
||||
return true
|
||||
}
|
||||
// match: (Select0 (ANDCCconst [c] (MOVWZreg x)))
|
||||
// result: (Select0 (ANDCCconst [c&0xFFFFFFFF] x))
|
||||
for {
|
||||
|
||||
@@ -53,7 +53,10 @@ func mightContainHeapPointer(ptr *Value, size int64, mem *Value, zeroes map[ID]Z
|
||||
}
|
||||
|
||||
ptrSize := ptr.Block.Func.Config.PtrSize
|
||||
if off%ptrSize != 0 || size%ptrSize != 0 {
|
||||
if off%ptrSize != 0 {
|
||||
return true // see issue 61187
|
||||
}
|
||||
if size%ptrSize != 0 {
|
||||
ptr.Fatalf("unaligned pointer write")
|
||||
}
|
||||
if off < 0 || off+size > 64*ptrSize {
|
||||
@@ -130,7 +133,7 @@ func needWBdst(ptr, mem *Value, zeroes map[ID]ZeroRegion) bool {
|
||||
}
|
||||
ptrSize := ptr.Block.Func.Config.PtrSize
|
||||
if off%ptrSize != 0 {
|
||||
ptr.Fatalf("unaligned pointer write")
|
||||
return true // see issue 61187
|
||||
}
|
||||
if off < 0 || off >= 64*ptrSize {
|
||||
// write goes off end of tracked offsets
|
||||
|
||||
@@ -7083,8 +7083,21 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
|
||||
// for an empty block this will be used for its control
|
||||
// instruction. We won't use the actual liveness map on a
|
||||
// control instruction. Just mark it something that is
|
||||
// preemptible, unless this function is "all unsafe".
|
||||
s.pp.NextLive = objw.LivenessIndex{StackMapIndex: -1, IsUnsafePoint: liveness.IsUnsafe(f)}
|
||||
// preemptible, unless this function is "all unsafe", or
|
||||
// the empty block is in a write barrier.
|
||||
unsafe := liveness.IsUnsafe(f)
|
||||
if b.Kind == ssa.BlockPlain {
|
||||
// Empty blocks that are part of write barriers need
|
||||
// to have their control instructions marked unsafe.
|
||||
c := b.Succs[0].Block()
|
||||
for _, v := range c.Values {
|
||||
if v.Op == ssa.OpWBend {
|
||||
unsafe = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
s.pp.NextLive = objw.LivenessIndex{StackMapIndex: -1, IsUnsafePoint: unsafe}
|
||||
|
||||
if idx, ok := argLiveBlockMap[b.ID]; ok && idx != argLiveIdx {
|
||||
argLiveIdx = idx
|
||||
|
||||
@@ -110,11 +110,11 @@ type Config struct {
|
||||
// type checker will initialize this field with a newly created context.
|
||||
Context *Context
|
||||
|
||||
// GoVersion describes the accepted Go language version. The string
|
||||
// must follow the format "go%d.%d" (e.g. "go1.12") or ist must be
|
||||
// empty; an empty string disables Go language version checks.
|
||||
// If the format is invalid, invoking the type checker will cause a
|
||||
// panic.
|
||||
// GoVersion describes the accepted Go language version. The string must
|
||||
// start with a prefix of the form "go%d.%d" (e.g. "go1.20", "go1.21rc1", or
|
||||
// "go1.21.0") or it must be empty; an empty string disables Go language
|
||||
// version checks. If the format is invalid, invoking the type checker will
|
||||
// result in an error.
|
||||
GoVersion string
|
||||
|
||||
// If IgnoreFuncBodies is set, function bodies are not
|
||||
|
||||
@@ -2070,6 +2070,29 @@ func TestIdenticalUnions(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestIssue61737(t *testing.T) {
|
||||
// This test verifies that it is possible to construct invalid interfaces
|
||||
// containing duplicate methods using the go/types API.
|
||||
//
|
||||
// It must be possible for importers to construct such invalid interfaces.
|
||||
// Previously, this panicked.
|
||||
|
||||
sig1 := NewSignatureType(nil, nil, nil, NewTuple(NewParam(nopos, nil, "", Typ[Int])), nil, false)
|
||||
sig2 := NewSignatureType(nil, nil, nil, NewTuple(NewParam(nopos, nil, "", Typ[String])), nil, false)
|
||||
|
||||
methods := []*Func{
|
||||
NewFunc(nopos, nil, "M", sig1),
|
||||
NewFunc(nopos, nil, "M", sig2),
|
||||
}
|
||||
|
||||
embeddedMethods := []*Func{
|
||||
NewFunc(nopos, nil, "M", sig2),
|
||||
}
|
||||
embedded := NewInterfaceType(embeddedMethods, nil)
|
||||
iface := NewInterfaceType(methods, []Type{embedded})
|
||||
iface.NumMethods() // unlike go/types, there is no Complete() method, so we complete implicitly
|
||||
}
|
||||
|
||||
func TestIssue15305(t *testing.T) {
|
||||
const src = "package p; func f() int16; var _ = f(undef)"
|
||||
f := mustParse(src)
|
||||
|
||||
@@ -576,6 +576,11 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
|
||||
// If nargs == 1, make sure x.mode is either a value or a constant.
|
||||
if x.mode != constant_ {
|
||||
x.mode = value
|
||||
// A value must not be untyped.
|
||||
check.assignment(x, &emptyInterface, "argument to "+bin.name)
|
||||
if x.mode == invalid {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Use the final type computed above for all arguments.
|
||||
|
||||
@@ -610,20 +610,17 @@ func (check *Checker) arguments(call *syntax.CallExpr, sig *Signature, targs []T
|
||||
return // error already reported
|
||||
}
|
||||
|
||||
// compute result signature: instantiate if needed
|
||||
rsig = sig
|
||||
// update result signature: instantiate if needed
|
||||
if n > 0 {
|
||||
rsig = check.instantiateSignature(call.Pos(), call.Fun, sig, targs[:n], xlist)
|
||||
}
|
||||
|
||||
// Optimization: Only if the callee's parameter list was adjusted do we need to
|
||||
// compute it from the adjusted list; otherwise we can simply use the result
|
||||
// signature's parameter list. We only need the n type parameters and arguments
|
||||
// of the callee.
|
||||
if n > 0 && adjusted {
|
||||
sigParams = check.subst(call.Pos(), sigParams, makeSubstMap(tparams[:n], targs[:n]), nil, check.context()).(*Tuple)
|
||||
} else {
|
||||
sigParams = rsig.params
|
||||
// If the callee's parameter list was adjusted we need to update (instantiate)
|
||||
// it separately. Otherwise we can simply use the result signature's parameter
|
||||
// list.
|
||||
if adjusted {
|
||||
sigParams = check.subst(call.Pos(), sigParams, makeSubstMap(tparams[:n], targs[:n]), nil, check.context()).(*Tuple)
|
||||
} else {
|
||||
sigParams = rsig.params
|
||||
}
|
||||
}
|
||||
|
||||
// compute argument signatures: instantiate if needed
|
||||
|
||||
@@ -73,7 +73,6 @@ func representableConst(x constant.Value, check *Checker, typ *Basic, rounded *c
|
||||
|
||||
sizeof := func(T Type) int64 {
|
||||
s := conf.sizeof(T)
|
||||
assert(s == 4 || s == 8)
|
||||
return s
|
||||
}
|
||||
|
||||
|
||||
@@ -96,7 +96,7 @@ func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type,
|
||||
// Unify parameter and argument types for generic parameters with typed arguments
|
||||
// and collect the indices of generic parameters with untyped arguments.
|
||||
// Terminology: generic parameter = function parameter with a type-parameterized type
|
||||
u := newUnifier(tparams, targs)
|
||||
u := newUnifier(tparams, targs, check.allowVersion(check.pkg, pos, go1_21))
|
||||
|
||||
errorf := func(kind string, tpar, targ Type, arg *operand) {
|
||||
// provide a better error message if we can
|
||||
|
||||
@@ -900,3 +900,23 @@ func _cgoCheckResult(interface{})
|
||||
*boolFieldAddr(cfg, "go115UsesCgo") = true
|
||||
})
|
||||
}
|
||||
|
||||
func TestIssue61931(t *testing.T) {
|
||||
const src = `
|
||||
package p
|
||||
|
||||
func A(func(any), ...any) {}
|
||||
func B[T any](T) {}
|
||||
|
||||
func _() {
|
||||
A(B, nil // syntax error: missing ',' before newline in argument list
|
||||
}
|
||||
`
|
||||
f, err := syntax.Parse(syntax.NewFileBase(pkgName(src)), strings.NewReader(src), func(error) {}, nil, 0)
|
||||
if err == nil {
|
||||
t.Fatal("expected syntax error")
|
||||
}
|
||||
|
||||
var conf Config
|
||||
conf.Check(f.PkgName.Value, []*syntax.File{f}, nil) // must not panic
|
||||
}
|
||||
|
||||
@@ -6,7 +6,6 @@ package types2
|
||||
|
||||
import (
|
||||
"cmd/compile/internal/syntax"
|
||||
"fmt"
|
||||
. "internal/types/errors"
|
||||
"sort"
|
||||
"strings"
|
||||
@@ -212,7 +211,6 @@ func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_
|
||||
// we can get rid of the mpos map below and simply use the cloned method's
|
||||
// position.
|
||||
|
||||
var todo []*Func
|
||||
var seen objset
|
||||
var allMethods []*Func
|
||||
mpos := make(map[*Func]syntax.Pos) // method specification or method embedding position, for good error messages
|
||||
@@ -222,36 +220,30 @@ func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_
|
||||
allMethods = append(allMethods, m)
|
||||
mpos[m] = pos
|
||||
case explicit:
|
||||
if check == nil {
|
||||
panic(fmt.Sprintf("%s: duplicate method %s", m.pos, m.name))
|
||||
if check != nil {
|
||||
var err error_
|
||||
err.code = DuplicateDecl
|
||||
err.errorf(pos, "duplicate method %s", m.name)
|
||||
err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
|
||||
check.report(&err)
|
||||
}
|
||||
// check != nil
|
||||
var err error_
|
||||
err.code = DuplicateDecl
|
||||
err.errorf(pos, "duplicate method %s", m.name)
|
||||
err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
|
||||
check.report(&err)
|
||||
default:
|
||||
// We have a duplicate method name in an embedded (not explicitly declared) method.
|
||||
// Check method signatures after all types are computed (go.dev/issue/33656).
|
||||
// If we're pre-go1.14 (overlapping embeddings are not permitted), report that
|
||||
// error here as well (even though we could do it eagerly) because it's the same
|
||||
// error message.
|
||||
if check == nil {
|
||||
// check method signatures after all locally embedded interfaces are computed
|
||||
todo = append(todo, m, other.(*Func))
|
||||
break
|
||||
if check != nil {
|
||||
check.later(func() {
|
||||
if !check.allowVersion(m.pkg, pos, go1_14) || !Identical(m.typ, other.Type()) {
|
||||
var err error_
|
||||
err.code = DuplicateDecl
|
||||
err.errorf(pos, "duplicate method %s", m.name)
|
||||
err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
|
||||
check.report(&err)
|
||||
}
|
||||
}).describef(pos, "duplicate method check for %s", m.name)
|
||||
}
|
||||
// check != nil
|
||||
check.later(func() {
|
||||
if !check.allowVersion(m.pkg, pos, go1_14) || !Identical(m.typ, other.Type()) {
|
||||
var err error_
|
||||
err.code = DuplicateDecl
|
||||
err.errorf(pos, "duplicate method %s", m.name)
|
||||
err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
|
||||
check.report(&err)
|
||||
}
|
||||
}).describef(pos, "duplicate method check for %s", m.name)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -314,15 +306,6 @@ func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_
|
||||
}
|
||||
ityp.embedPos = nil // not needed anymore (errors have been reported)
|
||||
|
||||
// process todo's (this only happens if check == nil)
|
||||
for i := 0; i < len(todo); i += 2 {
|
||||
m := todo[i]
|
||||
other := todo[i+1]
|
||||
if !Identical(m.typ, other.typ) {
|
||||
panic(fmt.Sprintf("%s: duplicate method %s", m.pos, m.name))
|
||||
}
|
||||
}
|
||||
|
||||
ityp.tset.comparable = allComparable
|
||||
if len(allMethods) != 0 {
|
||||
sortMethods(allMethods)
|
||||
|
||||
@@ -53,11 +53,6 @@ const (
|
||||
// the core types, if any, of non-local (unbound) type parameters.
|
||||
enableCoreTypeUnification = true
|
||||
|
||||
// If enableInterfaceInference is set, type inference uses
|
||||
// shared methods for improved type inference involving
|
||||
// interfaces.
|
||||
enableInterfaceInference = true
|
||||
|
||||
// If traceInference is set, unification will print a trace of its operation.
|
||||
// Interpretation of trace:
|
||||
// x ≡ y attempt to unify types x and y
|
||||
@@ -81,15 +76,16 @@ type unifier struct {
|
||||
// that inferring the type for a given type parameter P will
|
||||
// automatically infer the same type for all other parameters
|
||||
// unified (joined) with P.
|
||||
handles map[*TypeParam]*Type
|
||||
depth int // recursion depth during unification
|
||||
handles map[*TypeParam]*Type
|
||||
depth int // recursion depth during unification
|
||||
enableInterfaceInference bool // use shared methods for better inference
|
||||
}
|
||||
|
||||
// newUnifier returns a new unifier initialized with the given type parameter
|
||||
// and corresponding type argument lists. The type argument list may be shorter
|
||||
// than the type parameter list, and it may contain nil types. Matching type
|
||||
// parameters and arguments must have the same index.
|
||||
func newUnifier(tparams []*TypeParam, targs []Type) *unifier {
|
||||
func newUnifier(tparams []*TypeParam, targs []Type, enableInterfaceInference bool) *unifier {
|
||||
assert(len(tparams) >= len(targs))
|
||||
handles := make(map[*TypeParam]*Type, len(tparams))
|
||||
// Allocate all handles up-front: in a correct program, all type parameters
|
||||
@@ -103,7 +99,7 @@ func newUnifier(tparams []*TypeParam, targs []Type) *unifier {
|
||||
}
|
||||
handles[x] = &t
|
||||
}
|
||||
return &unifier{handles, 0}
|
||||
return &unifier{handles, 0, enableInterfaceInference}
|
||||
}
|
||||
|
||||
// unifyMode controls the behavior of the unifier.
|
||||
@@ -339,7 +335,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
|
||||
// we will fail at function instantiation or argument assignment time.
|
||||
//
|
||||
// If we have at least one defined type, there is one in y.
|
||||
if ny, _ := y.(*Named); mode&exact == 0 && ny != nil && isTypeLit(x) && !(enableInterfaceInference && IsInterface(x)) {
|
||||
if ny, _ := y.(*Named); mode&exact == 0 && ny != nil && isTypeLit(x) && !(u.enableInterfaceInference && IsInterface(x)) {
|
||||
if traceInference {
|
||||
u.tracef("%s ≡ under %s", x, ny)
|
||||
}
|
||||
@@ -405,18 +401,40 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
|
||||
// Therefore, we must fail unification (go.dev/issue/60933).
|
||||
return false
|
||||
}
|
||||
// If y is a defined type, make sure we record that type
|
||||
// for type parameter x, which may have until now only
|
||||
// recorded an underlying type (go.dev/issue/43056).
|
||||
// Either both types are interfaces, or neither type is.
|
||||
// If both are interfaces, they have the same methods.
|
||||
// If we have inexact unification and one of x or y is a defined type, select the
|
||||
// defined type. This ensures that in a series of types, all matching against the
|
||||
// same type parameter, we infer a defined type if there is one, independent of
|
||||
// order. Type inference or assignment may fail, which is ok.
|
||||
// Selecting a defined type, if any, ensures that we don't lose the type name;
|
||||
// and since we have inexact unification, a value of equally named or matching
|
||||
// undefined type remains assignable (go.dev/issue/43056).
|
||||
//
|
||||
// Note: Changing the recorded type for a type parameter to
|
||||
// a defined type is only ok when unification is inexact.
|
||||
// But in exact unification, if we have a match, x and y must
|
||||
// be identical, so changing the recorded type for x is a no-op.
|
||||
if yn {
|
||||
u.set(px, y)
|
||||
// Similarly, if we have inexact unification and there are no defined types but
|
||||
// channel types, select a directed channel, if any. This ensures that in a series
|
||||
// of unnamed types, all matching against the same type parameter, we infer the
|
||||
// directed channel if there is one, independent of order.
|
||||
// Selecting a directional channel, if any, ensures that a value of another
|
||||
// inexactly unifying channel type remains assignable (go.dev/issue/62157).
|
||||
//
|
||||
// If we have multiple defined channel types, they are either identical or we
|
||||
// have assignment conflicts, so we can ignore directionality in this case.
|
||||
//
|
||||
// If we have defined and literal channel types, a defined type wins to avoid
|
||||
// order dependencies.
|
||||
if mode&exact == 0 {
|
||||
switch {
|
||||
case xn:
|
||||
// x is a defined type: nothing to do.
|
||||
case yn:
|
||||
// x is not a defined type and y is a defined type: select y.
|
||||
u.set(px, y)
|
||||
default:
|
||||
// Neither x nor y are defined types.
|
||||
if yc, _ := under(y).(*Chan); yc != nil && yc.dir != SendRecv {
|
||||
// y is a directed channel type: select y.
|
||||
u.set(px, y)
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
@@ -437,12 +455,12 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
|
||||
emode |= exact
|
||||
}
|
||||
|
||||
// If EnableInterfaceInference is set and we don't require exact unification,
|
||||
// If u.EnableInterfaceInference is set and we don't require exact unification,
|
||||
// if both types are interfaces, one interface must have a subset of the
|
||||
// methods of the other and corresponding method signatures must unify.
|
||||
// If only one type is an interface, all its methods must be present in the
|
||||
// other type and corresponding method signatures must unify.
|
||||
if enableInterfaceInference && mode&exact == 0 {
|
||||
if u.enableInterfaceInference && mode&exact == 0 {
|
||||
// One or both interfaces may be defined types.
|
||||
// Look under the name, but not under type parameters (go.dev/issue/60564).
|
||||
xi := asInterface(x)
|
||||
@@ -505,7 +523,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
|
||||
}
|
||||
// All xmethods must exist in ymethods and corresponding signatures must unify.
|
||||
for _, xm := range xmethods {
|
||||
if ym := ymap[xm.Id()]; ym == nil || !u.nify(xm.typ, ym.typ, emode, p) {
|
||||
if ym := ymap[xm.Id()]; ym == nil || !u.nify(xm.typ, ym.typ, exact, p) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
@@ -526,7 +544,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
|
||||
xmethods := xi.typeSet().methods
|
||||
for _, xm := range xmethods {
|
||||
obj, _, _ := LookupFieldOrMethod(y, false, xm.pkg, xm.name)
|
||||
if ym, _ := obj.(*Func); ym == nil || !u.nify(xm.typ, ym.typ, emode, p) {
|
||||
if ym, _ := obj.(*Func); ym == nil || !u.nify(xm.typ, ym.typ, exact, p) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
@@ -632,7 +650,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
|
||||
}
|
||||
|
||||
case *Interface:
|
||||
assert(!enableInterfaceInference || mode&exact != 0) // handled before this switch
|
||||
assert(!u.enableInterfaceInference || mode&exact != 0) // handled before this switch
|
||||
|
||||
// Two interface types unify if they have the same set of methods with
|
||||
// the same names, and corresponding function types unify.
|
||||
@@ -685,7 +703,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
|
||||
}
|
||||
for i, f := range a {
|
||||
g := b[i]
|
||||
if f.Id() != g.Id() || !u.nify(f.typ, g.typ, emode, q) {
|
||||
if f.Id() != g.Id() || !u.nify(f.typ, g.typ, exact, q) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
@@ -135,7 +135,11 @@ func walkClear(n *ir.UnaryExpr) ir.Node {
|
||||
typ := n.X.Type()
|
||||
switch {
|
||||
case typ.IsSlice():
|
||||
return arrayClear(n.X.Pos(), n.X, nil)
|
||||
if n := arrayClear(n.X.Pos(), n.X, nil); n != nil {
|
||||
return n
|
||||
}
|
||||
// If n == nil, we are clearing an array which takes zero memory, do nothing.
|
||||
return ir.NewBlockStmt(n.Pos(), nil)
|
||||
case typ.IsMap():
|
||||
return mapClear(n.X, reflectdata.TypePtrAt(n.X.Pos(), n.X.Type()))
|
||||
}
|
||||
@@ -251,7 +255,10 @@ func walkLenCap(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
|
||||
return mkcall("countrunes", n.Type(), init, typecheck.Conv(n.X.(*ir.ConvExpr).X, types.Types[types.TSTRING]))
|
||||
}
|
||||
if isByteCount(n) {
|
||||
_, len := backingArrayPtrLen(cheapExpr(n.X.(*ir.ConvExpr).X, init))
|
||||
conv := n.X.(*ir.ConvExpr)
|
||||
walkStmtList(conv.Init())
|
||||
init.Append(ir.TakeInit(conv)...)
|
||||
_, len := backingArrayPtrLen(cheapExpr(conv.X, init))
|
||||
return len
|
||||
}
|
||||
|
||||
|
||||
@@ -278,8 +278,10 @@ func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) {
|
||||
} else {
|
||||
ptr.SetType(n.Type().Elem().PtrTo())
|
||||
}
|
||||
ptr.SetTypecheck(1)
|
||||
length = ir.NewUnaryExpr(base.Pos, ir.OLEN, n)
|
||||
length.SetType(types.Types[types.TINT])
|
||||
length.SetTypecheck(1)
|
||||
return ptr, length
|
||||
}
|
||||
|
||||
|
||||
@@ -3,82 +3,78 @@
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
//
|
||||
// Covdata is a program for manipulating and generating reports
|
||||
// from 2nd-generation coverage testing output files, those produced
|
||||
// from running applications or integration tests. E.g.
|
||||
//
|
||||
// $ mkdir ./profiledir
|
||||
// $ go build -cover -o myapp.exe .
|
||||
// $ GOCOVERDIR=./profiledir ./myapp.exe <arguments>
|
||||
// $ ls ./profiledir
|
||||
// covcounters.cce1b350af34b6d0fb59cc1725f0ee27.821598.1663006712821344241
|
||||
// covmeta.cce1b350af34b6d0fb59cc1725f0ee27
|
||||
// $
|
||||
//
|
||||
// Run covdata via "go tool covdata <mode>", where 'mode' is a subcommand
|
||||
// selecting a specific reporting, merging, or data manipulation operation.
|
||||
// Descriptions on the various modes (run "go tool cover <mode> -help" for
|
||||
// specifics on usage of a given mode:
|
||||
//
|
||||
// 1. Report percent of statements covered in each profiled package
|
||||
//
|
||||
// $ go tool covdata percent -i=profiledir
|
||||
// cov-example/p coverage: 41.1% of statements
|
||||
// main coverage: 87.5% of statements
|
||||
// $
|
||||
//
|
||||
//
|
||||
// 2. Report import paths of packages profiled
|
||||
//
|
||||
// $ go tool covdata pkglist -i=profiledir
|
||||
// cov-example/p
|
||||
// main
|
||||
// $
|
||||
//
|
||||
// 3. Report percent statements covered by function:
|
||||
//
|
||||
// $ go tool covdata func -i=profiledir
|
||||
// cov-example/p/p.go:12: emptyFn 0.0%
|
||||
// cov-example/p/p.go:32: Small 100.0%
|
||||
// cov-example/p/p.go:47: Medium 90.9%
|
||||
// ...
|
||||
// $
|
||||
//
|
||||
// 4. Convert coverage data to legacy textual format:
|
||||
//
|
||||
// $ go tool covdata textfmt -i=profiledir -o=cov.txt
|
||||
// $ head cov.txt
|
||||
// mode: set
|
||||
// cov-example/p/p.go:12.22,13.2 0 0
|
||||
// cov-example/p/p.go:15.31,16.2 1 0
|
||||
// cov-example/p/p.go:16.3,18.3 0 0
|
||||
// cov-example/p/p.go:19.3,21.3 0 0
|
||||
// ...
|
||||
// $ go tool cover -html=cov.txt
|
||||
// $
|
||||
//
|
||||
// 5. Merge profiles together:
|
||||
//
|
||||
// $ go tool covdata merge -i=indir1,indir2 -o=outdir -modpaths=github.com/go-delve/delve
|
||||
// $
|
||||
//
|
||||
// 6. Subtract one profile from another
|
||||
//
|
||||
// $ go tool covdata subtract -i=indir1,indir2 -o=outdir
|
||||
// $
|
||||
//
|
||||
// 7. Intersect profiles
|
||||
//
|
||||
// $ go tool covdata intersect -i=indir1,indir2 -o=outdir
|
||||
// $
|
||||
//
|
||||
// 8. Dump a profile for debugging purposes.
|
||||
//
|
||||
// $ go tool covdata debugdump -i=indir
|
||||
// <human readable output>
|
||||
// $
|
||||
//
|
||||
*/
|
||||
Covdata is a program for manipulating and generating reports
|
||||
from 2nd-generation coverage testing output files, those produced
|
||||
from running applications or integration tests. E.g.
|
||||
|
||||
$ mkdir ./profiledir
|
||||
$ go build -cover -o myapp.exe .
|
||||
$ GOCOVERDIR=./profiledir ./myapp.exe <arguments>
|
||||
$ ls ./profiledir
|
||||
covcounters.cce1b350af34b6d0fb59cc1725f0ee27.821598.1663006712821344241
|
||||
covmeta.cce1b350af34b6d0fb59cc1725f0ee27
|
||||
$
|
||||
|
||||
Run covdata via "go tool covdata <mode>", where 'mode' is a subcommand
|
||||
selecting a specific reporting, merging, or data manipulation operation.
|
||||
Descriptions on the various modes (run "go tool cover <mode> -help" for
|
||||
specifics on usage of a given mode:
|
||||
|
||||
1. Report percent of statements covered in each profiled package
|
||||
|
||||
$ go tool covdata percent -i=profiledir
|
||||
cov-example/p coverage: 41.1% of statements
|
||||
main coverage: 87.5% of statements
|
||||
$
|
||||
|
||||
2. Report import paths of packages profiled
|
||||
|
||||
$ go tool covdata pkglist -i=profiledir
|
||||
cov-example/p
|
||||
main
|
||||
$
|
||||
|
||||
3. Report percent statements covered by function:
|
||||
|
||||
$ go tool covdata func -i=profiledir
|
||||
cov-example/p/p.go:12: emptyFn 0.0%
|
||||
cov-example/p/p.go:32: Small 100.0%
|
||||
cov-example/p/p.go:47: Medium 90.9%
|
||||
...
|
||||
$
|
||||
|
||||
4. Convert coverage data to legacy textual format:
|
||||
|
||||
$ go tool covdata textfmt -i=profiledir -o=cov.txt
|
||||
$ head cov.txt
|
||||
mode: set
|
||||
cov-example/p/p.go:12.22,13.2 0 0
|
||||
cov-example/p/p.go:15.31,16.2 1 0
|
||||
cov-example/p/p.go:16.3,18.3 0 0
|
||||
cov-example/p/p.go:19.3,21.3 0 0
|
||||
...
|
||||
$ go tool cover -html=cov.txt
|
||||
$
|
||||
|
||||
5. Merge profiles together:
|
||||
|
||||
$ go tool covdata merge -i=indir1,indir2 -o=outdir -modpaths=github.com/go-delve/delve
|
||||
$
|
||||
|
||||
6. Subtract one profile from another
|
||||
|
||||
$ go tool covdata subtract -i=indir1,indir2 -o=outdir
|
||||
$
|
||||
|
||||
7. Intersect profiles
|
||||
|
||||
$ go tool covdata intersect -i=indir1,indir2 -o=outdir
|
||||
$
|
||||
|
||||
8. Dump a profile for debugging purposes.
|
||||
|
||||
$ go tool covdata debugdump -i=indir
|
||||
<human readable output>
|
||||
$
|
||||
*/
|
||||
package main
|
||||
|
||||
30
src/cmd/dist/test.go
vendored
30
src/cmd/dist/test.go
vendored
@@ -91,6 +91,29 @@ type work struct {
|
||||
end chan bool
|
||||
}
|
||||
|
||||
// printSkip prints a skip message for all of work.
|
||||
func (w *work) printSkip(t *tester, msg string) {
|
||||
if t.json {
|
||||
type event struct {
|
||||
Time time.Time
|
||||
Action string
|
||||
Package string
|
||||
Output string `json:",omitempty"`
|
||||
}
|
||||
enc := json.NewEncoder(&w.out)
|
||||
ev := event{Time: time.Now(), Package: w.dt.name, Action: "start"}
|
||||
enc.Encode(ev)
|
||||
ev.Action = "output"
|
||||
ev.Output = msg
|
||||
enc.Encode(ev)
|
||||
ev.Action = "skip"
|
||||
ev.Output = ""
|
||||
enc.Encode(ev)
|
||||
return
|
||||
}
|
||||
fmt.Fprintln(&w.out, msg)
|
||||
}
|
||||
|
||||
// A distTest is a test run by dist test.
|
||||
// Each test has a unique name and belongs to a group (heading)
|
||||
type distTest struct {
|
||||
@@ -405,6 +428,9 @@ func (opts *goTest) buildArgs(t *tester) (build, run, pkgs, testFlags []string,
|
||||
if opts.timeout != 0 {
|
||||
d := opts.timeout * time.Duration(t.timeoutScale)
|
||||
run = append(run, "-timeout="+d.String())
|
||||
} else if t.timeoutScale != 1 {
|
||||
const goTestDefaultTimeout = 10 * time.Minute // Default value of go test -timeout flag.
|
||||
run = append(run, "-timeout="+(goTestDefaultTimeout*time.Duration(t.timeoutScale)).String())
|
||||
}
|
||||
if opts.short || t.short {
|
||||
run = append(run, "-short")
|
||||
@@ -1235,7 +1261,7 @@ func (t *tester) runPending(nextTest *distTest) {
|
||||
go func(w *work) {
|
||||
if !<-w.start {
|
||||
timelog("skip", w.dt.name)
|
||||
w.out.WriteString("skipped due to earlier error\n")
|
||||
w.printSkip(t, "skipped due to earlier error")
|
||||
} else {
|
||||
timelog("start", w.dt.name)
|
||||
w.err = w.cmd.Run()
|
||||
@@ -1246,7 +1272,7 @@ func (t *tester) runPending(nextTest *distTest) {
|
||||
if isUnsupportedVMASize(w) {
|
||||
timelog("skip", w.dt.name)
|
||||
w.out.Reset()
|
||||
w.out.WriteString("skipped due to unsupported VMA\n")
|
||||
w.printSkip(t, "skipped due to unsupported VMA")
|
||||
w.err = nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -44,7 +44,7 @@ type fileInfo struct {
|
||||
func (i fileInfo) Name() string { return path.Base(i.f.Name) }
|
||||
func (i fileInfo) ModTime() time.Time { return i.f.Time }
|
||||
func (i fileInfo) Mode() fs.FileMode { return i.f.Mode }
|
||||
func (i fileInfo) IsDir() bool { return false }
|
||||
func (i fileInfo) IsDir() bool { return i.f.Mode&fs.ModeDir != 0 }
|
||||
func (i fileInfo) Size() int64 { return i.f.Size }
|
||||
func (i fileInfo) Sys() any { return nil }
|
||||
|
||||
|
||||
@@ -329,8 +329,47 @@ func writeTgz(name string, a *Archive) {
|
||||
|
||||
zw := check(gzip.NewWriterLevel(out, gzip.BestCompression))
|
||||
tw := tar.NewWriter(zw)
|
||||
|
||||
// Find the mode and mtime to use for directory entries,
|
||||
// based on the mode and mtime of the first file we see.
|
||||
// We know that modes and mtimes are uniform across the archive.
|
||||
var dirMode fs.FileMode
|
||||
var mtime time.Time
|
||||
for _, f := range a.Files {
|
||||
dirMode = fs.ModeDir | f.Mode | (f.Mode&0444)>>2 // copy r bits down to x bits
|
||||
mtime = f.Time
|
||||
break
|
||||
}
|
||||
|
||||
// mkdirAll ensures that the tar file contains directory
|
||||
// entries for dir and all its parents. Some programs reading
|
||||
// these tar files expect that. See go.dev/issue/61862.
|
||||
haveDir := map[string]bool{".": true}
|
||||
var mkdirAll func(string)
|
||||
mkdirAll = func(dir string) {
|
||||
if dir == "/" {
|
||||
panic("mkdirAll /")
|
||||
}
|
||||
if haveDir[dir] {
|
||||
return
|
||||
}
|
||||
haveDir[dir] = true
|
||||
mkdirAll(path.Dir(dir))
|
||||
df := &File{
|
||||
Name: dir + "/",
|
||||
Time: mtime,
|
||||
Mode: dirMode,
|
||||
}
|
||||
h := check(tar.FileInfoHeader(df.Info(), ""))
|
||||
h.Name = dir + "/"
|
||||
if err := tw.WriteHeader(h); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, f = range a.Files {
|
||||
h := check(tar.FileInfoHeader(f.Info(), ""))
|
||||
mkdirAll(path.Dir(f.Name))
|
||||
h.Name = f.Name
|
||||
if err := tw.WriteHeader(h); err != nil {
|
||||
panic(err)
|
||||
|
||||
@@ -4,12 +4,12 @@ go 1.21
|
||||
|
||||
require (
|
||||
github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26
|
||||
golang.org/x/arch v0.3.0
|
||||
golang.org/x/mod v0.10.1-0.20230606122920-62c7e578f1a7
|
||||
golang.org/x/sync v0.2.1-0.20230601203510-93782cc822b6
|
||||
golang.org/x/sys v0.9.0
|
||||
golang.org/x/term v0.9.0
|
||||
golang.org/x/tools v0.9.4-0.20230613194514-c6c983054920
|
||||
golang.org/x/arch v0.4.0
|
||||
golang.org/x/mod v0.12.0
|
||||
golang.org/x/sync v0.3.0
|
||||
golang.org/x/sys v0.10.0
|
||||
golang.org/x/term v0.10.0
|
||||
golang.org/x/tools v0.11.1-0.20230712164437-1ca21856af7b
|
||||
)
|
||||
|
||||
require github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2 // indirect
|
||||
|
||||
@@ -2,15 +2,15 @@ github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbu
|
||||
github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2 h1:rcanfLhLDA8nozr/K289V1zcntHr3V+SHlXwzz1ZI2g=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
|
||||
golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k=
|
||||
golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||
golang.org/x/mod v0.10.1-0.20230606122920-62c7e578f1a7 h1:OSEstGpBW1+G0wiXI0bBgOnI8nRJQKX3GCNxF75VR1s=
|
||||
golang.org/x/mod v0.10.1-0.20230606122920-62c7e578f1a7/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/sync v0.2.1-0.20230601203510-93782cc822b6 h1:kiysxTbHE5FVnrNyc9BC/yeJi3DTUBHIJtNbC9uvXk4=
|
||||
golang.org/x/sync v0.2.1-0.20230601203510-93782cc822b6/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s=
|
||||
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.9.0 h1:GRRCnKYhdQrD8kfRAdQ6Zcw1P0OcELxGLKJvtjVMZ28=
|
||||
golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo=
|
||||
golang.org/x/tools v0.9.4-0.20230613194514-c6c983054920 h1:FJIPEU9owLOeJgghpx63YhobtkWkORJ3O5ZnbFr8Bzs=
|
||||
golang.org/x/tools v0.9.4-0.20230613194514-c6c983054920/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
|
||||
golang.org/x/arch v0.4.0 h1:A8WCeEWhLwPBKNbFi5Wv5UTCBx5zzubnXDlMOFAzFMc=
|
||||
golang.org/x/arch v0.4.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
|
||||
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
|
||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
|
||||
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
|
||||
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
|
||||
golang.org/x/tools v0.11.1-0.20230712164437-1ca21856af7b h1:KIZCni6lCdxd4gxHx49Zp9mhckTFRbI/ZPDbR3jKu90=
|
||||
golang.org/x/tools v0.11.1-0.20230712164437-1ca21856af7b/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8=
|
||||
|
||||
@@ -490,25 +490,43 @@ func findGOROOT(env string) string {
|
||||
// depend on the executable's location.
|
||||
return def
|
||||
}
|
||||
|
||||
// canonical returns a directory path that represents
|
||||
// the same directory as dir,
|
||||
// preferring the spelling in def if the two are the same.
|
||||
canonical := func(dir string) string {
|
||||
if isSameDir(def, dir) {
|
||||
return def
|
||||
}
|
||||
return dir
|
||||
}
|
||||
|
||||
exe, err := os.Executable()
|
||||
if err == nil {
|
||||
exe, err = filepath.Abs(exe)
|
||||
if err == nil {
|
||||
// cmd/go may be installed in GOROOT/bin or GOROOT/bin/GOOS_GOARCH,
|
||||
// depending on whether it was cross-compiled with a different
|
||||
// GOHOSTOS (see https://go.dev/issue/62119). Try both.
|
||||
if dir := filepath.Join(exe, "../.."); isGOROOT(dir) {
|
||||
// If def (runtime.GOROOT()) and dir are the same
|
||||
// directory, prefer the spelling used in def.
|
||||
if isSameDir(def, dir) {
|
||||
return def
|
||||
}
|
||||
return dir
|
||||
return canonical(dir)
|
||||
}
|
||||
if dir := filepath.Join(exe, "../../.."); isGOROOT(dir) {
|
||||
return canonical(dir)
|
||||
}
|
||||
|
||||
// Depending on what was passed on the command line, it is possible
|
||||
// that os.Executable is a symlink (like /usr/local/bin/go) referring
|
||||
// to a binary installed in a real GOROOT elsewhere
|
||||
// (like /usr/lib/go/bin/go).
|
||||
// Try to find that GOROOT by resolving the symlinks.
|
||||
exe, err = filepath.EvalSymlinks(exe)
|
||||
if err == nil {
|
||||
if dir := filepath.Join(exe, "../.."); isGOROOT(dir) {
|
||||
if isSameDir(def, dir) {
|
||||
return def
|
||||
}
|
||||
return dir
|
||||
return canonical(dir)
|
||||
}
|
||||
if dir := filepath.Join(exe, "../../.."); isGOROOT(dir) {
|
||||
return canonical(dir)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -179,6 +179,9 @@ func parse(x string) version {
|
||||
// Parse prerelease.
|
||||
i := 0
|
||||
for i < len(x) && (x[i] < '0' || '9' < x[i]) {
|
||||
if x[i] < 'a' || 'z' < x[i] {
|
||||
return version{}
|
||||
}
|
||||
i++
|
||||
}
|
||||
if i == 0 {
|
||||
|
||||
@@ -95,6 +95,25 @@ var prevTests = []testCase1[string, string]{
|
||||
{"1.40000000000000000", "1.39999999999999999"},
|
||||
}
|
||||
|
||||
func TestIsValid(t *testing.T) { test1(t, isValidTests, "IsValid", IsValid) }
|
||||
|
||||
var isValidTests = []testCase1[string, bool]{
|
||||
{"1.2rc3", true},
|
||||
{"1.2.3", true},
|
||||
{"1.999testmod", true},
|
||||
{"1.600+auto", false},
|
||||
{"1.22", true},
|
||||
{"1.21.0", true},
|
||||
{"1.21rc2", true},
|
||||
{"1.21", true},
|
||||
{"1.20.0", true},
|
||||
{"1.20", true},
|
||||
{"1.19", true},
|
||||
{"1.3", true},
|
||||
{"1.2", true},
|
||||
{"1", true},
|
||||
}
|
||||
|
||||
type testCase1[In, Out any] struct {
|
||||
in In
|
||||
out Out
|
||||
|
||||
@@ -15,13 +15,20 @@ import (
|
||||
// FromToolchain returns the Go version for the named toolchain,
|
||||
// derived from the name itself (not by running the toolchain).
|
||||
// A toolchain is named "goVERSION".
|
||||
// A suffix after the VERSION introduced by a +, -, space, or tab is removed.
|
||||
// A suffix after the VERSION introduced by a -, space, or tab is removed.
|
||||
// Examples:
|
||||
//
|
||||
// FromToolchain("go1.2.3") == "1.2.3"
|
||||
// FromToolchain("go1.2.3-bigcorp") == "1.2.3"
|
||||
// FromToolchain("invalid") == ""
|
||||
func FromToolchain(name string) string {
|
||||
if strings.ContainsAny(name, "\\/") {
|
||||
// The suffix must not include a path separator, since that would cause
|
||||
// exec.LookPath to resolve it from a relative directory instead of from
|
||||
// $PATH.
|
||||
return ""
|
||||
}
|
||||
|
||||
var v string
|
||||
if strings.HasPrefix(name, "go") {
|
||||
v = name[2:]
|
||||
|
||||
@@ -959,7 +959,10 @@ func collectDepsErrors(p *load.Package) {
|
||||
if len(stkj) != 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
return p.DepsErrors[i].Err.Error() < p.DepsErrors[j].Err.Error()
|
||||
} else if len(stkj) == 0 {
|
||||
return false
|
||||
}
|
||||
pathi, pathj := stki[len(stki)-1], stkj[len(stkj)-1]
|
||||
return pathi < pathj
|
||||
|
||||
@@ -473,6 +473,7 @@ func recompileForTest(pmain, preal, ptest, pxtest *Package) *PackageError {
|
||||
p.Target = ""
|
||||
p.Internal.BuildInfo = nil
|
||||
p.Internal.ForceLibrary = true
|
||||
p.Internal.PGOProfile = preal.Internal.PGOProfile
|
||||
}
|
||||
|
||||
// Update p.Internal.Imports to use test copies.
|
||||
@@ -496,6 +497,11 @@ func recompileForTest(pmain, preal, ptest, pxtest *Package) *PackageError {
|
||||
if p.Name == "main" && p != pmain && p != ptest {
|
||||
split()
|
||||
}
|
||||
// Split and attach PGO information to test dependencies if preal
|
||||
// is built with PGO.
|
||||
if preal.Internal.PGOProfile != "" && p.Internal.PGOProfile == "" {
|
||||
split()
|
||||
}
|
||||
}
|
||||
|
||||
// Do search to find cycle.
|
||||
|
||||
@@ -60,6 +60,15 @@ func (r *toolchainRepo) Versions(ctx context.Context, prefix string) (*Versions,
|
||||
}
|
||||
}
|
||||
|
||||
// Always include our own version.
|
||||
// This means that the development branch of Go 1.21 (say) will allow 'go get go@1.21'
|
||||
// even though there are no Go 1.21 releases yet.
|
||||
// Once there is a release, 1.21 will be treated as a query matching the latest available release.
|
||||
// Before then, 1.21 will be treated as a query that resolves to this entry we are adding (1.21).
|
||||
if v := gover.Local(); !have[v] {
|
||||
list = append(list, goPrefix+v)
|
||||
}
|
||||
|
||||
if r.path == "go" {
|
||||
sort.Slice(list, func(i, j int) bool {
|
||||
return gover.Compare(list[i], list[j]) < 0
|
||||
@@ -74,21 +83,38 @@ func (r *toolchainRepo) Versions(ctx context.Context, prefix string) (*Versions,
|
||||
}
|
||||
|
||||
func (r *toolchainRepo) Stat(ctx context.Context, rev string) (*RevInfo, error) {
|
||||
// If we're asking about "go" (not "toolchain"), pretend to have
|
||||
// all earlier Go versions available without network access:
|
||||
// we will provide those ourselves, at least in GOTOOLCHAIN=auto mode.
|
||||
if r.path == "go" && gover.Compare(rev, gover.Local()) <= 0 {
|
||||
return &RevInfo{Version: rev}, nil
|
||||
}
|
||||
|
||||
// Convert rev to DL version and stat that to make sure it exists.
|
||||
// In theory the go@ versions should be like 1.21.0
|
||||
// and the toolchain@ versions should be like go1.21.0
|
||||
// but people will type the wrong one, and so we accept
|
||||
// both and silently correct it to the standard form.
|
||||
prefix := ""
|
||||
v := rev
|
||||
v = strings.TrimPrefix(v, "go")
|
||||
if r.path == "toolchain" {
|
||||
prefix = "go"
|
||||
}
|
||||
|
||||
if !gover.IsValid(v) {
|
||||
return nil, fmt.Errorf("invalid %s version %s", r.path, rev)
|
||||
}
|
||||
|
||||
// If we're asking about "go" (not "toolchain"), pretend to have
|
||||
// all earlier Go versions available without network access:
|
||||
// we will provide those ourselves, at least in GOTOOLCHAIN=auto mode.
|
||||
if r.path == "go" && gover.Compare(v, gover.Local()) <= 0 {
|
||||
return &RevInfo{Version: prefix + v}, nil
|
||||
}
|
||||
|
||||
// Similarly, if we're asking about *exactly* the current toolchain,
|
||||
// we don't need to access the network to know that it exists.
|
||||
if r.path == "toolchain" && v == gover.Local() {
|
||||
return &RevInfo{Version: prefix + v}, nil
|
||||
}
|
||||
|
||||
if gover.IsLang(v) {
|
||||
// We can only use a language (development) version if the current toolchain
|
||||
// implements that version, and the two checks above have ruled that out.
|
||||
return nil, fmt.Errorf("go language version %s is not a toolchain version", rev)
|
||||
}
|
||||
|
||||
|
||||
@@ -239,10 +239,13 @@ func (q *query) matchesPath(path string) bool {
|
||||
// canMatchInModule reports whether the given module path can potentially
|
||||
// contain q.pattern.
|
||||
func (q *query) canMatchInModule(mPath string) bool {
|
||||
if gover.IsToolchain(mPath) {
|
||||
return false
|
||||
}
|
||||
if q.canMatchWildcardInModule != nil {
|
||||
return q.canMatchWildcardInModule(mPath)
|
||||
}
|
||||
return str.HasPathPrefix(q.pattern, mPath) && !gover.IsToolchain(mPath)
|
||||
return str.HasPathPrefix(q.pattern, mPath)
|
||||
}
|
||||
|
||||
// pathOnce invokes f to generate the pathSet for the given path,
|
||||
|
||||
@@ -110,7 +110,13 @@ func ListModules(ctx context.Context, args []string, mode ListMode, reuseFile st
|
||||
|
||||
if err == nil {
|
||||
requirements = rs
|
||||
if !ExplicitWriteGoMod {
|
||||
// TODO(#61605): The extra ListU clause fixes a problem with Go 1.21rc3
|
||||
// where "go mod tidy" and "go list -m -u all" fight over whether the go.sum
|
||||
// should be considered up-to-date. The fix for now is to always treat the
|
||||
// go.sum as up-to-date during list -m -u. Probably the right fix is more targeted,
|
||||
// but in general list -u is looking up other checksums in the checksum database
|
||||
// that won't be necessary later, so it makes sense not to write the go.sum back out.
|
||||
if !ExplicitWriteGoMod && mode&ListU == 0 {
|
||||
err = commitRequirements(ctx, WriteOpts{})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -473,7 +473,11 @@ func newQueryMatcher(path string, query, current string, allowed AllowedFunc) (*
|
||||
// AllowedFunc of qm.
|
||||
func (qm *queryMatcher) allowsVersion(ctx context.Context, v string) bool {
|
||||
if qm.prefix != "" && !strings.HasPrefix(v, qm.prefix) {
|
||||
return false
|
||||
if gover.IsToolchain(qm.path) && strings.TrimSuffix(qm.prefix, ".") == v {
|
||||
// Allow 1.21 to match "1.21." prefix.
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if qm.filter != nil && !qm.filter(v) {
|
||||
return false
|
||||
|
||||
@@ -1363,65 +1363,87 @@ func (r *runTestActor) Act(b *work.Builder, ctx context.Context, a *work.Action)
|
||||
ctx, cancel := context.WithTimeout(ctx, testKillTimeout)
|
||||
defer cancel()
|
||||
|
||||
cmd := exec.CommandContext(ctx, args[0], args[1:]...)
|
||||
cmd.Dir = a.Package.Dir
|
||||
|
||||
env := slices.Clip(cfg.OrigEnv)
|
||||
env = base.AppendPATH(env)
|
||||
env = base.AppendPWD(env, cmd.Dir)
|
||||
cmd.Env = env
|
||||
if addToEnv != "" {
|
||||
cmd.Env = append(cmd.Env, addToEnv)
|
||||
}
|
||||
|
||||
cmd.Stdout = stdout
|
||||
cmd.Stderr = stdout
|
||||
|
||||
// If there are any local SWIG dependencies, we want to load
|
||||
// the shared library from the build directory.
|
||||
if a.Package.UsesSwig() {
|
||||
env := cmd.Env
|
||||
found := false
|
||||
prefix := "LD_LIBRARY_PATH="
|
||||
for i, v := range env {
|
||||
if strings.HasPrefix(v, prefix) {
|
||||
env[i] = v + ":."
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
env = append(env, "LD_LIBRARY_PATH=.")
|
||||
}
|
||||
cmd.Env = env
|
||||
}
|
||||
// Now we're ready to actually run the command.
|
||||
//
|
||||
// If the -o flag is set, or if at some point we change cmd/go to start
|
||||
// copying test executables into the build cache, we may run into spurious
|
||||
// ETXTBSY errors on Unix platforms (see https://go.dev/issue/22315).
|
||||
//
|
||||
// Since we know what causes those, and we know that they should resolve
|
||||
// quickly (the ETXTBSY error will resolve as soon as the subprocess
|
||||
// holding the descriptor open reaches its 'exec' call), we retry them
|
||||
// in a loop.
|
||||
|
||||
var (
|
||||
cmd *exec.Cmd
|
||||
t0 time.Time
|
||||
cancelKilled = false
|
||||
cancelSignaled = false
|
||||
)
|
||||
cmd.Cancel = func() error {
|
||||
if base.SignalTrace == nil {
|
||||
err := cmd.Process.Kill()
|
||||
for {
|
||||
cmd = exec.CommandContext(ctx, args[0], args[1:]...)
|
||||
cmd.Dir = a.Package.Dir
|
||||
|
||||
env := slices.Clip(cfg.OrigEnv)
|
||||
env = base.AppendPATH(env)
|
||||
env = base.AppendPWD(env, cmd.Dir)
|
||||
cmd.Env = env
|
||||
if addToEnv != "" {
|
||||
cmd.Env = append(cmd.Env, addToEnv)
|
||||
}
|
||||
|
||||
cmd.Stdout = stdout
|
||||
cmd.Stderr = stdout
|
||||
|
||||
// If there are any local SWIG dependencies, we want to load
|
||||
// the shared library from the build directory.
|
||||
if a.Package.UsesSwig() {
|
||||
env := cmd.Env
|
||||
found := false
|
||||
prefix := "LD_LIBRARY_PATH="
|
||||
for i, v := range env {
|
||||
if strings.HasPrefix(v, prefix) {
|
||||
env[i] = v + ":."
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
env = append(env, "LD_LIBRARY_PATH=.")
|
||||
}
|
||||
cmd.Env = env
|
||||
}
|
||||
|
||||
cmd.Cancel = func() error {
|
||||
if base.SignalTrace == nil {
|
||||
err := cmd.Process.Kill()
|
||||
if err == nil {
|
||||
cancelKilled = true
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Send a quit signal in the hope that the program will print
|
||||
// a stack trace and exit.
|
||||
err := cmd.Process.Signal(base.SignalTrace)
|
||||
if err == nil {
|
||||
cancelKilled = true
|
||||
cancelSignaled = true
|
||||
}
|
||||
return err
|
||||
}
|
||||
cmd.WaitDelay = testWaitDelay
|
||||
|
||||
// Send a quit signal in the hope that the program will print
|
||||
// a stack trace and exit.
|
||||
err := cmd.Process.Signal(base.SignalTrace)
|
||||
if err == nil {
|
||||
cancelSignaled = true
|
||||
base.StartSigHandlers()
|
||||
t0 = time.Now()
|
||||
err = cmd.Run()
|
||||
|
||||
if !isETXTBSY(err) {
|
||||
// We didn't hit the race in #22315, so there is no reason to retry the
|
||||
// command.
|
||||
break
|
||||
}
|
||||
return err
|
||||
}
|
||||
cmd.WaitDelay = testWaitDelay
|
||||
|
||||
base.StartSigHandlers()
|
||||
t0 := time.Now()
|
||||
err = cmd.Run()
|
||||
out := buf.Bytes()
|
||||
a.TestOutput = &buf
|
||||
t := fmt.Sprintf("%.3fs", time.Since(t0).Seconds())
|
||||
|
||||
12
src/cmd/go/internal/test/test_nonunix.go
Normal file
12
src/cmd/go/internal/test/test_nonunix.go
Normal file
@@ -0,0 +1,12 @@
|
||||
// Copyright 2023 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !unix
|
||||
|
||||
package test
|
||||
|
||||
func isETXTBSY(err error) bool {
|
||||
// syscall.ETXTBSY is only meaningful on Unix platforms.
|
||||
return false
|
||||
}
|
||||
16
src/cmd/go/internal/test/test_unix.go
Normal file
16
src/cmd/go/internal/test/test_unix.go
Normal file
@@ -0,0 +1,16 @@
|
||||
// Copyright 2023 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build unix
|
||||
|
||||
package test
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func isETXTBSY(err error) bool {
|
||||
return errors.Is(err, syscall.ETXTBSY)
|
||||
}
|
||||
@@ -61,7 +61,7 @@ func init() {
|
||||
cf.String("run", "", "")
|
||||
cf.Bool("short", false, "")
|
||||
cf.String("skip", "", "")
|
||||
cf.DurationVar(&testTimeout, "timeout", 10*time.Minute, "")
|
||||
cf.DurationVar(&testTimeout, "timeout", 10*time.Minute, "") // known to cmd/dist
|
||||
cf.String("fuzztime", "", "")
|
||||
cf.String("fuzzminimizetime", "", "")
|
||||
cf.StringVar(&testTrace, "trace", "", "")
|
||||
|
||||
@@ -131,7 +131,7 @@ func Select() {
|
||||
} else {
|
||||
min, suffix, plus := strings.Cut(gotoolchain, "+") // go1.2.3+auto
|
||||
if min != "local" {
|
||||
v := gover.FromToolchain(gotoolchain)
|
||||
v := gover.FromToolchain(min)
|
||||
if v == "" {
|
||||
if plus {
|
||||
base.Fatalf("invalid GOTOOLCHAIN %q: invalid minimum toolchain %q", gotoolchain, min)
|
||||
|
||||
@@ -212,16 +212,22 @@ func get(security SecurityMode, url *urlpkg.URL) (*Response, error) {
|
||||
}
|
||||
}
|
||||
|
||||
if res == nil || res.Body == nil {
|
||||
if err != nil {
|
||||
// Per the docs for [net/http.Client.Do], “On error, any Response can be
|
||||
// ignored. A non-nil Response with a non-nil error only occurs when
|
||||
// CheckRedirect fails, and even then the returned Response.Body is
|
||||
// already closed.”
|
||||
release()
|
||||
} else {
|
||||
body := res.Body
|
||||
res.Body = hookCloser{
|
||||
ReadCloser: body,
|
||||
afterClose: release,
|
||||
}
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// “If the returned error is nil, the Response will contain a non-nil Body
|
||||
// which the user is expected to close.”
|
||||
body := res.Body
|
||||
res.Body = hookCloser{
|
||||
ReadCloser: body,
|
||||
afterClose: release,
|
||||
}
|
||||
return url, res, err
|
||||
}
|
||||
|
||||
|
||||
@@ -175,7 +175,11 @@ func main() {
|
||||
if used > 0 {
|
||||
helpArg += " " + strings.Join(args[:used], " ")
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "go %s: unknown command\nRun 'go help%s' for usage.\n", cfg.CmdName, helpArg)
|
||||
cmdName := cfg.CmdName
|
||||
if cmdName == "" {
|
||||
cmdName = args[0]
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "go %s: unknown command\nRun 'go help%s' for usage.\n", cmdName, helpArg)
|
||||
base.SetExitStatus(2)
|
||||
base.Exit()
|
||||
}
|
||||
|
||||
@@ -49,6 +49,7 @@ func scriptConditions() map[string]script.Cond {
|
||||
add("git", lazyBool("the 'git' executable exists and provides the standard CLI", hasWorkingGit))
|
||||
add("GODEBUG", script.PrefixCondition("GODEBUG contains <suffix>", hasGodebug))
|
||||
add("GOEXPERIMENT", script.PrefixCondition("GOEXPERIMENT <suffix> is enabled", hasGoexperiment))
|
||||
add("go-builder", script.BoolCondition("GO_BUILDER_NAME is non-empty", testenv.Builder() != ""))
|
||||
add("link", lazyBool("testenv.HasLink()", testenv.HasLink))
|
||||
add("mismatched-goroot", script.Condition("test's GOROOT_FINAL does not match the real GOROOT", isMismatchedGoroot))
|
||||
add("msan", sysCondition("-msan", platform.MSanSupported, true))
|
||||
|
||||
2
src/cmd/go/testdata/script/README
vendored
2
src/cmd/go/testdata/script/README
vendored
@@ -398,6 +398,8 @@ The available conditions are:
|
||||
GOOS/GOARCH supports -fuzz with instrumentation
|
||||
[git]
|
||||
the 'git' executable exists and provides the standard CLI
|
||||
[go-builder]
|
||||
GO_BUILDER_NAME is non-empty
|
||||
[link]
|
||||
testenv.HasLink()
|
||||
[mismatched-goroot]
|
||||
|
||||
@@ -45,6 +45,12 @@ stderr 'compile.*-pgoprofile=.*b(/|\\\\)default\.pgo.*b(/|\\\\)b_test\.go'
|
||||
stderr 'compile.*-pgoprofile=.*b(/|\\\\)default\.pgo.*dep(/|\\\\)dep\.go'
|
||||
! stderr 'compile.*-pgoprofile=.*nopgo(/|\\\\)nopgo_test\.go'
|
||||
|
||||
# test-only dependencies also have profiles attached
|
||||
stderr 'compile.*-pgoprofile=.*a(/|\\\\)default\.pgo.*testdep(/|\\\\)testdep\.go'
|
||||
stderr 'compile.*-pgoprofile=.*b(/|\\\\)default\.pgo.*testdep(/|\\\\)testdep\.go'
|
||||
stderr 'compile.*-pgoprofile=.*a(/|\\\\)default\.pgo.*testdep2(/|\\\\)testdep2\.go'
|
||||
stderr 'compile.*-pgoprofile=.*b(/|\\\\)default\.pgo.*testdep2(/|\\\\)testdep2\.go'
|
||||
|
||||
# go list -deps prints packages built multiple times.
|
||||
go list -pgo=auto -deps ./a ./b ./nopgo
|
||||
stdout 'test/dep \[test/a\]'
|
||||
@@ -66,6 +72,7 @@ func main() {}
|
||||
-- a/a_test.go --
|
||||
package main
|
||||
import "testing"
|
||||
import _ "test/testdep"
|
||||
func TestA(*testing.T) {}
|
||||
-- a/default.pgo --
|
||||
-- b/b.go --
|
||||
@@ -76,6 +83,7 @@ func main() {}
|
||||
-- b/b_test.go --
|
||||
package main
|
||||
import "testing"
|
||||
import _ "test/testdep"
|
||||
func TestB(*testing.T) {}
|
||||
-- b/default.pgo --
|
||||
-- nopgo/nopgo.go --
|
||||
@@ -94,3 +102,8 @@ import _ "test/dep3"
|
||||
package dep2
|
||||
-- dep3/dep3.go --
|
||||
package dep3
|
||||
-- testdep/testdep.go --
|
||||
package testdep
|
||||
import _ "test/testdep2"
|
||||
-- testdep2/testdep2.go --
|
||||
package testdep2
|
||||
|
||||
2
src/cmd/go/testdata/script/go_badcmd.txt
vendored
Normal file
2
src/cmd/go/testdata/script/go_badcmd.txt
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
! go asdf
|
||||
stderr '^go asdf: unknown command'
|
||||
@@ -1,4 +1,5 @@
|
||||
[compiler:gccgo] skip
|
||||
[short] skip 'builds and links another cmd/go'
|
||||
|
||||
mkdir $WORK/new/bin
|
||||
|
||||
@@ -9,15 +10,18 @@ mkdir $WORK/new/bin
|
||||
# new cmd/go is built.
|
||||
env GOROOT_FINAL=
|
||||
|
||||
# $GOROOT/bin/go is whatever the user has already installed
|
||||
# (using make.bash or similar). We can't make assumptions about what
|
||||
# options it may have been built with, such as -trimpath or GOROOT_FINAL.
|
||||
# Instead, we build a fresh copy of the binary with known settings.
|
||||
go build -o $WORK/new/bin/go$GOEXE cmd/go &
|
||||
go build -o $WORK/bin/check$GOEXE check.go &
|
||||
go build -trimpath -o $WORK/bin/check$GOEXE check.go &
|
||||
wait
|
||||
|
||||
env TESTGOROOT=$GOROOT
|
||||
env GOROOT=
|
||||
|
||||
# Relocated Executable
|
||||
# cp $TESTGOROOT/bin/go$GOEXE $WORK/new/bin/go$GOEXE
|
||||
exec $WORK/bin/check$GOEXE $WORK/new/bin/go$GOEXE $TESTGOROOT
|
||||
|
||||
# Relocated Tree:
|
||||
|
||||
91
src/cmd/go/testdata/script/goroot_executable_trimpath.txt
vendored
Normal file
91
src/cmd/go/testdata/script/goroot_executable_trimpath.txt
vendored
Normal file
@@ -0,0 +1,91 @@
|
||||
# Regression test for https://go.dev/issue/62119:
|
||||
# A 'go' command cross-compiled with a different GOHOSTOS
|
||||
# should be able to locate its GOROOT using os.Executable.
|
||||
#
|
||||
# (This also tests a 'go' command built with -trimpath
|
||||
# that is not cross-compiled, since we need to build that
|
||||
# configuration for the test anyway.)
|
||||
|
||||
[short] skip 'builds and links another cmd/go'
|
||||
|
||||
mkdir $WORK/new/bin
|
||||
mkdir $WORK/new/bin/${GOOS}_${GOARCH}
|
||||
|
||||
# In this test, we are specifically checking the logic for deriving
|
||||
# the value of GOROOT from os.Executable when runtime.GOROOT is
|
||||
# trimmed away.
|
||||
# GOROOT_FINAL changes the default behavior of runtime.GOROOT,
|
||||
# so we explicitly clear it to remove it as a confounding variable.
|
||||
env GOROOT_FINAL=
|
||||
|
||||
# $GOROOT/bin/go is whatever the user has already installed
|
||||
# (using make.bash or similar). We can't make assumptions about what
|
||||
# options it may have been built with, such as -trimpath or GOROOT_FINAL.
|
||||
# Instead, we build a fresh copy of the binary with known settings.
|
||||
go build -trimpath -o $WORK/new/bin/go$GOEXE cmd/go &
|
||||
go build -trimpath -o $WORK/bin/check$GOEXE check.go &
|
||||
wait
|
||||
|
||||
env TESTGOROOT=$GOROOT
|
||||
env GOROOT=
|
||||
|
||||
# Relocated Executable
|
||||
# Since we built with -trimpath and the binary isn't installed in a
|
||||
# normal-looking GOROOT, this command should fail.
|
||||
|
||||
! exec $WORK/new/bin/go$GOEXE env GOROOT
|
||||
stderr '^go: cannot find GOROOT directory: ''go'' binary is trimmed and GOROOT is not set$'
|
||||
|
||||
# Cross-compiled binaries in cmd are installed to a ${GOOS}_${GOARCH} subdirectory,
|
||||
# so we also want to try a copy there.
|
||||
# (Note that the script engine's 'exec' engine already works around
|
||||
# https://go.dev/issue/22315, so we don't have to do that explicitly in the
|
||||
# 'check' program we use later.)
|
||||
cp $WORK/new/bin/go$GOEXE $WORK/new/bin/${GOOS}_${GOARCH}/go$GOEXE
|
||||
! exec $WORK/new/bin/${GOOS}_${GOARCH}/go$GOEXE env GOROOT
|
||||
stderr '^go: cannot find GOROOT directory: ''go'' binary is trimmed and GOROOT is not set$'
|
||||
|
||||
# Relocated Tree:
|
||||
# If the binary is sitting in a bin dir next to ../pkg/tool, that counts as a GOROOT,
|
||||
# so it should find the new tree.
|
||||
mkdir $WORK/new/pkg/tool
|
||||
exec $WORK/bin/check$GOEXE $WORK/new/bin/go$GOEXE $WORK/new
|
||||
exec $WORK/bin/check$GOEXE $WORK/new/bin/${GOOS}_${GOARCH}/go$GOEXE $WORK/new
|
||||
|
||||
-- check.go --
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func main() {
|
||||
exe := os.Args[1]
|
||||
want := os.Args[2]
|
||||
cmd := exec.Command(exe, "env", "GOROOT")
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%s env GOROOT: %v, %s\n", exe, err, out)
|
||||
os.Exit(1)
|
||||
}
|
||||
goroot, err := filepath.EvalSymlinks(strings.TrimSpace(string(out)))
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
want, err = filepath.EvalSymlinks(want)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
if !strings.EqualFold(goroot, want) {
|
||||
fmt.Fprintf(os.Stderr, "go env GOROOT:\nhave %s\nwant %s\n", goroot, want)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "go env GOROOT: %s\n", goroot)
|
||||
|
||||
}
|
||||
@@ -34,9 +34,9 @@ env GOTOOLCHAIN=go1.600+auto
|
||||
go version
|
||||
stdout go1.600
|
||||
|
||||
env GOTOOLCHAIN=go1.400+auto
|
||||
env GOTOOLCHAIN=go1.400.0+auto
|
||||
go version
|
||||
stdout go1.400
|
||||
stdout go1.400.0
|
||||
|
||||
# GOTOOLCHAIN=version+path sets a minimum too.
|
||||
env GOTOOLCHAIN=go1.600+path
|
||||
|
||||
@@ -43,11 +43,13 @@ env GOSUMDB=$oldsumdb
|
||||
# Test a real GOTOOLCHAIN
|
||||
[short] skip
|
||||
[!net:golang.org] skip
|
||||
[!net:sum.golang.org] skip
|
||||
[!GOOS:darwin] [!GOOS:windows] [!GOOS:linux] skip
|
||||
[!GOARCH:amd64] [!GOARCH:arm64] skip
|
||||
|
||||
env GOPROXY=
|
||||
env GOSUMDB=
|
||||
[go-builder] env GOSUMDB=
|
||||
[!go-builder] env GOSUMDB=sum.golang.org # Set explicitly in case GOROOT/go.env is modified.
|
||||
env GOTOOLCHAIN=go1.20.1
|
||||
|
||||
# Avoid resolving a "go1.20.1" from the user's real $PATH.
|
||||
|
||||
@@ -8,11 +8,12 @@ env TESTGO_VERSION=go1.21pre3
|
||||
# Compile a fake toolchain to put in the path under various names.
|
||||
env GOTOOLCHAIN=
|
||||
mkdir $WORK/bin
|
||||
[!GOOS:plan9] env PATH=$WORK/bin${:}$PATH
|
||||
[GOOS:plan9] env path=$WORK/bin${:}$path
|
||||
go build -o $WORK/bin/ ./fakego.go # adds .exe extension implicitly on Windows
|
||||
cp $WORK/bin/fakego$GOEXE $WORK/bin/go1.50.0$GOEXE
|
||||
|
||||
[!GOOS:plan9] env PATH=$WORK/bin
|
||||
[GOOS:plan9] env path=$WORK/bin
|
||||
|
||||
go version
|
||||
stdout go1.21pre3
|
||||
|
||||
|
||||
@@ -1,6 +1,15 @@
|
||||
[!net:golang.org] skip
|
||||
[!net:proxy.golang.org] skip
|
||||
|
||||
env GOPROXY=
|
||||
# In the Go project's official release GOPROXY defaults to proxy.golang.org,
|
||||
# but it may be changed in GOROOT/go.env (such as in third-party
|
||||
# distributions).
|
||||
#
|
||||
# Make sure it is in use here, because the server for releases not served
|
||||
# through the proxy (https://golang.org/toolchain?go-get=1) currently only
|
||||
# serves the latest patch release for each of the supported stable releases.
|
||||
|
||||
[go-builder] env GOPROXY=
|
||||
[!go-builder] env GOPROXY=https://proxy.golang.org
|
||||
|
||||
go list -m -versions go
|
||||
stdout 1.20.1 # among others
|
||||
|
||||
26
src/cmd/go/testdata/script/list_issue_59905.txt
vendored
26
src/cmd/go/testdata/script/list_issue_59905.txt
vendored
@@ -1,8 +1,13 @@
|
||||
# Expect no panic
|
||||
go list -f '{{if .DepsErrors}}{{.DepsErrors}}{{end}}' -export -e -deps
|
||||
cmpenv stdout wanterr
|
||||
cmpenv stdout wanterr_59905
|
||||
|
||||
-- wanterr --
|
||||
# Expect no panic (Issue 61816)
|
||||
cp level1b_61816.txt level1b/pkg.go
|
||||
go list -f '{{if .DepsErrors}}{{.DepsErrors}}{{end}}' -export -e -deps
|
||||
cmpenv stdout wanterr_61816
|
||||
|
||||
-- wanterr_59905 --
|
||||
[# test/main/level1a
|
||||
level1a${/}pkg.go:5:2: level2x redeclared in this block
|
||||
level1a${/}pkg.go:4:2: other declaration of level2x
|
||||
@@ -14,6 +19,23 @@ level1b${/}pkg.go:5:2: level2x redeclared in this block
|
||||
level1b${/}pkg.go:5:2: "test/main/level1b/level2y" imported as level2x and not used
|
||||
level1b${/}pkg.go:8:39: undefined: level2y
|
||||
]
|
||||
-- wanterr_61816 --
|
||||
[level1b${/}pkg.go:4:2: package foo is not in std ($GOROOT${/}src${/}foo)]
|
||||
[# test/main/level1a
|
||||
level1a${/}pkg.go:5:2: level2x redeclared in this block
|
||||
level1a${/}pkg.go:4:2: other declaration of level2x
|
||||
level1a${/}pkg.go:5:2: "test/main/level1a/level2y" imported as level2x and not used
|
||||
level1a${/}pkg.go:8:39: undefined: level2y
|
||||
level1b${/}pkg.go:4:2: package foo is not in std ($GOROOT${/}src${/}foo)]
|
||||
-- level1b_61816.txt --
|
||||
package level1b
|
||||
|
||||
import (
|
||||
"foo"
|
||||
)
|
||||
|
||||
func Print() { println(level2x.Value, level2y.Value) }
|
||||
|
||||
-- go.mod --
|
||||
module test/main
|
||||
|
||||
|
||||
19
src/cmd/go/testdata/script/mod_get_insecure_redirect.txt
vendored
Normal file
19
src/cmd/go/testdata/script/mod_get_insecure_redirect.txt
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
# golang.org/issue/29591: 'go get' was following plain-HTTP redirects even without -insecure (now replaced by GOINSECURE).
|
||||
# golang.org/issue/61877: 'go get' would panic in case of an insecure redirect in module mode
|
||||
|
||||
[!git] skip
|
||||
|
||||
env GOPRIVATE=vcs-test.golang.org
|
||||
|
||||
! go get -d vcs-test.golang.org/insecure/go/insecure
|
||||
stderr 'redirected .* to insecure URL'
|
||||
|
||||
[short] stop 'builds a git repo'
|
||||
|
||||
env GOINSECURE=vcs-test.golang.org/insecure/go/insecure
|
||||
go get -d vcs-test.golang.org/insecure/go/insecure
|
||||
|
||||
-- go.mod --
|
||||
module example
|
||||
go 1.21
|
||||
|
||||
43
src/cmd/go/testdata/script/mod_get_toolchain.txt
vendored
43
src/cmd/go/testdata/script/mod_get_toolchain.txt
vendored
@@ -1,5 +1,5 @@
|
||||
# setup
|
||||
env TESTGO_VERSION=go1.99.0
|
||||
env TESTGO_VERSION=go1.99rc1
|
||||
env TESTGO_VERSION_SWITCH=switch
|
||||
|
||||
# go get go should use the latest Go 1.23
|
||||
@@ -7,28 +7,28 @@ cp go.mod.orig go.mod
|
||||
go get go
|
||||
stderr '^go: upgraded go 1.21 => 1.23.9$'
|
||||
grep 'go 1.23.9' go.mod
|
||||
grep 'toolchain go1.99.0' go.mod
|
||||
grep 'toolchain go1.99rc1' go.mod
|
||||
|
||||
# go get go@1.23 should use the latest Go 1.23
|
||||
cp go.mod.orig go.mod
|
||||
go get go@1.23
|
||||
stderr '^go: upgraded go 1.21 => 1.23.9$'
|
||||
grep 'go 1.23.9' go.mod
|
||||
grep 'toolchain go1.99.0' go.mod
|
||||
grep 'toolchain go1.99rc1' go.mod
|
||||
|
||||
# go get go@1.22 should use the latest Go 1.22
|
||||
cp go.mod.orig go.mod
|
||||
go get go@1.22
|
||||
stderr '^go: upgraded go 1.21 => 1.22.9$'
|
||||
grep 'go 1.22.9' go.mod
|
||||
grep 'toolchain go1.99.0' go.mod
|
||||
grep 'toolchain go1.99rc1' go.mod
|
||||
|
||||
# go get go@patch should use the latest patch release
|
||||
go get go@1.22.1
|
||||
go get go@patch
|
||||
stderr '^go: upgraded go 1.22.1 => 1.22.9$'
|
||||
grep 'go 1.22.9' go.mod
|
||||
grep 'toolchain go1.99.0' go.mod
|
||||
grep 'toolchain go1.99rc1' go.mod
|
||||
|
||||
# go get go@1.24 does NOT find the release candidate
|
||||
cp go.mod.orig go.mod
|
||||
@@ -40,20 +40,20 @@ cp go.mod.orig go.mod
|
||||
go get go@1.24rc1
|
||||
stderr '^go: upgraded go 1.21 => 1.24rc1$'
|
||||
grep 'go 1.24rc1' go.mod
|
||||
grep 'toolchain go1.99.0' go.mod
|
||||
grep 'toolchain go1.99rc1' go.mod
|
||||
|
||||
# go get go@latest finds the latest Go 1.23
|
||||
cp go.mod.orig go.mod
|
||||
go get go@latest
|
||||
stderr '^go: upgraded go 1.21 => 1.23.9$'
|
||||
grep 'go 1.23.9' go.mod
|
||||
grep 'toolchain go1.99.0' go.mod
|
||||
grep 'toolchain go1.99rc1' go.mod
|
||||
|
||||
# Again, with toolchains.
|
||||
|
||||
# go get toolchain should find go1.999testmod.
|
||||
go get toolchain
|
||||
stderr '^go: upgraded toolchain go1.99.0 => go1.999testmod$'
|
||||
stderr '^go: upgraded toolchain go1.99rc1 => go1.999testmod$'
|
||||
grep 'go 1.23.9' go.mod
|
||||
grep 'toolchain go1.999testmod' go.mod
|
||||
|
||||
@@ -96,6 +96,33 @@ stderr '^go: added toolchain go1.999testmod$'
|
||||
grep 'go 1.21' go.mod
|
||||
grep 'toolchain go1.999testmod' go.mod
|
||||
|
||||
# Bug fixes.
|
||||
|
||||
# go get go@garbage should fail but not crash
|
||||
! go get go@garbage
|
||||
! stderr panic
|
||||
stderr '^go: invalid go version garbage$'
|
||||
|
||||
# go get go@go1.21.0 is OK - we silently correct to 1.21.0
|
||||
go get go@1.19
|
||||
go get go@go1.21.0
|
||||
stderr '^go: upgraded go 1.19 => 1.21.0'
|
||||
|
||||
# go get toolchain@1.24rc1 is OK too.
|
||||
go get toolchain@1.24rc1
|
||||
stderr '^go: downgraded toolchain go1.999testmod => go1.24rc1$'
|
||||
|
||||
# go get go@1.21 should work if we are the Go 1.21 language version,
|
||||
# even though there's no toolchain for it.
|
||||
# (Older versions resolve to the latest release in that version, so for example
|
||||
# go get go@1.20 might resolve to 1.20.9, but if we're the devel copy of
|
||||
# Go 1.21, there's no release yet to resolve to, so we resolve to ourselves.)
|
||||
env TESTGO_VERSION=go1.21
|
||||
go get go@1.19 toolchain@none
|
||||
go get go@1.21
|
||||
grep 'go 1.21$' go.mod
|
||||
! grep toolchain go.mod
|
||||
|
||||
-- go.mod.orig --
|
||||
module m
|
||||
|
||||
|
||||
@@ -10,9 +10,9 @@ env GOPROXY=
|
||||
env GOSUMDB=
|
||||
|
||||
# github.com/russross/blackfriday v2.0.0+incompatible exists,
|
||||
# and should be resolved if we ask for v2.0 explicitly.
|
||||
# and should be resolved if we ask for it explicitly.
|
||||
|
||||
go list -m github.com/russross/blackfriday@v2.0
|
||||
go list -m github.com/russross/blackfriday@v2.0.0+incompatible
|
||||
stdout '^github.com/russross/blackfriday v2\.0\.0\+incompatible$'
|
||||
|
||||
# blackfriday v1.5.2 has a go.mod file, so v1.5.2 should be preferred over
|
||||
@@ -27,6 +27,7 @@ stdout '^github.com/russross/blackfriday v1\.'
|
||||
! go list -m github.com/russross/blackfriday@patch
|
||||
stderr '^go: github.com/russross/blackfriday@patch: can''t query version "patch" of module github.com/russross/blackfriday: no existing version is required$'
|
||||
|
||||
|
||||
# If we're fetching directly from version control, ignored +incompatible
|
||||
# versions should also be omitted by 'go list'.
|
||||
|
||||
@@ -38,10 +39,23 @@ stderr '^go: github.com/russross/blackfriday@patch: can''t query version "patch"
|
||||
[!git] stop
|
||||
env GOPROXY=direct
|
||||
|
||||
go list -versions -m github.com/russross/blackfriday github.com/russross/blackfriday
|
||||
go list -versions -m github.com/russross/blackfriday
|
||||
stdout '^github.com/russross/blackfriday v1\.5\.1 v1\.5\.2' # and possibly others
|
||||
! stdout ' v2\.'
|
||||
|
||||
# For this module, v2.1.0 exists and has a go.mod file.
|
||||
# 'go list -m github.com/russross/blackfriday@v2.0' will check
|
||||
# the latest v2.0 tag, discover that it isn't the right module, and stop there
|
||||
# (instead of spending the time to check O(N) previous tags).
|
||||
|
||||
! go list -m github.com/russross/blackfriday@v2.0
|
||||
stderr '^go: module github.com/russross/blackfriday: no matching versions for query "v2\.0\"'
|
||||
|
||||
# (But asking for exactly v2.0.0+incompatible should still succeed.)
|
||||
go list -m github.com/russross/blackfriday@v2.0.0+incompatible
|
||||
stdout '^github.com/russross/blackfriday v2\.0\.0\+incompatible$'
|
||||
|
||||
|
||||
# However, if the latest compatible version does not include a go.mod file,
|
||||
# +incompatible versions should still be listed, as they may still reflect the
|
||||
# intent of the module author.
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
[!net:proxy.golang.org] skip
|
||||
[!net:sum.golang.org] skip
|
||||
|
||||
env GO111MODULE=on
|
||||
env GOSUMDB=
|
||||
[go-builder] env GOSUMDB=
|
||||
[!go-builder] env GOSUMDB=sum.golang.org # Set explicitly in case GOROOT/go.env is modified.
|
||||
env GOPATH=$WORK/gopath1
|
||||
|
||||
# With a file-based proxy with an empty checksum directory,
|
||||
|
||||
22
src/cmd/go/testdata/script/mod_sumdb_golang.txt
vendored
22
src/cmd/go/testdata/script/mod_sumdb_golang.txt
vendored
@@ -1,13 +1,13 @@
|
||||
# Test default GOPROXY and GOSUMDB
|
||||
env GOPROXY=
|
||||
env GOSUMDB=
|
||||
go env GOPROXY
|
||||
stdout '^https://proxy.golang.org,direct$'
|
||||
go env GOSUMDB
|
||||
stdout '^sum.golang.org$'
|
||||
env GOPROXY=https://proxy.golang.org
|
||||
go env GOSUMDB
|
||||
stdout '^sum.golang.org$'
|
||||
[go-builder] env GOPROXY=
|
||||
[go-builder] env GOSUMDB=
|
||||
[go-builder] go env GOPROXY
|
||||
[go-builder] stdout '^https://proxy.golang.org,direct$'
|
||||
[go-builder] go env GOSUMDB
|
||||
[go-builder] stdout '^sum.golang.org$'
|
||||
[go-builder] env GOPROXY=https://proxy.golang.org
|
||||
[go-builder] go env GOSUMDB
|
||||
[go-builder] stdout '^sum.golang.org$'
|
||||
|
||||
# Download direct from github.
|
||||
|
||||
@@ -26,8 +26,8 @@ cp go.sum saved.sum
|
||||
# files not listed in go.sum.
|
||||
|
||||
go clean -modcache
|
||||
env GOSUMDB=
|
||||
env GOPROXY=
|
||||
env GOSUMDB=sum.golang.org
|
||||
env GOPROXY=https://proxy.golang.org,direct
|
||||
|
||||
go list -x -m all # Download go.mod files.
|
||||
! stderr github
|
||||
|
||||
32
src/cmd/go/testdata/script/mod_toolchain_slash.txt
vendored
Normal file
32
src/cmd/go/testdata/script/mod_toolchain_slash.txt
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
[!exec:/bin/sh] skip
|
||||
|
||||
chmod 0777 go1.999999-/run.sh
|
||||
chmod 0777 run.sh
|
||||
|
||||
! go list all
|
||||
! stdout 'RAN SCRIPT'
|
||||
|
||||
cd subdir
|
||||
! go list all
|
||||
! stdout 'RAN SCRIPT'
|
||||
|
||||
-- go.mod --
|
||||
module exploit
|
||||
|
||||
go 1.21
|
||||
toolchain go1.999999-/run.sh
|
||||
-- go1.999999-/run.sh --
|
||||
#!/bin/sh
|
||||
printf 'RAN SCRIPT\n'
|
||||
exit 1
|
||||
-- run.sh --
|
||||
#!/bin/sh
|
||||
printf 'RAN SCRIPT\n'
|
||||
exit 1
|
||||
-- subdir/go.mod --
|
||||
module exploit
|
||||
|
||||
go 1.21
|
||||
toolchain go1.999999-/../../run.sh
|
||||
-- subdir/go1.999999-/README.txt --
|
||||
heh heh heh
|
||||
@@ -2,6 +2,10 @@
|
||||
|
||||
# Verify test -c can output multiple executables to a directory.
|
||||
|
||||
# This test also serves as a regression test for https://go.dev/issue/62221:
|
||||
# prior to the fix for that issue, it occasionally failed with ETXTBSY when
|
||||
# run on Unix platforms.
|
||||
|
||||
go test -c -o $WORK/some/nonexisting/directory/ ./pkg/...
|
||||
exists -exec $WORK/some/nonexisting/directory/pkg1.test$GOEXE
|
||||
exists -exec $WORK/some/nonexisting/directory/pkg2.test$GOEXE
|
||||
@@ -43,4 +47,4 @@ package pkg1
|
||||
package pkg2
|
||||
|
||||
-- anotherpkg/pkg1/pkg1_test.go --
|
||||
package pkg1
|
||||
package pkg1
|
||||
|
||||
@@ -2067,17 +2067,22 @@ func instructionsForProg(p *obj.Prog) []*instruction {
|
||||
return instructionsForStore(p, ins.as, p.To.Reg)
|
||||
|
||||
case ALRW, ALRD:
|
||||
// Set aq to use acquire access ordering, which matches Go's memory requirements.
|
||||
// Set aq to use acquire access ordering
|
||||
ins.funct7 = 2
|
||||
ins.rs1, ins.rs2 = uint32(p.From.Reg), REG_ZERO
|
||||
|
||||
case AADDI, AANDI, AORI, AXORI:
|
||||
inss = instructionsForOpImmediate(p, ins.as, p.Reg)
|
||||
|
||||
case ASCW, ASCD, AAMOSWAPW, AAMOSWAPD, AAMOADDW, AAMOADDD, AAMOANDW, AAMOANDD, AAMOORW, AAMOORD,
|
||||
case ASCW, ASCD:
|
||||
// Set release access ordering
|
||||
ins.funct7 = 1
|
||||
ins.rd, ins.rs1, ins.rs2 = uint32(p.RegTo2), uint32(p.To.Reg), uint32(p.From.Reg)
|
||||
|
||||
case AAMOSWAPW, AAMOSWAPD, AAMOADDW, AAMOADDD, AAMOANDW, AAMOANDD, AAMOORW, AAMOORD,
|
||||
AAMOXORW, AAMOXORD, AAMOMINW, AAMOMIND, AAMOMINUW, AAMOMINUD, AAMOMAXW, AAMOMAXD, AAMOMAXUW, AAMOMAXUD:
|
||||
// Set aq to use acquire access ordering, which matches Go's memory requirements.
|
||||
ins.funct7 = 2
|
||||
// Set aqrl to use acquire & release access ordering
|
||||
ins.funct7 = 3
|
||||
ins.rd, ins.rs1, ins.rs2 = uint32(p.RegTo2), uint32(p.To.Reg), uint32(p.From.Reg)
|
||||
|
||||
case AECALL, AEBREAK, ARDCYCLE, ARDTIME, ARDINSTRET:
|
||||
|
||||
@@ -446,7 +446,7 @@ func machoreloc1(arch *sys.Arch, out *ld.OutBuf, ldr *loader.Loader, s loader.Sy
|
||||
rs := r.Xsym
|
||||
rt := r.Type
|
||||
|
||||
if ldr.SymType(rs) == sym.SHOSTOBJ || rt == objabi.R_PCREL || rt == objabi.R_GOTPCREL || rt == objabi.R_CALL {
|
||||
if rt == objabi.R_PCREL || rt == objabi.R_GOTPCREL || rt == objabi.R_CALL || ldr.SymType(rs) == sym.SHOSTOBJ || ldr.SymType(s) == sym.SINITARR {
|
||||
if ldr.SymDynid(rs) < 0 {
|
||||
ldr.Errorf(s, "reloc %d (%s) to non-macho symbol %s type=%d (%s)", rt, sym.RelocName(arch, rt), ldr.SymName(rs), ldr.SymType(rs), ldr.SymType(rs))
|
||||
return false
|
||||
|
||||
@@ -545,10 +545,11 @@ func machoreloc1(arch *sys.Arch, out *ld.OutBuf, ldr *loader.Loader, s loader.Sy
|
||||
}
|
||||
}
|
||||
|
||||
if ldr.SymType(rs) == sym.SHOSTOBJ || rt == objabi.R_CALLARM64 ||
|
||||
if rt == objabi.R_CALLARM64 ||
|
||||
rt == objabi.R_ARM64_PCREL_LDST8 || rt == objabi.R_ARM64_PCREL_LDST16 ||
|
||||
rt == objabi.R_ARM64_PCREL_LDST32 || rt == objabi.R_ARM64_PCREL_LDST64 ||
|
||||
rt == objabi.R_ADDRARM64 || rt == objabi.R_ARM64_GOTPCREL {
|
||||
rt == objabi.R_ADDRARM64 || rt == objabi.R_ARM64_GOTPCREL ||
|
||||
ldr.SymType(rs) == sym.SHOSTOBJ || ldr.SymType(s) == sym.SINITARR {
|
||||
if ldr.SymDynid(rs) < 0 {
|
||||
ldr.Errorf(s, "reloc %d (%s) to non-macho symbol %s type=%d (%s)", rt, sym.RelocName(arch, rt), ldr.SymName(rs), ldr.SymType(rs), ldr.SymType(rs))
|
||||
return false
|
||||
|
||||
@@ -368,7 +368,9 @@ func (st *relocSymState) relocsym(s loader.Sym, P []byte) {
|
||||
o = 0
|
||||
}
|
||||
} else if target.IsDarwin() {
|
||||
if ldr.SymType(rs) != sym.SHOSTOBJ {
|
||||
if ldr.SymType(rs) != sym.SHOSTOBJ && ldr.SymType(s) != sym.SINITARR {
|
||||
// ld-prime drops the offset in data for SINITARR. We need to use
|
||||
// symbol-targeted relocation. See also machoreloc1.
|
||||
o += ldr.SymValue(rs)
|
||||
}
|
||||
} else if target.IsWindows() {
|
||||
|
||||
@@ -141,10 +141,26 @@ func (d *deadcodePass) flood() {
|
||||
methods = methods[:0]
|
||||
for i := 0; i < relocs.Count(); i++ {
|
||||
r := relocs.At(i)
|
||||
// When build with "-linkshared", we can't tell if the interface
|
||||
// method in itab will be used or not. Ignore the weak attribute.
|
||||
if r.Weak() && !(d.ctxt.linkShared && d.ldr.IsItab(symIdx)) {
|
||||
continue
|
||||
if r.Weak() {
|
||||
convertWeakToStrong := false
|
||||
// When build with "-linkshared", we can't tell if the
|
||||
// interface method in itab will be used or not.
|
||||
// Ignore the weak attribute.
|
||||
if d.ctxt.linkShared && d.ldr.IsItab(symIdx) {
|
||||
convertWeakToStrong = true
|
||||
}
|
||||
// If the program uses plugins, we can no longer treat
|
||||
// relocs from pkg init functions to outlined map init
|
||||
// fragments as weak, since doing so can cause package
|
||||
// init clashes between the main program and the
|
||||
// plugin. See #62430 for more details.
|
||||
if d.ctxt.canUsePlugins && r.Type().IsDirectCall() {
|
||||
convertWeakToStrong = true
|
||||
}
|
||||
if !convertWeakToStrong {
|
||||
// skip this reloc
|
||||
continue
|
||||
}
|
||||
}
|
||||
t := r.Type()
|
||||
switch t {
|
||||
|
||||
@@ -992,6 +992,11 @@ func typeSymbolMangle(name string) string {
|
||||
if strings.HasPrefix(name, "type:runtime.") {
|
||||
return name
|
||||
}
|
||||
if strings.HasPrefix(name, "go:string.") {
|
||||
// String symbols will be grouped to a single go:string.* symbol.
|
||||
// No need to mangle individual symbol names.
|
||||
return name
|
||||
}
|
||||
if len(name) <= 14 && !strings.Contains(name, "@") { // Issue 19529
|
||||
return name
|
||||
}
|
||||
@@ -1006,7 +1011,7 @@ func typeSymbolMangle(name string) string {
|
||||
// instantiated symbol, replace type name in []
|
||||
i := strings.IndexByte(name, '[')
|
||||
j := strings.LastIndexByte(name, ']')
|
||||
if j == -1 {
|
||||
if j == -1 || j <= i {
|
||||
j = len(name)
|
||||
}
|
||||
hash := notsha256.Sum256([]byte(name[i+1 : j]))
|
||||
@@ -1413,6 +1418,10 @@ func (ctxt *Link) hostlink() {
|
||||
// resolving a lazy binding. See issue 38824.
|
||||
// Force eager resolution to work around.
|
||||
argv = append(argv, "-Wl,-flat_namespace", "-Wl,-bind_at_load")
|
||||
if linkerFlagSupported(ctxt.Arch, argv[0], "", "-Wl,-ld_classic") {
|
||||
// Force old linker to work around a bug in Apple's new linker.
|
||||
argv = append(argv, "-Wl,-ld_classic")
|
||||
}
|
||||
}
|
||||
if !combineDwarf {
|
||||
argv = append(argv, "-Wl,-S") // suppress STAB (symbolic debugging) symbols
|
||||
@@ -1893,6 +1902,16 @@ func (ctxt *Link) hostlink() {
|
||||
out = append(out[:i], out[i+len(noPieWarning):]...)
|
||||
}
|
||||
}
|
||||
if ctxt.IsDarwin() {
|
||||
const bindAtLoadWarning = "ld: warning: -bind_at_load is deprecated on macOS\n"
|
||||
if i := bytes.Index(out, []byte(bindAtLoadWarning)); i >= 0 {
|
||||
// -bind_at_load is deprecated with ld-prime, but needed for
|
||||
// correctness with older versions of ld64. Swallow the warning.
|
||||
// TODO: maybe pass -bind_at_load conditionally based on C
|
||||
// linker version.
|
||||
out = append(out[:i], out[i+len(bindAtLoadWarning):]...)
|
||||
}
|
||||
}
|
||||
ctxt.Logf("%s", out)
|
||||
}
|
||||
|
||||
|
||||
@@ -833,9 +833,9 @@ func asmbMacho(ctxt *Link) {
|
||||
ml.data[2] = uint32(linkoff + s1 + s2 + s3 + s4 + s5) /* stroff */
|
||||
ml.data[3] = uint32(s6) /* strsize */
|
||||
|
||||
machodysymtab(ctxt, linkoff+s1+s2)
|
||||
|
||||
if ctxt.LinkMode != LinkExternal {
|
||||
machodysymtab(ctxt, linkoff+s1+s2)
|
||||
|
||||
ml := newMachoLoad(ctxt.Arch, LC_LOAD_DYLINKER, 6)
|
||||
ml.data[0] = 12 /* offset to string */
|
||||
stringtouint32(ml.data[1:], "/usr/lib/dyld")
|
||||
|
||||
@@ -197,6 +197,10 @@ func Main(arch *sys.Arch, theArch Arch) {
|
||||
|
||||
checkStrictDups = *FlagStrictDups
|
||||
|
||||
if ctxt.IsDarwin() && ctxt.BuildMode == BuildModeCShared {
|
||||
*FlagW = true // default to -w in c-shared mode on darwin, see #61229
|
||||
}
|
||||
|
||||
if !buildcfg.Experiment.RegabiWrappers {
|
||||
abiInternalVer = 0
|
||||
}
|
||||
|
||||
@@ -222,7 +222,7 @@ type peLoaderState struct {
|
||||
var comdatDefinitions = make(map[string]int64)
|
||||
|
||||
// Load loads the PE file pn from input.
|
||||
// Symbols from the object file are created via the loader 'l', and
|
||||
// Symbols from the object file are created via the loader 'l',
|
||||
// and a slice of the text symbols is returned.
|
||||
// If an .rsrc section or set of .rsrc$xx sections is found, its symbols are
|
||||
// returned as rsrc.
|
||||
|
||||
6
src/cmd/vendor/golang.org/x/arch/x86/x86asm/plan9x.go
generated
vendored
6
src/cmd/vendor/golang.org/x/arch/x86/x86asm/plan9x.go
generated
vendored
@@ -83,6 +83,12 @@ func GoSyntax(inst Inst, pc uint64, symname SymLookup) string {
|
||||
}
|
||||
}
|
||||
|
||||
if inst.Op == CMP {
|
||||
// Use reads-left-to-right ordering for comparisons.
|
||||
// See issue 60920.
|
||||
args[0], args[1] = args[1], args[0]
|
||||
}
|
||||
|
||||
if args != nil {
|
||||
op += " " + strings.Join(args, ", ")
|
||||
}
|
||||
|
||||
2
src/cmd/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go
generated
vendored
2
src/cmd/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go
generated
vendored
@@ -13,7 +13,7 @@ import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Regexp is a wrapper around regexp.Regexp, where the underlying regexp will be
|
||||
// Regexp is a wrapper around [regexp.Regexp], where the underlying regexp will be
|
||||
// compiled the first time it is needed.
|
||||
type Regexp struct {
|
||||
str string
|
||||
|
||||
2
src/cmd/vendor/golang.org/x/mod/modfile/read.go
generated
vendored
2
src/cmd/vendor/golang.org/x/mod/modfile/read.go
generated
vendored
@@ -65,7 +65,7 @@ type Comments struct {
|
||||
}
|
||||
|
||||
// Comment returns the receiver. This isn't useful by itself, but
|
||||
// a Comments struct is embedded into all the expression
|
||||
// a [Comments] struct is embedded into all the expression
|
||||
// implementation types, and this gives each of those a Comment
|
||||
// method to satisfy the Expr interface.
|
||||
func (c *Comments) Comment() *Comments {
|
||||
|
||||
20
src/cmd/vendor/golang.org/x/mod/modfile/rule.go
generated
vendored
20
src/cmd/vendor/golang.org/x/mod/modfile/rule.go
generated
vendored
@@ -5,17 +5,17 @@
|
||||
// Package modfile implements a parser and formatter for go.mod files.
|
||||
//
|
||||
// The go.mod syntax is described in
|
||||
// https://golang.org/cmd/go/#hdr-The_go_mod_file.
|
||||
// https://pkg.go.dev/cmd/go/#hdr-The_go_mod_file.
|
||||
//
|
||||
// The Parse and ParseLax functions both parse a go.mod file and return an
|
||||
// The [Parse] and [ParseLax] functions both parse a go.mod file and return an
|
||||
// abstract syntax tree. ParseLax ignores unknown statements and may be used to
|
||||
// parse go.mod files that may have been developed with newer versions of Go.
|
||||
//
|
||||
// The File struct returned by Parse and ParseLax represent an abstract
|
||||
// go.mod file. File has several methods like AddNewRequire and DropReplace
|
||||
// that can be used to programmatically edit a file.
|
||||
// The [File] struct returned by Parse and ParseLax represent an abstract
|
||||
// go.mod file. File has several methods like [File.AddNewRequire] and
|
||||
// [File.DropReplace] that can be used to programmatically edit a file.
|
||||
//
|
||||
// The Format function formats a File back to a byte slice which can be
|
||||
// The [Format] function formats a File back to a byte slice which can be
|
||||
// written to a file.
|
||||
package modfile
|
||||
|
||||
@@ -226,7 +226,7 @@ var dontFixRetract VersionFixer = func(_, vers string) (string, error) {
|
||||
// data is the content of the file.
|
||||
//
|
||||
// fix is an optional function that canonicalizes module versions.
|
||||
// If fix is nil, all module versions must be canonical (module.CanonicalVersion
|
||||
// If fix is nil, all module versions must be canonical ([module.CanonicalVersion]
|
||||
// must return the same string).
|
||||
func Parse(file string, data []byte, fix VersionFixer) (*File, error) {
|
||||
return parseToFile(file, data, fix, true)
|
||||
@@ -923,7 +923,7 @@ func (f *File) Format() ([]byte, error) {
|
||||
}
|
||||
|
||||
// Cleanup cleans up the file f after any edit operations.
|
||||
// To avoid quadratic behavior, modifications like DropRequire
|
||||
// To avoid quadratic behavior, modifications like [File.DropRequire]
|
||||
// clear the entry but do not remove it from the slice.
|
||||
// Cleanup cleans out all the cleared entries.
|
||||
func (f *File) Cleanup() {
|
||||
@@ -1075,8 +1075,8 @@ func (f *File) AddNewRequire(path, vers string, indirect bool) {
|
||||
// The requirements in req must specify at most one distinct version for each
|
||||
// module path.
|
||||
//
|
||||
// If any existing requirements may be removed, the caller should call Cleanup
|
||||
// after all edits are complete.
|
||||
// If any existing requirements may be removed, the caller should call
|
||||
// [File.Cleanup] after all edits are complete.
|
||||
func (f *File) SetRequire(req []*Require) {
|
||||
type elem struct {
|
||||
version string
|
||||
|
||||
4
src/cmd/vendor/golang.org/x/mod/modfile/work.go
generated
vendored
4
src/cmd/vendor/golang.org/x/mod/modfile/work.go
generated
vendored
@@ -34,7 +34,7 @@ type Use struct {
|
||||
// data is the content of the file.
|
||||
//
|
||||
// fix is an optional function that canonicalizes module versions.
|
||||
// If fix is nil, all module versions must be canonical (module.CanonicalVersion
|
||||
// If fix is nil, all module versions must be canonical ([module.CanonicalVersion]
|
||||
// must return the same string).
|
||||
func ParseWork(file string, data []byte, fix VersionFixer) (*WorkFile, error) {
|
||||
fs, err := parse(file, data)
|
||||
@@ -83,7 +83,7 @@ func ParseWork(file string, data []byte, fix VersionFixer) (*WorkFile, error) {
|
||||
}
|
||||
|
||||
// Cleanup cleans up the file f after any edit operations.
|
||||
// To avoid quadratic behavior, modifications like DropRequire
|
||||
// To avoid quadratic behavior, modifications like [WorkFile.DropRequire]
|
||||
// clear the entry but do not remove it from the slice.
|
||||
// Cleanup cleans out all the cleared entries.
|
||||
func (f *WorkFile) Cleanup() {
|
||||
|
||||
30
src/cmd/vendor/golang.org/x/mod/module/module.go
generated
vendored
30
src/cmd/vendor/golang.org/x/mod/module/module.go
generated
vendored
@@ -4,7 +4,7 @@
|
||||
|
||||
// Package module defines the module.Version type along with support code.
|
||||
//
|
||||
// The module.Version type is a simple Path, Version pair:
|
||||
// The [module.Version] type is a simple Path, Version pair:
|
||||
//
|
||||
// type Version struct {
|
||||
// Path string
|
||||
@@ -12,7 +12,7 @@
|
||||
// }
|
||||
//
|
||||
// There are no restrictions imposed directly by use of this structure,
|
||||
// but additional checking functions, most notably Check, verify that
|
||||
// but additional checking functions, most notably [Check], verify that
|
||||
// a particular path, version pair is valid.
|
||||
//
|
||||
// # Escaped Paths
|
||||
@@ -140,7 +140,7 @@ type ModuleError struct {
|
||||
Err error
|
||||
}
|
||||
|
||||
// VersionError returns a ModuleError derived from a Version and error,
|
||||
// VersionError returns a [ModuleError] derived from a [Version] and error,
|
||||
// or err itself if it is already such an error.
|
||||
func VersionError(v Version, err error) error {
|
||||
var mErr *ModuleError
|
||||
@@ -169,7 +169,7 @@ func (e *ModuleError) Unwrap() error { return e.Err }
|
||||
// An InvalidVersionError indicates an error specific to a version, with the
|
||||
// module path unknown or specified externally.
|
||||
//
|
||||
// A ModuleError may wrap an InvalidVersionError, but an InvalidVersionError
|
||||
// A [ModuleError] may wrap an InvalidVersionError, but an InvalidVersionError
|
||||
// must not wrap a ModuleError.
|
||||
type InvalidVersionError struct {
|
||||
Version string
|
||||
@@ -193,8 +193,8 @@ func (e *InvalidVersionError) Error() string {
|
||||
func (e *InvalidVersionError) Unwrap() error { return e.Err }
|
||||
|
||||
// An InvalidPathError indicates a module, import, or file path doesn't
|
||||
// satisfy all naming constraints. See CheckPath, CheckImportPath,
|
||||
// and CheckFilePath for specific restrictions.
|
||||
// satisfy all naming constraints. See [CheckPath], [CheckImportPath],
|
||||
// and [CheckFilePath] for specific restrictions.
|
||||
type InvalidPathError struct {
|
||||
Kind string // "module", "import", or "file"
|
||||
Path string
|
||||
@@ -294,7 +294,7 @@ func fileNameOK(r rune) bool {
|
||||
}
|
||||
|
||||
// CheckPath checks that a module path is valid.
|
||||
// A valid module path is a valid import path, as checked by CheckImportPath,
|
||||
// A valid module path is a valid import path, as checked by [CheckImportPath],
|
||||
// with three additional constraints.
|
||||
// First, the leading path element (up to the first slash, if any),
|
||||
// by convention a domain name, must contain only lower-case ASCII letters,
|
||||
@@ -380,7 +380,7 @@ const (
|
||||
// checkPath returns an error describing why the path is not valid.
|
||||
// Because these checks apply to module, import, and file paths,
|
||||
// and because other checks may be applied, the caller is expected to wrap
|
||||
// this error with InvalidPathError.
|
||||
// this error with [InvalidPathError].
|
||||
func checkPath(path string, kind pathKind) error {
|
||||
if !utf8.ValidString(path) {
|
||||
return fmt.Errorf("invalid UTF-8")
|
||||
@@ -532,7 +532,7 @@ var badWindowsNames = []string{
|
||||
// they require ".vN" instead of "/vN", and for all N, not just N >= 2.
|
||||
// SplitPathVersion returns with ok = false when presented with
|
||||
// a path whose last path element does not satisfy the constraints
|
||||
// applied by CheckPath, such as "example.com/pkg/v1" or "example.com/pkg/v1.2".
|
||||
// applied by [CheckPath], such as "example.com/pkg/v1" or "example.com/pkg/v1.2".
|
||||
func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) {
|
||||
if strings.HasPrefix(path, "gopkg.in/") {
|
||||
return splitGopkgIn(path)
|
||||
@@ -582,7 +582,7 @@ func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) {
|
||||
// MatchPathMajor reports whether the semantic version v
|
||||
// matches the path major version pathMajor.
|
||||
//
|
||||
// MatchPathMajor returns true if and only if CheckPathMajor returns nil.
|
||||
// MatchPathMajor returns true if and only if [CheckPathMajor] returns nil.
|
||||
func MatchPathMajor(v, pathMajor string) bool {
|
||||
return CheckPathMajor(v, pathMajor) == nil
|
||||
}
|
||||
@@ -622,7 +622,7 @@ func CheckPathMajor(v, pathMajor string) error {
|
||||
// PathMajorPrefix returns the major-version tag prefix implied by pathMajor.
|
||||
// An empty PathMajorPrefix allows either v0 or v1.
|
||||
//
|
||||
// Note that MatchPathMajor may accept some versions that do not actually begin
|
||||
// Note that [MatchPathMajor] may accept some versions that do not actually begin
|
||||
// with this prefix: namely, it accepts a 'v0.0.0-' prefix for a '.v1'
|
||||
// pathMajor, even though that pathMajor implies 'v1' tagging.
|
||||
func PathMajorPrefix(pathMajor string) string {
|
||||
@@ -643,7 +643,7 @@ func PathMajorPrefix(pathMajor string) string {
|
||||
}
|
||||
|
||||
// CanonicalVersion returns the canonical form of the version string v.
|
||||
// It is the same as semver.Canonical(v) except that it preserves the special build suffix "+incompatible".
|
||||
// It is the same as [semver.Canonical] except that it preserves the special build suffix "+incompatible".
|
||||
func CanonicalVersion(v string) string {
|
||||
cv := semver.Canonical(v)
|
||||
if semver.Build(v) == "+incompatible" {
|
||||
@@ -652,8 +652,8 @@ func CanonicalVersion(v string) string {
|
||||
return cv
|
||||
}
|
||||
|
||||
// Sort sorts the list by Path, breaking ties by comparing Version fields.
|
||||
// The Version fields are interpreted as semantic versions (using semver.Compare)
|
||||
// Sort sorts the list by Path, breaking ties by comparing [Version] fields.
|
||||
// The Version fields are interpreted as semantic versions (using [semver.Compare])
|
||||
// optionally followed by a tie-breaking suffix introduced by a slash character,
|
||||
// like in "v0.0.1/go.mod".
|
||||
func Sort(list []Version) {
|
||||
@@ -793,7 +793,7 @@ func unescapeString(escaped string) (string, bool) {
|
||||
}
|
||||
|
||||
// MatchPrefixPatterns reports whether any path prefix of target matches one of
|
||||
// the glob patterns (as defined by path.Match) in the comma-separated globs
|
||||
// the glob patterns (as defined by [path.Match]) in the comma-separated globs
|
||||
// list. This implements the algorithm used when matching a module path to the
|
||||
// GOPRIVATE environment variable, as described by 'go help module-private'.
|
||||
//
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user