Compare commits


2 Commits

Than McIntosh · 71aaa8bde1 · 2023-07-07 16:49:15 -04:00
[dev.inline] merge with master at 894d24d617

Change-Id: I845eec08108c69228ebcba921f8a807a376d3fae

Than McIntosh · 3aba453b66 · 2023-06-29 17:09:13 +00:00
[dev.inline] add back in codereview.cfg

Add back in an appropriately set up codereview.cfg for this work
branch.

Change-Id: I0e9f649da31c6ea1cbf8ddc1d906c20c41248721
Reviewed-on: https://go-review.googlesource.com/c/go/+/507157
Reviewed-by: Dmitri Shuralyov <dmitshur@golang.org>
TryBot-Bypass: Than McIntosh <thanm@google.com>
Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
259 changed files with 3319 additions and 3735 deletions

@@ -1,2 +0,0 @@
go1.21.1
time 2023-08-31T22:36:09Z

@@ -60,9 +60,7 @@ pkg crypto/tls, method (*QUICConn) Close() error #44886
pkg crypto/tls, method (*QUICConn) ConnectionState() ConnectionState #44886
pkg crypto/tls, method (*QUICConn) HandleData(QUICEncryptionLevel, []uint8) error #44886
pkg crypto/tls, method (*QUICConn) NextEvent() QUICEvent #44886
pkg crypto/tls, method (*QUICConn) SendSessionTicket(QUICSessionTicketOptions) error #60107
pkg crypto/tls, type QUICSessionTicketOptions struct #60107
pkg crypto/tls, type QUICSessionTicketOptions struct, EarlyData bool #60107
pkg crypto/tls, method (*QUICConn) SendSessionTicket(bool) error #60107
pkg crypto/tls, method (*QUICConn) SetTransportParameters([]uint8) #44886
pkg crypto/tls, method (*QUICConn) Start(context.Context) error #44886
pkg crypto/tls, method (QUICEncryptionLevel) String() string #44886
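
For orientation, a hedged compile-only sketch of the released Go 1.21 side of this hunk, where SendSessionTicket takes a QUICSessionTicketOptions value instead of the earlier bare bool (the sendTicket wrapper is hypothetical):

package main

import "crypto/tls"

// sendTicket uses the QUICSessionTicketOptions form of the API (#60107);
// the other side of this hunk took a single bool.
func sendTicket(conn *tls.QUICConn) error {
	return conn.SendSessionTicket(tls.QUICSessionTicketOptions{
		EarlyData: true, // the field that replaced the bare bool argument
	})
}

func main() {}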
@@ -221,18 +219,18 @@ pkg log/slog, func Any(string, interface{}) Attr #56345
pkg log/slog, func AnyValue(interface{}) Value #56345
pkg log/slog, func Bool(string, bool) Attr #56345
pkg log/slog, func BoolValue(bool) Value #56345
pkg log/slog, func DebugContext(context.Context, string, ...interface{}) #61200
pkg log/slog, func DebugCtx(context.Context, string, ...interface{}) #56345
pkg log/slog, func Debug(string, ...interface{}) #56345
pkg log/slog, func Default() *Logger #56345
pkg log/slog, func Duration(string, time.Duration) Attr #56345
pkg log/slog, func DurationValue(time.Duration) Value #56345
pkg log/slog, func ErrorContext(context.Context, string, ...interface{}) #61200
pkg log/slog, func ErrorCtx(context.Context, string, ...interface{}) #56345
pkg log/slog, func Error(string, ...interface{}) #56345
pkg log/slog, func Float64(string, float64) Attr #56345
pkg log/slog, func Float64Value(float64) Value #56345
pkg log/slog, func Group(string, ...interface{}) Attr #59204
pkg log/slog, func GroupValue(...Attr) Value #56345
pkg log/slog, func InfoContext(context.Context, string, ...interface{}) #61200
pkg log/slog, func InfoCtx(context.Context, string, ...interface{}) #56345
pkg log/slog, func Info(string, ...interface{}) #56345
pkg log/slog, func Int64(string, int64) Attr #56345
pkg log/slog, func Int64Value(int64) Value #56345
@@ -252,7 +250,7 @@ pkg log/slog, func Time(string, time.Time) Attr #56345
pkg log/slog, func TimeValue(time.Time) Value #56345
pkg log/slog, func Uint64(string, uint64) Attr #56345
pkg log/slog, func Uint64Value(uint64) Value #56345
pkg log/slog, func WarnContext(context.Context, string, ...interface{}) #61200
pkg log/slog, func WarnCtx(context.Context, string, ...interface{}) #56345
pkg log/slog, func Warn(string, ...interface{}) #56345
pkg log/slog, func With(...interface{}) *Logger #56345
pkg log/slog, method (Attr) Equal(Attr) bool #56345
@@ -273,17 +271,17 @@ pkg log/slog, method (*LevelVar) MarshalText() ([]uint8, error) #56345
pkg log/slog, method (*LevelVar) Set(Level) #56345
pkg log/slog, method (*LevelVar) String() string #56345
pkg log/slog, method (*LevelVar) UnmarshalText([]uint8) error #56345
pkg log/slog, method (*Logger) DebugContext(context.Context, string, ...interface{}) #61200
pkg log/slog, method (*Logger) DebugCtx(context.Context, string, ...interface{}) #56345
pkg log/slog, method (*Logger) Debug(string, ...interface{}) #56345
pkg log/slog, method (*Logger) Enabled(context.Context, Level) bool #56345
pkg log/slog, method (*Logger) ErrorContext(context.Context, string, ...interface{}) #61200
pkg log/slog, method (*Logger) ErrorCtx(context.Context, string, ...interface{}) #56345
pkg log/slog, method (*Logger) Error(string, ...interface{}) #56345
pkg log/slog, method (*Logger) Handler() Handler #56345
pkg log/slog, method (*Logger) InfoContext(context.Context, string, ...interface{}) #61200
pkg log/slog, method (*Logger) InfoCtx(context.Context, string, ...interface{}) #56345
pkg log/slog, method (*Logger) Info(string, ...interface{}) #56345
pkg log/slog, method (*Logger) LogAttrs(context.Context, Level, string, ...Attr) #56345
pkg log/slog, method (*Logger) Log(context.Context, Level, string, ...interface{}) #56345
pkg log/slog, method (*Logger) WarnContext(context.Context, string, ...interface{}) #61200
pkg log/slog, method (*Logger) WarnCtx(context.Context, string, ...interface{}) #56345
pkg log/slog, method (*Logger) Warn(string, ...interface{}) #56345
pkg log/slog, method (*Logger) WithGroup(string) *Logger #56345
pkg log/slog, method (*Logger) With(...interface{}) *Logger #56345
@@ -346,6 +344,8 @@ pkg maps, func Copy[$0 interface{ ~map[$2]$3 }, $1 interface{ ~map[$2]$3 }, $2 c
pkg maps, func DeleteFunc[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0, func($1, $2) bool) #57436
pkg maps, func Equal[$0 interface{ ~map[$2]$3 }, $1 interface{ ~map[$2]$3 }, $2 comparable, $3 comparable]($0, $1) bool #57436
pkg maps, func EqualFunc[$0 interface{ ~map[$2]$3 }, $1 interface{ ~map[$2]$4 }, $2 comparable, $3 interface{}, $4 interface{}]($0, $1, func($3, $4) bool) bool #57436
pkg maps, func Keys[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0) []$1 #57436
pkg maps, func Values[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0) []$2 #57436
pkg math/big, method (*Int) Float64() (float64, Accuracy) #56984
pkg net/http, method (*ProtocolError) Is(error) bool #41198
pkg net/http, method (*ResponseController) EnableFullDuplex() error #57786
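
A minimal runnable sketch of the Context-suffixed slog names (#61200) that replaced the *Ctx spellings shown in these hunks:

package main

import (
	"context"
	"log/slog"
)

func main() {
	ctx := context.Background()
	// Context-suffixed names, as released in Go 1.21:
	slog.InfoContext(ctx, "hello", "user", "gopher")
	slog.Default().WarnContext(ctx, "watch out")
	// The other side of these hunks spelled them InfoCtx, WarnCtx, etc.
}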

@@ -1,2 +1,2 @@
branch: release-branch.go1.21
branch: dev.inline
parent-branch: master

doc/go1.21.html (1246 lines): file diff suppressed because it is too large.

@@ -1,6 +1,6 @@
<!--{
"Title": "The Go Programming Language Specification",
"Subtitle": "Version of Aug 2, 2023",
"Subtitle": "Version of June 14, 2023",
"Path": "/ref/spec"
}-->
@@ -2511,7 +2511,7 @@ type (
<p>
A type definition creates a new, distinct type with the same
<a href="#Underlying_types">underlying type</a> and operations as the given type
<a href="#Types">underlying type</a> and operations as the given type
and binds an identifier, the <i>type name</i>, to it.
</p>
@@ -4343,7 +4343,7 @@ type parameter list type arguments after substitution
When using a generic function, type arguments may be provided explicitly,
or they may be partially or completely <a href="#Type_inference">inferred</a>
from the context in which the function is used.
Provided that they can be inferred, type argument lists may be omitted entirely if the function is:
Provided that they can be inferred, type arguments may be omitted entirely if the function is:
</p>
<ul>
@@ -4351,7 +4351,7 @@ Provided that they can be inferred, type argument lists may be omitted entirely
<a href="#Calls">called</a> with ordinary arguments,
</li>
<li>
<a href="#Assignment_statements">assigned</a> to a variable with a known type
<a href="#Assignment_statements">assigned</a> to a variable with an explicitly declared type,
</li>
<li>
<a href="#Calls">passed as an argument</a> to another function, or
@@ -4371,7 +4371,7 @@ must be inferrable from the context in which the function is used.
// sum returns the sum (concatenation, for strings) of its arguments.
func sum[T ~int | ~float64 | ~string](x... T) T { … }
x := sum // illegal: the type of x is unknown
x := sum // illegal: sum must have a type argument (x is a variable without a declared type)
intSum := sum[int] // intSum has type func(x... int) int
a := intSum(2, 3) // a has value 5 of type int
b := sum[float64](2.0, 3) // b has value 5.0 of type float64
@@ -4406,323 +4406,402 @@ For a generic type, all type arguments must always be provided explicitly.
<h3 id="Type_inference">Type inference</h3>
<p>
A use of a generic function may omit some or all type arguments if they can be
<i>inferred</i> from the context within which the function is used, including
the constraints of the function's type parameters.
Type inference succeeds if it can infer the missing type arguments
and <a href="#Instantiations">instantiation</a> succeeds with the
inferred type arguments.
Otherwise, type inference fails and the program is invalid.
<em>NOTE: This section is not yet up-to-date for Go 1.21.</em>
</p>
<p>
Type inference uses the type relationships between pairs of types for inference:
For instance, a function argument must be <a href="#Assignability">assignable</a>
to its respective function parameter; this establishes a relationship between the
type of the argument and the type of the parameter.
If either of these two types contains type parameters, type inference looks for the
type arguments to substitute the type parameters with such that the assignability
relationship is satisfied.
Similarly, type inference uses the fact that a type argument must
<a href="#Satisfying_a_type_constraint">satisfy</a> the constraint of its respective
type parameter.
Missing function type arguments may be <i>inferred</i> by a series of steps, described below.
Each step attempts to use known information to infer additional type arguments.
Type inference stops as soon as all type arguments are known.
After type inference is complete, it is still necessary to substitute all type arguments
for type parameters and verify that each type argument
<a href="#Implementing_an_interface">implements</a> the relevant constraint;
it is possible for an inferred type argument to fail to implement a constraint, in which
case instantiation fails.
</p>
<p>
Each such pair of matched types corresponds to a <i>type equation</i> containing
one or multiple type parameters, from one or possibly multiple generic functions.
Inferring the missing type arguments means solving the resulting set of type
equations for the respective type parameters.
</p>
<p>
For example, given
</p>
<pre>
// dedup returns a copy of the argument slice with any duplicate entries removed.
func dedup[S ~[]E, E comparable](S) S { … }
type Slice []int
var s Slice
s = dedup(s) // same as s = dedup[Slice, int](s)
</pre>
<p>
the variable <code>s</code> of type <code>Slice</code> must be assignable to
the function parameter type <code>S</code> for the program to be valid.
To reduce complexity, type inference ignores the directionality of assignments,
so the type relationship between <code>Slice</code> and <code>S</code> can be
expressed via the (symmetric) type equation <code>Slice ≡<sub>A</sub> S</code>
(or <code>S ≡<sub>A</sub> Slice</code> for that matter),
where the <code>A</code> in <code>≡<sub>A</sub></code>
indicates that the LHS and RHS types must match per assignability rules
(see the section on <a href="#Type_unification">type unification</a> for
details).
Similarly, the type parameter <code>S</code> must satisfy its constraint
<code>~[]E</code>. This can be expressed as <code>S ≡<sub>C</sub> ~[]E</code>
where <code>X ≡<sub>C</sub> Y</code> stands for
"<code>X</code> satisfies constraint <code>Y</code>".
These observations lead to a set of two equations
</p>
<pre>
Slice ≡<sub>A</sub> S (1)
S ≡<sub>C</sub> ~[]E (2)
</pre>
<p>
which now can be solved for the type parameters <code>S</code> and <code>E</code>.
From (1) a compiler can infer that the type argument for <code>S</code> is <code>Slice</code>.
Similarly, because the underlying type of <code>Slice</code> is <code>[]int</code>
and <code>[]int</code> must match <code>[]E</code> of the constraint,
a compiler can infer that <code>E</code> must be <code>int</code>.
Thus, for these two equations, type inference infers
</p>
<pre>
S ➞ Slice
E ➞ int
</pre>
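
The spec elides dedup's body; a minimal runnable sketch consistent with its signature (the body is an assumption), showing the inferred instantiation dedup[Slice, int]:

package main

import "fmt"

func dedup[S ~[]E, E comparable](s S) S {
	seen := make(map[E]bool)
	out := make(S, 0, len(s)) // S has core type []E, so make is allowed
	for _, v := range s {
		if !seen[v] {
			seen[v] = true
			out = append(out, v)
		}
	}
	return out
}

type Slice []int

func main() {
	s := Slice{1, 1, 2, 3, 3}
	s = dedup(s) // same as s = dedup[Slice, int](s): S ➞ Slice, E ➞ int
	fmt.Println(s) // [1 2 3]
}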
<p>
Given a set of type equations, the type parameters to solve for are
the type parameters of the functions that need to be instantiated
and for which no explicit type arguments are provided.
These type parameters are called <i>bound</i> type parameters.
For instance, in the <code>dedup</code> example above, the type parameters
<code>S</code> and <code>E</code> are bound to <code>dedup</code>.
An argument to a generic function call may be a generic function itself.
The type parameters of that function are included in the set of bound
type parameters.
The types of function arguments may contain type parameters from other
functions (such as a generic function enclosing a function call).
Those type parameters may also appear in type equations but they are
not bound in that context.
Type equations are always solved for the bound type parameters only.
</p>
<p>
Type inference supports calls of generic functions and assignments
of generic functions to (explicitly function-typed) variables.
This includes passing generic functions as arguments to other
(possibly also generic) functions, and returning generic functions
as results.
Type inference operates on a set of equations specific to each of
these cases.
The equations are as follows (type argument lists are omitted for clarity):
Type inference is based on
</p>
<ul>
<li>
<p>
For a function call <code>f(a<sub>0</sub>, a<sub>1</sub>, …)</code> where
<code>f</code> or a function argument <code>a<sub>i</sub></code> is
a generic function:
<br>
Each pair <code>(a<sub>i</sub>, p<sub>i</sub>)</code> of corresponding
function arguments and parameters where <code>a<sub>i</sub></code> is not an
<a href="#Constants">untyped constant</a> yields an equation
<code>typeof(p<sub>i</sub>) ≡<sub>A</sub> typeof(a<sub>i</sub>)</code>.
<br>
If <code>a<sub>i</sub></code> is an untyped constant <code>c<sub>j</sub></code>,
and <code>typeof(p<sub>i</sub>)</code> is a bound type parameter <code>P<sub>k</sub></code>,
the pair <code>(c<sub>j</sub>, P<sub>k</sub>)</code> is collected separately from
the type equations.
</p>
a <a href="#Type_parameter_declarations">type parameter list</a>
</li>
<li>
<p>
For an assignment <code>v = f</code> of a generic function <code>f</code> to a
(non-generic) variable <code>v</code> of function type:
<br>
<code>typeof(v) ≡<sub>A</sub> typeof(f)</code>.
</p>
a substitution map <i>M</i> initialized with the known type arguments, if any
</li>
<li>
<p>
For a return statement <code>return …, f, … </code> where <code>f</code> is a
generic function returned as a result to a (non-generic) result variable
<code>r</code> of function type:
<br>
<code>typeof(r) ≡<sub>A</sub> typeof(f)</code>.
</p>
a (possibly empty) list of ordinary function arguments (in case of a function call only)
</li>
</ul>
<p>
Additionally, each type parameter <code>P<sub>k</sub></code> and corresponding type constraint
<code>C<sub>k</sub></code> yields the type equation
<code>P<sub>k</sub> ≡<sub>C</sub> C<sub>k</sub></code>.
</p>
<p>
Type inference gives precedence to type information obtained from typed operands
before considering untyped constants.
Therefore, inference proceeds in two phases:
and then proceeds with the following steps:
</p>
<ol>
<li>
<p>
The type equations are solved for the bound
type parameters using <a href="#Type_unification">type unification</a>.
If unification fails, type inference fails.
</p>
apply <a href="#Function_argument_type_inference"><i>function argument type inference</i></a>
to all <i>typed</i> ordinary function arguments
</li>
<li>
<p>
For each bound type parameter <code>P<sub>k</sub></code> for which no type argument
has been inferred yet and for which one or more pairs
<code>(c<sub>j</sub>, P<sub>k</sub>)</code> with that same type parameter
were collected, determine the <a href="#Constant_expressions">constant kind</a>
of the constants <code>c<sub>j</sub></code> in all those pairs the same way as for
<a href="#Constant_expressions">constant expressions</a>.
The type argument for <code>P<sub>k</sub></code> is the
<a href="#Constants">default type</a> for the determined constant kind.
If a constant kind cannot be determined due to conflicting constant kinds,
type inference fails.
</p>
apply <a href="#Constraint_type_inference"><i>constraint type inference</i></a>
</li>
<li>
apply function argument type inference to all <i>untyped</i> ordinary function arguments
using the default type for each of the untyped function arguments
</li>
<li>
apply constraint type inference
</li>
</ol>
<p>
If not all type arguments have been found after these two phases, type inference fails.
If there are no ordinary or untyped function arguments, the respective steps are skipped.
Constraint type inference is skipped if the previous step didn't infer any new type arguments,
but it is run at least once if there are missing type arguments.
</p>
<p>
If the two phases are successful, type inference determined a type argument for each
bound type parameter:
</p>
<pre>
P<sub>k</sub> ➞ A<sub>k</sub>
</pre>
<p>
A type argument <code>A<sub>k</sub></code> may be a composite type,
containing other bound type parameters <code>P<sub>k</sub></code> as element types
(or even be just another bound type parameter).
In a process of repeated simplification, the bound type parameters in each type
argument are substituted with the respective type arguments for those type
parameters until each type argument is free of bound type parameters.
</p>
<p>
If type arguments contain cyclic references to themselves
through bound type parameters, simplification and thus type
inference fails.
Otherwise, type inference succeeds.
The substitution map <i>M</i> is carried through all steps, and each step may add entries to <i>M</i>.
The process stops as soon as <i>M</i> has a type argument for each type parameter or if an inference step fails.
If an inference step fails, or if <i>M</i> is still missing type arguments after the last step, type inference fails.
</p>
<h4 id="Type_unification">Type unification</h4>
<p>
Type inference solves type equations through <i>type unification</i>.
Type unification recursively compares the LHS and RHS types of an
equation, where either or both types may be or contain bound type parameters,
and looks for type arguments for those type parameters such that the LHS
and RHS match (become identical or assignment-compatible, depending on
context).
To that effect, type inference maintains a map of bound type parameters
to inferred type arguments; this map is consulted and updated during type unification.
Initially, the bound type parameters are known but the map is empty.
During type unification, if a new type argument <code>A</code> is inferred,
the respective mapping <code>P ➞ A</code> from type parameter to argument
is added to the map.
Conversely, when comparing types, a known type argument
(a type argument for which a map entry already exists)
takes the place of its corresponding type parameter.
As type inference progresses, the map is populated more and more
until all equations have been considered, or until unification fails.
Type inference succeeds if no unification step fails and the map has
an entry for each type parameter.
Type inference is based on <i>type unification</i>. A single unification step
applies to a <a href="#Type_inference">substitution map</a> and two types, either
or both of which may be or contain type parameters. The substitution map tracks
the known (explicitly provided or already inferred) type arguments: the map
contains an entry <code>P</code> &RightArrow; <code>A</code> for each type
parameter <code>P</code> and corresponding known type argument <code>A</code>.
During unification, known type arguments take the place of their corresponding type
parameters when comparing types. Unification is the process of finding substitution
map entries that make the two types equivalent.
</p>
</pre>
For example, given the type equation with the bound type parameter
<code>P</code>
<p>
For unification, two types that don't contain any type parameters from the current type
parameter list are <i>equivalent</i>
if they are identical, or if they are channel types that are identical ignoring channel
direction, or if their underlying types are equivalent.
</p>
<p>
Unification works by comparing the structure of pairs of types: their structure
disregarding type parameters must be identical, and types other than type parameters
must be equivalent.
A type parameter in one type may match any complete subtype in the other type;
each successful match causes an entry to be added to the substitution map.
If the structure differs, or types other than type parameters are not equivalent,
unification fails.
</p>
<!--
TODO(gri) Somewhere we need to describe the process of adding an entry to the
substitution map: if the entry is already present, the type argument
values are themselves unified.
-->
<p>
For example, if <code>T1</code> and <code>T2</code> are type parameters,
<code>[]map[int]bool</code> can be unified with any of the following:
</p>
<pre>
[10]struct{ elem P; list []P } ≡<sub>A</sub> [10]struct{ elem string; list []string }
[]map[int]bool // types are identical
T1 // adds T1 &RightArrow; []map[int]bool to substitution map
[]T1 // adds T1 &RightArrow; map[int]bool to substitution map
[]map[T1]T2 // adds T1 &RightArrow; int and T2 &RightArrow; bool to substitution map
</pre>
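
A runnable restatement of the []map[T1]T2 row above (unify is a hypothetical helper): the call's argument type adds T1 ➞ int and T2 ➞ bool to the substitution map:

package main

import "fmt"

// unify's parameter type mirrors the []map[T1]T2 row of the table above.
func unify[T1 comparable, T2 any](x []map[T1]T2) {
	var k T1
	var v T2
	fmt.Printf("T1=%T T2=%T\n", k, v) // T1=int T2=bool
}

func main() {
	unify([]map[int]bool{})
}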
<p>
type inference starts with an empty map.
Unification first compares the top-level structure of the LHS and RHS
types.
Both are arrays of the same length; they unify if the element types unify.
Both element types are structs; they unify if they have
the same number of fields with the same names and if the
field types unify.
The type argument for <code>P</code> is not known yet (there is no map entry),
so unifying <code>P</code> with <code>string</code> adds
the mapping <code>P ➞ string</code> to the map.
Unifying the types of the <code>list</code> field requires
unifying <code>[]P</code> and <code>[]string</code> and
thus <code>P</code> and <code>string</code>.
Since the type argument for <code>P</code> is known at this point
(there is a map entry for <code>P</code>), its type argument
<code>string</code> takes the place of <code>P</code>.
And since <code>string</code> is identical to <code>string</code>,
this unification step succeeds as well.
Unification of the LHS and RHS of the equation is now finished.
Type inference succeeds because there is only one type equation,
no unification step failed, and the map is fully populated.
On the other hand, <code>[]map[int]bool</code> cannot be unified with any of
</p>
<pre>
int // int is not a slice
struct{} // a struct is not a slice
[]struct{} // a struct is not a map
[]map[T1]string // map element types don't match
</pre>
<p>
As an exception to this general rule, because a <a href="#Type_definitions">defined type</a>
<code>D</code> and a type literal <code>L</code> are never equivalent,
unification compares the underlying type of <code>D</code> with <code>L</code> instead.
For example, given the defined type
</p>
<pre>
type Vector []float64
</pre>
<p>
and the type literal <code>[]E</code>, unification compares <code>[]float64</code> with
<code>[]E</code> and adds an entry <code>E</code> &RightArrow; <code>float64</code> to
the substitution map.
</p>
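
A runnable sketch of that rule (first is a hypothetical helper): passing a Vector where a []E parameter is expected unifies Vector's underlying type []float64 with []E:

package main

import "fmt"

type Vector []float64

func first[E any](s []E) E { return s[0] }

func main() {
	v := Vector{1.5, 2.5}
	// Vector is a defined type, so unification compares its underlying
	// type []float64 with []E, adding E ➞ float64 to the map.
	fmt.Println(first(v)) // 1.5
}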
<h4 id="Function_argument_type_inference">Function argument type inference</h4>
<!-- In this section and the section on constraint type inference we start with examples
rather than have the examples follow the rules as is customary elsewhere in spec.
Hopefully this helps building an intuition and makes the rules easier to follow. -->
<p>
Function argument type inference infers type arguments from function arguments:
if a function parameter is declared with a type <code>T</code> that uses
type parameters,
<a href="#Type_unification">unifying</a> the type of the corresponding
function argument with <code>T</code> may infer type arguments for the type
parameters used by <code>T</code>.
</p>
<p>
Unification uses a combination of <i>exact</i> and <i>loose</i>
unification depending on whether two types have to be
<a href="#Type_identity">identical</a>,
<a href="#Assignability">assignment-compatible</a>, or
only structurally equal.
The respective <a href="#Type_unification_rules">type unification rules</a>
are spelled out in detail in the <a href="#Appendix">Appendix</a>.
For instance, given the generic function
</p>
<pre>
func scale[Number ~int64|~float64|~complex128](v []Number, s Number) []Number
</pre>
<p>
and the call
</p>
<pre>
var vector []float64
scaledVector := scale(vector, 42)
</pre>
<p>
the type argument for <code>Number</code> can be inferred from the function argument
<code>vector</code> by unifying the type of <code>vector</code> with the corresponding
parameter type: <code>[]float64</code> and <code>[]Number</code>
match in structure and <code>float64</code> matches with <code>Number</code>.
This adds the entry <code>Number</code> &RightArrow; <code>float64</code> to the
<a href="#Type_unification">substitution map</a>.
Untyped arguments, such as the second function argument <code>42</code> here, are ignored
in the first round of function argument type inference and only considered if there are
unresolved type parameters left.
</p>
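
The spec elides scale's body; a minimal runnable version under the same signature (the body is an assumption):

package main

import "fmt"

func scale[Number ~int64 | ~float64 | ~complex128](v []Number, s Number) []Number {
	r := make([]Number, len(v))
	for i, x := range v {
		r[i] = x * s
	}
	return r
}

func main() {
	vector := []float64{1, 2, 3}
	scaledVector := scale(vector, 42) // Number ➞ float64 from vector; 42 is untyped
	fmt.Println(scaledVector)         // [42 84 126]
}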
<p>
For an equation of the form <code>X ≡<sub>A</sub> Y</code>,
where <code>X</code> and <code>Y</code> are types involved
in an assignment (including parameter passing and return statements),
the top-level type structures may unify loosely but element types
must unify exactly, matching the rules for assignments.
Inference happens in two separate phases; each phase operates on a specific list of
(parameter, argument) pairs:
</p>
<p>
For an equation of the form <code>P ≡<sub>C</sub> C</code>,
where <code>P</code> is a type parameter and <code>C</code>
its corresponding constraint, the unification rules are a bit
more complicated:
</p>
<ul>
<ol>
<li>
If <code>C</code> has a <a href="#Core_types">core type</a>
<code>core(C)</code>
and <code>P</code> has a known type argument <code>A</code>,
<code>core(C)</code> and <code>A</code> must unify loosely.
If <code>P</code> does not have a known type argument
and <code>C</code> contains exactly one type term <code>T</code>
that is not an underlying (tilde) type, unification adds the
mapping <code>P ➞ T</code> to the map.
The list <i>Lt</i> contains all (parameter, argument) pairs where the parameter
type uses type parameters and where the function argument is <i>typed</i>.
</li>
<li>
If <code>C</code> does not have a core type
and <code>P</code> has a known type argument <code>A</code>,
<code>A</code> must have all methods of <code>C</code>, if any,
and corresponding method types must unify exactly.
The list <i>Lu</i> contains all remaining pairs where the parameter type is a single
type parameter. In this list, the respective function arguments are untyped.
</li>
</ul>
</ol>
<p>
When solving type equations from type constraints,
solving one equation may infer additional type arguments,
which in turn may enable solving other equations that depend
on those type arguments.
Type inference repeats type unification as long as new type
arguments are inferred.
Any other (parameter, argument) pair is ignored.
</p>
<p>
By construction, the arguments of the pairs in <i>Lu</i> are <i>untyped</i> constants
(or the untyped boolean result of a comparison). And because <a href="#Constants">default types</a>
of untyped values are always predeclared non-composite types, they can never match against
a composite type, so it is sufficient to only consider parameter types that are single type
parameters.
</p>
<p>
Each list is processed in a separate phase:
</p>
<ol>
<li>
In the first phase, the parameter and argument types of each pair in <i>Lt</i>
are unified. If unification succeeds for a pair, it may yield new entries that
are added to the substitution map <i>M</i>. If unification fails, type inference
fails.
</li>
<li>
The second phase considers the entries of list <i>Lu</i>. Type parameters for
which the type argument has already been determined are ignored in this phase.
For each remaining pair, the parameter type (which is a single type parameter) and
the <a href="#Constants">default type</a> of the corresponding untyped argument is
unified. If unification fails, type inference fails.
</li>
</ol>
<p>
While unification is successful, processing of each list continues until all list elements
are considered, even if all type arguments are inferred before the last list element has
been processed.
</p>
<p>
Example:
</p>
<pre>
func min[T ~int|~float64](x, y T) T
var x int
min(x, 2.0) // T is int, inferred from typed argument x; 2.0 is assignable to int
min(1.0, 2.0) // T is float64, inferred from default type for 1.0 and matches default type for 2.0
min(1.0, 2) // illegal: default type float64 (for 1.0) doesn't match default type int (for 2)
</pre>
<p>
In the example <code>min(1.0, 2)</code>, processing the function argument <code>1.0</code>
yields the substitution map entry <code>T</code> &RightArrow; <code>float64</code>. Because
processing continues until all untyped arguments are considered, an error is reported. This
ensures that type inference does not depend on the order of the untyped arguments.
</p>
<h4 id="Constraint_type_inference">Constraint type inference</h4>
<p>
Constraint type inference infers type arguments by considering type constraints.
If a type parameter <code>P</code> has a constraint with a
<a href="#Core_types">core type</a> <code>C</code>,
<a href="#Type_unification">unifying</a> <code>P</code> with <code>C</code>
may infer additional type arguments, either the type argument for <code>P</code>,
or if that is already known, possibly the type arguments for type parameters
used in <code>C</code>.
</p>
<p>
For instance, consider the type parameter list with type parameters <code>List</code> and
<code>Elem</code>:
</p>
<pre>
[List ~[]Elem, Elem any]
</pre>
<p>
Constraint type inference can deduce the type of <code>Elem</code> from the type argument
for <code>List</code> because <code>Elem</code> is a type parameter in the core type
<code>[]Elem</code> of <code>List</code>.
If the type argument is <code>Bytes</code>:
</p>
<pre>
type Bytes []byte
</pre>
<p>
unifying the underlying type of <code>Bytes</code> with the core type means
unifying <code>[]byte</code> with <code>[]Elem</code>. That unification succeeds and yields
the <a href="#Type_unification">substitution map</a> entry
<code>Elem</code> &RightArrow; <code>byte</code>.
Thus, in this example, constraint type inference can infer the second type argument from the
first one.
</p>
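
A runnable sketch of the Bytes example (last is a hypothetical helper using the [List ~[]Elem, Elem any] parameter list): the argument fixes List, and constraint type inference then yields Elem ➞ byte:

package main

import "fmt"

type Bytes []byte

func last[List ~[]Elem, Elem any](l List) Elem { return l[len(l)-1] }

func main() {
	b := Bytes{1, 2, 3}
	// List ➞ Bytes comes from the argument; unifying []byte (the
	// underlying type of Bytes) with []Elem then gives Elem ➞ byte.
	fmt.Println(last(b)) // 3
}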
<p>
Using the core type of a constraint may lose some information: In the (unlikely) case that
the constraint's type set contains a single <a href="#Type_definitions">defined type</a>
<code>N</code>, the corresponding core type is <code>N</code>'s underlying type rather than
<code>N</code> itself. In this case, constraint type inference may succeed but instantiation
will fail because the inferred type is not in the type set of the constraint.
Thus, constraint type inference uses the <i>adjusted core type</i> of
a constraint: if the type set contains a single type, use that type; otherwise use the
constraint's core type.
</p>
<p>
Generally, constraint type inference proceeds in two phases: Starting with a given
substitution map <i>M</i>
</p>
<ol>
<li>
For all type parameters with an adjusted core type, unify the type parameter with that
type. If any unification fails, constraint type inference fails.
</li>
<li>
At this point, some entries in <i>M</i> may map type parameters to other
type parameters or to types containing type parameters. For each entry
<code>P</code> &RightArrow; <code>A</code> in <i>M</i> where <code>A</code> is or
contains type parameters <code>Q</code> for which there exist entries
<code>Q</code> &RightArrow; <code>B</code> in <i>M</i>, substitute those
<code>Q</code> with the respective <code>B</code> in <code>A</code>.
Stop when no further substitution is possible.
</li>
</ol>
<p>
The result of constraint type inference is the final substitution map <i>M</i> from type
parameters <code>P</code> to type arguments <code>A</code> where no type parameter <code>P</code>
appears in any of the <code>A</code>.
</p>
<p>
For instance, given the type parameter list
</p>
<pre>
[A any, B []C, C *A]
</pre>
<p>
and the single provided type argument <code>int</code> for type parameter <code>A</code>,
the initial substitution map <i>M</i> contains the entry <code>A</code> &RightArrow; <code>int</code>.
</p>
<p>
In the first phase, the type parameters <code>B</code> and <code>C</code> are unified
with the core type of their respective constraints. This adds the entries
<code>B</code> &RightArrow; <code>[]C</code> and <code>C</code> &RightArrow; <code>*A</code>
to <i>M</i>.
</p>
<p>
At this point there are two entries in <i>M</i> where the right-hand side
is or contains type parameters for which there exists other entries in <i>M</i>:
<code>[]C</code> and <code>*A</code>.
In the second phase, these type parameters are replaced with their respective
types. It doesn't matter in which order this happens. Starting with the state
of <i>M</i> after the first phase:
</p>
<p>
<code>A</code> &RightArrow; <code>int</code>,
<code>B</code> &RightArrow; <code>[]C</code>,
<code>C</code> &RightArrow; <code>*A</code>
</p>
<p>
Replace <code>A</code> on the right-hand side of &RightArrow; with <code>int</code>:
</p>
<p>
<code>A</code> &RightArrow; <code>int</code>,
<code>B</code> &RightArrow; <code>[]C</code>,
<code>C</code> &RightArrow; <code>*int</code>
</p>
<p>
Replace <code>C</code> on the right-hand side of &RightArrow; with <code>*int</code>:
</p>
<p>
<code>A</code> &RightArrow; <code>int</code>,
<code>B</code> &RightArrow; <code>[]*int</code>,
<code>C</code> &RightArrow; <code>*int</code>
</p>
<p>
At this point no further substitution is possible and the map is full.
Therefore, <code>M</code> represents the final map of type parameters
to type arguments for the given type parameter list.
</p>
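
A runnable sketch of this walkthrough (f is a hypothetical function with the same type parameter list): providing only A ➞ int lets constraint type inference fill in B ➞ []*int and C ➞ *int:

package main

import "fmt"

func f[A any, B []C, C *A](a A) B {
	p := &a        // p has type *A, the core type of C
	return B{C(p)} // B's core type is []C, so a composite literal works
}

func main() {
	b := f[int](7)     // only A is given; inference adds B ➞ []*int, C ➞ *int
	fmt.Println(*b[0]) // 7
}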
<h3 id="Operators">Operators</h3>
@@ -5400,7 +5479,7 @@ in any of these cases:
ignoring struct tags (see below),
<code>x</code>'s type and <code>T</code> are not
<a href="#Type_parameter_declarations">type parameters</a> but have
<a href="#Type_identity">identical</a> <a href="#Underlying_types">underlying types</a>.
<a href="#Type_identity">identical</a> <a href="#Types">underlying types</a>.
</li>
<li>
ignoring struct tags (see below),
@@ -7245,8 +7324,7 @@ clear(t) type parameter see below
</pre>
<p>
If the type of the argument to <code>clear</code> is a
<a href="#Type_parameter_declarations">type parameter</a>,
If the argument type is a <a href="#Type_parameter_declarations">type parameter</a>,
all types in its type set must be maps or slices, and <code>clear</code>
performs the operation corresponding to the actual type argument.
</p>
@@ -8212,7 +8290,7 @@ of if the general conversion rules take care of this.
<p>
A <code>Pointer</code> is a <a href="#Pointer_types">pointer type</a> but a <code>Pointer</code>
value may not be <a href="#Address_operators">dereferenced</a>.
Any pointer or value of <a href="#Underlying_types">underlying type</a> <code>uintptr</code> can be
Any pointer or value of <a href="#Types">underlying type</a> <code>uintptr</code> can be
<a href="#Conversions">converted</a> to a type of underlying type <code>Pointer</code> and vice versa.
The effect of converting between <code>Pointer</code> and <code>uintptr</code> is implementation-defined.
</p>
@@ -8360,145 +8438,3 @@ The following minimal alignment properties are guaranteed:
<p>
A struct or array type has size zero if it contains no fields (or elements, respectively) that have a size greater than zero. Two distinct zero-size variables may have the same address in memory.
</p>
<h2 id="Appendix">Appendix</h2>
<h3 id="Type_unification_rules">Type unification rules</h3>
<p>
The type unification rules describe if and how two types unify.
The precise details are relevant for Go implementations,
affect the specifics of error messages (such as whether
a compiler reports a type inference or other error),
and may explain why type inference fails in unusual code situations.
But by and large these rules can be ignored when writing Go code:
type inference is designed to mostly "work as expected",
and the unification rules are fine-tuned accordingly.
</p>
<p>
Type unification is controlled by a <i>matching mode</i>, which may
be <i>exact</i> or <i>loose</i>.
As unification recursively descends a composite type structure,
the matching mode used for elements of the type, the <i>element matching mode</i>,
remains the same as the matching mode except when two types are unified for
<a href="#Assignability">assignability</a> (<code><sub>A</sub></code>):
in this case, the matching mode is <i>loose</i> at the top level but
then changes to <i>exact</i> for element types, reflecting the fact
that types don't have to be identical to be assignable.
</p>
<p>
Two types that are not bound type parameters unify exactly if any of
the following conditions is true:
</p>
<ul>
<li>
Both types are <a href="#Type_identity">identical</a>.
</li>
<li>
Both types have identical structure and their element types
unify exactly.
</li>
<li>
Exactly one type is an <a href="#Type_inference">unbound</a>
type parameter with a <a href="#Core_types">core type</a>,
and that core type unifies with the other type per the
unification rules for <code>≡<sub>A</sub></code>
(loose unification at the top level and exact unification
for element types).
</li>
</ul>
<p>
If both types are bound type parameters, they unify per the given
matching modes if:
</p>
<ul>
<li>
Both type parameters are identical.
</li>
<li>
At most one of the type parameters has a known type argument.
In this case, the type parameters are <i>joined</i>:
they both stand for the same type argument.
If neither type parameter has a known type argument yet,
a future type argument inferred for one of the type parameters
is simultaneously inferred for both of them.
</li>
<li>
Both type parameters have a known type argument
and the type arguments unify per the given matching modes.
</li>
</ul>
<p>
A single bound type parameter <code>P</code> and another type <code>T</code> unify
per the given matching modes if:
</p>
<ul>
<li>
<code>P</code> doesn't have a known type argument.
In this case, <code>T</code> is inferred as the type argument for <code>P</code>.
</li>
<li>
<code>P</code> does have a known type argument <code>A</code>,
<code>A</code> and <code>T</code> unify per the given matching modes,
and one of the following conditions is true:
<ul>
<li>
Both <code>A</code> and <code>T</code> are interface types:
In this case, if both <code>A</code> and <code>T</code> are
also <a href="#Type_definitions">defined</a> types,
they must be <a href="#Type_identity">identical</a>.
Otherwise, if neither of them is a defined type, they must
have the same number of methods
(unification of <code>A</code> and <code>T</code> already
established that the methods match).
</li>
<li>
Neither <code>A</code> nor <code>T</code> are interface types:
In this case, if <code>T</code> is a defined type, <code>T</code>
replaces <code>A</code> as the inferred type argument for <code>P</code>.
</li>
</ul>
</li>
</ul>
<p>
Finally, two types that are not bound type parameters unify loosely
(and per the element matching mode) if:
</p>
<ul>
<li>
Both types unify exactly.
</li>
<li>
One type is a <a href="#Type_definitions">defined type</a>,
the other type is a type literal, but not an interface,
and their underlying types unify per the element matching mode.
</li>
<li>
Both types are interfaces (but not type parameters) with
identical <a href="#Interface_types">type terms</a>,
both or neither embed the predeclared type
<a href="#Predeclared_identifiers">comparable</a>,
corresponding method types unify per the element matching mode,
and the method set of one of the interfaces is a subset of
the method set of the other interface.
</li>
<li>
Only one type is an interface (but not a type parameter),
corresponding methods of the two types unify per the element matching mode,
and the method set of the interface is a subset of
the method set of the other type.
</li>
<li>
Both types have the same structure and their element types
unify per the element matching mode.
</li>
</ul>

@@ -126,14 +126,6 @@ for example,
see the [runtime documentation](/pkg/runtime#hdr-Environment_Variables)
and the [go command documentation](/cmd/go#hdr-Build_and_test_caching).
### Go 1.22
Go 1.22 adds a configurable limit to control the maximum acceptable RSA key size
that can be used in TLS handshakes, controlled by the [`tlsmaxrsasize` setting](/pkg/crypto/tls#Conn.Handshake).
The default is tlsmaxrsasize=8192, limiting RSA to 8192-bit keys. To avoid
denial of service attacks, this setting and default were backported to Go
1.19.13, Go 1.20.8, and Go 1.21.1.
### Go 1.21
Go 1.21 made it a run-time error to call `panic` with a nil interface value,
@@ -150,10 +142,6 @@ forms, controlled by the
respectively.
This behavior was backported to Go 1.19.8+ and Go 1.20.3+.
Go 1.21 adds support for Multipath TCP, but it is only used if the application
explicitly asks for it. This behavior can be controlled by the
[`multipathtcp` setting](/pkg/net#Dialer.SetMultipathTCP).
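
As a hedged sketch, the per-Dialer API form of the multipathtcp opt-in:

package main

import (
	"fmt"
	"net"
)

func main() {
	// Per-dialer opt-in corresponding to the multipathtcp setting:
	var d net.Dialer
	d.SetMultipathTCP(true)
	fmt.Println(d.MultipathTCP()) // true
}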
There is no plan to remove any of these settings.
### Go 1.20

@@ -10,12 +10,12 @@ case "$GOWASIRUNTIME" in
"wasmer")
exec wasmer run --dir=/ --env PWD="$PWD" --env PATH="$PATH" ${GOWASIRUNTIMEARGS:-} "$1" -- "${@:2}"
;;
"wazero")
exec wazero run -mount /:/ -env-inherit -cachedir "${TMPDIR:-/tmp}"/wazero ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}"
;;
"wasmtime" | "")
"wasmtime")
exec wasmtime run --dir=/ --env PWD="$PWD" --env PATH="$PATH" --max-wasm-stack 1048576 ${GOWASIRUNTIMEARGS:-} "$1" -- "${@:2}"
;;
"wazero" | "")
exec wazero run -mount /:/ -env-inherit -cachedir "${TMPDIR:-/tmp}"/wazero ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}"
;;
*)
echo "Unknown Go WASI runtime specified: $GOWASIRUNTIME"
exit 1
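
For context, a hedged sketch of a wasip1 program this script would run; the build and invocation lines in the comment are assumptions about typical usage:

// Minimal wasip1 program. Assumed usage:
//
//	GOOS=wasip1 GOARCH=wasm go build -o main.wasm .
//	GOWASIRUNTIME=wazero go_wasip1_wasm_exec ./main.wasm
package main

import "fmt"

func main() {
	fmt.Println("hello from wasip1")
}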

@@ -38,7 +38,7 @@ The vendor directory may be updated with 'go mod vendor'.
A typical sequence might be:
cd src
go get golang.org/x/net@master
go get golang.org/x/net@latest
go mod tidy
go mod vendor

@@ -7,6 +7,7 @@ package bytes_test
import (
. "bytes"
"fmt"
"internal/testenv"
"testing"
)
@@ -72,7 +73,7 @@ func TestCompareBytes(t *testing.T) {
}
lengths = append(lengths, 256, 512, 1024, 1333, 4095, 4096, 4097)
if !testing.Short() {
if !testing.Short() || testenv.Builder() != "" {
lengths = append(lengths, 65535, 65536, 65537, 99999)
}

@@ -2,10 +2,9 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This package computes the exported API of a set of Go packages.
// Package api computes the exported API of a set of Go packages.
// It is only a test, not a command, nor a usefully importable package.
package main
package api
import (
"bufio"

@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
package api
import (
"flag"

@@ -4,7 +4,7 @@
//go:build boringcrypto
package main
package api
import (
"fmt"

@@ -183,28 +183,28 @@ start:
// 8.2: Load-Reserved/Store-Conditional
LRW (X5), X6 // 2fa30214
LRD (X5), X6 // 2fb30214
SCW X5, (X6), X7 // af23531a
SCD X5, (X6), X7 // af33531a
SCW X5, (X6), X7 // af23531c
SCD X5, (X6), X7 // af33531c
// 8.3: Atomic Memory Operations
AMOSWAPW X5, (X6), X7 // af23530e
AMOSWAPD X5, (X6), X7 // af33530e
AMOADDW X5, (X6), X7 // af235306
AMOADDD X5, (X6), X7 // af335306
AMOANDW X5, (X6), X7 // af235366
AMOANDD X5, (X6), X7 // af335366
AMOORW X5, (X6), X7 // af235346
AMOORD X5, (X6), X7 // af335346
AMOXORW X5, (X6), X7 // af235326
AMOXORD X5, (X6), X7 // af335326
AMOMAXW X5, (X6), X7 // af2353a6
AMOMAXD X5, (X6), X7 // af3353a6
AMOMAXUW X5, (X6), X7 // af2353e6
AMOMAXUD X5, (X6), X7 // af3353e6
AMOMINW X5, (X6), X7 // af235386
AMOMIND X5, (X6), X7 // af335386
AMOMINUW X5, (X6), X7 // af2353c6
AMOMINUD X5, (X6), X7 // af3353c6
AMOSWAPW X5, (X6), X7 // af23530c
AMOSWAPD X5, (X6), X7 // af33530c
AMOADDW X5, (X6), X7 // af235304
AMOADDD X5, (X6), X7 // af335304
AMOANDW X5, (X6), X7 // af235364
AMOANDD X5, (X6), X7 // af335364
AMOORW X5, (X6), X7 // af235344
AMOORD X5, (X6), X7 // af335344
AMOXORW X5, (X6), X7 // af235324
AMOXORD X5, (X6), X7 // af335324
AMOMAXW X5, (X6), X7 // af2353a4
AMOMAXD X5, (X6), X7 // af3353a4
AMOMAXUW X5, (X6), X7 // af2353e4
AMOMAXUD X5, (X6), X7 // af3353e4
AMOMINW X5, (X6), X7 // af235384
AMOMIND X5, (X6), X7 // af335384
AMOMINUW X5, (X6), X7 // af2353c4
AMOMINUD X5, (X6), X7 // af3353c4
// 10.1: Base Counters and Timers
RDCYCLE X5 // f32200c0

@@ -1,7 +1,7 @@
package cgotest
/*
#cgo !darwin LDFLAGS: -lm
#cgo LDFLAGS: -lm
#include <math.h>
*/
import "C"

@@ -1,7 +1,7 @@
package issue8756
/*
#cgo !darwin LDFLAGS: -lm
#cgo LDFLAGS: -lm
#include <math.h>
*/
import "C"

@@ -23,7 +23,7 @@ package cgotest
#include <unistd.h>
#include <sys/stat.h>
#include <errno.h>
#cgo !darwin LDFLAGS: -lm
#cgo LDFLAGS: -lm
#ifndef WIN32
#include <pthread.h>

@@ -389,11 +389,9 @@ func TestForkExec(t *testing.T) {
}
}
func TestSymbolNameMangle(t *testing.T) {
func TestGeneric(t *testing.T) {
// Issue 58800: generic function name may contain weird characters
// that confuse the external linker.
// Issue 62098: the name mangling code doesn't handle some string
// symbols correctly.
globalSkip(t)
goCmd(t, "build", "-buildmode=plugin", "-o", "mangle.so", "./mangle/plugin.go")
goCmd(t, "build", "-buildmode=plugin", "-o", "generic.so", "./generic/plugin.go")
}

@@ -2,37 +2,21 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Test cases for symbol name mangling.
package main
import (
"fmt"
"strings"
)
// Issue 58800:
// Instantiated function name may contain weird characters
// that confuse the external linker, so it needs to be
// mangled.
type S struct {
X int `parser:"|@@)"`
}
package main
//go:noinline
func F[T any]() {}
type S struct {
X int `parser:"|@@)"`
}
func P() {
F[S]()
}
// Issue 62098: the name mangling code doesn't handle some string
// symbols correctly.
func G(id string) error {
if strings.ContainsAny(id, "&$@;/:+,?\\{^}%`]\">[~<#|") {
return fmt.Errorf("invalid")
}
return nil
}
func main() {}

@@ -1571,6 +1571,14 @@
// zero upper bit of the register; no need to zero-extend
(MOVBUreg x:((Equal|NotEqual|LessThan|LessThanU|LessThanF|LessEqual|LessEqualU|LessEqualF|GreaterThan|GreaterThanU|GreaterThanF|GreaterEqual|GreaterEqualU|GreaterEqualF) _)) => (MOVDreg x)
// omit unsigned extension
(MOVWUreg x) && zeroUpper32Bits(x, 3) => x
// omit sign extension
(MOVWreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffff80000000) == 0 => (ANDconst <t> x [c])
(MOVHreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffffffff8000) == 0 => (ANDconst <t> x [c])
(MOVBreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffffffffff80) == 0 => (ANDconst <t> x [c])
// absorb flag constants into conditional instructions
(CSEL [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
(CSEL [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => y
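
A hedged illustration, not taken from this change, of the kind of Go code the new omit-sign-extension rules target: the constant mask clears the sign bit, so the sign extension inserted for the int32 conversion is redundant:

package main

import "fmt"

// mask's AND constant satisfies uint64(c)&0xffffffff80000000 == 0, so per
// the (MOVWreg (ANDconst x [c])) rule the AND alone suffices on arm64.
func mask(x uint64) int32 {
	return int32(x & 0x7fffffff)
}

func main() {
	fmt.Println(mask(0xdeadbeef))
}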

@@ -13,6 +13,7 @@ import "strings"
// - *const instructions may use a constant larger than the instruction can encode.
// In this case the assembler expands to multiple instructions and uses tmp
// register (R27).
// - All 32-bit Ops will zero the upper 32 bits of the destination register.
// Suffixes encode the bit width of various instructions.
// D (double word) = 64 bit

@@ -588,16 +588,16 @@
// small and of zero-extend => either zero-extend or small and
(Select0 (ANDCCconst [c] y:(MOVBZreg _))) && c&0xFF == 0xFF => y
(Select0 (ANDCCconst [0xFF] (MOVBreg x))) => (MOVBZreg x)
(Select0 (ANDCCconst [0xFF] y:(MOVBreg _))) => y
(Select0 (ANDCCconst [c] y:(MOVHZreg _))) && c&0xFFFF == 0xFFFF => y
(Select0 (ANDCCconst [0xFFFF] (MOVHreg x))) => (MOVHZreg x)
(Select0 (ANDCCconst [0xFFFF] y:(MOVHreg _))) => y
(AND (MOVDconst [c]) y:(MOVWZreg _)) && c&0xFFFFFFFF == 0xFFFFFFFF => y
(AND (MOVDconst [0xFFFFFFFF]) y:(MOVWreg x)) => (MOVWZreg x)
// normal case
(Select0 (ANDCCconst [c] (MOVBZreg x))) => (Select0 (ANDCCconst [c&0xFF] x))
(Select0 (ANDCCconst [c] (MOVHZreg x))) => (Select0 (ANDCCconst [c&0xFFFF] x))
(Select0 (ANDCCconst [c] (MOVWZreg x))) => (Select0 (ANDCCconst [c&0xFFFFFFFF] x))
(Select0 (ANDCCconst [c] (MOV(B|BZ)reg x))) => (Select0 (ANDCCconst [c&0xFF] x))
(Select0 (ANDCCconst [c] (MOV(H|HZ)reg x))) => (Select0 (ANDCCconst [c&0xFFFF] x))
(Select0 (ANDCCconst [c] (MOV(W|WZ)reg x))) => (Select0 (ANDCCconst [c&0xFFFFFFFF] x))
// Eliminate unnecessary sign/zero extend following right shift
(MOV(B|H|W)Zreg (SRWconst [c] (MOVBZreg x))) => (SRWconst [c] (MOVBZreg x))

@@ -855,7 +855,7 @@ func storeOneArg(x *expandState, pos src.XPos, b *Block, locs []*LocalSlot, suff
// storeOneLoad creates a decomposed (one step) load that is then stored.
func storeOneLoad(x *expandState, pos src.XPos, b *Block, source, mem *Value, t *types.Type, offArg, offStore int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
from := x.offsetFrom(source.Block, source.Args[0], offArg, types.NewPtr(t))
w := b.NewValue2(source.Pos, OpLoad, t, from, mem)
w := source.Block.NewValue2(source.Pos, OpLoad, t, from, mem)
return x.storeArgOrLoad(pos, b, w, mem, t, offStore, loadRegOffset, storeRc)
}
@@ -962,7 +962,7 @@ func (x *expandState) storeArgOrLoad(pos src.XPos, b *Block, source, mem *Value,
eltRO := x.regWidth(elt)
source.Type = t
for i := int64(0); i < t.NumElem(); i++ {
sel := b.NewValue1I(pos, OpArraySelect, elt, i, source)
sel := source.Block.NewValue1I(pos, OpArraySelect, elt, i, source)
mem = x.storeArgOrLoad(pos, b, sel, mem, elt, storeOffset+i*elt.Size(), loadRegOffset, storeRc.at(t, 0))
loadRegOffset += eltRO
pos = pos.WithNotStmt()
@@ -997,7 +997,7 @@ func (x *expandState) storeArgOrLoad(pos src.XPos, b *Block, source, mem *Value,
source.Type = t
for i := 0; i < t.NumFields(); i++ {
fld := t.Field(i)
sel := b.NewValue1I(pos, OpStructSelect, fld.Type, int64(i), source)
sel := source.Block.NewValue1I(pos, OpStructSelect, fld.Type, int64(i), source)
mem = x.storeArgOrLoad(pos, b, sel, mem, fld.Type, storeOffset+fld.Offset, loadRegOffset, storeRc.next(fld.Type))
loadRegOffset += x.regWidth(fld.Type)
pos = pos.WithNotStmt()
@@ -1009,48 +1009,48 @@ func (x *expandState) storeArgOrLoad(pos src.XPos, b *Block, source, mem *Value,
break
}
tHi, tLo := x.intPairTypes(t.Kind())
sel := b.NewValue1(pos, OpInt64Hi, tHi, source)
sel := source.Block.NewValue1(pos, OpInt64Hi, tHi, source)
mem = x.storeArgOrLoad(pos, b, sel, mem, tHi, storeOffset+x.hiOffset, loadRegOffset+x.hiRo, storeRc.plus(x.hiRo))
pos = pos.WithNotStmt()
sel = b.NewValue1(pos, OpInt64Lo, tLo, source)
sel = source.Block.NewValue1(pos, OpInt64Lo, tLo, source)
return x.storeArgOrLoad(pos, b, sel, mem, tLo, storeOffset+x.lowOffset, loadRegOffset+x.loRo, storeRc.plus(x.hiRo))
case types.TINTER:
sel := b.NewValue1(pos, OpITab, x.typs.BytePtr, source)
sel := source.Block.NewValue1(pos, OpITab, x.typs.BytePtr, source)
mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.BytePtr, storeOffset, loadRegOffset, storeRc.next(x.typs.BytePtr))
pos = pos.WithNotStmt()
sel = b.NewValue1(pos, OpIData, x.typs.BytePtr, source)
sel = source.Block.NewValue1(pos, OpIData, x.typs.BytePtr, source)
return x.storeArgOrLoad(pos, b, sel, mem, x.typs.BytePtr, storeOffset+x.ptrSize, loadRegOffset+RO_iface_data, storeRc)
case types.TSTRING:
sel := b.NewValue1(pos, OpStringPtr, x.typs.BytePtr, source)
sel := source.Block.NewValue1(pos, OpStringPtr, x.typs.BytePtr, source)
mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.BytePtr, storeOffset, loadRegOffset, storeRc.next(x.typs.BytePtr))
pos = pos.WithNotStmt()
sel = b.NewValue1(pos, OpStringLen, x.typs.Int, source)
sel = source.Block.NewValue1(pos, OpStringLen, x.typs.Int, source)
return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Int, storeOffset+x.ptrSize, loadRegOffset+RO_string_len, storeRc)
case types.TSLICE:
et := types.NewPtr(t.Elem())
sel := b.NewValue1(pos, OpSlicePtr, et, source)
sel := source.Block.NewValue1(pos, OpSlicePtr, et, source)
mem = x.storeArgOrLoad(pos, b, sel, mem, et, storeOffset, loadRegOffset, storeRc.next(et))
pos = pos.WithNotStmt()
sel = b.NewValue1(pos, OpSliceLen, x.typs.Int, source)
sel = source.Block.NewValue1(pos, OpSliceLen, x.typs.Int, source)
mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.Int, storeOffset+x.ptrSize, loadRegOffset+RO_slice_len, storeRc.next(x.typs.Int))
sel = b.NewValue1(pos, OpSliceCap, x.typs.Int, source)
sel = source.Block.NewValue1(pos, OpSliceCap, x.typs.Int, source)
return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Int, storeOffset+2*x.ptrSize, loadRegOffset+RO_slice_cap, storeRc)
case types.TCOMPLEX64:
sel := b.NewValue1(pos, OpComplexReal, x.typs.Float32, source)
sel := source.Block.NewValue1(pos, OpComplexReal, x.typs.Float32, source)
mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float32, storeOffset, loadRegOffset, storeRc.next(x.typs.Float32))
pos = pos.WithNotStmt()
sel = b.NewValue1(pos, OpComplexImag, x.typs.Float32, source)
sel = source.Block.NewValue1(pos, OpComplexImag, x.typs.Float32, source)
return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float32, storeOffset+4, loadRegOffset+RO_complex_imag, storeRc)
case types.TCOMPLEX128:
sel := b.NewValue1(pos, OpComplexReal, x.typs.Float64, source)
sel := source.Block.NewValue1(pos, OpComplexReal, x.typs.Float64, source)
mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float64, storeOffset, loadRegOffset, storeRc.next(x.typs.Float64))
pos = pos.WithNotStmt()
sel = b.NewValue1(pos, OpComplexImag, x.typs.Float64, source)
sel = source.Block.NewValue1(pos, OpComplexImag, x.typs.Float64, source)
return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float64, storeOffset+8, loadRegOffset+RO_complex_imag, storeRc)
}
@@ -1113,9 +1113,6 @@ func (x *expandState) rewriteArgs(v *Value, firstArg int) {
}
}
}
if x.debug > 1 {
x.Printf("...storeArg %s, %v, %d\n", a.LongString(), aType, aOffset)
}
// "Dereference" of addressed (probably not-SSA-eligible) value becomes Move
// TODO(register args) this will be more complicated with registers in the picture.
mem = x.rewriteDereference(v.Block, sp, a, mem, aOffset, aux.SizeOfArg(auxI), aType, v.Pos)

@@ -1281,6 +1281,10 @@ func zeroUpper32Bits(x *Value, depth int) bool {
OpAMD64SHRL, OpAMD64SHRLconst, OpAMD64SARL, OpAMD64SARLconst,
OpAMD64SHLL, OpAMD64SHLLconst:
return true
case OpARM64REV16W, OpARM64REVW, OpARM64RBITW, OpARM64CLZW, OpARM64EXTRWconst,
OpARM64MULW, OpARM64MNEGW, OpARM64UDIVW, OpARM64DIVW, OpARM64UMODW,
OpARM64MADDW, OpARM64MSUBW, OpARM64RORW, OpARM64RORWconst:
return true
case OpArg:
return x.Type.Size() == 4
case OpPhi, OpSelect0, OpSelect1:
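
A hedged illustration of the arm64 additions to zeroUpper32Bits: a 32-bit multiply such as MULW already zeroes the upper half of the destination register, so the zero-extension for the uint64 conversion below can be dropped:

package main

import "fmt"

// widen's MULW result feeds a MOVWUreg; with MULW in the
// zeroUpper32Bits list, that extension is rewritten away.
func widen(a, b uint32) uint64 {
	return uint64(a * b)
}

func main() {
	fmt.Println(widen(3, 5)) // 15
}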

View File

@@ -8668,6 +8668,25 @@ func rewriteValueARM64_OpARM64MOVBreg(v *Value) bool {
v.AuxInt = int64ToAuxInt(int64(int8(c)))
return true
}
// match: (MOVBreg <t> (ANDconst x [c]))
// cond: uint64(c) & uint64(0xffffffffffffff80) == 0
// result: (ANDconst <t> x [c])
for {
t := v.Type
if v_0.Op != OpARM64ANDconst {
break
}
c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(uint64(c)&uint64(0xffffffffffffff80) == 0) {
break
}
v.reset(OpARM64ANDconst)
v.Type = t
v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
// match: (MOVBreg (SLLconst [lc] x))
// cond: lc < 8
// result: (SBFIZ [armBFAuxInt(lc, 8-lc)] x)
@@ -10746,6 +10765,25 @@ func rewriteValueARM64_OpARM64MOVHreg(v *Value) bool {
v.AuxInt = int64ToAuxInt(int64(int16(c)))
return true
}
// match: (MOVHreg <t> (ANDconst x [c]))
// cond: uint64(c) & uint64(0xffffffffffff8000) == 0
// result: (ANDconst <t> x [c])
for {
t := v.Type
if v_0.Op != OpARM64ANDconst {
break
}
c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(uint64(c)&uint64(0xffffffffffff8000) == 0) {
break
}
v.reset(OpARM64ANDconst)
v.Type = t
v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
// match: (MOVHreg (SLLconst [lc] x))
// cond: lc < 16
// result: (SBFIZ [armBFAuxInt(lc, 16-lc)] x)
@@ -11905,6 +11943,17 @@ func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool {
v.AuxInt = int64ToAuxInt(int64(uint32(c)))
return true
}
// match: (MOVWUreg x)
// cond: zeroUpper32Bits(x, 3)
// result: x
for {
x := v_0
if !(zeroUpper32Bits(x, 3)) {
break
}
v.copyOf(x)
return true
}
// match: (MOVWUreg (SLLconst [lc] x))
// cond: lc >= 32
// result: (MOVDconst [0])
@@ -12409,6 +12458,25 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool {
v.AuxInt = int64ToAuxInt(int64(int32(c)))
return true
}
// match: (MOVWreg <t> (ANDconst x [c]))
// cond: uint64(c) & uint64(0xffffffff80000000) == 0
// result: (ANDconst <t> x [c])
for {
t := v.Type
if v_0.Op != OpARM64ANDconst {
break
}
c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(uint64(c)&uint64(0xffffffff80000000) == 0) {
break
}
v.reset(OpARM64ANDconst)
v.Type = t
v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
// match: (MOVWreg (SLLconst [lc] x))
// cond: lc < 32
// result: (SBFIZ [armBFAuxInt(lc, 32-lc)] x)
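Aside: the three ANDconst rules above share one side condition: when the mask c has no bits at or above the sign bit of the narrow width, the masked value is non-negative in that width, so the MOVBreg/MOVHreg/MOVWreg sign extension is an identity and can be dropped. A quick check of the byte case, in plain Go rather than compiler types:

package main

import "fmt"

// movbregAfterAND checks the MOVBreg rule for one input: if
// uint64(c)&0xffffffffffffff80 == 0, then sign-extending the low
// byte of x&c changes nothing.
func movbregAfterAND(x, c int64) bool {
	if uint64(c)&0xffffffffffffff80 != 0 {
		return true // side condition fails; the rule would not fire
	}
	masked := x & c                      // the ANDconst result
	return int64(int8(masked)) == masked // MOVBreg is a no-op here
}

func main() {
	fmt.Println(movbregAfterAND(-1, 0x7f)) // true: 0x7f has no bit 7 set
}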

View File

@@ -14410,19 +14410,17 @@ func rewriteValuePPC64_OpSelect0(v *Value) bool {
v.copyOf(y)
return true
}
// match: (Select0 (ANDCCconst [0xFF] (MOVBreg x)))
// result: (MOVBZreg x)
// match: (Select0 (ANDCCconst [0xFF] y:(MOVBreg _)))
// result: y
for {
if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 0xFF {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64MOVBreg {
y := v_0.Args[0]
if y.Op != OpPPC64MOVBreg {
break
}
x := v_0_0.Args[0]
v.reset(OpPPC64MOVBZreg)
v.AddArg(x)
v.copyOf(y)
return true
}
// match: (Select0 (ANDCCconst [c] y:(MOVHZreg _)))
@@ -14440,19 +14438,36 @@ func rewriteValuePPC64_OpSelect0(v *Value) bool {
v.copyOf(y)
return true
}
// match: (Select0 (ANDCCconst [0xFFFF] (MOVHreg x)))
// result: (MOVHZreg x)
// match: (Select0 (ANDCCconst [0xFFFF] y:(MOVHreg _)))
// result: y
for {
if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 0xFFFF {
break
}
y := v_0.Args[0]
if y.Op != OpPPC64MOVHreg {
break
}
v.copyOf(y)
return true
}
// match: (Select0 (ANDCCconst [c] (MOVBreg x)))
// result: (Select0 (ANDCCconst [c&0xFF] x))
for {
if v_0.Op != OpPPC64ANDCCconst {
break
}
c := auxIntToInt64(v_0.AuxInt)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64MOVHreg {
if v_0_0.Op != OpPPC64MOVBreg {
break
}
x := v_0_0.Args[0]
v.reset(OpPPC64MOVHZreg)
v.AddArg(x)
v.reset(OpSelect0)
v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
v0.AuxInt = int64ToAuxInt(c & 0xFF)
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Select0 (ANDCCconst [c] (MOVBZreg x)))
@@ -14474,6 +14489,25 @@ func rewriteValuePPC64_OpSelect0(v *Value) bool {
v.AddArg(v0)
return true
}
// match: (Select0 (ANDCCconst [c] (MOVHreg x)))
// result: (Select0 (ANDCCconst [c&0xFFFF] x))
for {
if v_0.Op != OpPPC64ANDCCconst {
break
}
c := auxIntToInt64(v_0.AuxInt)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64MOVHreg {
break
}
x := v_0_0.Args[0]
v.reset(OpSelect0)
v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
v0.AuxInt = int64ToAuxInt(c & 0xFFFF)
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Select0 (ANDCCconst [c] (MOVHZreg x)))
// result: (Select0 (ANDCCconst [c&0xFFFF] x))
for {
@@ -14493,6 +14527,25 @@ func rewriteValuePPC64_OpSelect0(v *Value) bool {
v.AddArg(v0)
return true
}
// match: (Select0 (ANDCCconst [c] (MOVWreg x)))
// result: (Select0 (ANDCCconst [c&0xFFFFFFFF] x))
for {
if v_0.Op != OpPPC64ANDCCconst {
break
}
c := auxIntToInt64(v_0.AuxInt)
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpPPC64MOVWreg {
break
}
x := v_0_0.Args[0]
v.reset(OpSelect0)
v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
v0.AuxInt = int64ToAuxInt(c & 0xFFFFFFFF)
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Select0 (ANDCCconst [c] (MOVWZreg x)))
// result: (Select0 (ANDCCconst [c&0xFFFFFFFF] x))
for {
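Aside: the [0xFF] rules above turn on a small identity: masking a sign-extended byte with 0xFF yields exactly the zero-extension of that byte, which is why (ANDCCconst [0xFF] (MOVBreg x)) can be expressed via MOVBZreg x (the [0xFFFF]/MOVHreg pair is analogous). A quick arithmetic check of the identity, independent of the compiler:

package main

import "fmt"

func main() {
	for _, b := range []int8{-128, -1, 0, 1, 127} {
		se := int64(b)             // MOVBreg: sign-extend the byte
		ze := int64(uint8(b))      // MOVBZreg: zero-extend the byte
		fmt.Println(se&0xFF == ze) // true for every byte value
	}
}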

View File

@@ -53,10 +53,7 @@ func mightContainHeapPointer(ptr *Value, size int64, mem *Value, zeroes map[ID]Z
}
ptrSize := ptr.Block.Func.Config.PtrSize
if off%ptrSize != 0 {
return true // see issue 61187
}
if size%ptrSize != 0 {
if off%ptrSize != 0 || size%ptrSize != 0 {
ptr.Fatalf("unaligned pointer write")
}
if off < 0 || off+size > 64*ptrSize {
@@ -133,7 +130,7 @@ func needWBdst(ptr, mem *Value, zeroes map[ID]ZeroRegion) bool {
}
ptrSize := ptr.Block.Func.Config.PtrSize
if off%ptrSize != 0 {
return true // see issue 61187
ptr.Fatalf("unaligned pointer write")
}
if off < 0 || off >= 64*ptrSize {
// write goes off end of tracked offsets

View File

@@ -7083,21 +7083,8 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
// for an empty block this will be used for its control
// instruction. We won't use the actual liveness map on a
// control instruction. Just mark it something that is
// preemptible, unless this function is "all unsafe", or
// the empty block is in a write barrier.
unsafe := liveness.IsUnsafe(f)
if b.Kind == ssa.BlockPlain {
// Empty blocks that are part of write barriers need
// to have their control instructions marked unsafe.
c := b.Succs[0].Block()
for _, v := range c.Values {
if v.Op == ssa.OpWBend {
unsafe = true
break
}
}
}
s.pp.NextLive = objw.LivenessIndex{StackMapIndex: -1, IsUnsafePoint: unsafe}
// preemptible, unless this function is "all unsafe".
s.pp.NextLive = objw.LivenessIndex{StackMapIndex: -1, IsUnsafePoint: liveness.IsUnsafe(f)}
if idx, ok := argLiveBlockMap[b.ID]; ok && idx != argLiveIdx {
argLiveIdx = idx

View File

@@ -110,11 +110,11 @@ type Config struct {
// type checker will initialize this field with a newly created context.
Context *Context
// GoVersion describes the accepted Go language version. The string must
// start with a prefix of the form "go%d.%d" (e.g. "go1.20", "go1.21rc1", or
// "go1.21.0") or it must be empty; an empty string disables Go language
// version checks. If the format is invalid, invoking the type checker will
// result in an error.
// GoVersion describes the accepted Go language version. The string
// must follow the format "go%d.%d" (e.g. "go1.12") or it must be
// empty; an empty string disables Go language version checks.
// If the format is invalid, invoking the type checker will cause a
// panic.
GoVersion string
// If IgnoreFuncBodies is set, function bodies are not
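Aside: the same knob exists on the exported go/types.Config. A minimal sketch of setting it; the version string and source snippet are arbitrary examples:

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	const src = `package p
type Box[T any] struct{ v T }` // uses generics, a go1.18 feature

	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{
		GoVersion: "go1.17", // well-formed per the format above, but predates generics
		Importer:  importer.Default(),
	}
	if _, err := conf.Check("p", fset, []*ast.File{file}, nil); err != nil {
		fmt.Println(err) // a language-version error mentioning go1.18
	}
}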

View File

@@ -2070,29 +2070,6 @@ func TestIdenticalUnions(t *testing.T) {
}
}
func TestIssue61737(t *testing.T) {
// This test verifies that it is possible to construct invalid interfaces
// containing duplicate methods using the go/types API.
//
// It must be possible for importers to construct such invalid interfaces.
// Previously, this panicked.
sig1 := NewSignatureType(nil, nil, nil, NewTuple(NewParam(nopos, nil, "", Typ[Int])), nil, false)
sig2 := NewSignatureType(nil, nil, nil, NewTuple(NewParam(nopos, nil, "", Typ[String])), nil, false)
methods := []*Func{
NewFunc(nopos, nil, "M", sig1),
NewFunc(nopos, nil, "M", sig2),
}
embeddedMethods := []*Func{
NewFunc(nopos, nil, "M", sig2),
}
embedded := NewInterfaceType(embeddedMethods, nil)
iface := NewInterfaceType(methods, []Type{embedded})
iface.NumMethods() // unlike go/types, there is no Complete() method, so we complete implicitly
}
func TestIssue15305(t *testing.T) {
const src = "package p; func f() int16; var _ = f(undef)"
f := mustParse(src)

View File

@@ -576,11 +576,6 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
// If nargs == 1, make sure x.mode is either a value or a constant.
if x.mode != constant_ {
x.mode = value
// A value must not be untyped.
check.assignment(x, &emptyInterface, "argument to "+bin.name)
if x.mode == invalid {
return
}
}
// Use the final type computed above for all arguments.

View File

@@ -610,17 +610,20 @@ func (check *Checker) arguments(call *syntax.CallExpr, sig *Signature, targs []T
return // error already reported
}
// update result signature: instantiate if needed
// compute result signature: instantiate if needed
rsig = sig
if n > 0 {
rsig = check.instantiateSignature(call.Pos(), call.Fun, sig, targs[:n], xlist)
// If the callee's parameter list was adjusted we need to update (instantiate)
// it separately. Otherwise we can simply use the result signature's parameter
// list.
if adjusted {
sigParams = check.subst(call.Pos(), sigParams, makeSubstMap(tparams[:n], targs[:n]), nil, check.context()).(*Tuple)
} else {
sigParams = rsig.params
}
}
// Optimization: Only if the callee's parameter list was adjusted do we need to
// compute it from the adjusted list; otherwise we can simply use the result
// signature's parameter list. We only need the n type parameters and arguments
// of the callee.
if n > 0 && adjusted {
sigParams = check.subst(call.Pos(), sigParams, makeSubstMap(tparams[:n], targs[:n]), nil, check.context()).(*Tuple)
} else {
sigParams = rsig.params
}
// compute argument signatures: instantiate if needed

View File

@@ -73,6 +73,7 @@ func representableConst(x constant.Value, check *Checker, typ *Basic, rounded *c
sizeof := func(T Type) int64 {
s := conf.sizeof(T)
assert(s == 4 || s == 8)
return s
}

View File

@@ -96,7 +96,7 @@ func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type,
// Unify parameter and argument types for generic parameters with typed arguments
// and collect the indices of generic parameters with untyped arguments.
// Terminology: generic parameter = function parameter with a type-parameterized type
u := newUnifier(tparams, targs, check.allowVersion(check.pkg, pos, go1_21))
u := newUnifier(tparams, targs)
errorf := func(kind string, tpar, targ Type, arg *operand) {
// provide a better error message if we can

View File

@@ -900,23 +900,3 @@ func _cgoCheckResult(interface{})
*boolFieldAddr(cfg, "go115UsesCgo") = true
})
}
func TestIssue61931(t *testing.T) {
const src = `
package p
func A(func(any), ...any) {}
func B[T any](T) {}
func _() {
A(B, nil // syntax error: missing ',' before newline in argument list
}
`
f, err := syntax.Parse(syntax.NewFileBase(pkgName(src)), strings.NewReader(src), func(error) {}, nil, 0)
if err == nil {
t.Fatal("expected syntax error")
}
var conf Config
conf.Check(f.PkgName.Value, []*syntax.File{f}, nil) // must not panic
}

View File

@@ -6,6 +6,7 @@ package types2
import (
"cmd/compile/internal/syntax"
"fmt"
. "internal/types/errors"
"sort"
"strings"
@@ -211,6 +212,7 @@ func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_
// we can get rid of the mpos map below and simply use the cloned method's
// position.
var todo []*Func
var seen objset
var allMethods []*Func
mpos := make(map[*Func]syntax.Pos) // method specification or method embedding position, for good error messages
@@ -220,30 +222,36 @@ func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_
allMethods = append(allMethods, m)
mpos[m] = pos
case explicit:
if check != nil {
var err error_
err.code = DuplicateDecl
err.errorf(pos, "duplicate method %s", m.name)
err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
check.report(&err)
if check == nil {
panic(fmt.Sprintf("%s: duplicate method %s", m.pos, m.name))
}
// check != nil
var err error_
err.code = DuplicateDecl
err.errorf(pos, "duplicate method %s", m.name)
err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
check.report(&err)
default:
// We have a duplicate method name in an embedded (not explicitly declared) method.
// Check method signatures after all types are computed (go.dev/issue/33656).
// If we're pre-go1.14 (overlapping embeddings are not permitted), report that
// error here as well (even though we could do it eagerly) because it's the same
// error message.
if check != nil {
check.later(func() {
if !check.allowVersion(m.pkg, pos, go1_14) || !Identical(m.typ, other.Type()) {
var err error_
err.code = DuplicateDecl
err.errorf(pos, "duplicate method %s", m.name)
err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
check.report(&err)
}
}).describef(pos, "duplicate method check for %s", m.name)
if check == nil {
// check method signatures after all locally embedded interfaces are computed
todo = append(todo, m, other.(*Func))
break
}
// check != nil
check.later(func() {
if !check.allowVersion(m.pkg, pos, go1_14) || !Identical(m.typ, other.Type()) {
var err error_
err.code = DuplicateDecl
err.errorf(pos, "duplicate method %s", m.name)
err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
check.report(&err)
}
}).describef(pos, "duplicate method check for %s", m.name)
}
}
@@ -306,6 +314,15 @@ func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_
}
ityp.embedPos = nil // not needed anymore (errors have been reported)
// process todo's (this only happens if check == nil)
for i := 0; i < len(todo); i += 2 {
m := todo[i]
other := todo[i+1]
if !Identical(m.typ, other.typ) {
panic(fmt.Sprintf("%s: duplicate method %s", m.pos, m.name))
}
}
ityp.tset.comparable = allComparable
if len(allMethods) != 0 {
sortMethods(allMethods)

View File

@@ -53,6 +53,11 @@ const (
// the core types, if any, of non-local (unbound) type parameters.
enableCoreTypeUnification = true
// If enableInterfaceInference is set, type inference uses
// shared methods for improved type inference involving
// interfaces.
enableInterfaceInference = true
// If traceInference is set, unification will print a trace of its operation.
// Interpretation of trace:
// x ≡ y attempt to unify types x and y
@@ -76,16 +81,15 @@ type unifier struct {
// that inferring the type for a given type parameter P will
// automatically infer the same type for all other parameters
// unified (joined) with P.
handles map[*TypeParam]*Type
depth int // recursion depth during unification
enableInterfaceInference bool // use shared methods for better inference
handles map[*TypeParam]*Type
depth int // recursion depth during unification
}
// newUnifier returns a new unifier initialized with the given type parameter
// and corresponding type argument lists. The type argument list may be shorter
// than the type parameter list, and it may contain nil types. Matching type
// parameters and arguments must have the same index.
func newUnifier(tparams []*TypeParam, targs []Type, enableInterfaceInference bool) *unifier {
func newUnifier(tparams []*TypeParam, targs []Type) *unifier {
assert(len(tparams) >= len(targs))
handles := make(map[*TypeParam]*Type, len(tparams))
// Allocate all handles up-front: in a correct program, all type parameters
@@ -99,7 +103,7 @@ func newUnifier(tparams []*TypeParam, targs []Type, enableInterfaceInference boo
}
handles[x] = &t
}
return &unifier{handles, 0, enableInterfaceInference}
return &unifier{handles, 0}
}
// unifyMode controls the behavior of the unifier.
@@ -335,7 +339,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
// we will fail at function instantiation or argument assignment time.
//
// If we have at least one defined type, there is one in y.
if ny, _ := y.(*Named); mode&exact == 0 && ny != nil && isTypeLit(x) && !(u.enableInterfaceInference && IsInterface(x)) {
if ny, _ := y.(*Named); mode&exact == 0 && ny != nil && isTypeLit(x) && !(enableInterfaceInference && IsInterface(x)) {
if traceInference {
u.tracef("%s ≡ under %s", x, ny)
}
@@ -401,40 +405,18 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
// Therefore, we must fail unification (go.dev/issue/60933).
return false
}
// If we have inexact unification and one of x or y is a defined type, select the
// defined type. This ensures that in a series of types, all matching against the
// same type parameter, we infer a defined type if there is one, independent of
// order. Type inference or assignment may fail, which is ok.
// Selecting a defined type, if any, ensures that we don't lose the type name;
// and since we have inexact unification, a value of equally named or matching
// undefined type remains assignable (go.dev/issue/43056).
// If y is a defined type, make sure we record that type
// for type parameter x, which may have until now only
// recorded an underlying type (go.dev/issue/43056).
// Either both types are interfaces, or neither type is.
// If both are interfaces, they have the same methods.
//
// Similarly, if we have inexact unification and there are no defined types but
// channel types, select a directed channel, if any. This ensures that in a series
// of unnamed types, all matching against the same type parameter, we infer the
// directed channel if there is one, independent of order.
// Selecting a directional channel, if any, ensures that a value of another
// inexactly unifying channel type remains assignable (go.dev/issue/62157).
//
// If we have multiple defined channel types, they are either identical or we
// have assignment conflicts, so we can ignore directionality in this case.
//
// If we have defined and literal channel types, a defined type wins to avoid
// order dependencies.
if mode&exact == 0 {
switch {
case xn:
// x is a defined type: nothing to do.
case yn:
// x is not a defined type and y is a defined type: select y.
u.set(px, y)
default:
// Neither x nor y are defined types.
if yc, _ := under(y).(*Chan); yc != nil && yc.dir != SendRecv {
// y is a directed channel type: select y.
u.set(px, y)
}
}
// Note: Changing the recorded type for a type parameter to
// a defined type is only ok when unification is inexact.
// But in exact unification, if we have a match, x and y must
// be identical, so changing the recorded type for x is a no-op.
if yn {
u.set(px, y)
}
return true
}
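Aside: the defined-type selection described in the comments above is visible from the source language. A small illustration of the go.dev/issue/43056 behavior; it should compile with a Go 1.21-era toolchain, and the names are illustrative:

package main

type List []int

// first forces both arguments to unify with the same type parameter P.
func first[P any](a, b P) P { return a }

func main() {
	// P is inferred as the defined type List rather than []int, and
	// the []int literal remains assignable to List, so argument order
	// does not matter.
	_ = first(List{1}, []int{2})
}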
@@ -455,12 +437,12 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
emode |= exact
}
	// If u.enableInterfaceInference is set and we don't require exact unification,
	// If enableInterfaceInference is set and we don't require exact unification,
// if both types are interfaces, one interface must have a subset of the
// methods of the other and corresponding method signatures must unify.
// If only one type is an interface, all its methods must be present in the
// other type and corresponding method signatures must unify.
if u.enableInterfaceInference && mode&exact == 0 {
if enableInterfaceInference && mode&exact == 0 {
// One or both interfaces may be defined types.
// Look under the name, but not under type parameters (go.dev/issue/60564).
xi := asInterface(x)
@@ -523,7 +505,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
}
// All xmethods must exist in ymethods and corresponding signatures must unify.
for _, xm := range xmethods {
if ym := ymap[xm.Id()]; ym == nil || !u.nify(xm.typ, ym.typ, exact, p) {
if ym := ymap[xm.Id()]; ym == nil || !u.nify(xm.typ, ym.typ, emode, p) {
return false
}
}
@@ -544,7 +526,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
xmethods := xi.typeSet().methods
for _, xm := range xmethods {
obj, _, _ := LookupFieldOrMethod(y, false, xm.pkg, xm.name)
if ym, _ := obj.(*Func); ym == nil || !u.nify(xm.typ, ym.typ, exact, p) {
if ym, _ := obj.(*Func); ym == nil || !u.nify(xm.typ, ym.typ, emode, p) {
return false
}
}
@@ -650,7 +632,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
}
case *Interface:
assert(!u.enableInterfaceInference || mode&exact != 0) // handled before this switch
assert(!enableInterfaceInference || mode&exact != 0) // handled before this switch
// Two interface types unify if they have the same set of methods with
// the same names, and corresponding function types unify.
@@ -703,7 +685,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
}
for i, f := range a {
g := b[i]
if f.Id() != g.Id() || !u.nify(f.typ, g.typ, exact, q) {
if f.Id() != g.Id() || !u.nify(f.typ, g.typ, emode, q) {
return false
}
}

View File

@@ -135,11 +135,7 @@ func walkClear(n *ir.UnaryExpr) ir.Node {
typ := n.X.Type()
switch {
case typ.IsSlice():
if n := arrayClear(n.X.Pos(), n.X, nil); n != nil {
return n
}
// If n == nil, we are clearing an array which takes zero memory, do nothing.
return ir.NewBlockStmt(n.Pos(), nil)
return arrayClear(n.X.Pos(), n.X, nil)
case typ.IsMap():
return mapClear(n.X, reflectdata.TypePtrAt(n.X.Pos(), n.X.Type()))
}
@@ -255,10 +251,7 @@ func walkLenCap(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
return mkcall("countrunes", n.Type(), init, typecheck.Conv(n.X.(*ir.ConvExpr).X, types.Types[types.TSTRING]))
}
if isByteCount(n) {
conv := n.X.(*ir.ConvExpr)
walkStmtList(conv.Init())
init.Append(ir.TakeInit(conv)...)
_, len := backingArrayPtrLen(cheapExpr(conv.X, init))
_, len := backingArrayPtrLen(cheapExpr(n.X.(*ir.ConvExpr).X, init))
return len
}

View File

@@ -278,10 +278,8 @@ func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) {
} else {
ptr.SetType(n.Type().Elem().PtrTo())
}
ptr.SetTypecheck(1)
length = ir.NewUnaryExpr(base.Pos, ir.OLEN, n)
length.SetType(types.Types[types.TINT])
length.SetTypecheck(1)
return ptr, length
}

View File

@@ -3,78 +3,82 @@
// license that can be found in the LICENSE file.
/*
Covdata is a program for manipulating and generating reports
from 2nd-generation coverage testing output files, those produced
from running applications or integration tests. E.g.
$ mkdir ./profiledir
$ go build -cover -o myapp.exe .
$ GOCOVERDIR=./profiledir ./myapp.exe <arguments>
$ ls ./profiledir
covcounters.cce1b350af34b6d0fb59cc1725f0ee27.821598.1663006712821344241
covmeta.cce1b350af34b6d0fb59cc1725f0ee27
$
Run covdata via "go tool covdata <mode>", where 'mode' is a subcommand
selecting a specific reporting, merging, or data manipulation operation.
Descriptions of the various modes follow (run "go tool covdata <mode> -help" for
specifics on usage of a given mode):
1. Report percent of statements covered in each profiled package
$ go tool covdata percent -i=profiledir
cov-example/p coverage: 41.1% of statements
main coverage: 87.5% of statements
$
2. Report import paths of packages profiled
$ go tool covdata pkglist -i=profiledir
cov-example/p
main
$
3. Report percent statements covered by function:
$ go tool covdata func -i=profiledir
cov-example/p/p.go:12: emptyFn 0.0%
cov-example/p/p.go:32: Small 100.0%
cov-example/p/p.go:47: Medium 90.9%
...
$
4. Convert coverage data to legacy textual format:
$ go tool covdata textfmt -i=profiledir -o=cov.txt
$ head cov.txt
mode: set
cov-example/p/p.go:12.22,13.2 0 0
cov-example/p/p.go:15.31,16.2 1 0
cov-example/p/p.go:16.3,18.3 0 0
cov-example/p/p.go:19.3,21.3 0 0
...
$ go tool cover -html=cov.txt
$
5. Merge profiles together:
$ go tool covdata merge -i=indir1,indir2 -o=outdir -modpaths=github.com/go-delve/delve
$
6. Subtract one profile from another
$ go tool covdata subtract -i=indir1,indir2 -o=outdir
$
7. Intersect profiles
$ go tool covdata intersect -i=indir1,indir2 -o=outdir
$
8. Dump a profile for debugging purposes.
$ go tool covdata debugdump -i=indir
<human readable output>
$
//
// Covdata is a program for manipulating and generating reports
// from 2nd-generation coverage testing output files, those produced
// from running applications or integration tests. E.g.
//
// $ mkdir ./profiledir
// $ go build -cover -o myapp.exe .
// $ GOCOVERDIR=./profiledir ./myapp.exe <arguments>
// $ ls ./profiledir
// covcounters.cce1b350af34b6d0fb59cc1725f0ee27.821598.1663006712821344241
// covmeta.cce1b350af34b6d0fb59cc1725f0ee27
// $
//
// Run covdata via "go tool covdata <mode>", where 'mode' is a subcommand
// selecting a specific reporting, merging, or data manipulation operation.
// Descriptions of the various modes follow (run "go tool covdata <mode> -help" for
// specifics on usage of a given mode):
//
// 1. Report percent of statements covered in each profiled package
//
// $ go tool covdata percent -i=profiledir
// cov-example/p coverage: 41.1% of statements
// main coverage: 87.5% of statements
// $
//
//
// 2. Report import paths of packages profiled
//
// $ go tool covdata pkglist -i=profiledir
// cov-example/p
// main
// $
//
// 3. Report percent statements covered by function:
//
// $ go tool covdata func -i=profiledir
// cov-example/p/p.go:12: emptyFn 0.0%
// cov-example/p/p.go:32: Small 100.0%
// cov-example/p/p.go:47: Medium 90.9%
// ...
// $
//
// 4. Convert coverage data to legacy textual format:
//
// $ go tool covdata textfmt -i=profiledir -o=cov.txt
// $ head cov.txt
// mode: set
// cov-example/p/p.go:12.22,13.2 0 0
// cov-example/p/p.go:15.31,16.2 1 0
// cov-example/p/p.go:16.3,18.3 0 0
// cov-example/p/p.go:19.3,21.3 0 0
// ...
// $ go tool cover -html=cov.txt
// $
//
// 5. Merge profiles together:
//
// $ go tool covdata merge -i=indir1,indir2 -o=outdir -modpaths=github.com/go-delve/delve
// $
//
// 6. Subtract one profile from another
//
// $ go tool covdata subtract -i=indir1,indir2 -o=outdir
// $
//
// 7. Intersect profiles
//
// $ go tool covdata intersect -i=indir1,indir2 -o=outdir
// $
//
// 8. Dump a profile for debugging purposes.
//
// $ go tool covdata debugdump -i=indir
// <human readable output>
// $
//
*/
package main

src/cmd/dist/test.go vendored
View File

@@ -91,29 +91,6 @@ type work struct {
end chan bool
}
// printSkip prints a skip message for the given unit of work.
func (w *work) printSkip(t *tester, msg string) {
if t.json {
type event struct {
Time time.Time
Action string
Package string
Output string `json:",omitempty"`
}
enc := json.NewEncoder(&w.out)
ev := event{Time: time.Now(), Package: w.dt.name, Action: "start"}
enc.Encode(ev)
ev.Action = "output"
ev.Output = msg
enc.Encode(ev)
ev.Action = "skip"
ev.Output = ""
enc.Encode(ev)
return
}
fmt.Fprintln(&w.out, msg)
}
// A distTest is a test run by dist test.
// Each test has a unique name and belongs to a group (heading)
type distTest struct {
@@ -428,9 +405,6 @@ func (opts *goTest) buildArgs(t *tester) (build, run, pkgs, testFlags []string,
if opts.timeout != 0 {
d := opts.timeout * time.Duration(t.timeoutScale)
run = append(run, "-timeout="+d.String())
} else if t.timeoutScale != 1 {
const goTestDefaultTimeout = 10 * time.Minute // Default value of go test -timeout flag.
run = append(run, "-timeout="+(goTestDefaultTimeout*time.Duration(t.timeoutScale)).String())
}
if opts.short || t.short {
run = append(run, "-short")
@@ -1261,7 +1235,7 @@ func (t *tester) runPending(nextTest *distTest) {
go func(w *work) {
if !<-w.start {
timelog("skip", w.dt.name)
w.printSkip(t, "skipped due to earlier error")
w.out.WriteString("skipped due to earlier error\n")
} else {
timelog("start", w.dt.name)
w.err = w.cmd.Run()
@@ -1272,7 +1246,7 @@ func (t *tester) runPending(nextTest *distTest) {
if isUnsupportedVMASize(w) {
timelog("skip", w.dt.name)
w.out.Reset()
w.printSkip(t, "skipped due to unsupported VMA")
w.out.WriteString("skipped due to unsupported VMA\n")
w.err = nil
}
}

View File

@@ -44,7 +44,7 @@ type fileInfo struct {
func (i fileInfo) Name() string { return path.Base(i.f.Name) }
func (i fileInfo) ModTime() time.Time { return i.f.Time }
func (i fileInfo) Mode() fs.FileMode { return i.f.Mode }
func (i fileInfo) IsDir() bool { return i.f.Mode&fs.ModeDir != 0 }
func (i fileInfo) IsDir() bool { return false }
func (i fileInfo) Size() int64 { return i.f.Size }
func (i fileInfo) Sys() any { return nil }

View File

@@ -329,47 +329,8 @@ func writeTgz(name string, a *Archive) {
zw := check(gzip.NewWriterLevel(out, gzip.BestCompression))
tw := tar.NewWriter(zw)
// Find the mode and mtime to use for directory entries,
// based on the mode and mtime of the first file we see.
// We know that modes and mtimes are uniform across the archive.
var dirMode fs.FileMode
var mtime time.Time
for _, f := range a.Files {
dirMode = fs.ModeDir | f.Mode | (f.Mode&0444)>>2 // copy r bits down to x bits
mtime = f.Time
break
}
// mkdirAll ensures that the tar file contains directory
// entries for dir and all its parents. Some programs reading
// these tar files expect that. See go.dev/issue/61862.
haveDir := map[string]bool{".": true}
var mkdirAll func(string)
mkdirAll = func(dir string) {
if dir == "/" {
panic("mkdirAll /")
}
if haveDir[dir] {
return
}
haveDir[dir] = true
mkdirAll(path.Dir(dir))
df := &File{
Name: dir + "/",
Time: mtime,
Mode: dirMode,
}
h := check(tar.FileInfoHeader(df.Info(), ""))
h.Name = dir + "/"
if err := tw.WriteHeader(h); err != nil {
panic(err)
}
}
for _, f = range a.Files {
h := check(tar.FileInfoHeader(f.Info(), ""))
mkdirAll(path.Dir(f.Name))
h.Name = f.Name
if err := tw.WriteHeader(h); err != nil {
panic(err)

View File

@@ -4,12 +4,12 @@ go 1.21
require (
github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26
golang.org/x/arch v0.4.0
golang.org/x/mod v0.12.0
golang.org/x/sync v0.3.0
golang.org/x/sys v0.10.0
golang.org/x/term v0.10.0
golang.org/x/tools v0.11.1-0.20230712164437-1ca21856af7b
golang.org/x/arch v0.3.0
golang.org/x/mod v0.10.1-0.20230606122920-62c7e578f1a7
golang.org/x/sync v0.2.1-0.20230601203510-93782cc822b6
golang.org/x/sys v0.9.0
golang.org/x/term v0.9.0
golang.org/x/tools v0.9.4-0.20230613194514-c6c983054920
)
require github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2 // indirect

View File

@@ -2,15 +2,15 @@ github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbu
github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2 h1:rcanfLhLDA8nozr/K289V1zcntHr3V+SHlXwzz1ZI2g=
github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
golang.org/x/arch v0.4.0 h1:A8WCeEWhLwPBKNbFi5Wv5UTCBx5zzubnXDlMOFAzFMc=
golang.org/x/arch v0.4.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
golang.org/x/tools v0.11.1-0.20230712164437-1ca21856af7b h1:KIZCni6lCdxd4gxHx49Zp9mhckTFRbI/ZPDbR3jKu90=
golang.org/x/tools v0.11.1-0.20230712164437-1ca21856af7b/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8=
golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k=
golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/mod v0.10.1-0.20230606122920-62c7e578f1a7 h1:OSEstGpBW1+G0wiXI0bBgOnI8nRJQKX3GCNxF75VR1s=
golang.org/x/mod v0.10.1-0.20230606122920-62c7e578f1a7/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/sync v0.2.1-0.20230601203510-93782cc822b6 h1:kiysxTbHE5FVnrNyc9BC/yeJi3DTUBHIJtNbC9uvXk4=
golang.org/x/sync v0.2.1-0.20230601203510-93782cc822b6/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s=
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.9.0 h1:GRRCnKYhdQrD8kfRAdQ6Zcw1P0OcELxGLKJvtjVMZ28=
golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo=
golang.org/x/tools v0.9.4-0.20230613194514-c6c983054920 h1:FJIPEU9owLOeJgghpx63YhobtkWkORJ3O5ZnbFr8Bzs=
golang.org/x/tools v0.9.4-0.20230613194514-c6c983054920/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=

View File

@@ -490,43 +490,25 @@ func findGOROOT(env string) string {
// depend on the executable's location.
return def
}
// canonical returns a directory path that represents
// the same directory as dir,
// preferring the spelling in def if the two are the same.
canonical := func(dir string) string {
if isSameDir(def, dir) {
return def
}
return dir
}
exe, err := os.Executable()
if err == nil {
exe, err = filepath.Abs(exe)
if err == nil {
// cmd/go may be installed in GOROOT/bin or GOROOT/bin/GOOS_GOARCH,
// depending on whether it was cross-compiled with a different
// GOHOSTOS (see https://go.dev/issue/62119). Try both.
if dir := filepath.Join(exe, "../.."); isGOROOT(dir) {
return canonical(dir)
// If def (runtime.GOROOT()) and dir are the same
// directory, prefer the spelling used in def.
if isSameDir(def, dir) {
return def
}
return dir
}
if dir := filepath.Join(exe, "../../.."); isGOROOT(dir) {
return canonical(dir)
}
// Depending on what was passed on the command line, it is possible
// that os.Executable is a symlink (like /usr/local/bin/go) referring
// to a binary installed in a real GOROOT elsewhere
// (like /usr/lib/go/bin/go).
// Try to find that GOROOT by resolving the symlinks.
exe, err = filepath.EvalSymlinks(exe)
if err == nil {
if dir := filepath.Join(exe, "../.."); isGOROOT(dir) {
return canonical(dir)
}
if dir := filepath.Join(exe, "../../.."); isGOROOT(dir) {
return canonical(dir)
if isSameDir(def, dir) {
return def
}
return dir
}
}
}
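Aside: a condensed sketch of the executable-relative probe above. isGOROOT is left as a parameter here; the real function also prefers runtime.GOROOT's spelling via canonical and retries after resolving symlinks:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// gorootFromExecutable looks for a GOROOT two levels up from the
// running binary (GOROOT/bin/go) and, for cross-compiled commands,
// three levels up (GOROOT/bin/GOOS_GOARCH/go, see go.dev/issue/62119).
func gorootFromExecutable(isGOROOT func(string) bool) string {
	exe, err := os.Executable()
	if err != nil {
		return ""
	}
	if exe, err = filepath.Abs(exe); err != nil {
		return ""
	}
	if dir := filepath.Join(exe, "../.."); isGOROOT(dir) {
		return dir
	}
	if dir := filepath.Join(exe, "../../.."); isGOROOT(dir) {
		return dir
	}
	return "" // caller may retry after filepath.EvalSymlinks(exe)
}

func main() {
	looksLikeGOROOT := func(dir string) bool {
		_, err := os.Stat(filepath.Join(dir, "pkg", "tool"))
		return err == nil
	}
	fmt.Println(gorootFromExecutable(looksLikeGOROOT))
}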

View File

@@ -179,9 +179,6 @@ func parse(x string) version {
// Parse prerelease.
i := 0
for i < len(x) && (x[i] < '0' || '9' < x[i]) {
if x[i] < 'a' || 'z' < x[i] {
return version{}
}
i++
}
if i == 0 {

View File

@@ -95,25 +95,6 @@ var prevTests = []testCase1[string, string]{
{"1.40000000000000000", "1.39999999999999999"},
}
func TestIsValid(t *testing.T) { test1(t, isValidTests, "IsValid", IsValid) }
var isValidTests = []testCase1[string, bool]{
{"1.2rc3", true},
{"1.2.3", true},
{"1.999testmod", true},
{"1.600+auto", false},
{"1.22", true},
{"1.21.0", true},
{"1.21rc2", true},
{"1.21", true},
{"1.20.0", true},
{"1.20", true},
{"1.19", true},
{"1.3", true},
{"1.2", true},
{"1", true},
}
type testCase1[In, Out any] struct {
in In
out Out

View File

@@ -15,20 +15,13 @@ import (
// FromToolchain returns the Go version for the named toolchain,
// derived from the name itself (not by running the toolchain).
// A toolchain is named "goVERSION".
// A suffix after the VERSION introduced by a -, space, or tab is removed.
// A suffix after the VERSION introduced by a +, -, space, or tab is removed.
// Examples:
//
// FromToolchain("go1.2.3") == "1.2.3"
// FromToolchain("go1.2.3-bigcorp") == "1.2.3"
// FromToolchain("invalid") == ""
func FromToolchain(name string) string {
if strings.ContainsAny(name, "\\/") {
// The suffix must not include a path separator, since that would cause
// exec.LookPath to resolve it from a relative directory instead of from
// $PATH.
return ""
}
var v string
if strings.HasPrefix(name, "go") {
v = name[2:]
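Aside: a sketch of the derivation documented above, following the side of this diff that rejects path separators and cuts at '-', space, or tab; the real function's gover-based validation is omitted:

package main

import (
	"fmt"
	"strings"
)

// fromToolchain maps a toolchain name of the form "goVERSION[-suffix]"
// to VERSION, or "" if the name does not have that shape.
func fromToolchain(name string) string {
	if strings.ContainsAny(name, `\/`) {
		return "" // a path separator would confuse exec.LookPath later
	}
	v, ok := strings.CutPrefix(name, "go")
	if !ok {
		return ""
	}
	if i := strings.IndexAny(v, "- \t"); i >= 0 {
		v = v[:i] // drop "-bigcorp"-style suffixes
	}
	return v
}

func main() {
	fmt.Println(fromToolchain("go1.2.3-bigcorp")) // 1.2.3
	fmt.Println(fromToolchain("invalid"))         // empty
}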

View File

@@ -959,10 +959,7 @@ func collectDepsErrors(p *load.Package) {
if len(stkj) != 0 {
return true
}
return p.DepsErrors[i].Err.Error() < p.DepsErrors[j].Err.Error()
} else if len(stkj) == 0 {
return false
}
pathi, pathj := stki[len(stki)-1], stkj[len(stkj)-1]
return pathi < pathj

View File

@@ -473,7 +473,6 @@ func recompileForTest(pmain, preal, ptest, pxtest *Package) *PackageError {
p.Target = ""
p.Internal.BuildInfo = nil
p.Internal.ForceLibrary = true
p.Internal.PGOProfile = preal.Internal.PGOProfile
}
// Update p.Internal.Imports to use test copies.
@@ -497,11 +496,6 @@ func recompileForTest(pmain, preal, ptest, pxtest *Package) *PackageError {
if p.Name == "main" && p != pmain && p != ptest {
split()
}
// Split and attach PGO information to test dependencies if preal
// is built with PGO.
if preal.Internal.PGOProfile != "" && p.Internal.PGOProfile == "" {
split()
}
}
// Do search to find cycle.

View File

@@ -60,15 +60,6 @@ func (r *toolchainRepo) Versions(ctx context.Context, prefix string) (*Versions,
}
}
// Always include our own version.
// This means that the development branch of Go 1.21 (say) will allow 'go get go@1.21'
// even though there are no Go 1.21 releases yet.
// Once there is a release, 1.21 will be treated as a query matching the latest available release.
// Before then, 1.21 will be treated as a query that resolves to this entry we are adding (1.21).
if v := gover.Local(); !have[v] {
list = append(list, goPrefix+v)
}
if r.path == "go" {
sort.Slice(list, func(i, j int) bool {
return gover.Compare(list[i], list[j]) < 0
@@ -83,38 +74,21 @@ func (r *toolchainRepo) Versions(ctx context.Context, prefix string) (*Versions,
}
func (r *toolchainRepo) Stat(ctx context.Context, rev string) (*RevInfo, error) {
// If we're asking about "go" (not "toolchain"), pretend to have
// all earlier Go versions available without network access:
// we will provide those ourselves, at least in GOTOOLCHAIN=auto mode.
if r.path == "go" && gover.Compare(rev, gover.Local()) <= 0 {
return &RevInfo{Version: rev}, nil
}
// Convert rev to DL version and stat that to make sure it exists.
// In theory the go@ versions should be like 1.21.0
// and the toolchain@ versions should be like go1.21.0
// but people will type the wrong one, and so we accept
// both and silently correct it to the standard form.
prefix := ""
v := rev
v = strings.TrimPrefix(v, "go")
if r.path == "toolchain" {
prefix = "go"
}
if !gover.IsValid(v) {
return nil, fmt.Errorf("invalid %s version %s", r.path, rev)
}
// If we're asking about "go" (not "toolchain"), pretend to have
// all earlier Go versions available without network access:
// we will provide those ourselves, at least in GOTOOLCHAIN=auto mode.
if r.path == "go" && gover.Compare(v, gover.Local()) <= 0 {
return &RevInfo{Version: prefix + v}, nil
}
// Similarly, if we're asking about *exactly* the current toolchain,
// we don't need to access the network to know that it exists.
if r.path == "toolchain" && v == gover.Local() {
return &RevInfo{Version: prefix + v}, nil
}
if gover.IsLang(v) {
// We can only use a language (development) version if the current toolchain
// implements that version, and the two checks above have ruled that out.
return nil, fmt.Errorf("go language version %s is not a toolchain version", rev)
}

View File

@@ -239,13 +239,10 @@ func (q *query) matchesPath(path string) bool {
// canMatchInModule reports whether the given module path can potentially
// contain q.pattern.
func (q *query) canMatchInModule(mPath string) bool {
if gover.IsToolchain(mPath) {
return false
}
if q.canMatchWildcardInModule != nil {
return q.canMatchWildcardInModule(mPath)
}
return str.HasPathPrefix(q.pattern, mPath)
return str.HasPathPrefix(q.pattern, mPath) && !gover.IsToolchain(mPath)
}
// pathOnce invokes f to generate the pathSet for the given path,

View File

@@ -110,13 +110,7 @@ func ListModules(ctx context.Context, args []string, mode ListMode, reuseFile st
if err == nil {
requirements = rs
// TODO(#61605): The extra ListU clause fixes a problem with Go 1.21rc3
// where "go mod tidy" and "go list -m -u all" fight over whether the go.sum
// should be considered up-to-date. The fix for now is to always treat the
// go.sum as up-to-date during list -m -u. Probably the right fix is more targeted,
// but in general list -u is looking up other checksums in the checksum database
// that won't be necessary later, so it makes sense not to write the go.sum back out.
if !ExplicitWriteGoMod && mode&ListU == 0 {
if !ExplicitWriteGoMod {
err = commitRequirements(ctx, WriteOpts{})
}
}

View File

@@ -473,11 +473,7 @@ func newQueryMatcher(path string, query, current string, allowed AllowedFunc) (*
// AllowedFunc of qm.
func (qm *queryMatcher) allowsVersion(ctx context.Context, v string) bool {
if qm.prefix != "" && !strings.HasPrefix(v, qm.prefix) {
if gover.IsToolchain(qm.path) && strings.TrimSuffix(qm.prefix, ".") == v {
// Allow 1.21 to match "1.21." prefix.
} else {
return false
}
return false
}
if qm.filter != nil && !qm.filter(v) {
return false
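Aside: the carve-out above exists because a query like "1.21" produces the prefix "1.21.", which a plain HasPrefix rejects for the bare version "1.21" itself. A sketch of the check, with gover.IsToolchain replaced by an inline stand-in:

package main

import (
	"fmt"
	"strings"
)

// allows mirrors the prefix test above for a candidate version v.
func allows(path, prefix, v string) bool {
	isToolchain := path == "go" || path == "toolchain" // stand-in for gover.IsToolchain
	if prefix != "" && !strings.HasPrefix(v, prefix) {
		if isToolchain && strings.TrimSuffix(prefix, ".") == v {
			return true // let 1.21 match the "1.21." prefix
		}
		return false
	}
	return true
}

func main() {
	fmt.Println(allows("go", "1.21.", "1.21.3")) // true: ordinary prefix match
	fmt.Println(allows("go", "1.21.", "1.21"))   // true only via the carve-out
}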

View File

@@ -1363,87 +1363,65 @@ func (r *runTestActor) Act(b *work.Builder, ctx context.Context, a *work.Action)
ctx, cancel := context.WithTimeout(ctx, testKillTimeout)
defer cancel()
// Now we're ready to actually run the command.
//
// If the -o flag is set, or if at some point we change cmd/go to start
// copying test executables into the build cache, we may run into spurious
// ETXTBSY errors on Unix platforms (see https://go.dev/issue/22315).
//
// Since we know what causes those, and we know that they should resolve
// quickly (the ETXTBSY error will resolve as soon as the subprocess
// holding the descriptor open reaches its 'exec' call), we retry them
// in a loop.
cmd := exec.CommandContext(ctx, args[0], args[1:]...)
cmd.Dir = a.Package.Dir
env := slices.Clip(cfg.OrigEnv)
env = base.AppendPATH(env)
env = base.AppendPWD(env, cmd.Dir)
cmd.Env = env
if addToEnv != "" {
cmd.Env = append(cmd.Env, addToEnv)
}
cmd.Stdout = stdout
cmd.Stderr = stdout
// If there are any local SWIG dependencies, we want to load
// the shared library from the build directory.
if a.Package.UsesSwig() {
env := cmd.Env
found := false
prefix := "LD_LIBRARY_PATH="
for i, v := range env {
if strings.HasPrefix(v, prefix) {
env[i] = v + ":."
found = true
break
}
}
if !found {
env = append(env, "LD_LIBRARY_PATH=.")
}
cmd.Env = env
}
var (
cmd *exec.Cmd
t0 time.Time
cancelKilled = false
cancelSignaled = false
)
for {
cmd = exec.CommandContext(ctx, args[0], args[1:]...)
cmd.Dir = a.Package.Dir
env := slices.Clip(cfg.OrigEnv)
env = base.AppendPATH(env)
env = base.AppendPWD(env, cmd.Dir)
cmd.Env = env
if addToEnv != "" {
cmd.Env = append(cmd.Env, addToEnv)
}
cmd.Stdout = stdout
cmd.Stderr = stdout
// If there are any local SWIG dependencies, we want to load
// the shared library from the build directory.
if a.Package.UsesSwig() {
env := cmd.Env
found := false
prefix := "LD_LIBRARY_PATH="
for i, v := range env {
if strings.HasPrefix(v, prefix) {
env[i] = v + ":."
found = true
break
}
}
if !found {
env = append(env, "LD_LIBRARY_PATH=.")
}
cmd.Env = env
}
cmd.Cancel = func() error {
if base.SignalTrace == nil {
err := cmd.Process.Kill()
if err == nil {
cancelKilled = true
}
return err
}
// Send a quit signal in the hope that the program will print
// a stack trace and exit.
err := cmd.Process.Signal(base.SignalTrace)
cmd.Cancel = func() error {
if base.SignalTrace == nil {
err := cmd.Process.Kill()
if err == nil {
cancelSignaled = true
cancelKilled = true
}
return err
}
cmd.WaitDelay = testWaitDelay
base.StartSigHandlers()
t0 = time.Now()
err = cmd.Run()
if !isETXTBSY(err) {
// We didn't hit the race in #22315, so there is no reason to retry the
// command.
break
// Send a quit signal in the hope that the program will print
// a stack trace and exit.
err := cmd.Process.Signal(base.SignalTrace)
if err == nil {
cancelSignaled = true
}
return err
}
cmd.WaitDelay = testWaitDelay
base.StartSigHandlers()
t0 := time.Now()
err = cmd.Run()
out := buf.Bytes()
a.TestOutput = &buf
t := fmt.Sprintf("%.3fs", time.Since(t0).Seconds())
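Aside: the loop above is the workaround for go.dev/issue/22315, with the Unix-only error classification split into the isETXTBSY files shown below. A minimal Unix-only sketch of the same retry pattern:

//go:build unix

package main

import (
	"errors"
	"os/exec"
	"syscall"
)

// runRetrying re-runs the command until the failure is anything other
// than the transient ETXTBSY race: another process still holds the
// freshly written binary open for write, and the error clears as soon
// as that process reaches its exec.
func runRetrying(bin string, args ...string) error {
	for {
		cmd := exec.Command(bin, args...)
		err := cmd.Run()
		if !errors.Is(err, syscall.ETXTBSY) {
			return err
		}
	}
}

func main() {
	_ = runRetrying("/bin/true")
}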

View File

@@ -1,12 +0,0 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !unix
package test
func isETXTBSY(err error) bool {
// syscall.ETXTBSY is only meaningful on Unix platforms.
return false
}

View File

@@ -1,16 +0,0 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build unix
package test
import (
"errors"
"syscall"
)
func isETXTBSY(err error) bool {
return errors.Is(err, syscall.ETXTBSY)
}

View File

@@ -61,7 +61,7 @@ func init() {
cf.String("run", "", "")
cf.Bool("short", false, "")
cf.String("skip", "", "")
cf.DurationVar(&testTimeout, "timeout", 10*time.Minute, "") // known to cmd/dist
cf.DurationVar(&testTimeout, "timeout", 10*time.Minute, "")
cf.String("fuzztime", "", "")
cf.String("fuzzminimizetime", "", "")
cf.StringVar(&testTrace, "trace", "", "")

View File

@@ -131,7 +131,7 @@ func Select() {
} else {
min, suffix, plus := strings.Cut(gotoolchain, "+") // go1.2.3+auto
if min != "local" {
v := gover.FromToolchain(min)
v := gover.FromToolchain(gotoolchain)
if v == "" {
if plus {
base.Fatalf("invalid GOTOOLCHAIN %q: invalid minimum toolchain %q", gotoolchain, min)

View File

@@ -212,22 +212,16 @@ func get(security SecurityMode, url *urlpkg.URL) (*Response, error) {
}
}
if err != nil {
// Per the docs for [net/http.Client.Do], “On error, any Response can be
// ignored. A non-nil Response with a non-nil error only occurs when
// CheckRedirect fails, and even then the returned Response.Body is
// already closed.”
if res == nil || res.Body == nil {
release()
return nil, nil, err
} else {
body := res.Body
res.Body = hookCloser{
ReadCloser: body,
afterClose: release,
}
}
// “If the returned error is nil, the Response will contain a non-nil Body
// which the user is expected to close.”
body := res.Body
res.Body = hookCloser{
ReadCloser: body,
afterClose: release,
}
return url, res, err
}
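Aside: hookCloser itself is not shown in this hunk; the shape below is an assumption consistent with how it is used above, namely forwarding reads and running afterClose (the release callback) exactly when the body is closed:

package main

import (
	"fmt"
	"io"
	"strings"
)

// hookCloser wraps a body so a callback fires after Close.
type hookCloser struct {
	io.ReadCloser
	afterClose func()
}

func (c hookCloser) Close() error {
	err := c.ReadCloser.Close()
	c.afterClose()
	return err
}

func main() {
	body := io.NopCloser(strings.NewReader("payload"))
	wrapped := hookCloser{ReadCloser: body, afterClose: func() { fmt.Println("released") }}
	io.Copy(io.Discard, wrapped)
	wrapped.Close() // prints "released" once the body is closed
}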

View File

@@ -175,11 +175,7 @@ func main() {
if used > 0 {
helpArg += " " + strings.Join(args[:used], " ")
}
cmdName := cfg.CmdName
if cmdName == "" {
cmdName = args[0]
}
fmt.Fprintf(os.Stderr, "go %s: unknown command\nRun 'go help%s' for usage.\n", cmdName, helpArg)
fmt.Fprintf(os.Stderr, "go %s: unknown command\nRun 'go help%s' for usage.\n", cfg.CmdName, helpArg)
base.SetExitStatus(2)
base.Exit()
}

View File

@@ -49,7 +49,6 @@ func scriptConditions() map[string]script.Cond {
add("git", lazyBool("the 'git' executable exists and provides the standard CLI", hasWorkingGit))
add("GODEBUG", script.PrefixCondition("GODEBUG contains <suffix>", hasGodebug))
add("GOEXPERIMENT", script.PrefixCondition("GOEXPERIMENT <suffix> is enabled", hasGoexperiment))
add("go-builder", script.BoolCondition("GO_BUILDER_NAME is non-empty", testenv.Builder() != ""))
add("link", lazyBool("testenv.HasLink()", testenv.HasLink))
add("mismatched-goroot", script.Condition("test's GOROOT_FINAL does not match the real GOROOT", isMismatchedGoroot))
add("msan", sysCondition("-msan", platform.MSanSupported, true))

View File

@@ -398,8 +398,6 @@ The available conditions are:
GOOS/GOARCH supports -fuzz with instrumentation
[git]
the 'git' executable exists and provides the standard CLI
[go-builder]
GO_BUILDER_NAME is non-empty
[link]
testenv.HasLink()
[mismatched-goroot]

View File

@@ -45,12 +45,6 @@ stderr 'compile.*-pgoprofile=.*b(/|\\\\)default\.pgo.*b(/|\\\\)b_test\.go'
stderr 'compile.*-pgoprofile=.*b(/|\\\\)default\.pgo.*dep(/|\\\\)dep\.go'
! stderr 'compile.*-pgoprofile=.*nopgo(/|\\\\)nopgo_test\.go'
# test-only dependencies also have profiles attached
stderr 'compile.*-pgoprofile=.*a(/|\\\\)default\.pgo.*testdep(/|\\\\)testdep\.go'
stderr 'compile.*-pgoprofile=.*b(/|\\\\)default\.pgo.*testdep(/|\\\\)testdep\.go'
stderr 'compile.*-pgoprofile=.*a(/|\\\\)default\.pgo.*testdep2(/|\\\\)testdep2\.go'
stderr 'compile.*-pgoprofile=.*b(/|\\\\)default\.pgo.*testdep2(/|\\\\)testdep2\.go'
# go list -deps prints packages built multiple times.
go list -pgo=auto -deps ./a ./b ./nopgo
stdout 'test/dep \[test/a\]'
@@ -72,7 +66,6 @@ func main() {}
-- a/a_test.go --
package main
import "testing"
import _ "test/testdep"
func TestA(*testing.T) {}
-- a/default.pgo --
-- b/b.go --
@@ -83,7 +76,6 @@ func main() {}
-- b/b_test.go --
package main
import "testing"
import _ "test/testdep"
func TestB(*testing.T) {}
-- b/default.pgo --
-- nopgo/nopgo.go --
@@ -102,8 +94,3 @@ import _ "test/dep3"
package dep2
-- dep3/dep3.go --
package dep3
-- testdep/testdep.go --
package testdep
import _ "test/testdep2"
-- testdep2/testdep2.go --
package testdep2

View File

@@ -1,2 +0,0 @@
! go asdf
stderr '^go asdf: unknown command'

View File

@@ -1,5 +1,4 @@
[compiler:gccgo] skip
[short] skip 'builds and links another cmd/go'
mkdir $WORK/new/bin
@@ -10,18 +9,15 @@ mkdir $WORK/new/bin
# new cmd/go is built.
env GOROOT_FINAL=
# $GOROOT/bin/go is whatever the user has already installed
# (using make.bash or similar). We can't make assumptions about what
# options it may have been built with, such as -trimpath or GOROOT_FINAL.
# Instead, we build a fresh copy of the binary with known settings.
go build -o $WORK/new/bin/go$GOEXE cmd/go &
go build -trimpath -o $WORK/bin/check$GOEXE check.go &
go build -o $WORK/bin/check$GOEXE check.go &
wait
env TESTGOROOT=$GOROOT
env GOROOT=
# Relocated Executable
# cp $TESTGOROOT/bin/go$GOEXE $WORK/new/bin/go$GOEXE
exec $WORK/bin/check$GOEXE $WORK/new/bin/go$GOEXE $TESTGOROOT
# Relocated Tree:

View File

@@ -1,91 +0,0 @@
# Regression test for https://go.dev/issue/62119:
# A 'go' command cross-compiled with a different GOHOSTOS
# should be able to locate its GOROOT using os.Executable.
#
# (This also tests a 'go' command built with -trimpath
# that is not cross-compiled, since we need to build that
# configuration for the test anyway.)
[short] skip 'builds and links another cmd/go'
mkdir $WORK/new/bin
mkdir $WORK/new/bin/${GOOS}_${GOARCH}
# In this test, we are specifically checking the logic for deriving
# the value of GOROOT from os.Executable when runtime.GOROOT is
# trimmed away.
# GOROOT_FINAL changes the default behavior of runtime.GOROOT,
# so we explicitly clear it to remove it as a confounding variable.
env GOROOT_FINAL=
# $GOROOT/bin/go is whatever the user has already installed
# (using make.bash or similar). We can't make assumptions about what
# options it may have been built with, such as -trimpath or GOROOT_FINAL.
# Instead, we build a fresh copy of the binary with known settings.
go build -trimpath -o $WORK/new/bin/go$GOEXE cmd/go &
go build -trimpath -o $WORK/bin/check$GOEXE check.go &
wait
env TESTGOROOT=$GOROOT
env GOROOT=
# Relocated Executable
# Since we built with -trimpath and the binary isn't installed in a
# normal-looking GOROOT, this command should fail.
! exec $WORK/new/bin/go$GOEXE env GOROOT
stderr '^go: cannot find GOROOT directory: ''go'' binary is trimmed and GOROOT is not set$'
# Cross-compiled binaries in cmd are installed to a ${GOOS}_${GOARCH} subdirectory,
# so we also want to try a copy there.
# (Note that the script engine's 'exec' engine already works around
# https://go.dev/issue/22315, so we don't have to do that explicitly in the
# 'check' program we use later.)
cp $WORK/new/bin/go$GOEXE $WORK/new/bin/${GOOS}_${GOARCH}/go$GOEXE
! exec $WORK/new/bin/${GOOS}_${GOARCH}/go$GOEXE env GOROOT
stderr '^go: cannot find GOROOT directory: ''go'' binary is trimmed and GOROOT is not set$'
# Relocated Tree:
# If the binary is sitting in a bin dir next to ../pkg/tool, that counts as a GOROOT,
# so it should find the new tree.
mkdir $WORK/new/pkg/tool
exec $WORK/bin/check$GOEXE $WORK/new/bin/go$GOEXE $WORK/new
exec $WORK/bin/check$GOEXE $WORK/new/bin/${GOOS}_${GOARCH}/go$GOEXE $WORK/new
-- check.go --
package main
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
)
func main() {
exe := os.Args[1]
want := os.Args[2]
cmd := exec.Command(exe, "env", "GOROOT")
out, err := cmd.CombinedOutput()
if err != nil {
fmt.Fprintf(os.Stderr, "%s env GOROOT: %v, %s\n", exe, err, out)
os.Exit(1)
}
goroot, err := filepath.EvalSymlinks(strings.TrimSpace(string(out)))
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
want, err = filepath.EvalSymlinks(want)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
if !strings.EqualFold(goroot, want) {
fmt.Fprintf(os.Stderr, "go env GOROOT:\nhave %s\nwant %s\n", goroot, want)
os.Exit(1)
}
fmt.Fprintf(os.Stderr, "go env GOROOT: %s\n", goroot)
}

View File

@@ -34,9 +34,9 @@ env GOTOOLCHAIN=go1.600+auto
go version
stdout go1.600
env GOTOOLCHAIN=go1.400.0+auto
env GOTOOLCHAIN=go1.400+auto
go version
stdout go1.400.0
stdout go1.400
# GOTOOLCHAIN=version+path sets a minimum too.
env GOTOOLCHAIN=go1.600+path

View File

@@ -43,13 +43,11 @@ env GOSUMDB=$oldsumdb
# Test a real GOTOOLCHAIN
[short] skip
[!net:golang.org] skip
[!net:sum.golang.org] skip
[!GOOS:darwin] [!GOOS:windows] [!GOOS:linux] skip
[!GOARCH:amd64] [!GOARCH:arm64] skip
env GOPROXY=
[go-builder] env GOSUMDB=
[!go-builder] env GOSUMDB=sum.golang.org # Set explicitly in case GOROOT/go.env is modified.
env GOSUMDB=
env GOTOOLCHAIN=go1.20.1
# Avoid resolving a "go1.20.1" from the user's real $PATH.

View File

@@ -1,15 +1,6 @@
[!net:proxy.golang.org] skip
[!net:golang.org] skip
# In the Go project's official release GOPROXY defaults to proxy.golang.org,
# but it may be changed in GOROOT/go.env (such as in third-party
# distributions).
#
# Make sure it is in use here, because the server for releases not served
# through the proxy (https://golang.org/toolchain?go-get=1) currently only
# serves the latest patch release for each of the supported stable releases.
[go-builder] env GOPROXY=
[!go-builder] env GOPROXY=https://proxy.golang.org
env GOPROXY=
go list -m -versions go
stdout 1.20.1 # among others

View File

@@ -1,13 +1,8 @@
# Expect no panic
go list -f '{{if .DepsErrors}}{{.DepsErrors}}{{end}}' -export -e -deps
cmpenv stdout wanterr_59905
cmpenv stdout wanterr
# Expect no panic (Issue 61816)
cp level1b_61816.txt level1b/pkg.go
go list -f '{{if .DepsErrors}}{{.DepsErrors}}{{end}}' -export -e -deps
cmpenv stdout wanterr_61816
-- wanterr_59905 --
-- wanterr --
[# test/main/level1a
level1a${/}pkg.go:5:2: level2x redeclared in this block
level1a${/}pkg.go:4:2: other declaration of level2x
@@ -19,23 +14,6 @@ level1b${/}pkg.go:5:2: level2x redeclared in this block
level1b${/}pkg.go:5:2: "test/main/level1b/level2y" imported as level2x and not used
level1b${/}pkg.go:8:39: undefined: level2y
]
-- wanterr_61816 --
[level1b${/}pkg.go:4:2: package foo is not in std ($GOROOT${/}src${/}foo)]
[# test/main/level1a
level1a${/}pkg.go:5:2: level2x redeclared in this block
level1a${/}pkg.go:4:2: other declaration of level2x
level1a${/}pkg.go:5:2: "test/main/level1a/level2y" imported as level2x and not used
level1a${/}pkg.go:8:39: undefined: level2y
level1b${/}pkg.go:4:2: package foo is not in std ($GOROOT${/}src${/}foo)]
-- level1b_61816.txt --
package level1b
import (
"foo"
)
func Print() { println(level2x.Value, level2y.Value) }
-- go.mod --
module test/main

View File

@@ -1,19 +0,0 @@
# golang.org/issue/29591: 'go get' was following plain-HTTP redirects even without -insecure (now replaced by GOINSECURE).
# golang.org/issue/61877: 'go get' would panic in case of an insecure redirect in module mode
[!git] skip
env GOPRIVATE=vcs-test.golang.org
! go get -d vcs-test.golang.org/insecure/go/insecure
stderr 'redirected .* to insecure URL'
[short] stop 'builds a git repo'
env GOINSECURE=vcs-test.golang.org/insecure/go/insecure
go get -d vcs-test.golang.org/insecure/go/insecure
-- go.mod --
module example
go 1.21

View File

@@ -1,5 +1,5 @@
# setup
env TESTGO_VERSION=go1.99rc1
env TESTGO_VERSION=go1.99.0
env TESTGO_VERSION_SWITCH=switch
# go get go should use the latest Go 1.23
@@ -7,28 +7,28 @@ cp go.mod.orig go.mod
go get go
stderr '^go: upgraded go 1.21 => 1.23.9$'
grep 'go 1.23.9' go.mod
grep 'toolchain go1.99rc1' go.mod
grep 'toolchain go1.99.0' go.mod
# go get go@1.23 should use the latest Go 1.23
cp go.mod.orig go.mod
go get go@1.23
stderr '^go: upgraded go 1.21 => 1.23.9$'
grep 'go 1.23.9' go.mod
grep 'toolchain go1.99rc1' go.mod
grep 'toolchain go1.99.0' go.mod
# go get go@1.22 should use the latest Go 1.22
cp go.mod.orig go.mod
go get go@1.22
stderr '^go: upgraded go 1.21 => 1.22.9$'
grep 'go 1.22.9' go.mod
grep 'toolchain go1.99rc1' go.mod
grep 'toolchain go1.99.0' go.mod
# go get go@patch should use the latest patch release
go get go@1.22.1
go get go@patch
stderr '^go: upgraded go 1.22.1 => 1.22.9$'
grep 'go 1.22.9' go.mod
grep 'toolchain go1.99rc1' go.mod
grep 'toolchain go1.99.0' go.mod
# go get go@1.24 does NOT find the release candidate
cp go.mod.orig go.mod
@@ -40,20 +40,20 @@ cp go.mod.orig go.mod
go get go@1.24rc1
stderr '^go: upgraded go 1.21 => 1.24rc1$'
grep 'go 1.24rc1' go.mod
grep 'toolchain go1.99rc1' go.mod
grep 'toolchain go1.99.0' go.mod
# go get go@latest finds the latest Go 1.23
cp go.mod.orig go.mod
go get go@latest
stderr '^go: upgraded go 1.21 => 1.23.9$'
grep 'go 1.23.9' go.mod
grep 'toolchain go1.99rc1' go.mod
grep 'toolchain go1.99.0' go.mod
# Again, with toolchains.
# go get toolchain should find go1.999testmod.
go get toolchain
stderr '^go: upgraded toolchain go1.99rc1 => go1.999testmod$'
stderr '^go: upgraded toolchain go1.99.0 => go1.999testmod$'
grep 'go 1.23.9' go.mod
grep 'toolchain go1.999testmod' go.mod
@@ -96,33 +96,6 @@ stderr '^go: added toolchain go1.999testmod$'
grep 'go 1.21' go.mod
grep 'toolchain go1.999testmod' go.mod
# Bug fixes.
# go get go@garbage should fail but not crash
! go get go@garbage
! stderr panic
stderr '^go: invalid go version garbage$'
# go get go@go1.21.0 is OK - we silently correct to 1.21.0
go get go@1.19
go get go@go1.21.0
stderr '^go: upgraded go 1.19 => 1.21.0'
# go get toolchain@1.24rc1 is OK too.
go get toolchain@1.24rc1
stderr '^go: downgraded toolchain go1.999testmod => go1.24rc1$'
# go get go@1.21 should work if we are the Go 1.21 language version,
# even though there's no toolchain for it.
# (Older versions resolve to the latest release in that version, so for example
# go get go@1.20 might resolve to 1.20.9, but if we're the devel copy of
# Go 1.21, there's no release yet to resolve to, so we resolve to ourselves.)
env TESTGO_VERSION=go1.21
go get go@1.19 toolchain@none
go get go@1.21
grep 'go 1.21$' go.mod
! grep toolchain go.mod
-- go.mod.orig --
module m

View File

@@ -10,9 +10,9 @@ env GOPROXY=
env GOSUMDB=
# github.com/russross/blackfriday v2.0.0+incompatible exists,
# and should be resolved if we ask for it explicitly.
# and should be resolved if we ask for v2.0 explicitly.
go list -m github.com/russross/blackfriday@v2.0.0+incompatible
go list -m github.com/russross/blackfriday@v2.0
stdout '^github.com/russross/blackfriday v2\.0\.0\+incompatible$'
# blackfriday v1.5.2 has a go.mod file, so v1.5.2 should be preferred over
@@ -27,7 +27,6 @@ stdout '^github.com/russross/blackfriday v1\.'
! go list -m github.com/russross/blackfriday@patch
stderr '^go: github.com/russross/blackfriday@patch: can''t query version "patch" of module github.com/russross/blackfriday: no existing version is required$'
# If we're fetching directly from version control, ignored +incompatible
# versions should also be omitted by 'go list'.
@@ -39,23 +38,10 @@ stderr '^go: github.com/russross/blackfriday@patch: can''t query version "patch"
[!git] stop
env GOPROXY=direct
go list -versions -m github.com/russross/blackfriday
go list -versions -m github.com/russross/blackfriday github.com/russross/blackfriday
stdout '^github.com/russross/blackfriday v1\.5\.1 v1\.5\.2' # and possibly others
! stdout ' v2\.'
# For this module, v2.1.0 exists and has a go.mod file.
# 'go list -m github.com/russross/blackfriday@v2.0' will check
# the latest v2.0 tag, discover that it isn't the right module, and stop there
# (instead of spending the time to check O(N) previous tags).
! go list -m github.com/russross/blackfriday@v2.0
stderr '^go: module github.com/russross/blackfriday: no matching versions for query "v2\.0\"'
# (But asking for exactly v2.0.0+incompatible should still succeed.)
go list -m github.com/russross/blackfriday@v2.0.0+incompatible
stdout '^github.com/russross/blackfriday v2\.0\.0\+incompatible$'
# However, if the latest compatible version does not include a go.mod file,
# +incompatible versions should still be listed, as they may still reflect the
# intent of the module author.

View File

@@ -1,9 +1,7 @@
[!net:proxy.golang.org] skip
[!net:sum.golang.org] skip
env GO111MODULE=on
[go-builder] env GOSUMDB=
[!go-builder] env GOSUMDB=sum.golang.org # Set explicitly in case GOROOT/go.env is modified.
env GOSUMDB=
env GOPATH=$WORK/gopath1
# With a file-based proxy with an empty checksum directory,

View File

@@ -1,13 +1,13 @@
# Test default GOPROXY and GOSUMDB
[go-builder] env GOPROXY=
[go-builder] env GOSUMDB=
[go-builder] go env GOPROXY
[go-builder] stdout '^https://proxy.golang.org,direct$'
[go-builder] go env GOSUMDB
[go-builder] stdout '^sum.golang.org$'
[go-builder] env GOPROXY=https://proxy.golang.org
[go-builder] go env GOSUMDB
[go-builder] stdout '^sum.golang.org$'
env GOPROXY=
env GOSUMDB=
go env GOPROXY
stdout '^https://proxy.golang.org,direct$'
go env GOSUMDB
stdout '^sum.golang.org$'
env GOPROXY=https://proxy.golang.org
go env GOSUMDB
stdout '^sum.golang.org$'
# Download direct from github.
@@ -26,8 +26,8 @@ cp go.sum saved.sum
# files not listed in go.sum.
go clean -modcache
env GOSUMDB=sum.golang.org
env GOPROXY=https://proxy.golang.org,direct
env GOSUMDB=
env GOPROXY=
go list -x -m all # Download go.mod files.
! stderr github

View File

@@ -1,32 +0,0 @@
[!exec:/bin/sh] skip
chmod 0777 go1.999999-/run.sh
chmod 0777 run.sh
! go list all
! stdout 'RAN SCRIPT'
cd subdir
! go list all
! stdout 'RAN SCRIPT'
-- go.mod --
module exploit
go 1.21
toolchain go1.999999-/run.sh
-- go1.999999-/run.sh --
#!/bin/sh
printf 'RAN SCRIPT\n'
exit 1
-- run.sh --
#!/bin/sh
printf 'RAN SCRIPT\n'
exit 1
-- subdir/go.mod --
module exploit
go 1.21
toolchain go1.999999-/../../run.sh
-- subdir/go1.999999-/README.txt --
heh heh heh

View File

@@ -2,10 +2,6 @@
# Verify test -c can output multiple executables to a directory.
# This test also serves as a regression test for https://go.dev/issue/62221:
# prior to the fix for that issue, it occasionally failed with ETXTBSY when
# run on Unix platforms.
go test -c -o $WORK/some/nonexisting/directory/ ./pkg/...
exists -exec $WORK/some/nonexisting/directory/pkg1.test$GOEXE
exists -exec $WORK/some/nonexisting/directory/pkg2.test$GOEXE
@@ -47,4 +43,4 @@ package pkg1
package pkg2
-- anotherpkg/pkg1/pkg1_test.go --
package pkg1
package pkg1

View File

@@ -2067,22 +2067,17 @@ func instructionsForProg(p *obj.Prog) []*instruction {
return instructionsForStore(p, ins.as, p.To.Reg)
case ALRW, ALRD:
// Set aq to use acquire access ordering
// Set aq to use acquire access ordering, which matches Go's memory requirements.
ins.funct7 = 2
ins.rs1, ins.rs2 = uint32(p.From.Reg), REG_ZERO
case AADDI, AANDI, AORI, AXORI:
inss = instructionsForOpImmediate(p, ins.as, p.Reg)
case ASCW, ASCD:
// Set release access ordering
ins.funct7 = 1
ins.rd, ins.rs1, ins.rs2 = uint32(p.RegTo2), uint32(p.To.Reg), uint32(p.From.Reg)
case AAMOSWAPW, AAMOSWAPD, AAMOADDW, AAMOADDD, AAMOANDW, AAMOANDD, AAMOORW, AAMOORD,
case ASCW, ASCD, AAMOSWAPW, AAMOSWAPD, AAMOADDW, AAMOADDD, AAMOANDW, AAMOANDD, AAMOORW, AAMOORD,
AAMOXORW, AAMOXORD, AAMOMINW, AAMOMIND, AAMOMINUW, AAMOMINUD, AAMOMAXW, AAMOMAXD, AAMOMAXUW, AAMOMAXUD:
// Set aqrl to use acquire & release access ordering
ins.funct7 = 3
// Set aq to use acquire access ordering, which matches Go's memory requirements.
ins.funct7 = 2
ins.rd, ins.rs1, ins.rs2 = uint32(p.RegTo2), uint32(p.To.Reg), uint32(p.From.Reg)
case AECALL, AEBREAK, ARDCYCLE, ARDTIME, ARDINSTRET:
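For orientation: the funct7 values assigned above encode the RISC-V A-extension ordering bits. Within funct7, bit 0 is rl (release) and bit 1 is aq (acquire), so the values 1, 2, and 3 select release, acquire, and acquire-plus-release ordering. A minimal sketch of that encoding (constant names are illustrative, not the assembler's):

package main

import "fmt"

// RISC-V atomic (A-extension) ordering bits as they sit inside funct7:
// instruction bit 25 (funct7 bit 0) is rl, bit 26 (funct7 bit 1) is aq.
const (
	ordRL   uint32 = 1 << 0        // release ordering only      (funct7 = 1)
	ordAQ   uint32 = 1 << 1        // acquire ordering only      (funct7 = 2)
	ordAQRL        = ordAQ | ordRL // acquire and release        (funct7 = 3)
)

func main() {
	for _, f7 := range []uint32{ordRL, ordAQ, ordAQRL} {
		fmt.Printf("funct7=%d aq=%v rl=%v\n", f7, f7&ordAQ != 0, f7&ordRL != 0)
	}
}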

View File

@@ -446,7 +446,7 @@ func machoreloc1(arch *sys.Arch, out *ld.OutBuf, ldr *loader.Loader, s loader.Sy
rs := r.Xsym
rt := r.Type
if rt == objabi.R_PCREL || rt == objabi.R_GOTPCREL || rt == objabi.R_CALL || ldr.SymType(rs) == sym.SHOSTOBJ || ldr.SymType(s) == sym.SINITARR {
if ldr.SymType(rs) == sym.SHOSTOBJ || rt == objabi.R_PCREL || rt == objabi.R_GOTPCREL || rt == objabi.R_CALL {
if ldr.SymDynid(rs) < 0 {
ldr.Errorf(s, "reloc %d (%s) to non-macho symbol %s type=%d (%s)", rt, sym.RelocName(arch, rt), ldr.SymName(rs), ldr.SymType(rs), ldr.SymType(rs))
return false

View File

@@ -545,11 +545,10 @@ func machoreloc1(arch *sys.Arch, out *ld.OutBuf, ldr *loader.Loader, s loader.Sy
}
}
if rt == objabi.R_CALLARM64 ||
if ldr.SymType(rs) == sym.SHOSTOBJ || rt == objabi.R_CALLARM64 ||
rt == objabi.R_ARM64_PCREL_LDST8 || rt == objabi.R_ARM64_PCREL_LDST16 ||
rt == objabi.R_ARM64_PCREL_LDST32 || rt == objabi.R_ARM64_PCREL_LDST64 ||
rt == objabi.R_ADDRARM64 || rt == objabi.R_ARM64_GOTPCREL ||
ldr.SymType(rs) == sym.SHOSTOBJ || ldr.SymType(s) == sym.SINITARR {
rt == objabi.R_ADDRARM64 || rt == objabi.R_ARM64_GOTPCREL {
if ldr.SymDynid(rs) < 0 {
ldr.Errorf(s, "reloc %d (%s) to non-macho symbol %s type=%d (%s)", rt, sym.RelocName(arch, rt), ldr.SymName(rs), ldr.SymType(rs), ldr.SymType(rs))
return false

View File

@@ -368,9 +368,7 @@ func (st *relocSymState) relocsym(s loader.Sym, P []byte) {
o = 0
}
} else if target.IsDarwin() {
if ldr.SymType(rs) != sym.SHOSTOBJ && ldr.SymType(s) != sym.SINITARR {
// ld-prime drops the offset in data for SINITARR. We need to use
// symbol-targeted relocation. See also machoreloc1.
if ldr.SymType(rs) != sym.SHOSTOBJ {
o += ldr.SymValue(rs)
}
} else if target.IsWindows() {

View File

@@ -992,11 +992,6 @@ func typeSymbolMangle(name string) string {
if strings.HasPrefix(name, "type:runtime.") {
return name
}
if strings.HasPrefix(name, "go:string.") {
// String symbols will be grouped to a single go:string.* symbol.
// No need to mangle individual symbol names.
return name
}
if len(name) <= 14 && !strings.Contains(name, "@") { // Issue 19529
return name
}
@@ -1011,7 +1006,7 @@ func typeSymbolMangle(name string) string {
// instantiated symbol, replace type name in []
i := strings.IndexByte(name, '[')
j := strings.LastIndexByte(name, ']')
if j == -1 || j <= i {
if j == -1 {
j = len(name)
}
hash := notsha256.Sum256([]byte(name[i+1 : j]))
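As a rough standalone illustration of the scheme above: the linker keeps the symbol prefix and replaces the instantiation text between the brackets with a truncated hash. In this sketch crypto/sha256 stands in for the internal notsha256 package, and the function name is mine:

package main

import (
	"crypto/sha256" // stand-in for cmd/internal/notsha256
	"encoding/base64"
	"fmt"
	"strings"
)

// mangle shortens an instantiated type symbol such as
// "type:main.Pair[go.shape.int]" by hashing the text in brackets.
func mangle(name string) string {
	i := strings.IndexByte(name, '[')
	if i < 0 {
		return name
	}
	j := strings.LastIndexByte(name, ']')
	if j == -1 || j <= i {
		j = len(name)
	}
	sum := sha256.Sum256([]byte(name[i+1 : j]))
	return name[:i+1] + base64.StdEncoding.EncodeToString(sum[:6]) + name[j:]
}

func main() {
	fmt.Println(mangle("type:main.Pair[go.shape.int,go.shape.string]"))
}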

View File

@@ -833,9 +833,9 @@ func asmbMacho(ctxt *Link) {
ml.data[2] = uint32(linkoff + s1 + s2 + s3 + s4 + s5) /* stroff */
ml.data[3] = uint32(s6) /* strsize */
if ctxt.LinkMode != LinkExternal {
machodysymtab(ctxt, linkoff+s1+s2)
machodysymtab(ctxt, linkoff+s1+s2)
if ctxt.LinkMode != LinkExternal {
ml := newMachoLoad(ctxt.Arch, LC_LOAD_DYLINKER, 6)
ml.data[0] = 12 /* offset to string */
stringtouint32(ml.data[1:], "/usr/lib/dyld")

View File

@@ -222,7 +222,7 @@ type peLoaderState struct {
var comdatDefinitions = make(map[string]int64)
// Load loads the PE file pn from input.
// Symbols from the object file are created via the loader 'l',
// Symbols from the object file are created via the loader 'l', and
// and a slice of the text symbols is returned.
// If an .rsrc section or set of .rsrc$xx sections is found, its symbols are
// returned as rsrc.

View File

@@ -83,12 +83,6 @@ func GoSyntax(inst Inst, pc uint64, symname SymLookup) string {
}
}
if inst.Op == CMP {
// Use reads-left-to-right ordering for comparisons.
// See issue 60920.
args[0], args[1] = args[1], args[0]
}
if args != nil {
op += " " + strings.Join(args, ", ")
}

View File

@@ -13,7 +13,7 @@ import (
"sync"
)
// Regexp is a wrapper around [regexp.Regexp], where the underlying regexp will be
// Regexp is a wrapper around regexp.Regexp, where the underlying regexp will be
// compiled the first time it is needed.
type Regexp struct {
str string
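A minimal sketch of the lazy-compilation pattern this type implements, using sync.Once so the pattern compiles at most once, on first use (close in spirit to the internal package, not its exact code):

package lazyregexp

import (
	"regexp"
	"sync"
)

// Regexp delays regexp.MustCompile until the first method call.
type Regexp struct {
	str  string
	once sync.Once
	rx   *regexp.Regexp
}

func New(str string) *Regexp { return &Regexp{str: str} }

func (r *Regexp) re() *regexp.Regexp {
	r.once.Do(func() { r.rx = regexp.MustCompile(r.str) })
	return r.rx
}

func (r *Regexp) MatchString(s string) bool { return r.re().MatchString(s) }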

View File

@@ -65,7 +65,7 @@ type Comments struct {
}
// Comment returns the receiver. This isn't useful by itself, but
// a [Comments] struct is embedded into all the expression
// a Comments struct is embedded into all the expression
// implementation types, and this gives each of those a Comment
// method to satisfy the Expr interface.
func (c *Comments) Comment() *Comments {

View File

@@ -5,17 +5,17 @@
// Package modfile implements a parser and formatter for go.mod files.
//
// The go.mod syntax is described in
// https://pkg.go.dev/cmd/go/#hdr-The_go_mod_file.
// https://golang.org/cmd/go/#hdr-The_go_mod_file.
//
// The [Parse] and [ParseLax] functions both parse a go.mod file and return an
// The Parse and ParseLax functions both parse a go.mod file and return an
// abstract syntax tree. ParseLax ignores unknown statements and may be used to
// parse go.mod files that may have been developed with newer versions of Go.
//
// The [File] struct returned by Parse and ParseLax represents an abstract
// go.mod file. File has several methods like [File.AddNewRequire] and
// [File.DropReplace] that can be used to programmatically edit a file.
// The File struct returned by Parse and ParseLax represents an abstract
// go.mod file. File has several methods like AddNewRequire and DropReplace
// that can be used to programmatically edit a file.
//
// The [Format] function formats a File back to a byte slice which can be
// The Format function formats a File back to a byte slice which can be
// written to a file.
package modfile
@@ -226,7 +226,7 @@ var dontFixRetract VersionFixer = func(_, vers string) (string, error) {
// data is the content of the file.
//
// fix is an optional function that canonicalizes module versions.
// If fix is nil, all module versions must be canonical ([module.CanonicalVersion]
// If fix is nil, all module versions must be canonical (module.CanonicalVersion
// must return the same string).
func Parse(file string, data []byte, fix VersionFixer) (*File, error) {
return parseToFile(file, data, fix, true)
@@ -923,7 +923,7 @@ func (f *File) Format() ([]byte, error) {
}
// Cleanup cleans up the file f after any edit operations.
// To avoid quadratic behavior, modifications like [File.DropRequire]
// To avoid quadratic behavior, modifications like DropRequire
// clear the entry but do not remove it from the slice.
// Cleanup cleans out all the cleared entries.
func (f *File) Cleanup() {
@@ -1075,8 +1075,8 @@ func (f *File) AddNewRequire(path, vers string, indirect bool) {
// The requirements in req must specify at most one distinct version for each
// module path.
//
// If any existing requirements may be removed, the caller should call
// [File.Cleanup] after all edits are complete.
// If any existing requirements may be removed, the caller should call Cleanup
// after all edits are complete.
func (f *File) SetRequire(req []*Require) {
type elem struct {
version string

View File

@@ -34,7 +34,7 @@ type Use struct {
// data is the content of the file.
//
// fix is an optional function that canonicalizes module versions.
// If fix is nil, all module versions must be canonical ([module.CanonicalVersion]
// If fix is nil, all module versions must be canonical (module.CanonicalVersion
// must return the same string).
func ParseWork(file string, data []byte, fix VersionFixer) (*WorkFile, error) {
fs, err := parse(file, data)
@@ -83,7 +83,7 @@ func ParseWork(file string, data []byte, fix VersionFixer) (*WorkFile, error) {
}
// Cleanup cleans up the file f after any edit operations.
// To avoid quadratic behavior, modifications like [WorkFile.DropRequire]
// To avoid quadratic behavior, modifications like DropRequire
// clear the entry but do not remove it from the slice.
// Cleanup cleans out all the cleared entries.
func (f *WorkFile) Cleanup() {
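The go.work flow is analogous; a brief sketch under the same assumptions:

package main

import (
	"fmt"
	"log"

	"golang.org/x/mod/modfile"
)

func main() {
	data := []byte("go 1.21\n\nuse ./m\n")
	wf, err := modfile.ParseWork("go.work", data, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(wf.Use[0].Path) // "./m"
}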

View File

@@ -4,7 +4,7 @@
// Package module defines the module.Version type along with support code.
//
// The [module.Version] type is a simple Path, Version pair:
// The module.Version type is a simple Path, Version pair:
//
// type Version struct {
// Path string
@@ -12,7 +12,7 @@
// }
//
// There are no restrictions imposed directly by use of this structure,
// but additional checking functions, most notably [Check], verify that
// but additional checking functions, most notably Check, verify that
// a particular path, version pair is valid.
//
// # Escaped Paths
@@ -140,7 +140,7 @@ type ModuleError struct {
Err error
}
// VersionError returns a [ModuleError] derived from a [Version] and error,
// VersionError returns a ModuleError derived from a Version and error,
// or err itself if it is already such an error.
func VersionError(v Version, err error) error {
var mErr *ModuleError
@@ -169,7 +169,7 @@ func (e *ModuleError) Unwrap() error { return e.Err }
// An InvalidVersionError indicates an error specific to a version, with the
// module path unknown or specified externally.
//
// A [ModuleError] may wrap an InvalidVersionError, but an InvalidVersionError
// A ModuleError may wrap an InvalidVersionError, but an InvalidVersionError
// must not wrap a ModuleError.
type InvalidVersionError struct {
Version string
@@ -193,8 +193,8 @@ func (e *InvalidVersionError) Error() string {
func (e *InvalidVersionError) Unwrap() error { return e.Err }
// An InvalidPathError indicates a module, import, or file path doesn't
// satisfy all naming constraints. See [CheckPath], [CheckImportPath],
// and [CheckFilePath] for specific restrictions.
// satisfy all naming constraints. See CheckPath, CheckImportPath,
// and CheckFilePath for specific restrictions.
type InvalidPathError struct {
Kind string // "module", "import", or "file"
Path string
@@ -294,7 +294,7 @@ func fileNameOK(r rune) bool {
}
// CheckPath checks that a module path is valid.
// A valid module path is a valid import path, as checked by [CheckImportPath],
// A valid module path is a valid import path, as checked by CheckImportPath,
// with three additional constraints.
// First, the leading path element (up to the first slash, if any),
// by convention a domain name, must contain only lower-case ASCII letters,
@@ -380,7 +380,7 @@ const (
// checkPath returns an error describing why the path is not valid.
// Because these checks apply to module, import, and file paths,
// and because other checks may be applied, the caller is expected to wrap
// this error with [InvalidPathError].
// this error with InvalidPathError.
func checkPath(path string, kind pathKind) error {
if !utf8.ValidString(path) {
return fmt.Errorf("invalid UTF-8")
@@ -532,7 +532,7 @@ var badWindowsNames = []string{
// they require ".vN" instead of "/vN", and for all N, not just N >= 2.
// SplitPathVersion returns with ok = false when presented with
// a path whose last path element does not satisfy the constraints
// applied by [CheckPath], such as "example.com/pkg/v1" or "example.com/pkg/v1.2".
// applied by CheckPath, such as "example.com/pkg/v1" or "example.com/pkg/v1.2".
func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) {
if strings.HasPrefix(path, "gopkg.in/") {
return splitGopkgIn(path)
@@ -582,7 +582,7 @@ func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) {
// MatchPathMajor reports whether the semantic version v
// matches the path major version pathMajor.
//
// MatchPathMajor returns true if and only if [CheckPathMajor] returns nil.
// MatchPathMajor returns true if and only if CheckPathMajor returns nil.
func MatchPathMajor(v, pathMajor string) bool {
return CheckPathMajor(v, pathMajor) == nil
}
@@ -622,7 +622,7 @@ func CheckPathMajor(v, pathMajor string) error {
// PathMajorPrefix returns the major-version tag prefix implied by pathMajor.
// An empty PathMajorPrefix allows either v0 or v1.
//
// Note that [MatchPathMajor] may accept some versions that do not actually begin
// Note that MatchPathMajor may accept some versions that do not actually begin
// with this prefix: namely, it accepts a 'v0.0.0-' prefix for a '.v1'
// pathMajor, even though that pathMajor implies 'v1' tagging.
func PathMajorPrefix(pathMajor string) string {
@@ -643,7 +643,7 @@ func PathMajorPrefix(pathMajor string) string {
}
// CanonicalVersion returns the canonical form of the version string v.
// It is the same as [semver.Canonical] except that it preserves the special build suffix "+incompatible".
// It is the same as semver.Canonical(v) except that it preserves the special build suffix "+incompatible".
func CanonicalVersion(v string) string {
cv := semver.Canonical(v)
if semver.Build(v) == "+incompatible" {
@@ -652,8 +652,8 @@ func CanonicalVersion(v string) string {
return cv
}
// Sort sorts the list by Path, breaking ties by comparing [Version] fields.
// The Version fields are interpreted as semantic versions (using [semver.Compare])
// Sort sorts the list by Path, breaking ties by comparing Version fields.
// The Version fields are interpreted as semantic versions (using semver.Compare)
// optionally followed by a tie-breaking suffix introduced by a slash character,
// like in "v0.0.1/go.mod".
func Sort(list []Version) {
@@ -793,7 +793,7 @@ func unescapeString(escaped string) (string, bool) {
}
// MatchPrefixPatterns reports whether any path prefix of target matches one of
// the glob patterns (as defined by [path.Match]) in the comma-separated globs
// the glob patterns (as defined by path.Match) in the comma-separated globs
// list. This implements the algorithm used when matching a module path to the
// GOPRIVATE environment variable, as described by 'go help module-private'.
//
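A short sketch exercising the checks described above (the paths and versions are arbitrary examples):

package main

import (
	"fmt"

	"golang.org/x/mod/module"
)

func main() {
	fmt.Println(module.CheckPath("example.com/mod/v2"))         // <nil>
	fmt.Println(module.MatchPathMajor("v2.1.0", "/v2"))         // true
	fmt.Println(module.CanonicalVersion("v1.2.3+incompatible")) // v1.2.3+incompatible

	list := []module.Version{
		{Path: "b.example/y", Version: "v1.0.0"},
		{Path: "a.example/x", Version: "v1.1.0"},
	}
	module.Sort(list) // sorts by Path, then by semantic Version
	fmt.Println(list)
}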

View File

@@ -125,7 +125,7 @@ func IsPseudoVersion(v string) bool {
}
// IsZeroPseudoVersion returns whether v is a pseudo-version with a zero base,
// timestamp, and revision, as returned by [ZeroPseudoVersion].
// timestamp, and revision, as returned by ZeroPseudoVersion.
func IsZeroPseudoVersion(v string) bool {
return v == ZeroPseudoVersion(semver.Major(v))
}
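For example, as a sketch (the zero pseudo-version spells out a zero timestamp and revision):

package main

import (
	"fmt"

	"golang.org/x/mod/module"
)

func main() {
	zero := module.ZeroPseudoVersion("v0")
	fmt.Println(zero)                             // v0.0.0-00010101000000-000000000000
	fmt.Println(module.IsZeroPseudoVersion(zero)) // true
	fmt.Println(module.IsPseudoVersion("v1.2.3")) // false
}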

View File

@@ -140,7 +140,7 @@ func Compare(v, w string) int {
// Max canonicalizes its arguments and then returns the version string
// that compares greater.
//
// Deprecated: use [Compare] instead. In most cases, returning a canonicalized
// Deprecated: use Compare instead. In most cases, returning a canonicalized
// version is not expected or desired.
func Max(v, w string) string {
v = Canonical(v)
@@ -151,7 +151,7 @@ func Max(v, w string) string {
return w
}
// ByVersion implements [sort.Interface] for sorting semantic version strings.
// ByVersion implements sort.Interface for sorting semantic version strings.
type ByVersion []string
func (vs ByVersion) Len() int { return len(vs) }
@@ -164,7 +164,7 @@ func (vs ByVersion) Less(i, j int) bool {
return vs[i] < vs[j]
}
// Sort sorts a list of semantic version strings using [ByVersion].
// Sort sorts a list of semantic version strings using ByVersion.
func Sort(list []string) {
sort.Sort(ByVersion(list))
}
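A small sketch of ByVersion-based sorting (invalid strings compare less than any valid version, so they sort first):

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	versions := []string{"v1.10.0", "v1.2.0", "v1.2.0-alpha", "bad"}
	semver.Sort(versions)
	fmt.Println(versions)                            // [bad v1.2.0-alpha v1.2.0 v1.10.0]
	fmt.Println(semver.Compare("v1.2.0", "v1.10.0")) // -1
}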

View File

@@ -19,7 +19,7 @@ import (
)
// A ClientOps provides the external operations
// (file caching, HTTP fetches, and so on) needed by the [Client].
// (file caching, HTTP fetches, and so on) needed by the Client.
// The methods must be safe for concurrent use by multiple goroutines.
type ClientOps interface {
// ReadRemote reads and returns the content served at the given path
@@ -72,7 +72,7 @@ type ClientOps interface {
// ErrWriteConflict signals a write conflict during Client.WriteConfig.
var ErrWriteConflict = errors.New("write conflict")
// ErrSecurity is returned by [Client] operations that invoke Client.SecurityError.
// ErrSecurity is returned by Client operations that invoke Client.SecurityError.
var ErrSecurity = errors.New("security error: misbehaving server")
// A Client is a client connection to a checksum database.
@@ -102,7 +102,7 @@ type Client struct {
tileSaved map[tlog.Tile]bool // which tiles have been saved using c.ops.WriteCache already
}
// NewClient returns a new [Client] using the given [ClientOps].
// NewClient returns a new Client using the given Client.
func NewClient(ops ClientOps) *Client {
return &Client{
ops: ops,
@@ -155,7 +155,7 @@ func (c *Client) initWork() {
}
// SetTileHeight sets the tile height for the Client.
// Any call to SetTileHeight must happen before the first call to [Client.Lookup].
// Any call to SetTileHeight must happen before the first call to Lookup.
// If SetTileHeight is not called, the Client defaults to tile height 8.
// SetTileHeight can be called at most once,
// and if so it must be called before the first call to Lookup.
@@ -174,7 +174,7 @@ func (c *Client) SetTileHeight(height int) {
// SetGONOSUMDB sets the list of comma-separated GONOSUMDB patterns for the Client.
// For any module path matching one of the patterns,
// [Client.Lookup] will return ErrGONOSUMDB.
// Lookup will return ErrGONOSUMDB.
// SetGONOSUMDB can be called at most once,
// and if so it must be called before the first call to Lookup.
func (c *Client) SetGONOSUMDB(list string) {
@@ -187,8 +187,8 @@ func (c *Client) SetGONOSUMDB(list string) {
c.nosumdb = list
}
// ErrGONOSUMDB is returned by [Client.Lookup] for paths that match
// a pattern listed in the GONOSUMDB list (set by [Client.SetGONOSUMDB],
// ErrGONOSUMDB is returned by Lookup for paths that match
// a pattern listed in the GONOSUMDB list (set by SetGONOSUMDB,
// usually from the environment variable).
var ErrGONOSUMDB = errors.New("skipped (listed in GONOSUMDB)")
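A sketch of wiring up a Client with a toy in-memory ClientOps. memOps is hypothetical; a real implementation would fetch over HTTP and persist config to disk. The verifier key stored under the "key" config file below is sum.golang.org's published key:

package main

import (
	"fmt"

	"golang.org/x/mod/sumdb"
)

// memOps is a hypothetical, minimal ClientOps backed by a map.
type memOps struct{ cfg map[string][]byte }

func (m *memOps) ReadRemote(path string) ([]byte, error) { return nil, fmt.Errorf("offline: %s", path) }
func (m *memOps) ReadConfig(file string) ([]byte, error) { return m.cfg[file], nil }
func (m *memOps) WriteConfig(file string, old, new []byte) error { m.cfg[file] = new; return nil }
func (m *memOps) ReadCache(file string) ([]byte, error)  { return nil, fmt.Errorf("no cache: %s", file) }
func (m *memOps) WriteCache(file string, data []byte)    {}
func (m *memOps) Log(msg string)                         {}
func (m *memOps) SecurityError(msg string)               { fmt.Println("SECURITY:", msg) }

func main() {
	c := sumdb.NewClient(&memOps{cfg: map[string][]byte{
		"key": []byte("sum.golang.org+033de0ae+Ac4zctda0e5eza+HJyk9SxEdh+s3Ux18htTTAD8OuAn8"),
	}})
	c.SetGONOSUMDB("corp.example.com") // matching paths get ErrGONOSUMDB
	// With a real ops implementation, c.Lookup(path, version) would now
	// fetch and verify the go.sum lines for a module.
	_ = c
}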

View File

@@ -20,45 +20,45 @@
//
// # Verifying Notes
//
// A [Verifier] allows verification of signatures by one server public key.
// A Verifier allows verification of signatures by one server public key.
// It can report the name of the server and the uint32 hash of the key,
// and it can verify a purported signature by that key.
//
// The standard implementation of a Verifier is constructed
// by [NewVerifier] starting from a verifier key, which is a
// by NewVerifier starting from a verifier key, which is a
// plain text string of the form "<name>+<hash>+<keydata>".
//
// A [Verifiers] allows looking up a Verifier by the combination
// A Verifiers allows looking up a Verifier by the combination
// of server name and key hash.
//
// The standard implementation of a Verifiers is constructed
// by VerifierList from a list of known verifiers.
//
// A [Note] represents a text with one or more signatures.
// A Note represents a text with one or more signatures.
// An implementation can reject a note with too many signatures
// (for example, more than 100 signatures).
//
// A [Signature] represents a signature on a note, verified or not.
// A Signature represents a signature on a note, verified or not.
//
// The [Open] function takes as input a signed message
// The Open function takes as input a signed message
// and a set of known verifiers. It decodes and verifies
// the message signatures and returns a [Note] structure
// the message signatures and returns a Note structure
// containing the message text and (verified or unverified) signatures.
//
// # Signing Notes
//
// A [Signer] allows signing a text with a given key.
// A Signer allows signing a text with a given key.
// It can report the name of the server and the hash of the key
// and can sign a raw text using that key.
//
// The standard implementation of a Signer is constructed
// by [NewSigner] starting from an encoded signer key, which is a
// by NewSigner starting from an encoded signer key, which is a
// plain text string of the form "PRIVATE+KEY+<name>+<hash>+<keydata>".
// Anyone with an encoded signer key can sign messages using that key,
// so it must be kept secret. The encoding begins with the literal text
// "PRIVATE+KEY" to avoid confusion with the public server key.
//
// The [Sign] function takes as input a Note and a list of Signers
// The Sign function takes as input a Note and a list of Signers
// and returns an encoded, signed message.
//
// # Signed Note Format
@@ -88,7 +88,7 @@
// although doing so will require deploying the new algorithms to all clients
// before starting to depend on them for signatures.
//
// The [GenerateKey] function generates and returns a new signer
// The GenerateKey function generates and returns a new signer
// and corresponding verifier.
//
// # Example
@@ -123,9 +123,9 @@
// base URLs, the only syntactic requirement is that they
// not contain spaces or newlines).
//
// If [Open] is given access to a [Verifiers] including the
// [Verifier] for this key, then it will succeed at verifying
// the encoded message and returning the parsed [Note]:
// If Open is given access to a Verifiers including the
// Verifier for this key, then it will succeed at verifying
// the encoded message and returning the parsed Note:
//
// vkey := "PeterNeumann+c74f20a3+ARpc2QcUPDhMQegwxbzhKqiBfsVkmqq/LDE4izWy10TW"
// msg := []byte("If you think cryptography is the answer to your problem,\n" +
@@ -238,7 +238,7 @@ func isValidName(name string) bool {
return name != "" && utf8.ValidString(name) && strings.IndexFunc(name, unicode.IsSpace) < 0 && !strings.Contains(name, "+")
}
// NewVerifier constructs a new [Verifier] from an encoded verifier key.
// NewVerifier constructs a new Verifier from an encoded verifier key.
func NewVerifier(vkey string) (Verifier, error) {
name, vkey := chop(vkey, "+")
hash16, key64 := chop(vkey, "+")
@@ -295,7 +295,7 @@ func (v *verifier) Name() string { return v.name }
func (v *verifier) KeyHash() uint32 { return v.hash }
func (v *verifier) Verify(msg, sig []byte) bool { return v.verify(msg, sig) }
// NewSigner constructs a new [Signer] from an encoded signer key.
// NewSigner constructs a new Signer from an encoded signer key.
func NewSigner(skey string) (Signer, error) {
priv1, skey := chop(skey, "+")
priv2, skey := chop(skey, "+")
@@ -409,7 +409,7 @@ func (e *UnknownVerifierError) Error() string {
}
// An ambiguousVerifierError indicates that the given name and hash
// match multiple keys passed to [VerifierList].
// match multiple keys passed to VerifierList.
// (If this happens, some malicious actor has taken control of the
// verifier list, at which point we may as well give up entirely,
// but we diagnose the problem instead.)
@@ -422,7 +422,7 @@ func (e *ambiguousVerifierError) Error() string {
return fmt.Sprintf("ambiguous key %s+%08x", e.name, e.hash)
}
// VerifierList returns a [Verifiers] implementation that uses the given list of verifiers.
// VerifierList returns a Verifiers implementation that uses the given list of verifiers.
func VerifierList(list ...Verifier) Verifiers {
m := make(verifierMap)
for _, v := range list {
@@ -510,7 +510,7 @@ var (
// If known.Verifier returns any other error, Open returns that error.
//
// If no known verifier has signed an otherwise valid note,
// Open returns an [UnverifiedNoteError].
// Open returns an UnverifiedNoteError.
// In this case, the unverified note can be fetched from inside the error.
func Open(msg []byte, known Verifiers) (*Note, error) {
if known == nil {
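An end-to-end sketch of the workflow the package documents: generate a key pair, sign a note, then verify it (the server name here is made up):

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/mod/sumdb/note"
)

func main() {
	skey, vkey, err := note.GenerateKey(nil, "example.org") // nil reader uses crypto/rand
	if err != nil {
		log.Fatal(err)
	}
	signer, err := note.NewSigner(skey)
	if err != nil {
		log.Fatal(err)
	}
	msg, err := note.Sign(&note.Note{Text: "hello, notes\n"}, signer)
	if err != nil {
		log.Fatal(err)
	}
	os.Stdout.Write(msg)

	verifier, err := note.NewVerifier(vkey)
	if err != nil {
		log.Fatal(err)
	}
	n, err := note.Open(msg, note.VerifierList(verifier))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("verified by %s: %q\n", n.Sigs[0].Name, n.Text)
}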

View File

@@ -17,7 +17,7 @@ import (
)
// A ServerOps provides the external operations
// (underlying database access and so on) needed by the [Server].
// (underlying database access and so on) needed by the Server.
type ServerOps interface {
// Signed returns the signed hash of the latest tree.
Signed(ctx context.Context) ([]byte, error)
@@ -36,7 +36,7 @@ type ServerOps interface {
// A Server is the checksum database HTTP server,
// which implements http.Handler and should be invoked
// to serve the paths listed in [ServerPaths].
// to serve the paths listed in ServerPaths.
type Server struct {
ops ServerOps
}

View File

@@ -14,15 +14,15 @@ import (
"golang.org/x/mod/sumdb/tlog"
)
// NewTestServer constructs a new [TestServer]
// NewTestServer constructs a new TestServer
// that will sign its tree with the given signer key
// (see [golang.org/x/mod/sumdb/note])
// (see golang.org/x/mod/sumdb/note)
// and fetch new records as needed by calling gosum.
func NewTestServer(signer string, gosum func(path, vers string) ([]byte, error)) *TestServer {
return &TestServer{signer: signer, gosum: gosum}
}
// A TestServer is an in-memory implementation of [ServerOps] for testing.
// A TestServer is an in-memory implementation of Server for testing.
type TestServer struct {
signer string
gosum func(path, vers string) ([]byte, error)
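A sketch of standing up an in-memory checksum database for a test, combining NewTestServer with the HTTP Server (the signer name and go.sum line are fabricated):

package main

import (
	"log"
	"net/http"

	"golang.org/x/mod/sumdb"
	"golang.org/x/mod/sumdb/note"
)

func main() {
	skey, _, err := note.GenerateKey(nil, "test.sumdb")
	if err != nil {
		log.Fatal(err)
	}
	// gosum fabricates go.sum lines on demand for the test database.
	ts := sumdb.NewTestServer(skey, func(path, vers string) ([]byte, error) {
		return []byte(path + " " + vers + " h1:placeholder=\n"), nil
	})
	srv := sumdb.NewServer(ts)
	for _, p := range sumdb.ServerPaths {
		http.Handle(p, srv)
	}
	log.Fatal(http.ListenAndServe("localhost:8080", nil))
}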

View File

@@ -28,7 +28,7 @@ import (
// is tile/3/4/x001/x234/067.p/1, and
// Tile{H: 3, L: 4, N: 1234067, W: 8}'s path
// is tile/3/4/x001/x234/067.
// See the [Tile.Path] method and the [ParseTilePath] function.
// See Tile's Path method and the ParseTilePath function.
//
// The special level L=-1 holds raw record data instead of hashes.
// In this case, the level encodes into a tile path as the path element
@@ -46,7 +46,7 @@ type Tile struct {
// TileForIndex returns the tile of fixed height h ≥ 1
// and least width storing the given hash storage index.
//
// If h ≤ 0, [TileForIndex] panics.
// If h ≤ 0, TileForIndex panics.
func TileForIndex(h int, index int64) Tile {
if h <= 0 {
panic(fmt.Sprintf("TileForIndex: invalid height %d", h))
@@ -105,7 +105,7 @@ func tileHash(data []byte) Hash {
// size newTreeSize to replace a tree of size oldTreeSize.
// (No tiles need to be published for a tree of size zero.)
//
// If h ≤ 0, NewTiles panics.
// If h ≤ 0, TileForIndex panics.
func NewTiles(h int, oldTreeSize, newTreeSize int64) []Tile {
if h <= 0 {
panic(fmt.Sprintf("NewTiles: invalid height %d", h))
@@ -272,7 +272,7 @@ type TileReader interface {
// TileHashReader returns a HashReader that satisfies requests
// by loading tiles of the given tree.
//
// The returned [HashReader] checks that loaded tiles are
// The returned HashReader checks that loaded tiles are
// valid for the given tree. Therefore, any hashes returned
// by the HashReader are already proven to be in the tree.
func TileHashReader(tree Tree, tr TileReader) HashReader {
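A brief sketch of the tile helpers mentioned above (the heights and tree sizes are arbitrary):

package main

import (
	"fmt"

	"golang.org/x/mod/sumdb/tlog"
)

func main() {
	// Tile of height 2 covering the stored-hash index of leaf 1000.
	t := tlog.TileForIndex(2, tlog.StoredHashIndex(0, 1000))
	fmt.Println(t.Path()) // a path of the form tile/2/<level>/...

	// Tiles that must be published when a tree grows from 0 to 1000 leaves.
	for _, nt := range tlog.NewTiles(8, 0, 1000) {
		fmt.Println(nt.Path())
	}
}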

View File

@@ -131,7 +131,7 @@ func StoredHashIndex(level int, n int64) int64 {
return i + int64(level)
}
// SplitStoredHashIndex is the inverse of [StoredHashIndex].
// SplitStoredHashIndex is the inverse of StoredHashIndex.
// That is, SplitStoredHashIndex(StoredHashIndex(level, n)) == level, n.
func SplitStoredHashIndex(index int64) (level int, n int64) {
// Determine level 0 record before index.
@@ -183,7 +183,7 @@ func StoredHashes(n int64, data []byte, r HashReader) ([]Hash, error) {
return StoredHashesForRecordHash(n, RecordHash(data), r)
}
// StoredHashesForRecordHash is like [StoredHashes] but takes
// StoredHashesForRecordHash is like StoredHashes but takes
// as its second argument RecordHash(data) instead of data itself.
func StoredHashesForRecordHash(n int64, h Hash, r HashReader) ([]Hash, error) {
// Start with the record hash.
@@ -227,7 +227,7 @@ type HashReader interface {
ReadHashes(indexes []int64) ([]Hash, error)
}
// A HashReaderFunc is a function implementing [HashReader].
// A HashReaderFunc is a function implementing HashReader.
type HashReaderFunc func([]int64) ([]Hash, error)
func (f HashReaderFunc) ReadHashes(indexes []int64) ([]Hash, error) {
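The round trip is easy to check in a sketch:

package main

import (
	"fmt"

	"golang.org/x/mod/sumdb/tlog"
)

func main() {
	idx := tlog.StoredHashIndex(3, 7)
	level, n := tlog.SplitStoredHashIndex(idx)
	fmt.Println(idx, level, n) // SplitStoredHashIndex inverts StoredHashIndex: level=3, n=7
}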

View File

@@ -10,31 +10,31 @@
//
// • All file paths within a zip file must start with "<module>@<version>/",
// where "<module>" is the module path and "<version>" is the version.
// The module path must be valid (see [golang.org/x/mod/module.CheckPath]).
// The module path must be valid (see golang.org/x/mod/module.CheckPath).
// The version must be valid and canonical (see
// [golang.org/x/mod/module.CanonicalVersion]). The path must have a major
// golang.org/x/mod/module.CanonicalVersion). The path must have a major
// version suffix consistent with the version (see
// [golang.org/x/mod/module.Check]). The part of the file path after the
// golang.org/x/mod/module.Check). The part of the file path after the
// "<module>@<version>/" prefix must be valid (see
// [golang.org/x/mod/module.CheckFilePath]).
// golang.org/x/mod/module.CheckFilePath).
//
// • No two file paths may be equal under Unicode case-folding (see
// [strings.EqualFold]).
// strings.EqualFold).
//
// • A go.mod file may or may not appear in the top-level directory. If present,
// it must be named "go.mod", not any other case. Files named "go.mod"
// are not allowed in any other directory.
//
// • The total size in bytes of a module zip file may be at most [MaxZipFile]
// • The total size in bytes of a module zip file may be at most MaxZipFile
// bytes (500 MiB). The total uncompressed size of the files within the
// zip may also be at most [MaxZipFile] bytes.
// zip may also be at most MaxZipFile bytes.
//
// • Each file's uncompressed size must match its declared 64-bit uncompressed
// size in the zip file header.
//
// • If the zip contains files named "<module>@<version>/go.mod" or
// "<module>@<version>/LICENSE", their sizes in bytes may be at most
// [MaxGoMod] or [MaxLICENSE], respectively (both are 16 MiB).
// MaxGoMod or MaxLICENSE, respectively (both are 16 MiB).
//
// • Empty directories are ignored. File permissions and timestamps are also
// ignored.
@@ -42,7 +42,7 @@
// • Symbolic links and other irregular files are not allowed.
//
// Note that this package does not provide hashing functionality. See
// [golang.org/x/mod/sumdb/dirhash].
// golang.org/x/mod/sumdb/dirhash.
package zip
import (
@@ -56,7 +56,6 @@ import (
"path"
"path/filepath"
"strings"
"time"
"unicode"
"unicode/utf8"
@@ -118,9 +117,8 @@ type CheckedFiles struct {
SizeError error
}
// Err returns an error if [CheckedFiles] does not describe a valid module zip
// file. [CheckedFiles.SizeError] is returned if that field is set.
// A [FileErrorList] is returned
// Err returns an error if CheckedFiles does not describe a valid module zip
// file. SizeError is returned if that field is set. A FileErrorList is returned
// if there are one or more invalid files. Other errors may be returned in the
// future.
func (cf CheckedFiles) Err() error {
@@ -323,17 +321,17 @@ func checkFiles(files []File) (cf CheckedFiles, validFiles []File, validSizes []
}
// CheckDir reports whether the files in dir satisfy the name and size
// constraints listed in the package documentation. The returned [CheckedFiles]
// constraints listed in the package documentation. The returned CheckedFiles
// record contains lists of valid, invalid, and omitted files. If a directory is
// omitted (for example, a nested module or vendor directory), it will appear in
// the omitted list, but its files won't be listed.
//
// CheckDir returns an error if it encounters an I/O error or if the returned
// [CheckedFiles] does not describe a valid module zip file (according to
// [CheckedFiles.Err]). The returned [CheckedFiles] is still populated when such
// CheckedFiles does not describe a valid module zip file (according to
// CheckedFiles.Err). The returned CheckedFiles is still populated when such
// an error is returned.
//
// Note that CheckDir will not open any files, so [CreateFromDir] may still fail
// Note that CheckDir will not open any files, so CreateFromDir may still fail
// when CheckDir is successful due to I/O errors.
func CheckDir(dir string) (CheckedFiles, error) {
// List files (as CreateFromDir would) and check which ones are omitted
@@ -364,13 +362,13 @@ func CheckDir(dir string) (CheckedFiles, error) {
// CheckZip reports whether the files contained in a zip file satisfy the name
// and size constraints listed in the package documentation.
//
// CheckZip returns an error if the returned [CheckedFiles] does not describe
// a valid module zip file (according to [CheckedFiles.Err]). The returned
// CheckZip returns an error if the returned CheckedFiles does not describe
// a valid module zip file (according to CheckedFiles.Err). The returned
// CheckedFiles is still populated when an error is returned. CheckZip will
// also return an error if the module path or version is malformed or if it
// encounters an error reading the zip file.
//
// Note that CheckZip does not read individual files, so [Unzip] may still fail
// Note that CheckZip does not read individual files, so Unzip may still fail
// when CheckZip is successful due to I/O errors.
func CheckZip(m module.Version, zipFile string) (CheckedFiles, error) {
f, err := os.Open(zipFile)
@@ -478,7 +476,7 @@ func checkZip(m module.Version, f *os.File) (*zip.Reader, CheckedFiles, error) {
// and writes it to w.
//
// Create verifies the restrictions described in the package documentation
// and should not produce an archive that [Unzip] cannot extract. Create does not
// and should not produce an archive that Unzip cannot extract. Create does not
// include files in the output archive if they don't belong in the module zip.
// In particular, Create will not include files in modules found in
// subdirectories, most files in vendor directories, or irregular files (such
@@ -545,12 +543,12 @@ func Create(w io.Writer, m module.Version, files []File) (err error) {
// a directory, dir. The zip content is written to w.
//
// CreateFromDir verifies the restrictions described in the package
// documentation and should not produce an archive that [Unzip] cannot extract.
// documentation and should not produce an archive that Unzip cannot extract.
// CreateFromDir does not include files in the output archive if they don't
// belong in the module zip. In particular, CreateFromDir will not include
// files in modules found in subdirectories, most files in vendor directories,
// or irregular files (such as symbolic links) in the output archive.
// Additionally, unlike [Create], CreateFromDir will not include directories
// Additionally, unlike Create, CreateFromDir will not include directories
// named ".bzr", ".git", ".hg", or ".svn".
func CreateFromDir(w io.Writer, m module.Version, dir string) (err error) {
defer func() {
@@ -582,8 +580,8 @@ func CreateFromDir(w io.Writer, m module.Version, dir string) (err error) {
// "sub/dir". To create a zip from the base of the repository, pass an empty
// string.
//
// If CreateFromVCS returns [UnrecognizedVCSError], consider falling back to
// [CreateFromDir].
// If CreateFromVCS returns ErrUnrecognizedVCS, consider falling back to
// CreateFromDir.
func CreateFromVCS(w io.Writer, m module.Version, repoRoot, revision, subdir string) (err error) {
defer func() {
if zerr, ok := err.(*zipError); ok {
@@ -655,7 +653,6 @@ func filesInGitRepo(dir, rev, subdir string) ([]File, error) {
return nil, err
}
haveLICENSE := false
var fs []File
for _, zf := range zipReader.File {
if !strings.HasPrefix(zf.Name, subdir) || strings.HasSuffix(zf.Name, "/") {
@@ -672,23 +669,6 @@ func filesInGitRepo(dir, rev, subdir string) ([]File, error) {
name: n,
f: zf,
})
if n == "LICENSE" {
haveLICENSE = true
}
}
if !haveLICENSE && subdir != "" {
// Note: this method of extracting the license from the root copied from
// https://go.googlesource.com/go/+/refs/tags/go1.20.4/src/cmd/go/internal/modfetch/coderepo.go#1118
// https://go.googlesource.com/go/+/refs/tags/go1.20.4/src/cmd/go/internal/modfetch/codehost/git.go#657
cmd := exec.Command("git", "cat-file", "blob", rev+":LICENSE")
cmd.Dir = dir
cmd.Env = append(os.Environ(), "PWD="+dir)
stdout := bytes.Buffer{}
cmd.Stdout = &stdout
if err := cmd.Run(); err == nil {
fs = append(fs, dataFile{name: "LICENSE", data: stdout.Bytes()})
}
}
return fs, nil
@@ -730,26 +710,6 @@ func (f zipFile) Path() string { return f.name }
func (f zipFile) Lstat() (os.FileInfo, error) { return f.f.FileInfo(), nil }
func (f zipFile) Open() (io.ReadCloser, error) { return f.f.Open() }
type dataFile struct {
name string
data []byte
}
func (f dataFile) Path() string { return f.name }
func (f dataFile) Lstat() (os.FileInfo, error) { return dataFileInfo{f}, nil }
func (f dataFile) Open() (io.ReadCloser, error) { return io.NopCloser(bytes.NewReader(f.data)), nil }
type dataFileInfo struct {
f dataFile
}
func (fi dataFileInfo) Name() string { return path.Base(fi.f.name) }
func (fi dataFileInfo) Size() int64 { return int64(len(fi.f.data)) }
func (fi dataFileInfo) Mode() os.FileMode { return 0644 }
func (fi dataFileInfo) ModTime() time.Time { return time.Time{} }
func (fi dataFileInfo) IsDir() bool { return false }
func (fi dataFileInfo) Sys() interface{} { return nil }
// isVendoredPackage attempts to report whether the given filename is contained
// in a package whose import path contains (but does not end with) the component
// "vendor".

Some files were not shown because too many files have changed in this diff.