Mirror of https://github.com/golang/go.git
Compare commits
71 Commits
SHA1:
2c1e5b05fe, bbd043ff0d, b0e1d3ea26, d25a935574, e3ba569c78, 8dc6ad1c61, 06df3292a8, b120517ffd,
0a9582163c, 91a4e74b98, 6385a6fb18, 2d07bb86f0, 745b81b6e6, 13339c75b8, 2977709875, 2d4746f37b,
2b8026f025, 7c97cc7d97, cb6ea94996, 45b98bfb79, bac083a584, 70aa116c4a, 31c5a236bc, 25ec110e51,
6634ce2f41, 25c6dce188, 4e34f2e81d, d91843ff67, 7437db1085, ed527ecfb2, b78e8cc145, 3475e6af4c,
179821c9e1, 9398951479, 75d8be5fb4, 1755d14559, c19c4c566c, e973d24261, 2e6276df34, aeef93cd64,
35de5f2b0e, a3b092d65e, 07c72a0915, 041dd5ce05, a51957fb0b, 363f2594aa, 9b53b9b585, 4a14d9c9af,
9786164333, 6df6e61cbb, b25266c58d, 1ea8d38517, b2ffc23a82, 8472fcb62d, b36e5555dd, ed977e2f47,
2fabb143d7, c9f01f0ec7, 252f20b2c1, 7ee7a21ef2, 06a9034b60, 03c7e96be9, c2de6836c1, 4aeac326b5,
9480b4adf9, cc0cb3020d, d8117459c5, ebbff91f59, 1c1c82432a, b4a0665266, 577e7b9bb9
@@ -60,7 +60,9 @@ pkg crypto/tls, method (*QUICConn) Close() error #44886
pkg crypto/tls, method (*QUICConn) ConnectionState() ConnectionState #44886
pkg crypto/tls, method (*QUICConn) HandleData(QUICEncryptionLevel, []uint8) error #44886
pkg crypto/tls, method (*QUICConn) NextEvent() QUICEvent #44886
pkg crypto/tls, method (*QUICConn) SendSessionTicket(bool) error #60107
pkg crypto/tls, method (*QUICConn) SendSessionTicket(QUICSessionTicketOptions) error #60107
pkg crypto/tls, type QUICSessionTicketOptions struct #60107
pkg crypto/tls, type QUICSessionTicketOptions struct, EarlyData bool #60107
pkg crypto/tls, method (*QUICConn) SetTransportParameters([]uint8) #44886
pkg crypto/tls, method (*QUICConn) Start(context.Context) error #44886
pkg crypto/tls, method (QUICEncryptionLevel) String() string #44886
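The hunk above lists the crypto/tls QUIC API as it shipped in Go 1.21, including the change of SendSessionTicket from a bool parameter to a QUICSessionTicketOptions struct. A minimal sketch of calling the new signature follows; the package name and the pre-established conn are assumptions for illustration, not part of the diff.

```go
package quicexample

import (
	"crypto/tls"
	"log"
)

// sendTicket is a sketch only: conn is assumed to be an established
// server-side *tls.QUICConn (for example, created with tls.QUICServer and
// driven by a separate QUIC implementation). It uses the Go 1.21 signature
// listed above, which takes QUICSessionTicketOptions instead of a bool.
func sendTicket(conn *tls.QUICConn) {
	opts := tls.QUICSessionTicketOptions{
		EarlyData: true, // allow the resumed session to attempt 0-RTT
	}
	if err := conn.SendSessionTicket(opts); err != nil {
		log.Printf("sending session ticket: %v", err)
	}
}
```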
@@ -344,8 +346,6 @@ pkg maps, func Copy[$0 interface{ ~map[$2]$3 }, $1 interface{ ~map[$2]$3 }, $2 c
pkg maps, func DeleteFunc[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0, func($1, $2) bool) #57436
pkg maps, func Equal[$0 interface{ ~map[$2]$3 }, $1 interface{ ~map[$2]$3 }, $2 comparable, $3 comparable]($0, $1) bool #57436
pkg maps, func EqualFunc[$0 interface{ ~map[$2]$3 }, $1 interface{ ~map[$2]$4 }, $2 comparable, $3 interface{}, $4 interface{}]($0, $1, func($3, $4) bool) bool #57436
pkg maps, func Keys[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0) []$1 #57436
pkg maps, func Values[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0) []$2 #57436
pkg math/big, method (*Int) Float64() (float64, Accuracy) #56984
pkg net/http, method (*ProtocolError) Is(error) bool #41198
pkg net/http, method (*ResponseController) EnableFullDuplex() error #57786
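For reference, a short sketch using two of the generic maps functions listed above, DeleteFunc and Equal, with the Go 1.21 standard-library signatures; the concrete map values are made up for illustration.

```go
package main

import (
	"fmt"
	"maps"
)

func main() {
	m := map[string]int{"a": 1, "b": 2, "c": 3}

	// DeleteFunc removes every entry for which the predicate returns true.
	maps.DeleteFunc(m, func(k string, v int) bool { return v%2 == 0 })

	// Equal reports whether two maps contain the same key/value pairs.
	fmt.Println(maps.Equal(m, map[string]int{"a": 1, "c": 3})) // true
}
```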
@@ -1 +1,2 @@
branch: master
branch: release-branch.go1.21
parent-branch: master
1264 doc/go1.21.html
File diff suppressed because it is too large
756 doc/go_spec.html
@@ -1,6 +1,6 @@
<!--{
"Title": "The Go Programming Language Specification",
"Subtitle": "Version of June 14, 2023",
"Subtitle": "Version of Aug 2, 2023",
"Path": "/ref/spec"
}-->

@@ -2511,7 +2511,7 @@ type (
<p>
A type definition creates a new, distinct type with the same
<a href="#Types">underlying type</a> and operations as the given type
<a href="#Underlying_types">underlying type</a> and operations as the given type
and binds an identifier, the <i>type name</i>, to it.
</p>

@@ -4343,7 +4343,7 @@ type parameter list type arguments after substitution
When using a generic function, type arguments may be provided explicitly,
or they may be partially or completely <a href="#Type_inference">inferred</a>
from the context in which the function is used.
Provided that they can be inferred, type arguments may be omitted entirely if the function is:
Provided that they can be inferred, type argument lists may be omitted entirely if the function is:
</p>

<ul>

@@ -4351,7 +4351,7 @@ Provided that they can be inferred, type arguments may be omitted entirely if th
<a href="#Calls">called</a> with ordinary arguments,
</li>
<li>
<a href="#Assignment_statements">assigned</a> to a variable with an explicitly declared type,
<a href="#Assignment_statements">assigned</a> to a variable with a known type
</li>
<li>
<a href="#Calls">passed as an argument</a> to another function, or

@@ -4371,7 +4371,7 @@ must be inferrable from the context in which the function is used.
// sum returns the sum (concatenation, for strings) of its arguments.
func sum[T ~int | ~float64 | ~string](x... T) T { … }

x := sum // illegal: sum must have a type argument (x is a variable without a declared type)
x := sum // illegal: the type of x is unknown
intSum := sum[int] // intSum has type func(x... int) int
a := intSum(2, 3) // a has value 5 of type int
b := sum[float64](2.0, 3) // b has value 5.0 of type float64
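The hunk above adds "assigned to a variable with a known type" as one of the cases in which a type argument list may be omitted. A hedged sketch of that case is shown below; sum here is a local stand-in mirroring the spec's example signature.

```go
package main

import "fmt"

// sum mirrors the spec's example signature.
func sum[T ~int | ~float64 | ~string](x ...T) T {
	var s T
	for _, v := range x {
		s += v
	}
	return s
}

func main() {
	// The variable's type func(...float64) float64 is known, so T is
	// inferred as float64 without an explicit type argument list.
	var f func(...float64) float64 = sum
	fmt.Println(f(2.0, 3.0)) // 5
}
```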
@@ -4406,402 +4406,323 @@ For a generic type, all type arguments must always be provided explicitly.
<h3 id="Type_inference">Type inference</h3>

<p>
<em>NOTE: This section is not yet up-to-date for Go 1.21.</em>
A use of a generic function may omit some or all type arguments if they can be
<i>inferred</i> from the context within which the function is used, including
the constraints of the function's type parameters.
Type inference succeeds if it can infer the missing type arguments
and <a href="#Instantiations">instantiation</a> succeeds with the
inferred type arguments.
Otherwise, type inference fails and the program is invalid.
</p>

<p>
Missing function type arguments may be <i>inferred</i> by a series of steps, described below.
Each step attempts to use known information to infer additional type arguments.
Type inference stops as soon as all type arguments are known.
After type inference is complete, it is still necessary to substitute all type arguments
for type parameters and verify that each type argument
<a href="#Implementing_an_interface">implements</a> the relevant constraint;
it is possible for an inferred type argument to fail to implement a constraint, in which
case instantiation fails.
Type inference uses the type relationships between pairs of types for inference:
For instance, a function argument must be <a href="#Assignability">assignable</a>
to its respective function parameter; this establishes a relationship between the
type of the argument and the type of the parameter.
If either of these two types contains type parameters, type inference looks for the
type arguments to substitute the type parameters with such that the assignability
relationship is satisfied.
Similarly, type inference uses the fact that a type argument must
<a href="#Satisfying_a_type_constraint">satisfy</a> the constraint of its respective
type parameter.
</p>

<p>
Type inference is based on
Each such pair of matched types corresponds to a <i>type equation</i> containing
one or multiple type parameters, from one or possibly multiple generic functions.
Inferring the missing type arguments means solving the resulting set of type
equations for the respective type parameters.
</p>

<p>
For example, given
</p>

<pre>
// dedup returns a copy of the argument slice with any duplicate entries removed.
func dedup[S ~[]E, E comparable](S) S { … }

type Slice []int
var s Slice
s = dedup(s) // same as s = dedup[Slice, int](s)
</pre>

<p>
the variable <code>s</code> of type <code>Slice</code> must be assignable to
the function parameter type <code>S</code> for the program to be valid.
To reduce complexity, type inference ignores the directionality of assignments,
so the type relationship between <code>Slice</code> and <code>S</code> can be
expressed via the (symmetric) type equation <code>Slice ≡<sub>A</sub> S</code>
(or <code>S ≡<sub>A</sub> Slice</code> for that matter),
where the <code><sub>A</sub></code> in <code>≡<sub>A</sub></code>
indicates that the LHS and RHS types must match per assignability rules
(see the section on <a href="#Type_unification">type unification</a> for
details).
Similarly, the type parameter <code>S</code> must satisfy its constraint
<code>~[]E</code>. This can be expressed as <code>S ≡<sub>C</sub> ~[]E</code>
where <code>X ≡<sub>C</sub> Y</code> stands for
"<code>X</code> satisfies constraint <code>Y</code>".
These observations lead to a set of two equations
</p>

<pre>
Slice ≡<sub>A</sub> S (1)
S ≡<sub>C</sub> ~[]E (2)
</pre>

<p>
which now can be solved for the type parameters <code>S</code> and <code>E</code>.
From (1) a compiler can infer that the type argument for <code>S</code> is <code>Slice</code>.
Similarly, because the underlying type of <code>Slice</code> is <code>[]int</code>
and <code>[]int</code> must match <code>[]E</code> of the constraint,
a compiler can infer that <code>E</code> must be <code>int</code>.
Thus, for these two equations, type inference infers
</p>

<pre>
S ➞ Slice
E ➞ int
</pre>

<p>
Given a set of type equations, the type parameters to solve for are
the type parameters of the functions that need to be instantiated
and for which no explicit type arguments is provided.
These type parameters are called <i>bound</i> type parameters.
For instance, in the <code>dedup</code> example above, the type parameters
<code>P</code> and <code>E</code> are bound to <code>dedup</code>.
An argument to a generic function call may be a generic function itself.
The type parameters of that function are included in the set of bound
type parameters.
The types of function arguments may contain type parameters from other
functions (such as a generic function enclosing a function call).
Those type parameters may also appear in type equations but they are
not bound in that context.
Type equations are always solved for the bound type parameters only.
</p>

<p>
Type inference supports calls of generic functions and assignments
of generic functions to (explicitly function-typed) variables.
This includes passing generic functions as arguments to other
(possibly also generic) functions, and returning generic functions
as results.
Type inference operates on a set of equations specific to each of
these cases.
The equations are as follows (type argument lists are omitted for clarity):
</p>

<ul>
<li>
a <a href="#Type_parameter_declarations">type parameter list</a>
<p>
For a function call <code>f(a<sub>0</sub>, a<sub>1</sub>, …)</code> where
<code>f</code> or a function argument <code>a<sub>i</sub></code> is
a generic function:
<br>
Each pair <code>(a<sub>i</sub>, p<sub>i</sub>)</code> of corresponding
function arguments and parameters where <code>a<sub>i</sub></code> is not an
<a href="#Constants">untyped constant</a> yields an equation
<code>typeof(p<sub>i</sub>) ≡<sub>A</sub> typeof(a<sub>i</sub>)</code>.
<br>
If <code>a<sub>i</sub></code> is an untyped constant <code>c<sub>j</sub></code>,
and <code>typeof(p<sub>i</sub>)</code> is a bound type parameter <code>P<sub>k</sub></code>,
the pair <code>(c<sub>j</sub>, P<sub>k</sub>)</code> is collected separately from
the type equations.
</p>
</li>
<li>
a substitution map <i>M</i> initialized with the known type arguments, if any
<p>
For an assignment <code>v = f</code> of a generic function <code>f</code> to a
(non-generic) variable <code>v</code> of function type:
<br>
<code>typeof(v) ≡<sub>A</sub> typeof(f)</code>.
</p>
</li>
<li>
a (possibly empty) list of ordinary function arguments (in case of a function call only)
<p>
For a return statement <code>return …, f, … </code> where <code>f</code> is a
generic function returned as a result to a (non-generic) result variable
<code>r</code> of function type:
<br>
<code>typeof(r) ≡<sub>A</sub> typeof(f)</code>.
</p>
</li>
</ul>

<p>
and then proceeds with the following steps:
Additionally, each type parameter <code>P<sub>k</sub></code> and corresponding type constraint
<code>C<sub>k</sub></code> yields the type equation
<code>P<sub>k</sub> ≡<sub>C</sub> C<sub>k</sub></code>.
</p>

<p>
Type inference gives precedence to type information obtained from typed operands
before considering untyped constants.
Therefore, inference proceeds in two phases:
</p>

<ol>
<li>
apply <a href="#Function_argument_type_inference"><i>function argument type inference</i></a>
to all <i>typed</i> ordinary function arguments
<p>
The type equations are solved for the bound
type parameters using <a href="#Type_unification">type unification</a>.
If unification fails, type inference fails.
</p>
</li>
<li>
apply <a href="#Constraint_type_inference"><i>constraint type inference</i></a>
</li>
<li>
apply function argument type inference to all <i>untyped</i> ordinary function arguments
using the default type for each of the untyped function arguments
</li>
<li>
apply constraint type inference
<p>
For each bound type parameter <code>P<sub>k</sub></code> for which no type argument
has been inferred yet and for which one or more pairs
<code>(c<sub>j</sub>, P<sub>k</sub>)</code> with that same type parameter
were collected, determine the <a href="#Constant_expressions">constant kind</a>
of the constants <code>c<sub>j</sub></code> in all those pairs the same way as for
<a href="#Constant_expressions">constant expressions</a>.
The type argument for <code>P<sub>k</sub></code> is the
<a href="#Constants">default type</a> for the determined constant kind.
If a constant kind cannot be determined due to conflicting constant kinds,
type inference fails.
</p>
</li>
</ol>

<p>
If there are no ordinary or untyped function arguments, the respective steps are skipped.
Constraint type inference is skipped if the previous step didn't infer any new type arguments,
but it is run at least once if there are missing type arguments.
If not all type arguments have been found after these two phases, type inference fails.
</p>

<p>
The substitution map <i>M</i> is carried through all steps, and each step may add entries to <i>M</i>.
The process stops as soon as <i>M</i> has a type argument for each type parameter or if an inference step fails.
If an inference step fails, or if <i>M</i> is still missing type arguments after the last step, type inference fails.
If the two phases are successful, type inference determined a type argument for each
bound type parameter:
</p>

<pre>
P<sub>k</sub> ➞ A<sub>k</sub>
</pre>

<p>
A type argument <code>A<sub>k</sub></code> may be a composite type,
containing other bound type parameters <code>P<sub>k</sub></code> as element types
(or even be just another bound type parameter).
In a process of repeated simplification, the bound type parameters in each type
argument are substituted with the respective type arguments for those type
parameters until each type argument is free of bound type parameters.
</p>

<p>
If type arguments contain cyclic references to themselves
through bound type parameters, simplification and thus type
inference fails.
Otherwise, type inference succeeds.
</p>

<h4 id="Type_unification">Type unification</h4>

<p>
Type inference is based on <i>type unification</i>. A single unification step
applies to a <a href="#Type_inference">substitution map</a> and two types, either
or both of which may be or contain type parameters. The substitution map tracks
the known (explicitly provided or already inferred) type arguments: the map
contains an entry <code>P</code> → <code>A</code> for each type
parameter <code>P</code> and corresponding known type argument <code>A</code>.
During unification, known type arguments take the place of their corresponding type
parameters when comparing types. Unification is the process of finding substitution
map entries that make the two types equivalent.
Type inference solves type equations through <i>type unification</i>.
Type unification recursively compares the LHS and RHS types of an
equation, where either or both types may be or contain bound type parameters,
and looks for type arguments for those type parameters such that the LHS
and RHS match (become identical or assignment-compatible, depending on
context).
To that effect, type inference maintains a map of bound type parameters
to inferred type arguments; this map is consulted and updated during type unification.
Initially, the bound type parameters are known but the map is empty.
During type unification, if a new type argument <code>A</code> is inferred,
the respective mapping <code>P ➞ A</code> from type parameter to argument
is added to the map.
Conversely, when comparing types, a known type argument
(a type argument for which a map entry already exists)
takes the place of its corresponding type parameter.
As type inference progresses, the map is populated more and more
until all equations have been considered, or until unification fails.
Type inference succeeds if no unification step fails and the map has
an entry for each type parameter.
</p>

<p>
For unification, two types that don't contain any type parameters from the current type
parameter list are <i>equivalent</i>
if they are identical, or if they are channel types that are identical ignoring channel
direction, or if their underlying types are equivalent.
</p>

<p>
Unification works by comparing the structure of pairs of types: their structure
disregarding type parameters must be identical, and types other than type parameters
must be equivalent.
A type parameter in one type may match any complete subtype in the other type;
each successful match causes an entry to be added to the substitution map.
If the structure differs, or types other than type parameters are not equivalent,
unification fails.
</p>

<!--
TODO(gri) Somewhere we need to describe the process of adding an entry to the
substitution map: if the entry is already present, the type argument
values are themselves unified.
-->

<p>
For example, if <code>T1</code> and <code>T2</code> are type parameters,
<code>[]map[int]bool</code> can be unified with any of the following:
</pre>
For example, given the type equation with the bound type parameter
<code>P</code>
</p>

<pre>
[]map[int]bool // types are identical
T1 // adds T1 → []map[int]bool to substitution map
[]T1 // adds T1 → map[int]bool to substitution map
[]map[T1]T2 // adds T1 → int and T2 → bool to substitution map
[10]struct{ elem P, list []P } ≡<sub>A</sub> [10]struct{ elem string; list []string }
</pre>

<p>
On the other hand, <code>[]map[int]bool</code> cannot be unified with any of
</p>

<pre>
int // int is not a slice
struct{} // a struct is not a slice
[]struct{} // a struct is not a map
[]map[T1]string // map element types don't match
</pre>

<p>
As an exception to this general rule, because a <a href="#Type_definitions">defined type</a>
<code>D</code> and a type literal <code>L</code> are never equivalent,
unification compares the underlying type of <code>D</code> with <code>L</code> instead.
For example, given the defined type
</p>

<pre>
type Vector []float64
</pre>

<p>
and the type literal <code>[]E</code>, unification compares <code>[]float64</code> with
<code>[]E</code> and adds an entry <code>E</code> → <code>float64</code> to
the substitution map.
</p>

<h4 id="Function_argument_type_inference">Function argument type inference</h4>

<!-- In this section and the section on constraint type inference we start with examples
rather than have the examples follow the rules as is customary elsewhere in spec.
Hopefully this helps building an intuition and makes the rules easier to follow. -->

<p>
Function argument type inference infers type arguments from function arguments:
if a function parameter is declared with a type <code>T</code> that uses
type parameters,
<a href="#Type_unification">unifying</a> the type of the corresponding
function argument with <code>T</code> may infer type arguments for the type
parameters used by <code>T</code>.
type inference starts with an empty map.
Unification first compares the top-level structure of the LHS and RHS
types.
Both are arrays of the same length; they unify if the element types unify.
Both element types are structs; they unify if they have
the same number of fields with the same names and if the
field types unify.
The type argument for <code>P</code> is not known yet (there is no map entry),
so unifying <code>P</code> with <code>string</code> adds
the mapping <code>P ➞ string</code> to the map.
Unifying the types of the <code>list</code> field requires
unifying <code>[]P</code> and <code>[]string</code> and
thus <code>P</code> and <code>string</code>.
Since the type argument for <code>P</code> is known at this point
(there is a map entry for <code>P</code>), its type argument
<code>string</code> takes the place of <code>P</code>.
And since <code>string</code> is identical to <code>string</code>,
this unification step succeeds as well.
Unification of the LHS and RHS of the equation is now finished.
Type inference succeeds because there is only one type equation,
no unification step failed, and the map is fully populated.
</p>
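As an editorial aside, the equation walked through above corresponds to a call like the following sketch; g and its anonymous struct parameter type are hypothetical, chosen to match the [10]struct example in the equation.

```go
package main

import "fmt"

// g's parameter type matches the LHS of the example equation.
func g[P any](a [10]struct {
	elem P
	list []P
}) P {
	return a[0].elem
}

func main() {
	var a [10]struct {
		elem string
		list []string
	}
	a[0].elem = "hello"
	// Unifying the argument's type with g's parameter type infers P ➞ string.
	fmt.Println(g(a))
}
```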
<p>
For instance, given the generic function
</p>

<pre>
func scale[Number ~int64|~float64|~complex128](v []Number, s Number) []Number
</pre>

<p>
and the call
</p>

<pre>
var vector []float64
scaledVector := scale(vector, 42)
</pre>

<p>
the type argument for <code>Number</code> can be inferred from the function argument
<code>vector</code> by unifying the type of <code>vector</code> with the corresponding
parameter type: <code>[]float64</code> and <code>[]Number</code>
match in structure and <code>float64</code> matches with <code>Number</code>.
This adds the entry <code>Number</code> → <code>float64</code> to the
<a href="#Type_unification">substitution map</a>.
Untyped arguments, such as the second function argument <code>42</code> here, are ignored
in the first round of function argument type inference and only considered if there are
unresolved type parameters left.
Unification uses a combination of <i>exact</i> and <i>loose</i>
unification depending on whether two types have to be
<a href="#Type_identity">identical</a>,
<a href="#Assignability">assignment-compatible</a>, or
only structurally equal.
The respective <a href="#Type_unification_rules">type unification rules</a>
are spelled out in detail in the <a href="#Appendix">Appendix</a>.
</p>

<p>
Inference happens in two separate phases; each phase operates on a specific list of
(parameter, argument) pairs:
For an equation of the form <code>X ≡<sub>A</sub> Y</code>,
where <code>X</code> and <code>Y</code> are types involved
in an assignment (including parameter passing and return statements),
the top-level type structures may unify loosely but element types
must unify exactly, matching the rules for assignments.
</p>

<ol>
<p>
For an equation of the form <code>P ≡<sub>C</sub> C</code>,
where <code>P</code> is a type parameter and <code>C</code>
its corresponding constraint, the unification rules are bit
more complicated:
</p>

<ul>
<li>
The list <i>Lt</i> contains all (parameter, argument) pairs where the parameter
type uses type parameters and where the function argument is <i>typed</i>.
If <code>C</code> has a <a href="#Core_types">core type</a>
<code>core(C)</code>
and <code>P</code> has a known type argument <code>A</code>,
<code>core(C)</code> and <code>A</code> must unify loosely.
If <code>P</code> does not have a known type argument
and <code>C</code> contains exactly one type term <code>T</code>
that is not an underlying (tilde) type, unification adds the
mapping <code>P ➞ T</code> to the map.
</li>
<li>
The list <i>Lu</i> contains all remaining pairs where the parameter type is a single
type parameter. In this list, the respective function arguments are untyped.
If <code>C</code> does not have a core type
and <code>P</code> has a known type argument <code>A</code>,
<code>A</code> must have all methods of <code>C</code>, if any,
and corresponding method types must unify exactly.
</li>
</ol>
</ul>

<p>
Any other (parameter, argument) pair is ignored.
</p>

<p>
By construction, the arguments of the pairs in <i>Lu</i> are <i>untyped</i> constants
(or the untyped boolean result of a comparison). And because <a href="#Constants">default types</a>
of untyped values are always predeclared non-composite types, they can never match against
a composite type, so it is sufficient to only consider parameter types that are single type
parameters.
</p>

<p>
Each list is processed in a separate phase:
</p>

<ol>
<li>
In the first phase, the parameter and argument types of each pair in <i>Lt</i>
are unified. If unification succeeds for a pair, it may yield new entries that
are added to the substitution map <i>M</i>. If unification fails, type inference
fails.
</li>
<li>
The second phase considers the entries of list <i>Lu</i>. Type parameters for
which the type argument has already been determined are ignored in this phase.
For each remaining pair, the parameter type (which is a single type parameter) and
the <a href="#Constants">default type</a> of the corresponding untyped argument is
unified. If unification fails, type inference fails.
</li>
</ol>

<p>
While unification is successful, processing of each list continues until all list elements
are considered, even if all type arguments are inferred before the last list element has
been processed.
</p>

<p>
Example:
</p>

<pre>
func min[T ~int|~float64](x, y T) T

var x int
min(x, 2.0) // T is int, inferred from typed argument x; 2.0 is assignable to int
min(1.0, 2.0) // T is float64, inferred from default type for 1.0 and matches default type for 2.0
min(1.0, 2) // illegal: default type float64 (for 1.0) doesn't match default type int (for 2)
</pre>

<p>
In the example <code>min(1.0, 2)</code>, processing the function argument <code>1.0</code>
yields the substitution map entry <code>T</code> → <code>float64</code>. Because
processing continues until all untyped arguments are considered, an error is reported. This
ensures that type inference does not depend on the order of the untyped arguments.
</p>

<h4 id="Constraint_type_inference">Constraint type inference</h4>

<p>
Constraint type inference infers type arguments by considering type constraints.
If a type parameter <code>P</code> has a constraint with a
<a href="#Core_types">core type</a> <code>C</code>,
<a href="#Type_unification">unifying</a> <code>P</code> with <code>C</code>
may infer additional type arguments, either the type argument for <code>P</code>,
or if that is already known, possibly the type arguments for type parameters
used in <code>C</code>.
</p>

<p>
For instance, consider the type parameter list with type parameters <code>List</code> and
<code>Elem</code>:
</p>

<pre>
[List ~[]Elem, Elem any]
</pre>

<p>
Constraint type inference can deduce the type of <code>Elem</code> from the type argument
for <code>List</code> because <code>Elem</code> is a type parameter in the core type
<code>[]Elem</code> of <code>List</code>.
If the type argument is <code>Bytes</code>:
</p>

<pre>
type Bytes []byte
</pre>

<p>
unifying the underlying type of <code>Bytes</code> with the core type means
unifying <code>[]byte</code> with <code>[]Elem</code>. That unification succeeds and yields
the <a href="#Type_unification">substitution map</a> entry
<code>Elem</code> → <code>byte</code>.
Thus, in this example, constraint type inference can infer the second type argument from the
first one.
</p>
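A sketch of the constraint-type-inference case described above, using the spec's Bytes and [List ~[]Elem, Elem any] declarations; the function last is a hypothetical addition for illustration only.

```go
package main

import "fmt"

type Bytes []byte

// last is hypothetical; its type parameter list matches the spec's example.
func last[List ~[]Elem, Elem any](l List) Elem {
	return l[len(l)-1]
}

func main() {
	b := Bytes("hello")
	// List is inferred as Bytes from the argument; Elem is then inferred
	// as byte from the core type []Elem of List's constraint.
	fmt.Println(last(b)) // 111, the byte value of 'o'
}
```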
<p>
Using the core type of a constraint may lose some information: In the (unlikely) case that
the constraint's type set contains a single <a href="#Type_definitions">defined type</a>
<code>N</code>, the corresponding core type is <code>N</code>'s underlying type rather than
<code>N</code> itself. In this case, constraint type inference may succeed but instantiation
will fail because the inferred type is not in the type set of the constraint.
Thus, constraint type inference uses the <i>adjusted core type</i> of
a constraint: if the type set contains a single type, use that type; otherwise use the
constraint's core type.
</p>

<p>
Generally, constraint type inference proceeds in two phases: Starting with a given
substitution map <i>M</i>
</p>

<ol>
<li>
For all type parameters with an adjusted core type, unify the type parameter with that
type. If any unification fails, constraint type inference fails.
</li>

<li>
At this point, some entries in <i>M</i> may map type parameters to other
type parameters or to types containing type parameters. For each entry
<code>P</code> → <code>A</code> in <i>M</i> where <code>A</code> is or
contains type parameters <code>Q</code> for which there exist entries
<code>Q</code> → <code>B</code> in <i>M</i>, substitute those
<code>Q</code> with the respective <code>B</code> in <code>A</code>.
Stop when no further substitution is possible.
</li>
</ol>

<p>
The result of constraint type inference is the final substitution map <i>M</i> from type
parameters <code>P</code> to type arguments <code>A</code> where no type parameter <code>P</code>
appears in any of the <code>A</code>.
</p>

<p>
For instance, given the type parameter list
</p>

<pre>
[A any, B []C, C *A]
</pre>

<p>
and the single provided type argument <code>int</code> for type parameter <code>A</code>,
the initial substitution map <i>M</i> contains the entry <code>A</code> → <code>int</code>.
</p>

<p>
In the first phase, the type parameters <code>B</code> and <code>C</code> are unified
with the core type of their respective constraints. This adds the entries
<code>B</code> → <code>[]C</code> and <code>C</code> → <code>*A</code>
to <i>M</i>.

<p>
At this point there are two entries in <i>M</i> where the right-hand side
is or contains type parameters for which there exists other entries in <i>M</i>:
<code>[]C</code> and <code>*A</code>.
In the second phase, these type parameters are replaced with their respective
types. It doesn't matter in which order this happens. Starting with the state
of <i>M</i> after the first phase:
</p>

<p>
<code>A</code> → <code>int</code>,
<code>B</code> → <code>[]C</code>,
<code>C</code> → <code>*A</code>
</p>

<p>
Replace <code>A</code> on the right-hand side of → with <code>int</code>:
</p>

<p>
<code>A</code> → <code>int</code>,
<code>B</code> → <code>[]C</code>,
<code>C</code> → <code>*int</code>
</p>

<p>
Replace <code>C</code> on the right-hand side of → with <code>*int</code>:
</p>

<p>
<code>A</code> → <code>int</code>,
<code>B</code> → <code>[]*int</code>,
<code>C</code> → <code>*int</code>
</p>

<p>
At this point no further substitution is possible and the map is full.
Therefore, <code>M</code> represents the final map of type parameters
to type arguments for the given type parameter list.
When solving type equations from type constraints,
solving one equation may infer additional type arguments,
which in turn may enable solving other equations that depend
on those type arguments.
Type inference repeats type unification as long as new type
arguments are inferred.
</p>
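For illustration, the parameter list discussed above can be exercised as in the following sketch; the function f and its body are hypothetical, only the type parameter list comes from the text.

```go
package main

import "fmt"

// f is hypothetical; its type parameter list matches the example above.
func f[A any, B []C, C *A](b B) A {
	return *b[0]
}

func main() {
	x := 42
	// Only A is provided explicitly; constraint type inference fills in
	// B ➞ []*int and C ➞ *int, as in the substitution map walk-through.
	fmt.Println(f[int]([]*int{&x})) // 42
}
```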
<h3 id="Operators">Operators</h3>

@@ -5479,7 +5400,7 @@ in any of these cases:
ignoring struct tags (see below),
<code>x</code>'s type and <code>T</code> are not
<a href="#Type_parameter_declarations">type parameters</a> but have
<a href="#Type_identity">identical</a> <a href="#Types">underlying types</a>.
<a href="#Type_identity">identical</a> <a href="#Underlying_types">underlying types</a>.
</li>
<li>
ignoring struct tags (see below),

@@ -7324,7 +7245,8 @@ clear(t) type parameter see below
</pre>

<p>
If the argument type is a <a href="#Type_parameter_declarations">type parameter</a>,
If the type of the argument to <code>clear</code> is a
<a href="#Type_parameter_declarations">type parameter</a>,
all types in its type set must be maps or slices, and <code>clear</code>
performs the operation corresponding to the actual type argument.
</p>
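A sketch of the clear-on-a-type-parameter case described above; reset is a hypothetical helper, clear is the Go 1.21 built-in.

```go
package main

import "fmt"

// reset is hypothetical: every type in M's type set is a map type,
// so clear is permitted on a value of the type parameter.
func reset[M ~map[K]V, K comparable, V any](m M) {
	clear(m)
}

func main() {
	m := map[string]int{"a": 1, "b": 2}
	reset(m)
	fmt.Println(len(m)) // 0
}
```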
@@ -8290,7 +8212,7 @@ of if the general conversion rules take care of this.
<p>
A <code>Pointer</code> is a <a href="#Pointer_types">pointer type</a> but a <code>Pointer</code>
value may not be <a href="#Address_operators">dereferenced</a>.
Any pointer or value of <a href="#Types">underlying type</a> <code>uintptr</code> can be
Any pointer or value of <a href="#Underlying_types">underlying type</a> <code>uintptr</code> can be
<a href="#Conversions">converted</a> to a type of underlying type <code>Pointer</code> and vice versa.
The effect of converting between <code>Pointer</code> and <code>uintptr</code> is implementation-defined.
</p>

@@ -8438,3 +8360,145 @@ The following minimal alignment properties are guaranteed:
<p>
A struct or array type has size zero if it contains no fields (or elements, respectively) that have a size greater than zero. Two distinct zero-size variables may have the same address in memory.
</p>

<h2 id="Appendix">Appendix</h2>

<h3 id="Type_unification_rules">Type unification rules</h3>

<p>
The type unification rules describe if and how two types unify.
The precise details are relevant for Go implementations,
affect the specifics of error messages (such as whether
a compiler reports a type inference or other error),
and may explain why type inference fails in unusual code situations.
But by and large these rules can be ignored when writing Go code:
type inference is designed to mostly "work as expected",
and the unification rules are fine-tuned accordingly.
</p>

<p>
Type unification is controlled by a <i>matching mode</i>, which may
be <i>exact</i> or <i>loose</i>.
As unification recursively descends a composite type structure,
the matching mode used for elements of the type, the <i>element matching mode</i>,
remains the same as the matching mode except when two types are unified for
<a href="#Assignability">assignability</a> (<code>≡<sub>A</sub></code>):
in this case, the matching mode is <i>loose</i> at the top level but
then changes to <i>exact</i> for element types, reflecting the fact
that types don't have to be identical to be assignable.
</p>

<p>
Two types that are not bound type parameters unify exactly if any of
following conditions is true:
</p>

<ul>
<li>
Both types are <a href="#Type_identity">identical</a>.
</li>
<li>
Both types have identical structure and their element types
unify exactly.
</li>
<li>
Exactly one type is an <a href="#Type_inference">unbound</a>
type parameter with a <a href="#Core_types">core type</a>,
and that core type unifies with the other type per the
unification rules for <code>≡<sub>A</sub></code>
(loose unification at the top level and exact unification
for element types).
</li>
</ul>

<p>
If both types are bound type parameters, they unify per the given
matching modes if:
</p>

<ul>
<li>
Both type parameters are identical.
</li>
<li>
At most one of the type parameters has a known type argument.
In this case, the type parameters are <i>joined</i>:
they both stand for the same type argument.
If neither type parameter has a known type argument yet,
a future type argument inferred for one the type parameters
is simultaneously inferred for both of them.
</li>
<li>
Both type parameters have a known type argument
and the type arguments unify per the given matching modes.
</li>
</ul>

<p>
A single bound type parameter <code>P</code> and another type <code>T</code> unify
per the given matching modes if:
</p>

<ul>
<li>
<code>P</code> doesn't have a known type argument.
In this case, <code>T</code> is inferred as the type argument for <code>P</code>.
</li>
<li>
<code>P</code> does have a known type argument <code>A</code>,
<code>A</code> and <code>T</code> unify per the given matching modes,
and one of the following conditions is true:
<ul>
<li>
Both <code>A</code> and <code>T</code> are interface types:
In this case, if both <code>A</code> and <code>T</code> are
also <a href="#Type_definitions">defined</a> types,
they must be <a href="#Type_identity">identical</a>.
Otherwise, if neither of them is a defined type, they must
have the same number of methods
(unification of <code>A</code> and <code>T</code> already
established that the methods match).
</li>
<li>
Neither <code>A</code> nor <code>T</code> are interface types:
In this case, if <code>T</code> is a defined type, <code>T</code>
replaces <code>A</code> as the inferred type argument for <code>P</code>.
</li>
</ul>
</li>
</ul>

<p>
Finally, two types that are not bound type parameters unify loosely
(and per the element matching mode) if:
</p>

<ul>
<li>
Both types unify exactly.
</li>
<li>
One type is a <a href="#Type_definitions">defined type</a>,
the other type is a type literal, but not an interface,
and their underlying types unify per the element matching mode.
</li>
<li>
Both types are interfaces (but not type parameters) with
identical <a href="#Interface_types">type terms</a>,
both or neither embed the predeclared type
<a href="#Predeclared_identifiers">comparable</a>,
corresponding method types unify per the element matching mode,
and the method set of one of the interfaces is a subset of
the method set of the other interface.
</li>
<li>
Only one type is an interface (but not a type parameter),
corresponding methods of the two types unify per the element matching mode,
and the method set of the interface is a subset of
the method set of the other type.
</li>
<li>
Both types have the same structure and their element types
unify per the element matching mode.
</li>
</ul>
@@ -126,6 +126,14 @@ for example,
see the [runtime documentation](/pkg/runtime#hdr-Environment_Variables)
and the [go command documentation](/cmd/go#hdr-Build_and_test_caching).

### Go 1.22

Go 1.22 adds a configurable limit to control the maximum acceptable RSA key size
that can be used in TLS handshakes, controlled by the [`tlsmaxrsasize` setting](/pkg/crypto/tls#Conn.Handshake).
The default is tlsmaxrsasize=8192, limiting RSA to 8192-bit keys. To avoid
denial of service attacks, this setting and default were backported to Go
1.19.13, Go 1.20.8, and Go 1.21.1.
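As a hedged illustration of the setting described above, one way to apply it is to set GODEBUG in the environment of the serving process; the ./server binary name below is an assumption, and the sketch simply launches a child process with the variable set.

```go
package main

import (
	"os"
	"os/exec"
)

func main() {
	// Launch a TLS-serving binary with a higher RSA key-size limit for
	// handshakes; the value is the maximum accepted key size in bits.
	cmd := exec.Command("./server") // hypothetical binary
	cmd.Env = append(os.Environ(), "GODEBUG=tlsmaxrsasize=16384")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		os.Exit(1)
	}
}
```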
### Go 1.21

Go 1.21 made it a run-time error to call `panic` with a nil interface value,
@@ -10,12 +10,12 @@ case "$GOWASIRUNTIME" in
"wasmer")
exec wasmer run --dir=/ --env PWD="$PWD" --env PATH="$PATH" ${GOWASIRUNTIMEARGS:-} "$1" -- "${@:2}"
;;
"wasmtime")
exec wasmtime run --dir=/ --env PWD="$PWD" --env PATH="$PATH" --max-wasm-stack 1048576 ${GOWASIRUNTIMEARGS:-} "$1" -- "${@:2}"
;;
"wazero" | "")
"wazero")
exec wazero run -mount /:/ -env-inherit -cachedir "${TMPDIR:-/tmp}"/wazero ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}"
;;
"wasmtime" | "")
exec wasmtime run --dir=/ --env PWD="$PWD" --env PATH="$PATH" --max-wasm-stack 1048576 ${GOWASIRUNTIMEARGS:-} "$1" -- "${@:2}"
;;
*)
echo "Unknown Go WASI runtime specified: $GOWASIRUNTIME"
exit 1
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package api
package main

import (
"flag"

@@ -4,7 +4,7 @@
//go:build boringcrypto

package api
package main

import (
"fmt"

@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package api computes the exported API of a set of Go packages.
// This package computes the exported API of a set of Go packages.
// It is only a test, not a command, nor a usefully importable package.
package api

package main

import (
"bufio"
40 src/cmd/asm/internal/asm/testdata/riscv64.s
@@ -183,28 +183,28 @@ start:
// 8.2: Load-Reserved/Store-Conditional
LRW (X5), X6 // 2fa30214
LRD (X5), X6 // 2fb30214
SCW X5, (X6), X7 // af23531c
SCD X5, (X6), X7 // af33531c
SCW X5, (X6), X7 // af23531a
SCD X5, (X6), X7 // af33531a

// 8.3: Atomic Memory Operations
AMOSWAPW X5, (X6), X7 // af23530c
AMOSWAPD X5, (X6), X7 // af33530c
AMOADDW X5, (X6), X7 // af235304
AMOADDD X5, (X6), X7 // af335304
AMOANDW X5, (X6), X7 // af235364
AMOANDD X5, (X6), X7 // af335364
AMOORW X5, (X6), X7 // af235344
AMOORD X5, (X6), X7 // af335344
AMOXORW X5, (X6), X7 // af235324
AMOXORD X5, (X6), X7 // af335324
AMOMAXW X5, (X6), X7 // af2353a4
AMOMAXD X5, (X6), X7 // af3353a4
AMOMAXUW X5, (X6), X7 // af2353e4
AMOMAXUD X5, (X6), X7 // af3353e4
AMOMINW X5, (X6), X7 // af235384
AMOMIND X5, (X6), X7 // af335384
AMOMINUW X5, (X6), X7 // af2353c4
AMOMINUD X5, (X6), X7 // af3353c4
AMOSWAPW X5, (X6), X7 // af23530e
AMOSWAPD X5, (X6), X7 // af33530e
AMOADDW X5, (X6), X7 // af235306
AMOADDD X5, (X6), X7 // af335306
AMOANDW X5, (X6), X7 // af235366
AMOANDD X5, (X6), X7 // af335366
AMOORW X5, (X6), X7 // af235346
AMOORD X5, (X6), X7 // af335346
AMOXORW X5, (X6), X7 // af235326
AMOXORD X5, (X6), X7 // af335326
AMOMAXW X5, (X6), X7 // af2353a6
AMOMAXD X5, (X6), X7 // af3353a6
AMOMAXUW X5, (X6), X7 // af2353e6
AMOMAXUD X5, (X6), X7 // af3353e6
AMOMINW X5, (X6), X7 // af235386
AMOMIND X5, (X6), X7 // af335386
AMOMINUW X5, (X6), X7 // af2353c6
AMOMINUD X5, (X6), X7 // af3353c6

// 10.1: Base Counters and Timers
RDCYCLE X5 // f32200c0
@@ -1,7 +1,7 @@
package cgotest

/*
#cgo LDFLAGS: -lm
#cgo !darwin LDFLAGS: -lm
#include <math.h>
*/
import "C"

@@ -1,7 +1,7 @@
package issue8756

/*
#cgo LDFLAGS: -lm
#cgo !darwin LDFLAGS: -lm
#include <math.h>
*/
import "C"

@@ -23,7 +23,7 @@ package cgotest
#include <unistd.h>
#include <sys/stat.h>
#include <errno.h>
#cgo LDFLAGS: -lm
#cgo !darwin LDFLAGS: -lm

#ifndef WIN32
#include <pthread.h>
@@ -389,9 +389,11 @@ func TestForkExec(t *testing.T) {
}
}

func TestGeneric(t *testing.T) {
func TestSymbolNameMangle(t *testing.T) {
// Issue 58800: generic function name may contain weird characters
// that confuse the external linker.
// Issue 62098: the name mangling code doesn't handle some string
// symbols correctly.
globalSkip(t)
goCmd(t, "build", "-buildmode=plugin", "-o", "generic.so", "./generic/plugin.go")
goCmd(t, "build", "-buildmode=plugin", "-o", "mangle.so", "./mangle/plugin.go")
}
@@ -2,21 +2,37 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Instantiated function name may contain weird characters
// that confuse the external linker, so it needs to be
// mangled.
// Test cases for symbol name mangling.

package main

//go:noinline
func F[T any]() {}
import (
"fmt"
"strings"
)

// Issue 58800:
// Instantiated function name may contain weird characters
// that confuse the external linker, so it needs to be
// mangled.
type S struct {
X int `parser:"|@@)"`
}

//go:noinline
func F[T any]() {}

func P() {
F[S]()
}

// Issue 62098: the name mangling code doesn't handle some string
// symbols correctly.
func G(id string) error {
if strings.ContainsAny(id, "&$@;/:+,?\\{^}%`]\">[~<#|") {
return fmt.Errorf("invalid")
}
return nil
}

func main() {}
@@ -1571,14 +1571,6 @@
// zero upper bit of the register; no need to zero-extend
(MOVBUreg x:((Equal|NotEqual|LessThan|LessThanU|LessThanF|LessEqual|LessEqualU|LessEqualF|GreaterThan|GreaterThanU|GreaterThanF|GreaterEqual|GreaterEqualU|GreaterEqualF) _)) => (MOVDreg x)

// omit unsign extension
(MOVWUreg x) && zeroUpper32Bits(x, 3) => x

// omit sign extension
(MOVWreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffff80000000) == 0 => (ANDconst <t> x [c])
(MOVHreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffffffff8000) == 0 => (ANDconst <t> x [c])
(MOVBreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffffffffff80) == 0 => (ANDconst <t> x [c])

// absorb flag constants into conditional instructions
(CSEL [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
(CSEL [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => y

@@ -13,7 +13,6 @@ import "strings"
// - *const instructions may use a constant larger than the instruction can encode.
// In this case the assembler expands to multiple instructions and uses tmp
// register (R27).
// - All 32-bit Ops will zero the upper 32 bits of the destination register.

// Suffixes encode the bit width of various instructions.
// D (double word) = 64 bit
@@ -855,7 +855,7 @@ func storeOneArg(x *expandState, pos src.XPos, b *Block, locs []*LocalSlot, suff
|
||||
// storeOneLoad creates a decomposed (one step) load that is then stored.
|
||||
func storeOneLoad(x *expandState, pos src.XPos, b *Block, source, mem *Value, t *types.Type, offArg, offStore int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
|
||||
from := x.offsetFrom(source.Block, source.Args[0], offArg, types.NewPtr(t))
|
||||
w := source.Block.NewValue2(source.Pos, OpLoad, t, from, mem)
|
||||
w := b.NewValue2(source.Pos, OpLoad, t, from, mem)
|
||||
return x.storeArgOrLoad(pos, b, w, mem, t, offStore, loadRegOffset, storeRc)
|
||||
}
|
||||
|
||||
@@ -962,7 +962,7 @@ func (x *expandState) storeArgOrLoad(pos src.XPos, b *Block, source, mem *Value,
|
||||
eltRO := x.regWidth(elt)
|
||||
source.Type = t
|
||||
for i := int64(0); i < t.NumElem(); i++ {
|
||||
sel := source.Block.NewValue1I(pos, OpArraySelect, elt, i, source)
|
||||
sel := b.NewValue1I(pos, OpArraySelect, elt, i, source)
|
||||
mem = x.storeArgOrLoad(pos, b, sel, mem, elt, storeOffset+i*elt.Size(), loadRegOffset, storeRc.at(t, 0))
|
||||
loadRegOffset += eltRO
|
||||
pos = pos.WithNotStmt()
|
||||
@@ -997,7 +997,7 @@ func (x *expandState) storeArgOrLoad(pos src.XPos, b *Block, source, mem *Value,
|
||||
source.Type = t
|
||||
for i := 0; i < t.NumFields(); i++ {
|
||||
fld := t.Field(i)
|
||||
sel := source.Block.NewValue1I(pos, OpStructSelect, fld.Type, int64(i), source)
|
||||
sel := b.NewValue1I(pos, OpStructSelect, fld.Type, int64(i), source)
|
||||
mem = x.storeArgOrLoad(pos, b, sel, mem, fld.Type, storeOffset+fld.Offset, loadRegOffset, storeRc.next(fld.Type))
|
||||
loadRegOffset += x.regWidth(fld.Type)
|
||||
pos = pos.WithNotStmt()
|
||||
@@ -1009,48 +1009,48 @@ func (x *expandState) storeArgOrLoad(pos src.XPos, b *Block, source, mem *Value,
|
||||
break
|
||||
}
|
||||
tHi, tLo := x.intPairTypes(t.Kind())
|
||||
sel := source.Block.NewValue1(pos, OpInt64Hi, tHi, source)
|
||||
sel := b.NewValue1(pos, OpInt64Hi, tHi, source)
|
||||
mem = x.storeArgOrLoad(pos, b, sel, mem, tHi, storeOffset+x.hiOffset, loadRegOffset+x.hiRo, storeRc.plus(x.hiRo))
|
||||
pos = pos.WithNotStmt()
|
||||
sel = source.Block.NewValue1(pos, OpInt64Lo, tLo, source)
|
||||
sel = b.NewValue1(pos, OpInt64Lo, tLo, source)
|
||||
return x.storeArgOrLoad(pos, b, sel, mem, tLo, storeOffset+x.lowOffset, loadRegOffset+x.loRo, storeRc.plus(x.hiRo))
|
||||
|
||||
case types.TINTER:
|
||||
sel := source.Block.NewValue1(pos, OpITab, x.typs.BytePtr, source)
|
||||
sel := b.NewValue1(pos, OpITab, x.typs.BytePtr, source)
|
||||
mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.BytePtr, storeOffset, loadRegOffset, storeRc.next(x.typs.BytePtr))
|
||||
pos = pos.WithNotStmt()
|
||||
sel = source.Block.NewValue1(pos, OpIData, x.typs.BytePtr, source)
|
||||
sel = b.NewValue1(pos, OpIData, x.typs.BytePtr, source)
|
||||
return x.storeArgOrLoad(pos, b, sel, mem, x.typs.BytePtr, storeOffset+x.ptrSize, loadRegOffset+RO_iface_data, storeRc)
|
||||
|
||||
case types.TSTRING:
|
||||
sel := source.Block.NewValue1(pos, OpStringPtr, x.typs.BytePtr, source)
|
||||
sel := b.NewValue1(pos, OpStringPtr, x.typs.BytePtr, source)
|
||||
mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.BytePtr, storeOffset, loadRegOffset, storeRc.next(x.typs.BytePtr))
|
||||
pos = pos.WithNotStmt()
|
||||
sel = source.Block.NewValue1(pos, OpStringLen, x.typs.Int, source)
|
||||
sel = b.NewValue1(pos, OpStringLen, x.typs.Int, source)
|
||||
return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Int, storeOffset+x.ptrSize, loadRegOffset+RO_string_len, storeRc)
|
||||
|
||||
case types.TSLICE:
|
||||
et := types.NewPtr(t.Elem())
|
||||
sel := source.Block.NewValue1(pos, OpSlicePtr, et, source)
|
||||
sel := b.NewValue1(pos, OpSlicePtr, et, source)
|
||||
mem = x.storeArgOrLoad(pos, b, sel, mem, et, storeOffset, loadRegOffset, storeRc.next(et))
|
||||
pos = pos.WithNotStmt()
|
||||
sel = source.Block.NewValue1(pos, OpSliceLen, x.typs.Int, source)
|
||||
sel = b.NewValue1(pos, OpSliceLen, x.typs.Int, source)
|
||||
mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.Int, storeOffset+x.ptrSize, loadRegOffset+RO_slice_len, storeRc.next(x.typs.Int))
|
||||
sel = source.Block.NewValue1(pos, OpSliceCap, x.typs.Int, source)
|
||||
sel = b.NewValue1(pos, OpSliceCap, x.typs.Int, source)
|
||||
return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Int, storeOffset+2*x.ptrSize, loadRegOffset+RO_slice_cap, storeRc)
|
||||
|
||||
case types.TCOMPLEX64:
|
||||
sel := source.Block.NewValue1(pos, OpComplexReal, x.typs.Float32, source)
|
||||
sel := b.NewValue1(pos, OpComplexReal, x.typs.Float32, source)
|
||||
mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float32, storeOffset, loadRegOffset, storeRc.next(x.typs.Float32))
|
||||
pos = pos.WithNotStmt()
|
||||
sel = source.Block.NewValue1(pos, OpComplexImag, x.typs.Float32, source)
|
||||
sel = b.NewValue1(pos, OpComplexImag, x.typs.Float32, source)
|
||||
return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float32, storeOffset+4, loadRegOffset+RO_complex_imag, storeRc)
|
||||
|
||||
case types.TCOMPLEX128:
|
||||
sel := source.Block.NewValue1(pos, OpComplexReal, x.typs.Float64, source)
|
||||
sel := b.NewValue1(pos, OpComplexReal, x.typs.Float64, source)
|
||||
mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float64, storeOffset, loadRegOffset, storeRc.next(x.typs.Float64))
|
||||
pos = pos.WithNotStmt()
|
||||
sel = source.Block.NewValue1(pos, OpComplexImag, x.typs.Float64, source)
|
||||
sel = b.NewValue1(pos, OpComplexImag, x.typs.Float64, source)
|
||||
return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float64, storeOffset+8, loadRegOffset+RO_complex_imag, storeRc)
|
||||
}
|
||||
|
||||
@@ -1113,6 +1113,9 @@ func (x *expandState) rewriteArgs(v *Value, firstArg int) {
}
}
}
if x.debug > 1 {
x.Printf("...storeArg %s, %v, %d\n", a.LongString(), aType, aOffset)
}
// "Dereference" of addressed (probably not-SSA-eligible) value becomes Move
// TODO(register args) this will be more complicated with registers in the picture.
mem = x.rewriteDereference(v.Block, sp, a, mem, aOffset, aux.SizeOfArg(auxI), aType, v.Pos)

@@ -1281,10 +1281,6 @@ func zeroUpper32Bits(x *Value, depth int) bool {
|
||||
OpAMD64SHRL, OpAMD64SHRLconst, OpAMD64SARL, OpAMD64SARLconst,
|
||||
OpAMD64SHLL, OpAMD64SHLLconst:
|
||||
return true
|
||||
case OpARM64REV16W, OpARM64REVW, OpARM64RBITW, OpARM64CLZW, OpARM64EXTRWconst,
|
||||
OpARM64MULW, OpARM64MNEGW, OpARM64UDIVW, OpARM64DIVW, OpARM64UMODW,
|
||||
OpARM64MADDW, OpARM64MSUBW, OpARM64RORW, OpARM64RORWconst:
|
||||
return true
|
||||
case OpArg:
|
||||
return x.Type.Size() == 4
|
||||
case OpPhi, OpSelect0, OpSelect1:
|
||||
|
||||
@@ -8668,25 +8668,6 @@ func rewriteValueARM64_OpARM64MOVBreg(v *Value) bool {
|
||||
v.AuxInt = int64ToAuxInt(int64(int8(c)))
|
||||
return true
|
||||
}
|
||||
// match: (MOVBreg <t> (ANDconst x [c]))
|
||||
// cond: uint64(c) & uint64(0xffffffffffffff80) == 0
|
||||
// result: (ANDconst <t> x [c])
|
||||
for {
|
||||
t := v.Type
|
||||
if v_0.Op != OpARM64ANDconst {
|
||||
break
|
||||
}
|
||||
c := auxIntToInt64(v_0.AuxInt)
|
||||
x := v_0.Args[0]
|
||||
if !(uint64(c)&uint64(0xffffffffffffff80) == 0) {
|
||||
break
|
||||
}
|
||||
v.reset(OpARM64ANDconst)
|
||||
v.Type = t
|
||||
v.AuxInt = int64ToAuxInt(c)
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (MOVBreg (SLLconst [lc] x))
|
||||
// cond: lc < 8
|
||||
// result: (SBFIZ [armBFAuxInt(lc, 8-lc)] x)
|
||||
@@ -10765,25 +10746,6 @@ func rewriteValueARM64_OpARM64MOVHreg(v *Value) bool {
|
||||
v.AuxInt = int64ToAuxInt(int64(int16(c)))
|
||||
return true
|
||||
}
|
||||
// match: (MOVHreg <t> (ANDconst x [c]))
|
||||
// cond: uint64(c) & uint64(0xffffffffffff8000) == 0
|
||||
// result: (ANDconst <t> x [c])
|
||||
for {
|
||||
t := v.Type
|
||||
if v_0.Op != OpARM64ANDconst {
|
||||
break
|
||||
}
|
||||
c := auxIntToInt64(v_0.AuxInt)
|
||||
x := v_0.Args[0]
|
||||
if !(uint64(c)&uint64(0xffffffffffff8000) == 0) {
|
||||
break
|
||||
}
|
||||
v.reset(OpARM64ANDconst)
|
||||
v.Type = t
|
||||
v.AuxInt = int64ToAuxInt(c)
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (MOVHreg (SLLconst [lc] x))
|
||||
// cond: lc < 16
|
||||
// result: (SBFIZ [armBFAuxInt(lc, 16-lc)] x)
|
||||
@@ -11943,17 +11905,6 @@ func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool {
|
||||
v.AuxInt = int64ToAuxInt(int64(uint32(c)))
|
||||
return true
|
||||
}
|
||||
// match: (MOVWUreg x)
|
||||
// cond: zeroUpper32Bits(x, 3)
|
||||
// result: x
|
||||
for {
|
||||
x := v_0
|
||||
if !(zeroUpper32Bits(x, 3)) {
|
||||
break
|
||||
}
|
||||
v.copyOf(x)
|
||||
return true
|
||||
}
|
||||
// match: (MOVWUreg (SLLconst [lc] x))
|
||||
// cond: lc >= 32
|
||||
// result: (MOVDconst [0])
|
||||
@@ -12458,25 +12409,6 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool {
|
||||
v.AuxInt = int64ToAuxInt(int64(int32(c)))
|
||||
return true
|
||||
}
|
||||
// match: (MOVWreg <t> (ANDconst x [c]))
|
||||
// cond: uint64(c) & uint64(0xffffffff80000000) == 0
|
||||
// result: (ANDconst <t> x [c])
|
||||
for {
|
||||
t := v.Type
|
||||
if v_0.Op != OpARM64ANDconst {
|
||||
break
|
||||
}
|
||||
c := auxIntToInt64(v_0.AuxInt)
|
||||
x := v_0.Args[0]
|
||||
if !(uint64(c)&uint64(0xffffffff80000000) == 0) {
|
||||
break
|
||||
}
|
||||
v.reset(OpARM64ANDconst)
|
||||
v.Type = t
|
||||
v.AuxInt = int64ToAuxInt(c)
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (MOVWreg (SLLconst [lc] x))
|
||||
// cond: lc < 32
|
||||
// result: (SBFIZ [armBFAuxInt(lc, 32-lc)] x)
|
||||
|
||||
@@ -7083,8 +7083,21 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
// for an empty block this will be used for its control
// instruction. We won't use the actual liveness map on a
// control instruction. Just mark it something that is
// preemptible, unless this function is "all unsafe".
s.pp.NextLive = objw.LivenessIndex{StackMapIndex: -1, IsUnsafePoint: liveness.IsUnsafe(f)}
// preemptible, unless this function is "all unsafe", or
// the empty block is in a write barrier.
unsafe := liveness.IsUnsafe(f)
if b.Kind == ssa.BlockPlain {
// Empty blocks that are part of write barriers need
// to have their control instructions marked unsafe.
c := b.Succs[0].Block()
for _, v := range c.Values {
if v.Op == ssa.OpWBend {
unsafe = true
break
}
}
}
s.pp.NextLive = objw.LivenessIndex{StackMapIndex: -1, IsUnsafePoint: unsafe}

if idx, ok := argLiveBlockMap[b.ID]; ok && idx != argLiveIdx {
argLiveIdx = idx

@@ -110,11 +110,11 @@ type Config struct {
// type checker will initialize this field with a newly created context.
Context *Context

// GoVersion describes the accepted Go language version. The string
// must follow the format "go%d.%d" (e.g. "go1.12") or ist must be
// empty; an empty string disables Go language version checks.
// If the format is invalid, invoking the type checker will cause a
// panic.
// GoVersion describes the accepted Go language version. The string must
// start with a prefix of the form "go%d.%d" (e.g. "go1.20", "go1.21rc1", or
// "go1.21.0") or it must be empty; an empty string disables Go language
// version checks. If the format is invalid, invoking the type checker will
// result in an error.
GoVersion string

// If IgnoreFuncBodies is set, function bodies are not

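The same GoVersion contract is exposed through the public go/types API; a minimal sketch of how a tool might pin the accepted language version (the source string and the chosen version are illustrative, and go/types is used here since types2 is internal to the compiler):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	// A file that needs a newer language version than we allow.
	const src = `package p
var x any // predeclared "any" requires go1.18
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}

	conf := types.Config{
		GoVersion: "go1.17", // per the new doc text, "go1.21rc1"/"go1.21.0"-style values are accepted too
		Error:     func(err error) { fmt.Println(err) },
	}
	conf.Check("p", fset, []*ast.File{f}, nil)
	// Expect a language-version error mentioning go1.18 to be reported.
}
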
@@ -2070,6 +2070,29 @@ func TestIdenticalUnions(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestIssue61737(t *testing.T) {
|
||||
// This test verifies that it is possible to construct invalid interfaces
|
||||
// containing duplicate methods using the go/types API.
|
||||
//
|
||||
// It must be possible for importers to construct such invalid interfaces.
|
||||
// Previously, this panicked.
|
||||
|
||||
sig1 := NewSignatureType(nil, nil, nil, NewTuple(NewParam(nopos, nil, "", Typ[Int])), nil, false)
|
||||
sig2 := NewSignatureType(nil, nil, nil, NewTuple(NewParam(nopos, nil, "", Typ[String])), nil, false)
|
||||
|
||||
methods := []*Func{
|
||||
NewFunc(nopos, nil, "M", sig1),
|
||||
NewFunc(nopos, nil, "M", sig2),
|
||||
}
|
||||
|
||||
embeddedMethods := []*Func{
|
||||
NewFunc(nopos, nil, "M", sig2),
|
||||
}
|
||||
embedded := NewInterfaceType(embeddedMethods, nil)
|
||||
iface := NewInterfaceType(methods, []Type{embedded})
|
||||
iface.NumMethods() // unlike go/types, there is no Complete() method, so we complete implicitly
|
||||
}
|
||||
|
||||
func TestIssue15305(t *testing.T) {
|
||||
const src = "package p; func f() int16; var _ = f(undef)"
|
||||
f := mustParse(src)
|
||||
|
||||
@@ -576,6 +576,11 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
|
||||
// If nargs == 1, make sure x.mode is either a value or a constant.
|
||||
if x.mode != constant_ {
|
||||
x.mode = value
|
||||
// A value must not be untyped.
|
||||
check.assignment(x, &emptyInterface, "argument to "+bin.name)
|
||||
if x.mode == invalid {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Use the final type computed above for all arguments.
|
||||
|
||||
@@ -610,20 +610,17 @@ func (check *Checker) arguments(call *syntax.CallExpr, sig *Signature, targs []T
|
||||
return // error already reported
|
||||
}
|
||||
|
||||
// compute result signature: instantiate if needed
|
||||
rsig = sig
|
||||
// update result signature: instantiate if needed
|
||||
if n > 0 {
|
||||
rsig = check.instantiateSignature(call.Pos(), call.Fun, sig, targs[:n], xlist)
|
||||
}
|
||||
|
||||
// Optimization: Only if the callee's parameter list was adjusted do we need to
|
||||
// compute it from the adjusted list; otherwise we can simply use the result
|
||||
// signature's parameter list. We only need the n type parameters and arguments
|
||||
// of the callee.
|
||||
if n > 0 && adjusted {
|
||||
sigParams = check.subst(call.Pos(), sigParams, makeSubstMap(tparams[:n], targs[:n]), nil, check.context()).(*Tuple)
|
||||
} else {
|
||||
sigParams = rsig.params
|
||||
// If the callee's parameter list was adjusted we need to update (instantiate)
|
||||
// it separately. Otherwise we can simply use the result signature's parameter
|
||||
// list.
|
||||
if adjusted {
|
||||
sigParams = check.subst(call.Pos(), sigParams, makeSubstMap(tparams[:n], targs[:n]), nil, check.context()).(*Tuple)
|
||||
} else {
|
||||
sigParams = rsig.params
|
||||
}
|
||||
}
|
||||
|
||||
// compute argument signatures: instantiate if needed
|
||||
|
||||
@@ -96,7 +96,7 @@ func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type,
// Unify parameter and argument types for generic parameters with typed arguments
// and collect the indices of generic parameters with untyped arguments.
// Terminology: generic parameter = function parameter with a type-parameterized type
u := newUnifier(tparams, targs)
u := newUnifier(tparams, targs, check.allowVersion(check.pkg, pos, go1_21))

errorf := func(kind string, tpar, targ Type, arg *operand) {
// provide a better error message if we can

@@ -900,3 +900,23 @@ func _cgoCheckResult(interface{})
|
||||
*boolFieldAddr(cfg, "go115UsesCgo") = true
|
||||
})
|
||||
}
|
||||
|
||||
func TestIssue61931(t *testing.T) {
|
||||
const src = `
|
||||
package p
|
||||
|
||||
func A(func(any), ...any) {}
|
||||
func B[T any](T) {}
|
||||
|
||||
func _() {
|
||||
A(B, nil // syntax error: missing ',' before newline in argument list
|
||||
}
|
||||
`
|
||||
f, err := syntax.Parse(syntax.NewFileBase(pkgName(src)), strings.NewReader(src), func(error) {}, nil, 0)
|
||||
if err == nil {
|
||||
t.Fatal("expected syntax error")
|
||||
}
|
||||
|
||||
var conf Config
|
||||
conf.Check(f.PkgName.Value, []*syntax.File{f}, nil) // must not panic
|
||||
}
|
||||
|
||||
@@ -6,7 +6,6 @@ package types2
|
||||
|
||||
import (
|
||||
"cmd/compile/internal/syntax"
|
||||
"fmt"
|
||||
. "internal/types/errors"
|
||||
"sort"
|
||||
"strings"
|
||||
@@ -212,7 +211,6 @@ func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_
|
||||
// we can get rid of the mpos map below and simply use the cloned method's
|
||||
// position.
|
||||
|
||||
var todo []*Func
|
||||
var seen objset
|
||||
var allMethods []*Func
|
||||
mpos := make(map[*Func]syntax.Pos) // method specification or method embedding position, for good error messages
|
||||
@@ -222,36 +220,30 @@ func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_
|
||||
allMethods = append(allMethods, m)
|
||||
mpos[m] = pos
|
||||
case explicit:
|
||||
if check == nil {
|
||||
panic(fmt.Sprintf("%s: duplicate method %s", m.pos, m.name))
|
||||
if check != nil {
|
||||
var err error_
|
||||
err.code = DuplicateDecl
|
||||
err.errorf(pos, "duplicate method %s", m.name)
|
||||
err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
|
||||
check.report(&err)
|
||||
}
|
||||
// check != nil
|
||||
var err error_
|
||||
err.code = DuplicateDecl
|
||||
err.errorf(pos, "duplicate method %s", m.name)
|
||||
err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
|
||||
check.report(&err)
|
||||
default:
|
||||
// We have a duplicate method name in an embedded (not explicitly declared) method.
|
||||
// Check method signatures after all types are computed (go.dev/issue/33656).
|
||||
// If we're pre-go1.14 (overlapping embeddings are not permitted), report that
|
||||
// error here as well (even though we could do it eagerly) because it's the same
|
||||
// error message.
|
||||
if check == nil {
|
||||
// check method signatures after all locally embedded interfaces are computed
|
||||
todo = append(todo, m, other.(*Func))
|
||||
break
|
||||
if check != nil {
|
||||
check.later(func() {
|
||||
if !check.allowVersion(m.pkg, pos, go1_14) || !Identical(m.typ, other.Type()) {
|
||||
var err error_
|
||||
err.code = DuplicateDecl
|
||||
err.errorf(pos, "duplicate method %s", m.name)
|
||||
err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
|
||||
check.report(&err)
|
||||
}
|
||||
}).describef(pos, "duplicate method check for %s", m.name)
|
||||
}
|
||||
// check != nil
|
||||
check.later(func() {
|
||||
if !check.allowVersion(m.pkg, pos, go1_14) || !Identical(m.typ, other.Type()) {
|
||||
var err error_
|
||||
err.code = DuplicateDecl
|
||||
err.errorf(pos, "duplicate method %s", m.name)
|
||||
err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
|
||||
check.report(&err)
|
||||
}
|
||||
}).describef(pos, "duplicate method check for %s", m.name)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -314,15 +306,6 @@ func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_
|
||||
}
|
||||
ityp.embedPos = nil // not needed anymore (errors have been reported)
|
||||
|
||||
// process todo's (this only happens if check == nil)
|
||||
for i := 0; i < len(todo); i += 2 {
|
||||
m := todo[i]
|
||||
other := todo[i+1]
|
||||
if !Identical(m.typ, other.typ) {
|
||||
panic(fmt.Sprintf("%s: duplicate method %s", m.pos, m.name))
|
||||
}
|
||||
}
|
||||
|
||||
ityp.tset.comparable = allComparable
|
||||
if len(allMethods) != 0 {
|
||||
sortMethods(allMethods)
|
||||
|
||||
@@ -53,11 +53,6 @@ const (
|
||||
// the core types, if any, of non-local (unbound) type parameters.
|
||||
enableCoreTypeUnification = true
|
||||
|
||||
// If enableInterfaceInference is set, type inference uses
|
||||
// shared methods for improved type inference involving
|
||||
// interfaces.
|
||||
enableInterfaceInference = true
|
||||
|
||||
// If traceInference is set, unification will print a trace of its operation.
|
||||
// Interpretation of trace:
|
||||
// x ≡ y attempt to unify types x and y
|
||||
@@ -81,15 +76,16 @@ type unifier struct {
|
||||
// that inferring the type for a given type parameter P will
|
||||
// automatically infer the same type for all other parameters
|
||||
// unified (joined) with P.
|
||||
handles map[*TypeParam]*Type
|
||||
depth int // recursion depth during unification
|
||||
handles map[*TypeParam]*Type
|
||||
depth int // recursion depth during unification
|
||||
enableInterfaceInference bool // use shared methods for better inference
|
||||
}
|
||||
|
||||
// newUnifier returns a new unifier initialized with the given type parameter
|
||||
// and corresponding type argument lists. The type argument list may be shorter
|
||||
// than the type parameter list, and it may contain nil types. Matching type
|
||||
// parameters and arguments must have the same index.
|
||||
func newUnifier(tparams []*TypeParam, targs []Type) *unifier {
|
||||
func newUnifier(tparams []*TypeParam, targs []Type, enableInterfaceInference bool) *unifier {
|
||||
assert(len(tparams) >= len(targs))
|
||||
handles := make(map[*TypeParam]*Type, len(tparams))
|
||||
// Allocate all handles up-front: in a correct program, all type parameters
|
||||
@@ -103,7 +99,7 @@ func newUnifier(tparams []*TypeParam, targs []Type) *unifier {
|
||||
}
|
||||
handles[x] = &t
|
||||
}
|
||||
return &unifier{handles, 0}
|
||||
return &unifier{handles, 0, enableInterfaceInference}
|
||||
}
|
||||
|
||||
// unifyMode controls the behavior of the unifier.
|
||||
@@ -339,7 +335,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
|
||||
// we will fail at function instantiation or argument assignment time.
|
||||
//
|
||||
// If we have at least one defined type, there is one in y.
|
||||
if ny, _ := y.(*Named); mode&exact == 0 && ny != nil && isTypeLit(x) && !(enableInterfaceInference && IsInterface(x)) {
|
||||
if ny, _ := y.(*Named); mode&exact == 0 && ny != nil && isTypeLit(x) && !(u.enableInterfaceInference && IsInterface(x)) {
|
||||
if traceInference {
|
||||
u.tracef("%s ≡ under %s", x, ny)
|
||||
}
|
||||
@@ -405,18 +401,40 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
|
||||
// Therefore, we must fail unification (go.dev/issue/60933).
|
||||
return false
|
||||
}
|
||||
// If y is a defined type, make sure we record that type
|
||||
// for type parameter x, which may have until now only
|
||||
// recorded an underlying type (go.dev/issue/43056).
|
||||
// Either both types are interfaces, or neither type is.
|
||||
// If both are interfaces, they have the same methods.
|
||||
// If we have inexact unification and one of x or y is a defined type, select the
|
||||
// defined type. This ensures that in a series of types, all matching against the
|
||||
// same type parameter, we infer a defined type if there is one, independent of
|
||||
// order. Type inference or assignment may fail, which is ok.
|
||||
// Selecting a defined type, if any, ensures that we don't lose the type name;
|
||||
// and since we have inexact unification, a value of equally named or matching
|
||||
// undefined type remains assignable (go.dev/issue/43056).
|
||||
//
|
||||
// Note: Changing the recorded type for a type parameter to
|
||||
// a defined type is only ok when unification is inexact.
|
||||
// But in exact unification, if we have a match, x and y must
|
||||
// be identical, so changing the recorded type for x is a no-op.
|
||||
if yn {
|
||||
u.set(px, y)
|
||||
// Similarly, if we have inexact unification and there are no defined types but
|
||||
// channel types, select a directed channel, if any. This ensures that in a series
|
||||
// of unnamed types, all matching against the same type parameter, we infer the
|
||||
// directed channel if there is one, independent of order.
|
||||
// Selecting a directional channel, if any, ensures that a value of another
|
||||
// inexactly unifying channel type remains assignable (go.dev/issue/62157).
|
||||
//
|
||||
// If we have multiple defined channel types, they are either identical or we
|
||||
// have assignment conflicts, so we can ignore directionality in this case.
|
||||
//
|
||||
// If we have defined and literal channel types, a defined type wins to avoid
|
||||
// order dependencies.
|
||||
if mode&exact == 0 {
|
||||
switch {
|
||||
case xn:
|
||||
// x is a defined type: nothing to do.
|
||||
case yn:
|
||||
// x is not a defined type and y is a defined type: select y.
|
||||
u.set(px, y)
|
||||
default:
|
||||
// Neither x nor y are defined types.
|
||||
if yc, _ := under(y).(*Chan); yc != nil && yc.dir != SendRecv {
|
||||
// y is a directed channel type: select y.
|
||||
u.set(px, y)
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
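A hedged illustration of the directed-channel rule described in the comments above (go.dev/issue/62157): with the new behavior, the directional channel type is selected for the type parameter regardless of argument order, so the bidirectional value stays assignable. The function and variable names below are made up for the sketch.

package main

// same is a stand-in for any generic function whose arguments all
// constrain a single type parameter.
func same[T any](_, _ T) {}

func main() {
	bidi := make(chan int)
	var recv <-chan int = bidi

	// With inexact unification, T is inferred as <-chan int in both calls:
	// the directed channel wins independent of order, and the bidirectional
	// channel value remains assignable to it. Inferring chan int instead
	// would have rejected the <-chan int argument.
	same(bidi, recv)
	same(recv, bidi)
}
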
@@ -437,12 +455,12 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
|
||||
emode |= exact
|
||||
}
|
||||
|
||||
// If EnableInterfaceInference is set and we don't require exact unification,
|
||||
// If u.EnableInterfaceInference is set and we don't require exact unification,
|
||||
// if both types are interfaces, one interface must have a subset of the
|
||||
// methods of the other and corresponding method signatures must unify.
|
||||
// If only one type is an interface, all its methods must be present in the
|
||||
// other type and corresponding method signatures must unify.
|
||||
if enableInterfaceInference && mode&exact == 0 {
|
||||
if u.enableInterfaceInference && mode&exact == 0 {
|
||||
// One or both interfaces may be defined types.
|
||||
// Look under the name, but not under type parameters (go.dev/issue/60564).
|
||||
xi := asInterface(x)
|
||||
@@ -505,7 +523,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
|
||||
}
|
||||
// All xmethods must exist in ymethods and corresponding signatures must unify.
|
||||
for _, xm := range xmethods {
|
||||
if ym := ymap[xm.Id()]; ym == nil || !u.nify(xm.typ, ym.typ, emode, p) {
|
||||
if ym := ymap[xm.Id()]; ym == nil || !u.nify(xm.typ, ym.typ, exact, p) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
@@ -526,7 +544,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
|
||||
xmethods := xi.typeSet().methods
|
||||
for _, xm := range xmethods {
|
||||
obj, _, _ := LookupFieldOrMethod(y, false, xm.pkg, xm.name)
|
||||
if ym, _ := obj.(*Func); ym == nil || !u.nify(xm.typ, ym.typ, emode, p) {
|
||||
if ym, _ := obj.(*Func); ym == nil || !u.nify(xm.typ, ym.typ, exact, p) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
@@ -632,7 +650,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
|
||||
}
|
||||
|
||||
case *Interface:
|
||||
assert(!enableInterfaceInference || mode&exact != 0) // handled before this switch
|
||||
assert(!u.enableInterfaceInference || mode&exact != 0) // handled before this switch
|
||||
|
||||
// Two interface types unify if they have the same set of methods with
|
||||
// the same names, and corresponding function types unify.
|
||||
@@ -685,7 +703,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
|
||||
}
|
||||
for i, f := range a {
|
||||
g := b[i]
|
||||
if f.Id() != g.Id() || !u.nify(f.typ, g.typ, emode, q) {
|
||||
if f.Id() != g.Id() || !u.nify(f.typ, g.typ, exact, q) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
@@ -255,7 +255,10 @@ func walkLenCap(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
return mkcall("countrunes", n.Type(), init, typecheck.Conv(n.X.(*ir.ConvExpr).X, types.Types[types.TSTRING]))
}
if isByteCount(n) {
_, len := backingArrayPtrLen(cheapExpr(n.X.(*ir.ConvExpr).X, init))
conv := n.X.(*ir.ConvExpr)
walkStmtList(conv.Init())
init.Append(ir.TakeInit(conv)...)
_, len := backingArrayPtrLen(cheapExpr(conv.X, init))
return len
}

@@ -278,8 +278,10 @@ func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) {
} else {
ptr.SetType(n.Type().Elem().PtrTo())
}
ptr.SetTypecheck(1)
length = ir.NewUnaryExpr(base.Pos, ir.OLEN, n)
length.SetType(types.Types[types.TINT])
length.SetTypecheck(1)
return ptr, length
}

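For context, a minimal sketch of the user-level pattern this hunk touches: len of a string-to-byte-slice conversion is recognized (isByteCount above) and rewritten to use the string length directly; the change makes sure any init statements attached to the conversion are still walked and emitted. The helper function name is illustrative.

package main

import "fmt"

func payload() string { return "hello, world" }

func main() {
	// The compiler rewrites len([]byte(s)) to use the string length directly,
	// so no byte slice needs to be materialized just to measure it.
	n := len([]byte(payload()))
	fmt.Println(n) // 12
}
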
30
src/cmd/dist/test.go
vendored
@@ -91,6 +91,29 @@ type work struct {
|
||||
end chan bool
|
||||
}
|
||||
|
||||
// printSkip prints a skip message for all of work.
|
||||
func (w *work) printSkip(t *tester, msg string) {
|
||||
if t.json {
|
||||
type event struct {
|
||||
Time time.Time
|
||||
Action string
|
||||
Package string
|
||||
Output string `json:",omitempty"`
|
||||
}
|
||||
enc := json.NewEncoder(&w.out)
|
||||
ev := event{Time: time.Now(), Package: w.dt.name, Action: "start"}
|
||||
enc.Encode(ev)
|
||||
ev.Action = "output"
|
||||
ev.Output = msg
|
||||
enc.Encode(ev)
|
||||
ev.Action = "skip"
|
||||
ev.Output = ""
|
||||
enc.Encode(ev)
|
||||
return
|
||||
}
|
||||
fmt.Fprintln(&w.out, msg)
|
||||
}
|
||||
|
||||
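A sketch of the three test2json-style records printSkip writes in -json mode, using the same ad-hoc event struct as the hunk above; the package name shown is illustrative, not taken from dist's actual naming.

package main

import (
	"encoding/json"
	"os"
	"time"
)

// event mirrors the anonymous struct used by printSkip above.
type event struct {
	Time    time.Time
	Action  string
	Package string
	Output  string `json:",omitempty"`
}

func main() {
	enc := json.NewEncoder(os.Stdout)
	now := time.Now()
	// For a skipped dist test, three records are emitted: start, output, skip.
	for _, ev := range []event{
		{Time: now, Action: "start", Package: "go_test:cmd/go"},
		{Time: now, Action: "output", Package: "go_test:cmd/go", Output: "skipped due to earlier error"},
		{Time: now, Action: "skip", Package: "go_test:cmd/go"},
	} {
		enc.Encode(ev)
	}
}
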
// A distTest is a test run by dist test.
|
||||
// Each test has a unique name and belongs to a group (heading)
|
||||
type distTest struct {
|
||||
@@ -405,6 +428,9 @@ func (opts *goTest) buildArgs(t *tester) (build, run, pkgs, testFlags []string,
|
||||
if opts.timeout != 0 {
|
||||
d := opts.timeout * time.Duration(t.timeoutScale)
|
||||
run = append(run, "-timeout="+d.String())
|
||||
} else if t.timeoutScale != 1 {
|
||||
const goTestDefaultTimeout = 10 * time.Minute // Default value of go test -timeout flag.
|
||||
run = append(run, "-timeout="+(goTestDefaultTimeout*time.Duration(t.timeoutScale)).String())
|
||||
}
|
||||
if opts.short || t.short {
|
||||
run = append(run, "-short")
|
||||
@@ -1235,7 +1261,7 @@ func (t *tester) runPending(nextTest *distTest) {
|
||||
go func(w *work) {
|
||||
if !<-w.start {
|
||||
timelog("skip", w.dt.name)
|
||||
w.out.WriteString("skipped due to earlier error\n")
|
||||
w.printSkip(t, "skipped due to earlier error")
|
||||
} else {
|
||||
timelog("start", w.dt.name)
|
||||
w.err = w.cmd.Run()
|
||||
@@ -1246,7 +1272,7 @@ func (t *tester) runPending(nextTest *distTest) {
|
||||
if isUnsupportedVMASize(w) {
|
||||
timelog("skip", w.dt.name)
|
||||
w.out.Reset()
|
||||
w.out.WriteString("skipped due to unsupported VMA\n")
|
||||
w.printSkip(t, "skipped due to unsupported VMA")
|
||||
w.err = nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -44,7 +44,7 @@ type fileInfo struct {
func (i fileInfo) Name() string { return path.Base(i.f.Name) }
func (i fileInfo) ModTime() time.Time { return i.f.Time }
func (i fileInfo) Mode() fs.FileMode { return i.f.Mode }
func (i fileInfo) IsDir() bool { return false }
func (i fileInfo) IsDir() bool { return i.f.Mode&fs.ModeDir != 0 }
func (i fileInfo) Size() int64 { return i.f.Size }
func (i fileInfo) Sys() any { return nil }

@@ -329,8 +329,47 @@ func writeTgz(name string, a *Archive) {

zw := check(gzip.NewWriterLevel(out, gzip.BestCompression))
tw := tar.NewWriter(zw)

// Find the mode and mtime to use for directory entries,
// based on the mode and mtime of the first file we see.
// We know that modes and mtimes are uniform across the archive.
var dirMode fs.FileMode
var mtime time.Time
for _, f := range a.Files {
dirMode = fs.ModeDir | f.Mode | (f.Mode&0444)>>2 // copy r bits down to x bits
mtime = f.Time
break
}

// mkdirAll ensures that the tar file contains directory
// entries for dir and all its parents. Some programs reading
// these tar files expect that. See go.dev/issue/61862.
haveDir := map[string]bool{".": true}
var mkdirAll func(string)
mkdirAll = func(dir string) {
if dir == "/" {
panic("mkdirAll /")
}
if haveDir[dir] {
return
}
haveDir[dir] = true
mkdirAll(path.Dir(dir))
df := &File{
Name: dir + "/",
Time: mtime,
Mode: dirMode,
}
h := check(tar.FileInfoHeader(df.Info(), ""))
h.Name = dir + "/"
if err := tw.WriteHeader(h); err != nil {
panic(err)
}
}

for _, f = range a.Files {
h := check(tar.FileInfoHeader(f.Info(), ""))
mkdirAll(path.Dir(f.Name))
h.Name = f.Name
if err := tw.WriteHeader(h); err != nil {
panic(err)

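To see the effect of the mkdirAll change (go.dev/issue/61862), one can list the directory entries that now precede the files in a distribution archive; a minimal reader sketch, with an illustrative archive name:

package main

import (
	"archive/tar"
	"compress/gzip"
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	// The archive path is illustrative.
	f, err := os.Open("go1.21.0.linux-amd64.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	zr, err := gzip.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}
	tr := tar.NewReader(zr)
	for {
		h, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		if h.Typeflag == tar.TypeDir {
			fmt.Println("dir:", h.Name) // e.g. "go/", "go/bin/", ...
		}
	}
}
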
@@ -490,25 +490,43 @@ func findGOROOT(env string) string {
|
||||
// depend on the executable's location.
|
||||
return def
|
||||
}
|
||||
|
||||
// canonical returns a directory path that represents
|
||||
// the same directory as dir,
|
||||
// preferring the spelling in def if the two are the same.
|
||||
canonical := func(dir string) string {
|
||||
if isSameDir(def, dir) {
|
||||
return def
|
||||
}
|
||||
return dir
|
||||
}
|
||||
|
||||
exe, err := os.Executable()
|
||||
if err == nil {
|
||||
exe, err = filepath.Abs(exe)
|
||||
if err == nil {
|
||||
// cmd/go may be installed in GOROOT/bin or GOROOT/bin/GOOS_GOARCH,
|
||||
// depending on whether it was cross-compiled with a different
|
||||
// GOHOSTOS (see https://go.dev/issue/62119). Try both.
|
||||
if dir := filepath.Join(exe, "../.."); isGOROOT(dir) {
|
||||
// If def (runtime.GOROOT()) and dir are the same
|
||||
// directory, prefer the spelling used in def.
|
||||
if isSameDir(def, dir) {
|
||||
return def
|
||||
}
|
||||
return dir
|
||||
return canonical(dir)
|
||||
}
|
||||
if dir := filepath.Join(exe, "../../.."); isGOROOT(dir) {
|
||||
return canonical(dir)
|
||||
}
|
||||
|
||||
// Depending on what was passed on the command line, it is possible
|
||||
// that os.Executable is a symlink (like /usr/local/bin/go) referring
|
||||
// to a binary installed in a real GOROOT elsewhere
|
||||
// (like /usr/lib/go/bin/go).
|
||||
// Try to find that GOROOT by resolving the symlinks.
|
||||
exe, err = filepath.EvalSymlinks(exe)
|
||||
if err == nil {
|
||||
if dir := filepath.Join(exe, "../.."); isGOROOT(dir) {
|
||||
if isSameDir(def, dir) {
|
||||
return def
|
||||
}
|
||||
return dir
|
||||
return canonical(dir)
|
||||
}
|
||||
if dir := filepath.Join(exe, "../../.."); isGOROOT(dir) {
|
||||
return canonical(dir)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,6 +22,13 @@ import (
// FromToolchain("go1.2.3-bigcorp") == "1.2.3"
// FromToolchain("invalid") == ""
func FromToolchain(name string) string {
if strings.ContainsAny(name, "\\/") {
// The suffix must not include a path separator, since that would cause
// exec.LookPath to resolve it from a relative directory instead of from
// $PATH.
return ""
}

var v string
if strings.HasPrefix(name, "go") {
v = name[2:]

@@ -959,7 +959,10 @@ func collectDepsErrors(p *load.Package) {
if len(stkj) != 0 {
return true
}

return p.DepsErrors[i].Err.Error() < p.DepsErrors[j].Err.Error()
} else if len(stkj) == 0 {
return false
}
pathi, pathj := stki[len(stki)-1], stkj[len(stkj)-1]
return pathi < pathj

@@ -473,6 +473,7 @@ func recompileForTest(pmain, preal, ptest, pxtest *Package) *PackageError {
|
||||
p.Target = ""
|
||||
p.Internal.BuildInfo = nil
|
||||
p.Internal.ForceLibrary = true
|
||||
p.Internal.PGOProfile = preal.Internal.PGOProfile
|
||||
}
|
||||
|
||||
// Update p.Internal.Imports to use test copies.
|
||||
@@ -496,6 +497,11 @@ func recompileForTest(pmain, preal, ptest, pxtest *Package) *PackageError {
|
||||
if p.Name == "main" && p != pmain && p != ptest {
|
||||
split()
|
||||
}
|
||||
// Split and attach PGO information to test dependencies if preal
|
||||
// is built with PGO.
|
||||
if preal.Internal.PGOProfile != "" && p.Internal.PGOProfile == "" {
|
||||
split()
|
||||
}
|
||||
}
|
||||
|
||||
// Do search to find cycle.
|
||||
|
||||
@@ -110,7 +110,13 @@ func ListModules(ctx context.Context, args []string, mode ListMode, reuseFile st

if err == nil {
requirements = rs
if !ExplicitWriteGoMod {
// TODO(#61605): The extra ListU clause fixes a problem with Go 1.21rc3
// where "go mod tidy" and "go list -m -u all" fight over whether the go.sum
// should be considered up-to-date. The fix for now is to always treat the
// go.sum as up-to-date during list -m -u. Probably the right fix is more targeted,
// but in general list -u is looking up other checksums in the checksum database
// that won't be necessary later, so it makes sense not to write the go.sum back out.
if !ExplicitWriteGoMod && mode&ListU == 0 {
err = commitRequirements(ctx, WriteOpts{})
}
}

@@ -1363,65 +1363,87 @@ func (r *runTestActor) Act(b *work.Builder, ctx context.Context, a *work.Action)
|
||||
ctx, cancel := context.WithTimeout(ctx, testKillTimeout)
|
||||
defer cancel()
|
||||
|
||||
cmd := exec.CommandContext(ctx, args[0], args[1:]...)
|
||||
cmd.Dir = a.Package.Dir
|
||||
|
||||
env := slices.Clip(cfg.OrigEnv)
|
||||
env = base.AppendPATH(env)
|
||||
env = base.AppendPWD(env, cmd.Dir)
|
||||
cmd.Env = env
|
||||
if addToEnv != "" {
|
||||
cmd.Env = append(cmd.Env, addToEnv)
|
||||
}
|
||||
|
||||
cmd.Stdout = stdout
|
||||
cmd.Stderr = stdout
|
||||
|
||||
// If there are any local SWIG dependencies, we want to load
|
||||
// the shared library from the build directory.
|
||||
if a.Package.UsesSwig() {
|
||||
env := cmd.Env
|
||||
found := false
|
||||
prefix := "LD_LIBRARY_PATH="
|
||||
for i, v := range env {
|
||||
if strings.HasPrefix(v, prefix) {
|
||||
env[i] = v + ":."
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
env = append(env, "LD_LIBRARY_PATH=.")
|
||||
}
|
||||
cmd.Env = env
|
||||
}
|
||||
// Now we're ready to actually run the command.
|
||||
//
|
||||
// If the -o flag is set, or if at some point we change cmd/go to start
|
||||
// copying test executables into the build cache, we may run into spurious
|
||||
// ETXTBSY errors on Unix platforms (see https://go.dev/issue/22315).
|
||||
//
|
||||
// Since we know what causes those, and we know that they should resolve
|
||||
// quickly (the ETXTBSY error will resolve as soon as the subprocess
|
||||
// holding the descriptor open reaches its 'exec' call), we retry them
|
||||
// in a loop.
|
||||
|
||||
var (
|
||||
cmd *exec.Cmd
|
||||
t0 time.Time
|
||||
cancelKilled = false
|
||||
cancelSignaled = false
|
||||
)
|
||||
cmd.Cancel = func() error {
|
||||
if base.SignalTrace == nil {
|
||||
err := cmd.Process.Kill()
|
||||
for {
|
||||
cmd = exec.CommandContext(ctx, args[0], args[1:]...)
|
||||
cmd.Dir = a.Package.Dir
|
||||
|
||||
env := slices.Clip(cfg.OrigEnv)
|
||||
env = base.AppendPATH(env)
|
||||
env = base.AppendPWD(env, cmd.Dir)
|
||||
cmd.Env = env
|
||||
if addToEnv != "" {
|
||||
cmd.Env = append(cmd.Env, addToEnv)
|
||||
}
|
||||
|
||||
cmd.Stdout = stdout
|
||||
cmd.Stderr = stdout
|
||||
|
||||
// If there are any local SWIG dependencies, we want to load
|
||||
// the shared library from the build directory.
|
||||
if a.Package.UsesSwig() {
|
||||
env := cmd.Env
|
||||
found := false
|
||||
prefix := "LD_LIBRARY_PATH="
|
||||
for i, v := range env {
|
||||
if strings.HasPrefix(v, prefix) {
|
||||
env[i] = v + ":."
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
env = append(env, "LD_LIBRARY_PATH=.")
|
||||
}
|
||||
cmd.Env = env
|
||||
}
|
||||
|
||||
cmd.Cancel = func() error {
|
||||
if base.SignalTrace == nil {
|
||||
err := cmd.Process.Kill()
|
||||
if err == nil {
|
||||
cancelKilled = true
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Send a quit signal in the hope that the program will print
|
||||
// a stack trace and exit.
|
||||
err := cmd.Process.Signal(base.SignalTrace)
|
||||
if err == nil {
|
||||
cancelKilled = true
|
||||
cancelSignaled = true
|
||||
}
|
||||
return err
|
||||
}
|
||||
cmd.WaitDelay = testWaitDelay
|
||||
|
||||
// Send a quit signal in the hope that the program will print
|
||||
// a stack trace and exit.
|
||||
err := cmd.Process.Signal(base.SignalTrace)
|
||||
if err == nil {
|
||||
cancelSignaled = true
|
||||
base.StartSigHandlers()
|
||||
t0 = time.Now()
|
||||
err = cmd.Run()
|
||||
|
||||
if !isETXTBSY(err) {
|
||||
// We didn't hit the race in #22315, so there is no reason to retry the
|
||||
// command.
|
||||
break
|
||||
}
|
||||
return err
|
||||
}
|
||||
cmd.WaitDelay = testWaitDelay
|
||||
|
||||
base.StartSigHandlers()
|
||||
t0 := time.Now()
|
||||
err = cmd.Run()
|
||||
out := buf.Bytes()
|
||||
a.TestOutput = &buf
|
||||
t := fmt.Sprintf("%.3fs", time.Since(t0).Seconds())
|
||||
|
||||
12
src/cmd/go/internal/test/test_nonunix.go
Normal file
@@ -0,0 +1,12 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !unix

package test

func isETXTBSY(err error) bool {
// syscall.ETXTBSY is only meaningful on Unix platforms.
return false
}

16
src/cmd/go/internal/test/test_unix.go
Normal file
@@ -0,0 +1,16 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build unix

package test

import (
"errors"
"syscall"
)

func isETXTBSY(err error) bool {
return errors.Is(err, syscall.ETXTBSY)
}

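These helpers feed the retry loop added to runTestActor above; a condensed sketch of that pattern follows (the binary name is illustrative, and the sketch is unix-only because of syscall.ETXTBSY):

//go:build unix

package main

import (
	"errors"
	"os/exec"
	"syscall"
)

// runFreshBinary re-runs a just-written executable while the transient
// ETXTBSY race (go.dev/issue/22315) is in effect: the error clears as soon
// as the forked process holding the file open reaches its exec call.
func runFreshBinary(name string, args ...string) error {
	for {
		err := exec.Command(name, args...).Run()
		if !errors.Is(err, syscall.ETXTBSY) {
			return err
		}
	}
}

func main() {
	_ = runFreshBinary("./pkg1.test", "-test.short")
}
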
@@ -61,7 +61,7 @@ func init() {
cf.String("run", "", "")
cf.Bool("short", false, "")
cf.String("skip", "", "")
cf.DurationVar(&testTimeout, "timeout", 10*time.Minute, "")
cf.DurationVar(&testTimeout, "timeout", 10*time.Minute, "") // known to cmd/dist
cf.String("fuzztime", "", "")
cf.String("fuzzminimizetime", "", "")
cf.StringVar(&testTrace, "trace", "", "")

@@ -212,16 +212,22 @@ func get(security SecurityMode, url *urlpkg.URL) (*Response, error) {
|
||||
}
|
||||
}
|
||||
|
||||
if res == nil || res.Body == nil {
|
||||
if err != nil {
|
||||
// Per the docs for [net/http.Client.Do], “On error, any Response can be
|
||||
// ignored. A non-nil Response with a non-nil error only occurs when
|
||||
// CheckRedirect fails, and even then the returned Response.Body is
|
||||
// already closed.”
|
||||
release()
|
||||
} else {
|
||||
body := res.Body
|
||||
res.Body = hookCloser{
|
||||
ReadCloser: body,
|
||||
afterClose: release,
|
||||
}
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// “If the returned error is nil, the Response will contain a non-nil Body
|
||||
// which the user is expected to close.”
|
||||
body := res.Body
|
||||
res.Body = hookCloser{
|
||||
ReadCloser: body,
|
||||
afterClose: release,
|
||||
}
|
||||
return url, res, err
|
||||
}
|
||||
|
||||
|
||||
@@ -175,7 +175,11 @@ func main() {
if used > 0 {
helpArg += " " + strings.Join(args[:used], " ")
}
fmt.Fprintf(os.Stderr, "go %s: unknown command\nRun 'go help%s' for usage.\n", cfg.CmdName, helpArg)
cmdName := cfg.CmdName
if cmdName == "" {
cmdName = args[0]
}
fmt.Fprintf(os.Stderr, "go %s: unknown command\nRun 'go help%s' for usage.\n", cmdName, helpArg)
base.SetExitStatus(2)
base.Exit()
}

@@ -45,6 +45,12 @@ stderr 'compile.*-pgoprofile=.*b(/|\\\\)default\.pgo.*b(/|\\\\)b_test\.go'
|
||||
stderr 'compile.*-pgoprofile=.*b(/|\\\\)default\.pgo.*dep(/|\\\\)dep\.go'
|
||||
! stderr 'compile.*-pgoprofile=.*nopgo(/|\\\\)nopgo_test\.go'
|
||||
|
||||
# test-only dependencies also have profiles attached
|
||||
stderr 'compile.*-pgoprofile=.*a(/|\\\\)default\.pgo.*testdep(/|\\\\)testdep\.go'
|
||||
stderr 'compile.*-pgoprofile=.*b(/|\\\\)default\.pgo.*testdep(/|\\\\)testdep\.go'
|
||||
stderr 'compile.*-pgoprofile=.*a(/|\\\\)default\.pgo.*testdep2(/|\\\\)testdep2\.go'
|
||||
stderr 'compile.*-pgoprofile=.*b(/|\\\\)default\.pgo.*testdep2(/|\\\\)testdep2\.go'
|
||||
|
||||
# go list -deps prints packages built multiple times.
|
||||
go list -pgo=auto -deps ./a ./b ./nopgo
|
||||
stdout 'test/dep \[test/a\]'
|
||||
@@ -66,6 +72,7 @@ func main() {}
|
||||
-- a/a_test.go --
|
||||
package main
|
||||
import "testing"
|
||||
import _ "test/testdep"
|
||||
func TestA(*testing.T) {}
|
||||
-- a/default.pgo --
|
||||
-- b/b.go --
|
||||
@@ -76,6 +83,7 @@ func main() {}
|
||||
-- b/b_test.go --
|
||||
package main
|
||||
import "testing"
|
||||
import _ "test/testdep"
|
||||
func TestB(*testing.T) {}
|
||||
-- b/default.pgo --
|
||||
-- nopgo/nopgo.go --
|
||||
@@ -94,3 +102,8 @@ import _ "test/dep3"
|
||||
package dep2
|
||||
-- dep3/dep3.go --
|
||||
package dep3
|
||||
-- testdep/testdep.go --
|
||||
package testdep
|
||||
import _ "test/testdep2"
|
||||
-- testdep2/testdep2.go --
|
||||
package testdep2
|
||||
|
||||
2
src/cmd/go/testdata/script/go_badcmd.txt
vendored
Normal file
@@ -0,0 +1,2 @@
! go asdf
stderr '^go asdf: unknown command'

@@ -1,4 +1,5 @@
|
||||
[compiler:gccgo] skip
|
||||
[short] skip 'builds and links another cmd/go'
|
||||
|
||||
mkdir $WORK/new/bin
|
||||
|
||||
@@ -9,15 +10,18 @@ mkdir $WORK/new/bin
|
||||
# new cmd/go is built.
|
||||
env GOROOT_FINAL=
|
||||
|
||||
# $GOROOT/bin/go is whatever the user has already installed
|
||||
# (using make.bash or similar). We can't make assumptions about what
|
||||
# options it may have been built with, such as -trimpath or GOROOT_FINAL.
|
||||
# Instead, we build a fresh copy of the binary with known settings.
|
||||
go build -o $WORK/new/bin/go$GOEXE cmd/go &
|
||||
go build -o $WORK/bin/check$GOEXE check.go &
|
||||
go build -trimpath -o $WORK/bin/check$GOEXE check.go &
|
||||
wait
|
||||
|
||||
env TESTGOROOT=$GOROOT
|
||||
env GOROOT=
|
||||
|
||||
# Relocated Executable
|
||||
# cp $TESTGOROOT/bin/go$GOEXE $WORK/new/bin/go$GOEXE
|
||||
exec $WORK/bin/check$GOEXE $WORK/new/bin/go$GOEXE $TESTGOROOT
|
||||
|
||||
# Relocated Tree:
|
||||
|
||||
91
src/cmd/go/testdata/script/goroot_executable_trimpath.txt
vendored
Normal file
@@ -0,0 +1,91 @@
|
||||
# Regression test for https://go.dev/issue/62119:
|
||||
# A 'go' command cross-compiled with a different GOHOSTOS
|
||||
# should be able to locate its GOROOT using os.Executable.
|
||||
#
|
||||
# (This also tests a 'go' command built with -trimpath
|
||||
# that is not cross-compiled, since we need to build that
|
||||
# configuration for the test anyway.)
|
||||
|
||||
[short] skip 'builds and links another cmd/go'
|
||||
|
||||
mkdir $WORK/new/bin
|
||||
mkdir $WORK/new/bin/${GOOS}_${GOARCH}
|
||||
|
||||
# In this test, we are specifically checking the logic for deriving
|
||||
# the value of GOROOT from os.Executable when runtime.GOROOT is
|
||||
# trimmed away.
|
||||
# GOROOT_FINAL changes the default behavior of runtime.GOROOT,
|
||||
# so we explicitly clear it to remove it as a confounding variable.
|
||||
env GOROOT_FINAL=
|
||||
|
||||
# $GOROOT/bin/go is whatever the user has already installed
|
||||
# (using make.bash or similar). We can't make assumptions about what
|
||||
# options it may have been built with, such as -trimpath or GOROOT_FINAL.
|
||||
# Instead, we build a fresh copy of the binary with known settings.
|
||||
go build -trimpath -o $WORK/new/bin/go$GOEXE cmd/go &
|
||||
go build -trimpath -o $WORK/bin/check$GOEXE check.go &
|
||||
wait
|
||||
|
||||
env TESTGOROOT=$GOROOT
|
||||
env GOROOT=
|
||||
|
||||
# Relocated Executable
|
||||
# Since we built with -trimpath and the binary isn't installed in a
|
||||
# normal-looking GOROOT, this command should fail.
|
||||
|
||||
! exec $WORK/new/bin/go$GOEXE env GOROOT
|
||||
stderr '^go: cannot find GOROOT directory: ''go'' binary is trimmed and GOROOT is not set$'
|
||||
|
||||
# Cross-compiled binaries in cmd are installed to a ${GOOS}_${GOARCH} subdirectory,
|
||||
# so we also want to try a copy there.
|
||||
# (Note that the script engine's 'exec' engine already works around
|
||||
# https://go.dev/issue/22315, so we don't have to do that explicitly in the
|
||||
# 'check' program we use later.)
|
||||
cp $WORK/new/bin/go$GOEXE $WORK/new/bin/${GOOS}_${GOARCH}/go$GOEXE
|
||||
! exec $WORK/new/bin/${GOOS}_${GOARCH}/go$GOEXE env GOROOT
|
||||
stderr '^go: cannot find GOROOT directory: ''go'' binary is trimmed and GOROOT is not set$'
|
||||
|
||||
# Relocated Tree:
|
||||
# If the binary is sitting in a bin dir next to ../pkg/tool, that counts as a GOROOT,
|
||||
# so it should find the new tree.
|
||||
mkdir $WORK/new/pkg/tool
|
||||
exec $WORK/bin/check$GOEXE $WORK/new/bin/go$GOEXE $WORK/new
|
||||
exec $WORK/bin/check$GOEXE $WORK/new/bin/${GOOS}_${GOARCH}/go$GOEXE $WORK/new
|
||||
|
||||
-- check.go --
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func main() {
|
||||
exe := os.Args[1]
|
||||
want := os.Args[2]
|
||||
cmd := exec.Command(exe, "env", "GOROOT")
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%s env GOROOT: %v, %s\n", exe, err, out)
|
||||
os.Exit(1)
|
||||
}
|
||||
goroot, err := filepath.EvalSymlinks(strings.TrimSpace(string(out)))
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
want, err = filepath.EvalSymlinks(want)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
if !strings.EqualFold(goroot, want) {
|
||||
fmt.Fprintf(os.Stderr, "go env GOROOT:\nhave %s\nwant %s\n", goroot, want)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "go env GOROOT: %s\n", goroot)
|
||||
|
||||
}
|
||||
26
src/cmd/go/testdata/script/list_issue_59905.txt
vendored
@@ -1,8 +1,13 @@
|
||||
# Expect no panic
|
||||
go list -f '{{if .DepsErrors}}{{.DepsErrors}}{{end}}' -export -e -deps
|
||||
cmpenv stdout wanterr
|
||||
cmpenv stdout wanterr_59905
|
||||
|
||||
-- wanterr --
|
||||
# Expect no panic (Issue 61816)
|
||||
cp level1b_61816.txt level1b/pkg.go
|
||||
go list -f '{{if .DepsErrors}}{{.DepsErrors}}{{end}}' -export -e -deps
|
||||
cmpenv stdout wanterr_61816
|
||||
|
||||
-- wanterr_59905 --
|
||||
[# test/main/level1a
|
||||
level1a${/}pkg.go:5:2: level2x redeclared in this block
|
||||
level1a${/}pkg.go:4:2: other declaration of level2x
|
||||
@@ -14,6 +19,23 @@ level1b${/}pkg.go:5:2: level2x redeclared in this block
|
||||
level1b${/}pkg.go:5:2: "test/main/level1b/level2y" imported as level2x and not used
|
||||
level1b${/}pkg.go:8:39: undefined: level2y
|
||||
]
|
||||
-- wanterr_61816 --
|
||||
[level1b${/}pkg.go:4:2: package foo is not in std ($GOROOT${/}src${/}foo)]
|
||||
[# test/main/level1a
|
||||
level1a${/}pkg.go:5:2: level2x redeclared in this block
|
||||
level1a${/}pkg.go:4:2: other declaration of level2x
|
||||
level1a${/}pkg.go:5:2: "test/main/level1a/level2y" imported as level2x and not used
|
||||
level1a${/}pkg.go:8:39: undefined: level2y
|
||||
level1b${/}pkg.go:4:2: package foo is not in std ($GOROOT${/}src${/}foo)]
|
||||
-- level1b_61816.txt --
|
||||
package level1b
|
||||
|
||||
import (
|
||||
"foo"
|
||||
)
|
||||
|
||||
func Print() { println(level2x.Value, level2y.Value) }
|
||||
|
||||
-- go.mod --
|
||||
module test/main
|
||||
|
||||
|
||||
19
src/cmd/go/testdata/script/mod_get_insecure_redirect.txt
vendored
Normal file
@@ -0,0 +1,19 @@
# golang.org/issue/29591: 'go get' was following plain-HTTP redirects even without -insecure (now replaced by GOINSECURE).
# golang.org/issue/61877: 'go get' would panic in case of an insecure redirect in module mode

[!git] skip

env GOPRIVATE=vcs-test.golang.org

! go get -d vcs-test.golang.org/insecure/go/insecure
stderr 'redirected .* to insecure URL'

[short] stop 'builds a git repo'

env GOINSECURE=vcs-test.golang.org/insecure/go/insecure
go get -d vcs-test.golang.org/insecure/go/insecure

-- go.mod --
module example
go 1.21

32
src/cmd/go/testdata/script/mod_toolchain_slash.txt
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
[!exec:/bin/sh] skip
|
||||
|
||||
chmod 0777 go1.999999-/run.sh
|
||||
chmod 0777 run.sh
|
||||
|
||||
! go list all
|
||||
! stdout 'RAN SCRIPT'
|
||||
|
||||
cd subdir
|
||||
! go list all
|
||||
! stdout 'RAN SCRIPT'
|
||||
|
||||
-- go.mod --
|
||||
module exploit
|
||||
|
||||
go 1.21
|
||||
toolchain go1.999999-/run.sh
|
||||
-- go1.999999-/run.sh --
|
||||
#!/bin/sh
|
||||
printf 'RAN SCRIPT\n'
|
||||
exit 1
|
||||
-- run.sh --
|
||||
#!/bin/sh
|
||||
printf 'RAN SCRIPT\n'
|
||||
exit 1
|
||||
-- subdir/go.mod --
|
||||
module exploit
|
||||
|
||||
go 1.21
|
||||
toolchain go1.999999-/../../run.sh
|
||||
-- subdir/go1.999999-/README.txt --
|
||||
heh heh heh
|
||||
@@ -2,6 +2,10 @@
|
||||
|
||||
# Verify test -c can output multiple executables to a directory.
|
||||
|
||||
# This test also serves as a regression test for https://go.dev/issue/62221:
|
||||
# prior to the fix for that issue, it occasionally failed with ETXTBSY when
|
||||
# run on Unix platforms.
|
||||
|
||||
go test -c -o $WORK/some/nonexisting/directory/ ./pkg/...
|
||||
exists -exec $WORK/some/nonexisting/directory/pkg1.test$GOEXE
|
||||
exists -exec $WORK/some/nonexisting/directory/pkg2.test$GOEXE
|
||||
@@ -43,4 +47,4 @@ package pkg1
|
||||
package pkg2
|
||||
|
||||
-- anotherpkg/pkg1/pkg1_test.go --
|
||||
package pkg1
|
||||
package pkg1
|
||||
|
||||
@@ -2067,17 +2067,22 @@ func instructionsForProg(p *obj.Prog) []*instruction {
|
||||
return instructionsForStore(p, ins.as, p.To.Reg)
|
||||
|
||||
case ALRW, ALRD:
|
||||
// Set aq to use acquire access ordering, which matches Go's memory requirements.
|
||||
// Set aq to use acquire access ordering
|
||||
ins.funct7 = 2
|
||||
ins.rs1, ins.rs2 = uint32(p.From.Reg), REG_ZERO
|
||||
|
||||
case AADDI, AANDI, AORI, AXORI:
|
||||
inss = instructionsForOpImmediate(p, ins.as, p.Reg)
|
||||
|
||||
case ASCW, ASCD, AAMOSWAPW, AAMOSWAPD, AAMOADDW, AAMOADDD, AAMOANDW, AAMOANDD, AAMOORW, AAMOORD,
|
||||
case ASCW, ASCD:
|
||||
// Set release access ordering
|
||||
ins.funct7 = 1
|
||||
ins.rd, ins.rs1, ins.rs2 = uint32(p.RegTo2), uint32(p.To.Reg), uint32(p.From.Reg)
|
||||
|
||||
case AAMOSWAPW, AAMOSWAPD, AAMOADDW, AAMOADDD, AAMOANDW, AAMOANDD, AAMOORW, AAMOORD,
|
||||
AAMOXORW, AAMOXORD, AAMOMINW, AAMOMIND, AAMOMINUW, AAMOMINUD, AAMOMAXW, AAMOMAXD, AAMOMAXUW, AAMOMAXUD:
|
||||
// Set aq to use acquire access ordering, which matches Go's memory requirements.
|
||||
ins.funct7 = 2
|
||||
// Set aqrl to use acquire & release access ordering
|
||||
ins.funct7 = 3
|
||||
ins.rd, ins.rs1, ins.rs2 = uint32(p.RegTo2), uint32(p.To.Reg), uint32(p.From.Reg)
|
||||
|
||||
case AECALL, AEBREAK, ARDCYCLE, ARDTIME, ARDINSTRET:
|
||||
|
||||
@@ -446,7 +446,7 @@ func machoreloc1(arch *sys.Arch, out *ld.OutBuf, ldr *loader.Loader, s loader.Sy
|
||||
rs := r.Xsym
|
||||
rt := r.Type
|
||||
|
||||
if ldr.SymType(rs) == sym.SHOSTOBJ || rt == objabi.R_PCREL || rt == objabi.R_GOTPCREL || rt == objabi.R_CALL {
|
||||
if rt == objabi.R_PCREL || rt == objabi.R_GOTPCREL || rt == objabi.R_CALL || ldr.SymType(rs) == sym.SHOSTOBJ || ldr.SymType(s) == sym.SINITARR {
|
||||
if ldr.SymDynid(rs) < 0 {
|
||||
ldr.Errorf(s, "reloc %d (%s) to non-macho symbol %s type=%d (%s)", rt, sym.RelocName(arch, rt), ldr.SymName(rs), ldr.SymType(rs), ldr.SymType(rs))
|
||||
return false
|
||||
|
||||
@@ -545,10 +545,11 @@ func machoreloc1(arch *sys.Arch, out *ld.OutBuf, ldr *loader.Loader, s loader.Sy
|
||||
}
|
||||
}
|
||||
|
||||
if ldr.SymType(rs) == sym.SHOSTOBJ || rt == objabi.R_CALLARM64 ||
|
||||
if rt == objabi.R_CALLARM64 ||
|
||||
rt == objabi.R_ARM64_PCREL_LDST8 || rt == objabi.R_ARM64_PCREL_LDST16 ||
|
||||
rt == objabi.R_ARM64_PCREL_LDST32 || rt == objabi.R_ARM64_PCREL_LDST64 ||
|
||||
rt == objabi.R_ADDRARM64 || rt == objabi.R_ARM64_GOTPCREL {
|
||||
rt == objabi.R_ADDRARM64 || rt == objabi.R_ARM64_GOTPCREL ||
|
||||
ldr.SymType(rs) == sym.SHOSTOBJ || ldr.SymType(s) == sym.SINITARR {
|
||||
if ldr.SymDynid(rs) < 0 {
|
||||
ldr.Errorf(s, "reloc %d (%s) to non-macho symbol %s type=%d (%s)", rt, sym.RelocName(arch, rt), ldr.SymName(rs), ldr.SymType(rs), ldr.SymType(rs))
|
||||
return false
|
||||
|
||||
@@ -368,7 +368,9 @@ func (st *relocSymState) relocsym(s loader.Sym, P []byte) {
|
||||
o = 0
|
||||
}
|
||||
} else if target.IsDarwin() {
|
||||
if ldr.SymType(rs) != sym.SHOSTOBJ {
|
||||
if ldr.SymType(rs) != sym.SHOSTOBJ && ldr.SymType(s) != sym.SINITARR {
|
||||
// ld-prime drops the offset in data for SINITARR. We need to use
|
||||
// symbol-targeted relocation. See also machoreloc1.
|
||||
o += ldr.SymValue(rs)
|
||||
}
|
||||
} else if target.IsWindows() {
|
||||
|
||||
@@ -992,6 +992,11 @@ func typeSymbolMangle(name string) string {
|
||||
if strings.HasPrefix(name, "type:runtime.") {
|
||||
return name
|
||||
}
|
||||
if strings.HasPrefix(name, "go:string.") {
|
||||
// String symbols will be grouped to a single go:string.* symbol.
|
||||
// No need to mangle individual symbol names.
|
||||
return name
|
||||
}
|
||||
if len(name) <= 14 && !strings.Contains(name, "@") { // Issue 19529
|
||||
return name
|
||||
}
|
||||
@@ -1006,7 +1011,7 @@ func typeSymbolMangle(name string) string {
|
||||
// instantiated symbol, replace type name in []
|
||||
i := strings.IndexByte(name, '[')
|
||||
j := strings.LastIndexByte(name, ']')
|
||||
if j == -1 {
|
||||
if j == -1 || j <= i {
|
||||
j = len(name)
|
||||
}
|
||||
hash := notsha256.Sum256([]byte(name[i+1 : j]))
|
||||
|
||||
@@ -833,9 +833,9 @@ func asmbMacho(ctxt *Link) {
|
||||
ml.data[2] = uint32(linkoff + s1 + s2 + s3 + s4 + s5) /* stroff */
|
||||
ml.data[3] = uint32(s6) /* strsize */
|
||||
|
||||
machodysymtab(ctxt, linkoff+s1+s2)
|
||||
|
||||
if ctxt.LinkMode != LinkExternal {
|
||||
machodysymtab(ctxt, linkoff+s1+s2)
|
||||
|
||||
ml := newMachoLoad(ctxt.Arch, LC_LOAD_DYLINKER, 6)
|
||||
ml.data[0] = 12 /* offset to string */
|
||||
stringtouint32(ml.data[1:], "/usr/lib/dyld")
|
||||
|
||||
@@ -125,25 +125,64 @@ func ExampleWithValue() {
|
||||
// This example uses AfterFunc to define a function which waits on a sync.Cond,
|
||||
// stopping the wait when a context is canceled.
|
||||
func ExampleAfterFunc_cond() {
|
||||
waitOnCond := func(ctx context.Context, cond *sync.Cond) error {
|
||||
stopf := context.AfterFunc(ctx, cond.Broadcast)
|
||||
waitOnCond := func(ctx context.Context, cond *sync.Cond, conditionMet func() bool) error {
|
||||
stopf := context.AfterFunc(ctx, func() {
|
||||
// We need to acquire cond.L here to be sure that the Broadcast
|
||||
// below won't occur before the call to Wait, which would result
|
||||
// in a missed signal (and deadlock).
|
||||
cond.L.Lock()
|
||||
defer cond.L.Unlock()
|
||||
|
||||
// If multiple goroutines are waiting on cond simultaneously,
|
||||
// we need to make sure we wake up exactly this one.
|
||||
// That means that we need to Broadcast to all of the goroutines,
|
||||
// which will wake them all up.
|
||||
//
|
||||
// If there are N concurrent calls to waitOnCond, each of the goroutines
|
||||
// will spuriously wake up O(N) other goroutines that aren't ready yet,
|
||||
// so this will cause the overall CPU cost to be O(N²).
|
||||
cond.Broadcast()
|
||||
})
|
||||
defer stopf()
|
||||
cond.Wait()
|
||||
return ctx.Err()
|
||||
|
||||
// Since the wakeups are using Broadcast instead of Signal, this call to
|
||||
// Wait may unblock due to some other goroutine's context becoming done,
|
||||
// so to be sure that ctx is actually done we need to check it in a loop.
|
||||
for !conditionMet() {
|
||||
cond.Wait()
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond)
|
||||
defer cancel()
|
||||
cond := sync.NewCond(new(sync.Mutex))
|
||||
|
||||
var mu sync.Mutex
|
||||
cond := sync.NewCond(&mu)
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < 4; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
mu.Lock()
|
||||
err := waitOnCond(ctx, cond)
|
||||
fmt.Println(err)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond)
|
||||
defer cancel()
|
||||
|
||||
cond.L.Lock()
|
||||
defer cond.L.Unlock()
|
||||
|
||||
err := waitOnCond(ctx, cond, func() bool { return false })
|
||||
fmt.Println(err)
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
// Output:
|
||||
// context deadline exceeded
|
||||
// context deadline exceeded
|
||||
// context deadline exceeded
|
||||
// context deadline exceeded
|
||||
}
|
||||
|
||||
// This example uses AfterFunc to define a function which reads from a net.Conn,
|
||||
|
||||
@@ -1467,6 +1467,11 @@ func (c *Conn) closeNotify() error {
|
||||
//
|
||||
// For control over canceling or setting a timeout on a handshake, use
|
||||
// HandshakeContext or the Dialer's DialContext method instead.
|
||||
//
|
||||
// In order to avoid denial of service attacks, the maximum RSA key size allowed
|
||||
// in certificates sent by either the TLS server or client is limited to 8192
|
||||
// bits. This limit can be overridden by setting tlsmaxrsasize in the GODEBUG
|
||||
// environment variable (e.g. GODEBUG=tlsmaxrsasize=4096).
|
||||
func (c *Conn) Handshake() error {
|
||||
return c.HandshakeContext(context.Background())
|
||||
}
|
||||
|
||||
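The Handshake documentation above introduces an 8192-bit cap on RSA keys in peer certificates, overridable via GODEBUG=tlsmaxrsasize. A hedged sketch of a client run under that setting; the host example.com and the 16384-bit limit are placeholders, and the error text quoted in the comment is the message added later in this diff:

```go
// Sketch: run as
//   GODEBUG=tlsmaxrsasize=16384 go run client.go
// to accept peer RSA keys up to 16384 bits instead of the default 8192.
package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	conn, err := tls.Dial("tcp", "example.com:443", &tls.Config{})
	if err != nil {
		// With the default limit, an oversized peer key fails here with
		// "tls: server sent certificate containing RSA key larger than 8192 bits".
		fmt.Println("handshake failed:", err)
		return
	}
	defer conn.Close()
	fmt.Println("cipher suite:", tls.CipherSuiteName(conn.ConnectionState().CipherSuite))
}
```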
@@ -17,8 +17,10 @@ import (
	"errors"
	"fmt"
	"hash"
	"internal/godebug"
	"io"
	"net"
	"strconv"
	"strings"
	"time"
)
|
||||
@@ -936,6 +938,24 @@ func (hs *clientHandshakeState) sendFinished(out []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// defaultMaxRSAKeySize is the maximum RSA key size in bits that we are willing
|
||||
// to verify the signatures of during a TLS handshake.
|
||||
const defaultMaxRSAKeySize = 8192
|
||||
|
||||
var tlsmaxrsasize = godebug.New("tlsmaxrsasize")
|
||||
|
||||
func checkKeySize(n int) (max int, ok bool) {
|
||||
if v := tlsmaxrsasize.Value(); v != "" {
|
||||
if max, err := strconv.Atoi(v); err == nil {
|
||||
if (n <= max) != (n <= defaultMaxRSAKeySize) {
|
||||
tlsmaxrsasize.IncNonDefault()
|
||||
}
|
||||
return max, n <= max
|
||||
}
|
||||
}
|
||||
return defaultMaxRSAKeySize, n <= defaultMaxRSAKeySize
|
||||
}
|
||||
|
||||
// verifyServerCertificate parses and verifies the provided chain, setting
|
||||
// c.verifiedChains and c.peerCertificates or sending the appropriate alert.
|
||||
func (c *Conn) verifyServerCertificate(certificates [][]byte) error {
|
||||
@@ -947,6 +967,13 @@ func (c *Conn) verifyServerCertificate(certificates [][]byte) error {
|
||||
c.sendAlert(alertBadCertificate)
|
||||
return errors.New("tls: failed to parse certificate from server: " + err.Error())
|
||||
}
|
||||
if cert.cert.PublicKeyAlgorithm == x509.RSA {
|
||||
n := cert.cert.PublicKey.(*rsa.PublicKey).N.BitLen()
|
||||
if max, ok := checkKeySize(n); !ok {
|
||||
c.sendAlert(alertBadCertificate)
|
||||
return fmt.Errorf("tls: server sent certificate containing RSA key larger than %d bits", max)
|
||||
}
|
||||
}
|
||||
activeHandles[i] = cert
|
||||
certs[i] = cert.cert
|
||||
}
|
||||
|
||||
@@ -2721,3 +2721,106 @@ func testTLS13OnlyClientHelloCipherSuite(t *testing.T, ciphers []uint16) {
|
||||
t.Fatalf("handshake failed: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// discardConn wraps a net.Conn, discarding all writes while reporting that they happened.
type discardConn struct {
	net.Conn
}

func (dc *discardConn) Write(data []byte) (int, error) {
	return len(data), nil
}

// largeRSAKeyCertPEM contains an 8193-bit RSA key
|
||||
const largeRSAKeyCertPEM = `-----BEGIN CERTIFICATE-----
|
||||
MIIInjCCBIWgAwIBAgIBAjANBgkqhkiG9w0BAQsFADASMRAwDgYDVQQDEwd0ZXN0
|
||||
aW5nMB4XDTIzMDYwNzIxMjMzNloXDTIzMDYwNzIzMjMzNlowEjEQMA4GA1UEAxMH
|
||||
dGVzdGluZzCCBCIwDQYJKoZIhvcNAQEBBQADggQPADCCBAoCggQBAWdHsf6Rh2Ca
|
||||
n2SQwn4t4OQrOjbLLdGE1pM6TBKKrHUFy62uEL8atNjlcfXIsa4aEu3xNGiqxqur
|
||||
ZectlkZbm0FkaaQ1Wr9oikDY3KfjuaXdPdO/XC/h8AKNxlDOylyXwUSK/CuYb+1j
|
||||
gy8yF5QFvVfwW/xwTlHmhUeSkVSQPosfQ6yXNNsmMzkd+ZPWLrfq4R+wiNtwYGu0
|
||||
WSBcI/M9o8/vrNLnIppoiBJJ13j9CR1ToEAzOFh9wwRWLY10oZhoh1ONN1KQURx4
|
||||
qedzvvP2DSjZbUccdvl2rBGvZpzfOiFdm1FCnxB0c72Cqx+GTHXBFf8bsa7KHky9
|
||||
sNO1GUanbq17WoDNgwbY6H51bfShqv0CErxatwWox3we4EcAmFHPVTCYL1oWVMGo
|
||||
a3Eth91NZj+b/nGhF9lhHKGzXSv9brmLLkfvM1jA6XhNhA7BQ5Vz67lj2j3XfXdh
|
||||
t/BU5pBXbL4Ut4mIhT1YnKXAjX2/LF5RHQTE8Vwkx5JAEKZyUEGOReD/B+7GOrLp
|
||||
HduMT9vZAc5aR2k9I8qq1zBAzsL69lyQNAPaDYd1BIAjUety9gAYaSQffCgAgpRO
|
||||
Gt+DYvxS+7AT/yEd5h74MU2AH7KrAkbXOtlwupiGwhMVTstncDJWXMJqbBhyHPF8
|
||||
3UmZH0hbL4PYmzSj9LDWQQXI2tv6vrCpfts3Cqhqxz9vRpgY7t1Wu6l/r+KxYYz3
|
||||
1pcGpPvRmPh0DJm7cPTiXqPnZcPt+ulSaSdlxmd19OnvG5awp0fXhxryZVwuiT8G
|
||||
VDkhyARrxYrdjlINsZJZbQjO0t8ketXAELJOnbFXXzeCOosyOHkLwsqOO96AVJA8
|
||||
45ZVL5m95ClGy0RSrjVIkXsxTAMVG6SPAqKwk6vmTdRGuSPS4rhgckPVDHmccmuq
|
||||
dfnT2YkX+wB2/M3oCgU+s30fAHGkbGZ0pCdNbFYFZLiH0iiMbTDl/0L/z7IdK0nH
|
||||
GLHVE7apPraKC6xl6rPWsD2iSfrmtIPQa0+rqbIVvKP5JdfJ8J4alI+OxFw/znQe
|
||||
V0/Rez0j22Fe119LZFFSXhRv+ZSvcq20xDwh00mzcumPWpYuCVPozA18yIhC9tNn
|
||||
ALHndz0tDseIdy9vC71jQWy9iwri3ueN0DekMMF8JGzI1Z6BAFzgyAx3DkHtwHg7
|
||||
B7qD0jPG5hJ5+yt323fYgJsuEAYoZ8/jzZ01pkX8bt+UsVN0DGnSGsI2ktnIIk3J
|
||||
l+8krjmUy6EaW79nITwoOqaeHOIp8m3UkjEcoKOYrzHRKqRy+A09rY+m/cAQaafW
|
||||
4xp0Zv7qZPLwnu0jsqB4jD8Ll9yPB02ndsoV6U5PeHzTkVhPml19jKUAwFfs7TJg
|
||||
kXy+/xFhYVUCAwEAATANBgkqhkiG9w0BAQsFAAOCBAIAAQnZY77pMNeypfpba2WK
|
||||
aDasT7dk2JqP0eukJCVPTN24Zca+xJNPdzuBATm/8SdZK9lddIbjSnWRsKvTnO2r
|
||||
/rYdlPf3jM5uuJtb8+Uwwe1s+gszelGS9G/lzzq+ehWicRIq2PFcs8o3iQMfENiv
|
||||
qILJ+xjcrvms5ZPDNahWkfRx3KCg8Q+/at2n5p7XYjMPYiLKHnDC+RE2b1qT20IZ
|
||||
FhuK/fTWLmKbfYFNNga6GC4qcaZJ7x0pbm4SDTYp0tkhzcHzwKhidfNB5J2vNz6l
|
||||
Ur6wiYwamFTLqcOwWo7rdvI+sSn05WQBv0QZlzFX+OAu0l7WQ7yU+noOxBhjvHds
|
||||
14+r9qcQZg2q9kG+evopYZqYXRUNNlZKo9MRBXhfrISulFAc5lRFQIXMXnglvAu+
|
||||
Ipz2gomEAOcOPNNVldhKAU94GAMJd/KfN0ZP7gX3YvPzuYU6XDhag5RTohXLm18w
|
||||
5AF+ES3DOQ6ixu3DTf0D+6qrDuK+prdX8ivcdTQVNOQ+MIZeGSc6NWWOTaMGJ3lg
|
||||
aZIxJUGdo6E7GBGiC1YTjgFKFbHzek1LRTh/LX3vbSudxwaG0HQxwsU9T4DWiMqa
|
||||
Fkf2KteLEUA6HrR+0XlAZrhwoqAmrJ+8lCFX3V0gE9lpENfVHlFXDGyx10DpTB28
|
||||
DdjnY3F7EPWNzwf9P3oNT69CKW3Bk6VVr3ROOJtDxVu1ioWo3TaXltQ0VOnap2Pu
|
||||
sa5wfrpfwBDuAS9JCDg4ttNp2nW3F7tgXC6xPqw5pvGwUppEw9XNrqV8TZrxduuv
|
||||
rQ3NyZ7KSzIpmFlD3UwV/fGfz3UQmHS6Ng1evrUID9DjfYNfRqSGIGjDfxGtYD+j
|
||||
Z1gLJZuhjJpNtwBkKRtlNtrCWCJK2hidK/foxwD7kwAPo2I9FjpltxCRywZUs07X
|
||||
KwXTfBR9v6ij1LV6K58hFS+8ezZyZ05CeVBFkMQdclTOSfuPxlMkQOtjp8QWDj+F
|
||||
j/MYziT5KBkHvcbrjdRtUJIAi4N7zCsPZtjik918AK1WBNRVqPbrgq/XSEXMfuvs
|
||||
6JbfK0B76vdBDRtJFC1JsvnIrGbUztxXzyQwFLaR/AjVJqpVlysLWzPKWVX6/+SJ
|
||||
u1NQOl2E8P6ycyBsuGnO89p0S4F8cMRcI2X1XQsZ7/q0NBrOMaEp5T3SrWo9GiQ3
|
||||
o2SBdbs3Y6MBPBtTu977Z/0RO63J3M5i2tjUiDfrFy7+VRLKr7qQ7JibohyB8QaR
|
||||
9tedgjn2f+of7PnP/PEl1cCphUZeHM7QKUMPT8dbqwmKtlYY43EHXcvNOT5IBk3X
|
||||
9lwJoZk/B2i+ZMRNSP34ztAwtxmasPt6RAWGQpWCn9qmttAHAnMfDqe7F7jVR6rS
|
||||
u58=
|
||||
-----END CERTIFICATE-----`
|
||||
|
||||
func TestHandshakeRSATooBig(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
godebug string
|
||||
expectedServerErr string
|
||||
expectedClientErr string
|
||||
}{
|
||||
{
|
||||
name: "key too large",
|
||||
expectedServerErr: "tls: server sent certificate containing RSA key larger than 8192 bits",
|
||||
expectedClientErr: "tls: client sent certificate containing RSA key larger than 8192 bits",
|
||||
},
|
||||
{
|
||||
name: "acceptable key (GODEBUG=tlsmaxrsasize=8193)",
|
||||
godebug: "tlsmaxrsasize=8193",
|
||||
},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
if tc.godebug != "" {
|
||||
t.Setenv("GODEBUG", tc.godebug)
|
||||
}
|
||||
|
||||
testCert, _ := pem.Decode([]byte(largeRSAKeyCertPEM))
|
||||
|
||||
c := &Conn{conn: &discardConn{}, config: testConfig.Clone()}
|
||||
|
||||
err := c.verifyServerCertificate([][]byte{testCert.Bytes})
|
||||
if tc.expectedServerErr == "" && err != nil {
|
||||
t.Errorf("Conn.verifyServerCertificate unexpected error: %s", err)
|
||||
} else if tc.expectedServerErr != "" && (err == nil || err.Error() != tc.expectedServerErr) {
|
||||
t.Errorf("Conn.verifyServerCertificate unexpected error: want %q, got %q", tc.expectedServerErr, err)
|
||||
}
|
||||
|
||||
err = c.processCertsFromClient(Certificate{Certificate: [][]byte{testCert.Bytes}})
|
||||
if tc.expectedClientErr == "" && err != nil {
|
||||
t.Errorf("Conn.processCertsFromClient unexpected error: %s", err)
|
||||
} else if tc.expectedClientErr != "" && (err == nil || err.Error() != tc.expectedClientErr) {
|
||||
t.Errorf("Conn.processCertsFromClient unexpected error: want %q, got %q", tc.expectedClientErr, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -864,6 +864,13 @@ func (c *Conn) processCertsFromClient(certificate Certificate) error {
|
||||
c.sendAlert(alertBadCertificate)
|
||||
return errors.New("tls: failed to parse client certificate: " + err.Error())
|
||||
}
|
||||
if certs[i].PublicKeyAlgorithm == x509.RSA {
|
||||
n := certs[i].PublicKey.(*rsa.PublicKey).N.BitLen()
|
||||
if max, ok := checkKeySize(n); !ok {
|
||||
c.sendAlert(alertBadCertificate)
|
||||
return fmt.Errorf("tls: client sent certificate containing RSA key larger than %d bits", max)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(certs) == 0 && requiresClientCert(c.config.ClientAuth) {
|
||||
|
||||
@@ -228,16 +228,22 @@ func (q *QUICConn) HandleData(level QUICEncryptionLevel, data []byte) error {
|
||||
return nil
|
||||
}
|
||||
// The handshake goroutine has exited.
|
||||
c.handshakeMutex.Lock()
|
||||
defer c.handshakeMutex.Unlock()
|
||||
c.hand.Write(c.quic.readbuf)
|
||||
c.quic.readbuf = nil
|
||||
for q.conn.hand.Len() >= 4 && q.conn.handshakeErr == nil {
|
||||
b := q.conn.hand.Bytes()
|
||||
n := int(b[1])<<16 | int(b[2])<<8 | int(b[3])
|
||||
if 4+n < len(b) {
|
||||
if n > maxHandshake {
|
||||
q.conn.handshakeErr = fmt.Errorf("tls: handshake message of length %d bytes exceeds maximum of %d bytes", n, maxHandshake)
|
||||
break
|
||||
}
|
||||
if len(b) < 4+n {
|
||||
return nil
|
||||
}
|
||||
if err := q.conn.handlePostHandshakeMessage(); err != nil {
|
||||
return quicError(err)
|
||||
q.conn.handshakeErr = err
|
||||
}
|
||||
}
|
||||
if q.conn.handshakeErr != nil {
|
||||
@@ -246,10 +252,15 @@ func (q *QUICConn) HandleData(level QUICEncryptionLevel, data []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type QUICSessionTicketOptions struct {
|
||||
// EarlyData specifies whether the ticket may be used for 0-RTT.
|
||||
EarlyData bool
|
||||
}
|
||||
|
||||
// SendSessionTicket sends a session ticket to the client.
|
||||
// It produces connection events, which may be read with NextEvent.
|
||||
// Currently, it can only be called once.
|
||||
func (q *QUICConn) SendSessionTicket(earlyData bool) error {
|
||||
func (q *QUICConn) SendSessionTicket(opts QUICSessionTicketOptions) error {
|
||||
c := q.conn
|
||||
if !c.isHandshakeComplete.Load() {
|
||||
return quicError(errors.New("tls: SendSessionTicket called before handshake completed"))
|
||||
@@ -261,7 +272,7 @@ func (q *QUICConn) SendSessionTicket(earlyData bool) error {
|
||||
return quicError(errors.New("tls: SendSessionTicket called multiple times"))
|
||||
}
|
||||
q.sessionTicketSent = true
|
||||
return quicError(c.sendSessionTicket(earlyData))
|
||||
return quicError(c.sendSessionTicket(opts.EarlyData))
|
||||
}
|
||||
|
||||
// ConnectionState returns basic TLS details about the connection.
|
||||
|
||||
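The hunk above replaces SendSessionTicket's bare bool with a QUICSessionTicketOptions struct. A minimal sketch of the new call site as a server-side QUIC integration might use it; the wrapper function and the allow0RTT flag name are assumptions for illustration, only the crypto/tls types come from the diff:

```go
package quicglue

import "crypto/tls"

// issueSessionTicket asks the TLS layer to emit a resumption ticket once the
// server handshake has completed. EarlyData in the options struct replaces
// the old boolean argument and controls whether the ticket permits 0-RTT.
func issueSessionTicket(conn *tls.QUICConn, allow0RTT bool) error {
	return conn.SendSessionTicket(tls.QUICSessionTicketOptions{
		EarlyData: allow0RTT,
	})
}
```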
@@ -85,7 +85,7 @@ func (q *testQUICConn) setWriteSecret(level QUICEncryptionLevel, suite uint16, s
|
||||
|
||||
var errTransportParametersRequired = errors.New("transport parameters required")
|
||||
|
||||
func runTestQUICConnection(ctx context.Context, cli, srv *testQUICConn, onHandleCryptoData func()) error {
|
||||
func runTestQUICConnection(ctx context.Context, cli, srv *testQUICConn, onEvent func(e QUICEvent, src, dst *testQUICConn) bool) error {
|
||||
a, b := cli, srv
|
||||
for _, c := range []*testQUICConn{a, b} {
|
||||
if !c.conn.conn.quic.started {
|
||||
@@ -97,6 +97,9 @@ func runTestQUICConnection(ctx context.Context, cli, srv *testQUICConn, onHandle
|
||||
idleCount := 0
|
||||
for {
|
||||
e := a.conn.NextEvent()
|
||||
if onEvent != nil && onEvent(e, a, b) {
|
||||
continue
|
||||
}
|
||||
switch e.Kind {
|
||||
case QUICNoEvent:
|
||||
idleCount++
|
||||
@@ -125,7 +128,8 @@ func runTestQUICConnection(ctx context.Context, cli, srv *testQUICConn, onHandle
|
||||
case QUICHandshakeDone:
|
||||
a.complete = true
|
||||
if a == srv {
|
||||
if err := srv.conn.SendSessionTicket(false); err != nil {
|
||||
opts := QUICSessionTicketOptions{}
|
||||
if err := srv.conn.SendSessionTicket(opts); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -210,6 +214,37 @@ func TestQUICSessionResumption(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestQUICFragmentaryData(t *testing.T) {
|
||||
clientConfig := testConfig.Clone()
|
||||
clientConfig.MinVersion = VersionTLS13
|
||||
clientConfig.ClientSessionCache = NewLRUClientSessionCache(1)
|
||||
clientConfig.ServerName = "example.go.dev"
|
||||
|
||||
serverConfig := testConfig.Clone()
|
||||
serverConfig.MinVersion = VersionTLS13
|
||||
|
||||
cli := newTestQUICClient(t, clientConfig)
|
||||
cli.conn.SetTransportParameters(nil)
|
||||
srv := newTestQUICServer(t, serverConfig)
|
||||
srv.conn.SetTransportParameters(nil)
|
||||
onEvent := func(e QUICEvent, src, dst *testQUICConn) bool {
|
||||
if e.Kind == QUICWriteData {
|
||||
// Provide the data one byte at a time.
|
||||
for i := range e.Data {
|
||||
if err := dst.conn.HandleData(e.Level, e.Data[i:i+1]); err != nil {
|
||||
t.Errorf("HandleData: %v", err)
|
||||
break
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
if err := runTestQUICConnection(context.Background(), cli, srv, onEvent); err != nil {
|
||||
t.Fatalf("error during first connection handshake: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestQUICPostHandshakeClientAuthentication(t *testing.T) {
|
||||
// RFC 9001, Section 4.4.
|
||||
config := testConfig.Clone()
|
||||
@@ -263,6 +298,28 @@ func TestQUICPostHandshakeKeyUpdate(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestQUICPostHandshakeMessageTooLarge(t *testing.T) {
|
||||
config := testConfig.Clone()
|
||||
config.MinVersion = VersionTLS13
|
||||
cli := newTestQUICClient(t, config)
|
||||
cli.conn.SetTransportParameters(nil)
|
||||
srv := newTestQUICServer(t, config)
|
||||
srv.conn.SetTransportParameters(nil)
|
||||
if err := runTestQUICConnection(context.Background(), cli, srv, nil); err != nil {
|
||||
t.Fatalf("error during connection handshake: %v", err)
|
||||
}
|
||||
|
||||
size := maxHandshake + 1
|
||||
if err := cli.conn.HandleData(QUICEncryptionLevelApplication, []byte{
|
||||
byte(typeNewSessionTicket),
|
||||
byte(size >> 16),
|
||||
byte(size >> 8),
|
||||
byte(size),
|
||||
}); err == nil {
|
||||
t.Fatalf("%v-byte post-handshake message: got no error, want one", size)
|
||||
}
|
||||
}
|
||||
|
||||
func TestQUICHandshakeError(t *testing.T) {
|
||||
clientConfig := testConfig.Clone()
|
||||
clientConfig.MinVersion = VersionTLS13
|
||||
@@ -297,26 +354,22 @@ func TestQUICConnectionState(t *testing.T) {
|
||||
cli.conn.SetTransportParameters(nil)
|
||||
srv := newTestQUICServer(t, config)
|
||||
srv.conn.SetTransportParameters(nil)
|
||||
onHandleCryptoData := func() {
|
||||
onEvent := func(e QUICEvent, src, dst *testQUICConn) bool {
|
||||
cliCS := cli.conn.ConnectionState()
|
||||
cliWantALPN := ""
|
||||
if _, ok := cli.readSecret[QUICEncryptionLevelApplication]; ok {
|
||||
cliWantALPN = "h3"
|
||||
if want, got := cliCS.NegotiatedProtocol, "h3"; want != got {
|
||||
t.Errorf("cli.ConnectionState().NegotiatedProtocol = %q, want %q", want, got)
|
||||
}
|
||||
}
|
||||
if want, got := cliCS.NegotiatedProtocol, cliWantALPN; want != got {
|
||||
t.Errorf("cli.ConnectionState().NegotiatedProtocol = %q, want %q", want, got)
|
||||
}
|
||||
|
||||
srvCS := srv.conn.ConnectionState()
|
||||
srvWantALPN := ""
|
||||
if _, ok := srv.readSecret[QUICEncryptionLevelHandshake]; ok {
|
||||
srvWantALPN = "h3"
|
||||
}
|
||||
if want, got := srvCS.NegotiatedProtocol, srvWantALPN; want != got {
|
||||
t.Errorf("srv.ConnectionState().NegotiatedProtocol = %q, want %q", want, got)
|
||||
if want, got := srvCS.NegotiatedProtocol, "h3"; want != got {
|
||||
t.Errorf("srv.ConnectionState().NegotiatedProtocol = %q, want %q", want, got)
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
if err := runTestQUICConnection(context.Background(), cli, srv, onHandleCryptoData); err != nil {
|
||||
if err := runTestQUICConnection(context.Background(), cli, srv, onEvent); err != nil {
|
||||
t.Fatalf("error during connection handshake: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1607,3 +1607,15 @@ func TestLargeSlice(t *testing.T) {
|
||||
testEncodeDecode(t, st, rt)
|
||||
})
|
||||
}
|
||||
|
||||
func TestLocalRemoteTypesMismatch(t *testing.T) {
|
||||
// Test data is from https://go.dev/issue/62117.
|
||||
testData := []byte{9, 127, 3, 1, 2, 255, 128, 0, 0, 0, 3, 255, 128, 0}
|
||||
|
||||
var v []*struct{}
|
||||
buf := bytes.NewBuffer(testData)
|
||||
err := NewDecoder(buf).Decode(&v)
|
||||
if err == nil {
|
||||
t.Error("Encode/Decode: expected error but got err == nil")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1082,7 +1082,7 @@ func (dec *Decoder) compatibleType(fr reflect.Type, fw typeId, inProgress map[re
|
||||
func (dec *Decoder) typeString(remoteId typeId) string {
|
||||
typeLock.Lock()
|
||||
defer typeLock.Unlock()
|
||||
if t := idToType[remoteId]; t != nil {
|
||||
if t := idToType(remoteId); t != nil {
|
||||
// globally known type.
|
||||
return t.string()
|
||||
}
|
||||
|
||||
@@ -173,9 +173,18 @@ type gobType interface {
|
||||
safeString(seen map[typeId]bool) string
|
||||
}
|
||||
|
||||
var types = make(map[reflect.Type]gobType, 32)
|
||||
var idToType = make([]gobType, 1, firstUserId)
|
||||
var builtinIdToTypeSlice [firstUserId]gobType // set in init() after builtins are established
|
||||
var (
|
||||
types = make(map[reflect.Type]gobType, 32)
|
||||
idToTypeSlice = make([]gobType, 1, firstUserId)
|
||||
builtinIdToTypeSlice [firstUserId]gobType // set in init() after builtins are established
|
||||
)
|
||||
|
||||
func idToType(id typeId) gobType {
|
||||
if id < 0 || int(id) >= len(idToTypeSlice) {
|
||||
return nil
|
||||
}
|
||||
return idToTypeSlice[id]
|
||||
}
|
||||
|
||||
func builtinIdToType(id typeId) gobType {
|
||||
if id < 0 || int(id) >= len(builtinIdToTypeSlice) {
|
||||
@@ -189,16 +198,16 @@ func setTypeId(typ gobType) {
|
||||
if typ.id() != 0 {
|
||||
return
|
||||
}
|
||||
nextId := typeId(len(idToType))
|
||||
nextId := typeId(len(idToTypeSlice))
|
||||
typ.setId(nextId)
|
||||
idToType = append(idToType, typ)
|
||||
idToTypeSlice = append(idToTypeSlice, typ)
|
||||
}
|
||||
|
||||
func (t typeId) gobType() gobType {
|
||||
if t == 0 {
|
||||
return nil
|
||||
}
|
||||
return idToType[t]
|
||||
return idToType(t)
|
||||
}
|
||||
|
||||
// string returns the string representation of the type associated with the typeId.
|
||||
@@ -277,14 +286,14 @@ func init() {
|
||||
checkId(21, mustGetTypeInfo(reflect.TypeOf((*fieldType)(nil)).Elem()).id)
|
||||
checkId(23, mustGetTypeInfo(reflect.TypeOf((*mapType)(nil)).Elem()).id)
|
||||
|
||||
copy(builtinIdToTypeSlice[:], idToType)
|
||||
copy(builtinIdToTypeSlice[:], idToTypeSlice)
|
||||
|
||||
// Move the id space upwards to allow for growth in the predefined world
|
||||
// without breaking existing files.
|
||||
if nextId := len(idToType); nextId > firstUserId {
|
||||
if nextId := len(idToTypeSlice); nextId > firstUserId {
|
||||
panic(fmt.Sprintln("nextId too large:", nextId))
|
||||
}
|
||||
idToType = idToType[:firstUserId]
|
||||
idToTypeSlice = idToTypeSlice[:firstUserId]
|
||||
registerBasics()
|
||||
wireTypeUserInfo = userType(wireTypeType)
|
||||
}
|
||||
@@ -526,7 +535,7 @@ func newTypeObject(name string, ut *userTypeInfo, rt reflect.Type) (gobType, err
|
||||
case reflect.Struct:
|
||||
st := newStructType(name)
|
||||
types[rt] = st
|
||||
idToType[st.id()] = st
|
||||
idToTypeSlice[st.id()] = st
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
if !isSent(&f) {
|
||||
|
||||
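The encoding/gob change above converts the package-level idToType slice into a bounds-checked accessor, so a remote type id outside the known range yields nil instead of panicking (go.dev/issue/62117). A standalone sketch of the same defensive-lookup pattern; the string table merely stands in for gob's real type table:

```go
package main

import "fmt"

type typeId int32

// table stands in for gob's idToTypeSlice; lookup is the point of the change:
// an out-of-range id coming from a hostile or mismatched stream returns the
// zero value instead of triggering an index-out-of-range panic.
var table = []string{"interface", "bool", "int", "uint", "float"}

func lookup(id typeId) string {
	if id < 0 || int(id) >= len(table) {
		return ""
	}
	return table[id]
}

func main() {
	fmt.Println(lookup(3))    // "uint"
	fmt.Println(lookup(9999)) // "" — the caller turns this into a decode error
}
```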
@@ -543,8 +543,9 @@ func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplat
|
||||
}
|
||||
}
|
||||
|
||||
// If a name was found, namespace is overridden with an empty space
|
||||
// If an empty name was found, namespace is overridden with an empty space
|
||||
if tinfo.xmlname != nil && start.Name.Space == "" &&
|
||||
tinfo.xmlname.xmlns == "" && tinfo.xmlname.name == "" &&
|
||||
len(p.tags) != 0 && p.tags[len(p.tags)-1].Space != "" {
|
||||
start.Attr = append(start.Attr, Attr{Name{"", xmlnsPrefix}, ""})
|
||||
}
|
||||
|
||||
@@ -1064,14 +1064,19 @@ func TestIssue7113(t *testing.T) {
|
||||
XMLName Name `xml:""` // Sets empty namespace
|
||||
}
|
||||
|
||||
type D struct {
|
||||
XMLName Name `xml:"d"`
|
||||
}
|
||||
|
||||
type A struct {
|
||||
XMLName Name `xml:""`
|
||||
C C `xml:""`
|
||||
D D
|
||||
}
|
||||
|
||||
var a A
|
||||
structSpace := "b"
|
||||
xmlTest := `<A xmlns="` + structSpace + `"><C xmlns=""></C></A>`
|
||||
xmlTest := `<A xmlns="` + structSpace + `"><C xmlns=""></C><d></d></A>`
|
||||
t.Log(xmlTest)
|
||||
err := Unmarshal([]byte(xmlTest), &a)
|
||||
if err != nil {
|
||||
|
||||
@@ -245,15 +245,15 @@ var depsRules = `
|
||||
< text/template
|
||||
< internal/lazytemplate;
|
||||
|
||||
encoding/json, html, text/template
|
||||
< html/template;
|
||||
|
||||
# regexp
|
||||
FMT
|
||||
< regexp/syntax
|
||||
< regexp
|
||||
< internal/lazyregexp;
|
||||
|
||||
encoding/json, html, text/template, regexp
|
||||
< html/template;
|
||||
|
||||
# suffix array
|
||||
encoding/binary, regexp
|
||||
< index/suffixarray;
|
||||
|
||||
@@ -114,11 +114,11 @@ type Config struct {
|
||||
// type checker will initialize this field with a newly created context.
|
||||
Context *Context
|
||||
|
||||
// GoVersion describes the accepted Go language version. The string
|
||||
// must follow the format "go%d.%d" (e.g. "go1.12") or it must be
|
||||
// empty; an empty string disables Go language version checks.
|
||||
// If the format is invalid, invoking the type checker will cause a
|
||||
// panic.
|
||||
// GoVersion describes the accepted Go language version. The string must
|
||||
// start with a prefix of the form "go%d.%d" (e.g. "go1.20", "go1.21rc1", or
|
||||
// "go1.21.0") or it must be empty; an empty string disables Go language
|
||||
// version checks. If the format is invalid, invoking the type checker will
|
||||
// result in an error.
|
||||
GoVersion string
|
||||
|
||||
// If IgnoreFuncBodies is set, function bodies are not
|
||||
|
||||
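The revised GoVersion documentation above accepts full release strings such as "go1.21.0" and turns a malformed value into a type-checking error rather than a panic. A hedged sketch of wiring that field up; the one-line source file is invented for the example:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	const src = "package p\n\nvar x = 42\n"
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{
		// Release strings like "go1.21.0" or "go1.21rc1" are accepted now;
		// an unparseable value surfaces as an error from Check, not a panic.
		GoVersion: "go1.21.0",
		Importer:  importer.Default(),
	}
	if _, err := conf.Check("p", fset, []*ast.File{f}, nil); err != nil {
		fmt.Println("type check:", err)
	}
}
```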
@@ -2071,6 +2071,29 @@ func TestIdenticalUnions(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestIssue61737(t *testing.T) {
|
||||
// This test verifies that it is possible to construct invalid interfaces
|
||||
// containing duplicate methods using the go/types API.
|
||||
//
|
||||
// It must be possible for importers to construct such invalid interfaces.
|
||||
// Previously, this panicked.
|
||||
|
||||
sig1 := NewSignatureType(nil, nil, nil, NewTuple(NewParam(nopos, nil, "", Typ[Int])), nil, false)
|
||||
sig2 := NewSignatureType(nil, nil, nil, NewTuple(NewParam(nopos, nil, "", Typ[String])), nil, false)
|
||||
|
||||
methods := []*Func{
|
||||
NewFunc(nopos, nil, "M", sig1),
|
||||
NewFunc(nopos, nil, "M", sig2),
|
||||
}
|
||||
|
||||
embeddedMethods := []*Func{
|
||||
NewFunc(nopos, nil, "M", sig2),
|
||||
}
|
||||
embedded := NewInterfaceType(embeddedMethods, nil)
|
||||
iface := NewInterfaceType(methods, []Type{embedded})
|
||||
iface.Complete()
|
||||
}
|
||||
|
||||
func TestIssue15305(t *testing.T) {
|
||||
const src = "package p; func f() int16; var _ = f(undef)"
|
||||
fset := token.NewFileSet()
|
||||
|
||||
@@ -575,6 +575,11 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b
|
||||
// If nargs == 1, make sure x.mode is either a value or a constant.
|
||||
if x.mode != constant_ {
|
||||
x.mode = value
|
||||
// A value must not be untyped.
|
||||
check.assignment(x, &emptyInterface, "argument to "+bin.name)
|
||||
if x.mode == invalid {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Use the final type computed above for all arguments.
|
||||
|
||||
@@ -612,20 +612,17 @@ func (check *Checker) arguments(call *ast.CallExpr, sig *Signature, targs []Type
|
||||
return // error already reported
|
||||
}
|
||||
|
||||
// compute result signature: instantiate if needed
|
||||
rsig = sig
|
||||
// update result signature: instantiate if needed
|
||||
if n > 0 {
|
||||
rsig = check.instantiateSignature(call.Pos(), call.Fun, sig, targs[:n], xlist)
|
||||
}
|
||||
|
||||
// Optimization: Only if the callee's parameter list was adjusted do we need to
|
||||
// compute it from the adjusted list; otherwise we can simply use the result
|
||||
// signature's parameter list. We only need the n type parameters and arguments
|
||||
// of the callee.
|
||||
if n > 0 && adjusted {
|
||||
sigParams = check.subst(call.Pos(), sigParams, makeSubstMap(tparams[:n], targs[:n]), nil, check.context()).(*Tuple)
|
||||
} else {
|
||||
sigParams = rsig.params
|
||||
// If the callee's parameter list was adjusted we need to update (instantiate)
|
||||
// it separately. Otherwise we can simply use the result signature's parameter
|
||||
// list.
|
||||
if adjusted {
|
||||
sigParams = check.subst(call.Pos(), sigParams, makeSubstMap(tparams[:n], targs[:n]), nil, check.context()).(*Tuple)
|
||||
} else {
|
||||
sigParams = rsig.params
|
||||
}
|
||||
}
|
||||
|
||||
// compute argument signatures: instantiate if needed
|
||||
|
||||
@@ -242,6 +242,14 @@ func fixInferSig(f *ast.File) {
|
||||
n.Args[0] = arg
|
||||
return false
|
||||
}
|
||||
case "allowVersion":
|
||||
// rewrite check.allowVersion(..., pos, ...) to check.allowVersion(..., posn, ...)
|
||||
if ident, _ := n.Args[1].(*ast.Ident); ident != nil && ident.Name == "pos" {
|
||||
pos := n.Args[1].Pos()
|
||||
arg := newIdent(pos, "posn")
|
||||
n.Args[1] = arg
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -98,7 +98,7 @@ func (check *Checker) infer(posn positioner, tparams []*TypeParam, targs []Type,
|
||||
// Unify parameter and argument types for generic parameters with typed arguments
|
||||
// and collect the indices of generic parameters with untyped arguments.
|
||||
// Terminology: generic parameter = function parameter with a type-parameterized type
|
||||
u := newUnifier(tparams, targs)
|
||||
u := newUnifier(tparams, targs, check.allowVersion(check.pkg, posn, go1_21))
|
||||
|
||||
errorf := func(kind string, tpar, targ Type, arg *operand) {
|
||||
// provide a better error message if we can
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/importer"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"internal/testenv"
|
||||
"regexp"
|
||||
@@ -908,3 +909,24 @@ func _cgoCheckResult(interface{})
|
||||
*boolFieldAddr(cfg, "go115UsesCgo") = true
|
||||
})
|
||||
}
|
||||
|
||||
func TestIssue61931(t *testing.T) {
|
||||
const src = `
|
||||
package p
|
||||
|
||||
func A(func(any), ...any) {}
|
||||
func B[T any](T) {}
|
||||
|
||||
func _() {
|
||||
A(B, nil // syntax error: missing ',' before newline in argument list
|
||||
}
|
||||
`
|
||||
fset := token.NewFileSet()
|
||||
f, err := parser.ParseFile(fset, pkgName(src), src, 0)
|
||||
if err == nil {
|
||||
t.Fatal("expected syntax error")
|
||||
}
|
||||
|
||||
var conf Config
|
||||
conf.Check(f.Name.Name, fset, []*ast.File{f}, nil) // must not panic
|
||||
}
|
||||
|
||||
@@ -5,7 +5,6 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/token"
|
||||
. "internal/types/errors"
|
||||
"sort"
|
||||
@@ -216,7 +215,6 @@ func computeInterfaceTypeSet(check *Checker, pos token.Pos, ityp *Interface) *_T
|
||||
// we can get rid of the mpos map below and simply use the cloned method's
|
||||
// position.
|
||||
|
||||
var todo []*Func
|
||||
var seen objset
|
||||
var allMethods []*Func
|
||||
mpos := make(map[*Func]token.Pos) // method specification or method embedding position, for good error messages
|
||||
@@ -226,30 +224,24 @@ func computeInterfaceTypeSet(check *Checker, pos token.Pos, ityp *Interface) *_T
|
||||
allMethods = append(allMethods, m)
|
||||
mpos[m] = pos
|
||||
case explicit:
|
||||
if check == nil {
|
||||
panic(fmt.Sprintf("%v: duplicate method %s", m.pos, m.name))
|
||||
if check != nil {
|
||||
check.errorf(atPos(pos), DuplicateDecl, "duplicate method %s", m.name)
|
||||
check.errorf(atPos(mpos[other.(*Func)]), DuplicateDecl, "\tother declaration of %s", m.name) // secondary error, \t indented
|
||||
}
|
||||
// check != nil
|
||||
check.errorf(atPos(pos), DuplicateDecl, "duplicate method %s", m.name)
|
||||
check.errorf(atPos(mpos[other.(*Func)]), DuplicateDecl, "\tother declaration of %s", m.name) // secondary error, \t indented
|
||||
default:
|
||||
// We have a duplicate method name in an embedded (not explicitly declared) method.
|
||||
// Check method signatures after all types are computed (go.dev/issue/33656).
|
||||
// If we're pre-go1.14 (overlapping embeddings are not permitted), report that
|
||||
// error here as well (even though we could do it eagerly) because it's the same
|
||||
// error message.
|
||||
if check == nil {
|
||||
// check method signatures after all locally embedded interfaces are computed
|
||||
todo = append(todo, m, other.(*Func))
|
||||
break
|
||||
if check != nil {
|
||||
check.later(func() {
|
||||
if !check.allowVersion(m.pkg, atPos(pos), go1_14) || !Identical(m.typ, other.Type()) {
|
||||
check.errorf(atPos(pos), DuplicateDecl, "duplicate method %s", m.name)
|
||||
check.errorf(atPos(mpos[other.(*Func)]), DuplicateDecl, "\tother declaration of %s", m.name) // secondary error, \t indented
|
||||
}
|
||||
}).describef(atPos(pos), "duplicate method check for %s", m.name)
|
||||
}
|
||||
// check != nil
|
||||
check.later(func() {
|
||||
if !check.allowVersion(m.pkg, atPos(pos), go1_14) || !Identical(m.typ, other.Type()) {
|
||||
check.errorf(atPos(pos), DuplicateDecl, "duplicate method %s", m.name)
|
||||
check.errorf(atPos(mpos[other.(*Func)]), DuplicateDecl, "\tother declaration of %s", m.name) // secondary error, \t indented
|
||||
}
|
||||
}).describef(atPos(pos), "duplicate method check for %s", m.name)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -312,15 +304,6 @@ func computeInterfaceTypeSet(check *Checker, pos token.Pos, ityp *Interface) *_T
|
||||
}
|
||||
ityp.embedPos = nil // not needed anymore (errors have been reported)
|
||||
|
||||
// process todo's (this only happens if check == nil)
|
||||
for i := 0; i < len(todo); i += 2 {
|
||||
m := todo[i]
|
||||
other := todo[i+1]
|
||||
if !Identical(m.typ, other.typ) {
|
||||
panic(fmt.Sprintf("%v: duplicate method %s", m.pos, m.name))
|
||||
}
|
||||
}
|
||||
|
||||
ityp.tset.comparable = allComparable
|
||||
if len(allMethods) != 0 {
|
||||
sortMethods(allMethods)
|
||||
|
||||
@@ -55,11 +55,6 @@ const (
|
||||
// the core types, if any, of non-local (unbound) type parameters.
|
||||
enableCoreTypeUnification = true
|
||||
|
||||
// If enableInterfaceInference is set, type inference uses
|
||||
// shared methods for improved type inference involving
|
||||
// interfaces.
|
||||
enableInterfaceInference = true
|
||||
|
||||
// If traceInference is set, unification will print a trace of its operation.
|
||||
// Interpretation of trace:
|
||||
// x ≡ y attempt to unify types x and y
|
||||
@@ -83,15 +78,16 @@ type unifier struct {
|
||||
// that inferring the type for a given type parameter P will
|
||||
// automatically infer the same type for all other parameters
|
||||
// unified (joined) with P.
|
||||
handles map[*TypeParam]*Type
|
||||
depth int // recursion depth during unification
|
||||
handles map[*TypeParam]*Type
|
||||
depth int // recursion depth during unification
|
||||
enableInterfaceInference bool // use shared methods for better inference
|
||||
}
|
||||
|
||||
// newUnifier returns a new unifier initialized with the given type parameter
|
||||
// and corresponding type argument lists. The type argument list may be shorter
|
||||
// than the type parameter list, and it may contain nil types. Matching type
|
||||
// parameters and arguments must have the same index.
|
||||
func newUnifier(tparams []*TypeParam, targs []Type) *unifier {
|
||||
func newUnifier(tparams []*TypeParam, targs []Type, enableInterfaceInference bool) *unifier {
|
||||
assert(len(tparams) >= len(targs))
|
||||
handles := make(map[*TypeParam]*Type, len(tparams))
|
||||
// Allocate all handles up-front: in a correct program, all type parameters
|
||||
@@ -105,7 +101,7 @@ func newUnifier(tparams []*TypeParam, targs []Type) *unifier {
|
||||
}
|
||||
handles[x] = &t
|
||||
}
|
||||
return &unifier{handles, 0}
|
||||
return &unifier{handles, 0, enableInterfaceInference}
|
||||
}
|
||||
|
||||
// unifyMode controls the behavior of the unifier.
|
||||
@@ -341,7 +337,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
|
||||
// we will fail at function instantiation or argument assignment time.
|
||||
//
|
||||
// If we have at least one defined type, there is one in y.
|
||||
if ny, _ := y.(*Named); mode&exact == 0 && ny != nil && isTypeLit(x) && !(enableInterfaceInference && IsInterface(x)) {
|
||||
if ny, _ := y.(*Named); mode&exact == 0 && ny != nil && isTypeLit(x) && !(u.enableInterfaceInference && IsInterface(x)) {
|
||||
if traceInference {
|
||||
u.tracef("%s ≡ under %s", x, ny)
|
||||
}
|
||||
@@ -407,18 +403,40 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
|
||||
// Therefore, we must fail unification (go.dev/issue/60933).
|
||||
return false
|
||||
}
|
||||
// If y is a defined type, make sure we record that type
|
||||
// for type parameter x, which may have until now only
|
||||
// recorded an underlying type (go.dev/issue/43056).
|
||||
// Either both types are interfaces, or neither type is.
|
||||
// If both are interfaces, they have the same methods.
|
||||
// If we have inexact unification and one of x or y is a defined type, select the
|
||||
// defined type. This ensures that in a series of types, all matching against the
|
||||
// same type parameter, we infer a defined type if there is one, independent of
|
||||
// order. Type inference or assignment may fail, which is ok.
|
||||
// Selecting a defined type, if any, ensures that we don't lose the type name;
|
||||
// and since we have inexact unification, a value of equally named or matching
|
||||
// undefined type remains assignable (go.dev/issue/43056).
|
||||
//
|
||||
// Note: Changing the recorded type for a type parameter to
|
||||
// a defined type is only ok when unification is inexact.
|
||||
// But in exact unification, if we have a match, x and y must
|
||||
// be identical, so changing the recorded type for x is a no-op.
|
||||
if yn {
|
||||
u.set(px, y)
|
||||
// Similarly, if we have inexact unification and there are no defined types but
|
||||
// channel types, select a directed channel, if any. This ensures that in a series
|
||||
// of unnamed types, all matching against the same type parameter, we infer the
|
||||
// directed channel if there is one, independent of order.
|
||||
// Selecting a directional channel, if any, ensures that a value of another
|
||||
// inexactly unifying channel type remains assignable (go.dev/issue/62157).
|
||||
//
|
||||
// If we have multiple defined channel types, they are either identical or we
|
||||
// have assignment conflicts, so we can ignore directionality in this case.
|
||||
//
|
||||
// If we have defined and literal channel types, a defined type wins to avoid
|
||||
// order dependencies.
|
||||
if mode&exact == 0 {
|
||||
switch {
|
||||
case xn:
|
||||
// x is a defined type: nothing to do.
|
||||
case yn:
|
||||
// x is not a defined type and y is a defined type: select y.
|
||||
u.set(px, y)
|
||||
default:
|
||||
// Neither x nor y are defined types.
|
||||
if yc, _ := under(y).(*Chan); yc != nil && yc.dir != SendRecv {
|
||||
// y is a directed channel type: select y.
|
||||
u.set(px, y)
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
@@ -439,12 +457,12 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
|
||||
emode |= exact
|
||||
}
|
||||
|
||||
// If EnableInterfaceInference is set and we don't require exact unification,
|
||||
// If u.EnableInterfaceInference is set and we don't require exact unification,
|
||||
// if both types are interfaces, one interface must have a subset of the
|
||||
// methods of the other and corresponding method signatures must unify.
|
||||
// If only one type is an interface, all its methods must be present in the
|
||||
// other type and corresponding method signatures must unify.
|
||||
if enableInterfaceInference && mode&exact == 0 {
|
||||
if u.enableInterfaceInference && mode&exact == 0 {
|
||||
// One or both interfaces may be defined types.
|
||||
// Look under the name, but not under type parameters (go.dev/issue/60564).
|
||||
xi := asInterface(x)
|
||||
@@ -507,7 +525,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
|
||||
}
|
||||
// All xmethods must exist in ymethods and corresponding signatures must unify.
|
||||
for _, xm := range xmethods {
|
||||
if ym := ymap[xm.Id()]; ym == nil || !u.nify(xm.typ, ym.typ, emode, p) {
|
||||
if ym := ymap[xm.Id()]; ym == nil || !u.nify(xm.typ, ym.typ, exact, p) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
@@ -528,7 +546,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
|
||||
xmethods := xi.typeSet().methods
|
||||
for _, xm := range xmethods {
|
||||
obj, _, _ := LookupFieldOrMethod(y, false, xm.pkg, xm.name)
|
||||
if ym, _ := obj.(*Func); ym == nil || !u.nify(xm.typ, ym.typ, emode, p) {
|
||||
if ym, _ := obj.(*Func); ym == nil || !u.nify(xm.typ, ym.typ, exact, p) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
@@ -634,7 +652,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
|
||||
}
|
||||
|
||||
case *Interface:
|
||||
assert(!enableInterfaceInference || mode&exact != 0) // handled before this switch
|
||||
assert(!u.enableInterfaceInference || mode&exact != 0) // handled before this switch
|
||||
|
||||
// Two interface types unify if they have the same set of methods with
|
||||
// the same names, and corresponding function types unify.
|
||||
@@ -687,7 +705,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
|
||||
}
|
||||
for i, f := range a {
|
||||
g := b[i]
|
||||
if f.Id() != g.Id() || !u.nify(f.typ, g.typ, emode, q) {
|
||||
if f.Id() != g.Id() || !u.nify(f.typ, g.typ, exact, q) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
@@ -128,6 +128,10 @@ const (
|
||||
stateJSBlockCmt
|
||||
// stateJSLineCmt occurs inside a JavaScript // line comment.
|
||||
stateJSLineCmt
|
||||
// stateJSHTMLOpenCmt occurs inside a JavaScript <!-- HTML-like comment.
|
||||
stateJSHTMLOpenCmt
|
||||
// stateJSHTMLCloseCmt occurs inside a JavaScript --> HTML-like comment.
|
||||
stateJSHTMLCloseCmt
|
||||
// stateCSS occurs inside a <style> element or style attribute.
|
||||
stateCSS
|
||||
// stateCSSDqStr occurs inside a CSS double quoted string.
|
||||
@@ -155,7 +159,7 @@ const (
|
||||
// authors & maintainers, not for end-users or machines.
|
||||
func isComment(s state) bool {
|
||||
switch s {
|
||||
case stateHTMLCmt, stateJSBlockCmt, stateJSLineCmt, stateCSSBlockCmt, stateCSSLineCmt:
|
||||
case stateHTMLCmt, stateJSBlockCmt, stateJSLineCmt, stateJSHTMLOpenCmt, stateJSHTMLCloseCmt, stateCSSBlockCmt, stateCSSLineCmt:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
@@ -170,6 +174,20 @@ func isInTag(s state) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// isInScriptLiteral returns true if s is one of the literal states within a
|
||||
// <script> tag, and as such occurrences of "<!--", "<script", and "</script"
|
||||
// need to be treated specially.
|
||||
func isInScriptLiteral(s state) bool {
|
||||
// Ignore the comment states (stateJSBlockCmt, stateJSLineCmt,
|
||||
// stateJSHTMLOpenCmt, stateJSHTMLCloseCmt) because their content is already
|
||||
// omitted from the output.
|
||||
switch s {
|
||||
case stateJSDqStr, stateJSSqStr, stateJSBqStr, stateJSRegexp:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// delim is the delimiter that will end the current HTML attribute.
|
||||
type delim uint8
|
||||
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"html"
|
||||
"internal/godebug"
|
||||
"io"
|
||||
"regexp"
|
||||
"text/template"
|
||||
"text/template/parse"
|
||||
)
|
||||
@@ -729,6 +730,26 @@ var delimEnds = [...]string{
|
||||
delimSpaceOrTagEnd: " \t\n\f\r>",
|
||||
}
|
||||
|
||||
var (
|
||||
// Per WHATWG HTML specification, section 4.12.1.3, there are extremely
|
||||
// complicated rules for how to handle the set of opening tags <!--,
|
||||
// <script, and </script when they appear in JS literals (i.e. strings,
|
||||
// regexes, and comments). The specification suggests a simple solution,
|
||||
// rather than implementing the arcane ABNF, which involves simply escaping
|
||||
// the opening bracket with \x3C. We use the below regex for this, since it
|
||||
// makes doing the case-insensitive find-replace much simpler.
|
||||
specialScriptTagRE = regexp.MustCompile("(?i)<(script|/script|!--)")
|
||||
specialScriptTagReplacement = []byte("\\x3C$1")
|
||||
)
|
||||
|
||||
func containsSpecialScriptTag(s []byte) bool {
|
||||
return specialScriptTagRE.Match(s)
|
||||
}
|
||||
|
||||
func escapeSpecialScriptTags(s []byte) []byte {
|
||||
return specialScriptTagRE.ReplaceAll(s, specialScriptTagReplacement)
|
||||
}
|
||||
|
||||
var doctypeBytes = []byte("<!DOCTYPE")
|
||||
|
||||
// escapeText escapes a text template node.
|
||||
@@ -777,13 +798,21 @@ func (e *escaper) escapeText(c context, n *parse.TextNode) context {
|
||||
if c.state != c1.state && isComment(c1.state) && c1.delim == delimNone {
|
||||
// Preserve the portion between written and the comment start.
|
||||
cs := i1 - 2
|
||||
if c1.state == stateHTMLCmt {
|
||||
if c1.state == stateHTMLCmt || c1.state == stateJSHTMLOpenCmt {
|
||||
// "<!--" instead of "/*" or "//"
|
||||
cs -= 2
|
||||
} else if c1.state == stateJSHTMLCloseCmt {
|
||||
// "-->" instead of "/*" or "//"
|
||||
cs -= 1
|
||||
}
|
||||
b.Write(s[written:cs])
|
||||
written = i1
|
||||
}
|
||||
if isInScriptLiteral(c.state) && containsSpecialScriptTag(s[i:i1]) {
|
||||
b.Write(s[written:i])
|
||||
b.Write(escapeSpecialScriptTags(s[i:i1]))
|
||||
written = i1
|
||||
}
|
||||
if i == i1 && c.state == c1.state {
|
||||
panic(fmt.Sprintf("infinite loop from %v to %v on %q..%q", c, c1, s[:i], s[i:]))
|
||||
}
|
||||
|
||||
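The escaper changes above rewrite `<script`, `</script`, and `<!--` inside JavaScript string, regexp, and comment contexts as `\x3C...` sequences so they cannot terminate the enclosing <script> element. A hedged sketch of the observable effect, with input modeled on the new test cases that follow; the exact rendered bytes are what those tests assert, not independently derived here:

```go
package main

import (
	"html/template"
	"os"
)

func main() {
	// The JS string below contains "<!--" and "</script"; with this change the
	// escaper emits them as \x3C!-- and \x3C/script so the browser's HTML
	// parser never sees a premature comment opener or end tag.
	const page = `<script>var a = "<!-- </script"; document.title = {{.}};</script>`
	t := template.Must(template.New("page").Parse(page))
	if err := t.Execute(os.Stdout, "demo"); err != nil {
		panic(err)
	}
}
```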
@@ -503,6 +503,31 @@ func TestEscape(t *testing.T) {
|
||||
"<script>var a/*b*///c\nd</script>",
|
||||
"<script>var a \nd</script>",
|
||||
},
|
||||
{
|
||||
"JS HTML-like comments",
|
||||
"<script>before <!-- beep\nbetween\nbefore-->boop\n</script>",
|
||||
"<script>before \nbetween\nbefore\n</script>",
|
||||
},
|
||||
{
|
||||
"JS hashbang comment",
|
||||
"<script>#! beep\n</script>",
|
||||
"<script>\n</script>",
|
||||
},
|
||||
{
|
||||
"Special tags in <script> string literals",
|
||||
`<script>var a = "asd < 123 <!-- 456 < fgh <script jkl < 789 </script"</script>`,
|
||||
`<script>var a = "asd < 123 \x3C!-- 456 < fgh \x3Cscript jkl < 789 \x3C/script"</script>`,
|
||||
},
|
||||
{
|
||||
"Special tags in <script> string literals (mixed case)",
|
||||
`<script>var a = "<!-- <ScripT </ScripT"</script>`,
|
||||
`<script>var a = "\x3C!-- \x3CScripT \x3C/ScripT"</script>`,
|
||||
},
|
||||
{
|
||||
"Special tags in <script> regex literals (mixed case)",
|
||||
`<script>var a = /<!-- <ScripT </ScripT/</script>`,
|
||||
`<script>var a = /\x3C!-- \x3CScripT \x3C/ScripT/</script>`,
|
||||
},
|
||||
{
|
||||
"CSS comments",
|
||||
"<style>p// paragraph\n" +
|
||||
@@ -1523,8 +1548,38 @@ func TestEscapeText(t *testing.T) {
|
||||
context{state: stateJS, element: elementScript},
|
||||
},
|
||||
{
|
||||
// <script and </script tags are escaped, so </script> should not
|
||||
// cause us to exit the JS state.
|
||||
`<script>document.write("<script>alert(1)</script>");`,
|
||||
context{state: stateText},
|
||||
context{state: stateJS, element: elementScript},
|
||||
},
|
||||
{
|
||||
`<script>document.write("<script>`,
|
||||
context{state: stateJSDqStr, element: elementScript},
|
||||
},
|
||||
{
|
||||
`<script>document.write("<script>alert(1)</script>`,
|
||||
context{state: stateJSDqStr, element: elementScript},
|
||||
},
|
||||
{
|
||||
`<script>document.write("<script>alert(1)<!--`,
|
||||
context{state: stateJSDqStr, element: elementScript},
|
||||
},
|
||||
{
|
||||
`<script>document.write("<script>alert(1)</Script>");`,
|
||||
context{state: stateJS, element: elementScript},
|
||||
},
|
||||
{
|
||||
`<script>document.write("<!--");`,
|
||||
context{state: stateJS, element: elementScript},
|
||||
},
|
||||
{
|
||||
`<script>let a = /</script`,
|
||||
context{state: stateJSRegexp, element: elementScript},
|
||||
},
|
||||
{
|
||||
`<script>let a = /</script/`,
|
||||
context{state: stateJS, element: elementScript, jsCtx: jsCtxDivOp},
|
||||
},
|
||||
{
|
||||
`<script type="text/template">`,
|
||||
|
||||
@@ -25,21 +25,23 @@ func _() {
|
||||
_ = x[stateJSRegexp-14]
|
||||
_ = x[stateJSBlockCmt-15]
|
||||
_ = x[stateJSLineCmt-16]
|
||||
_ = x[stateCSS-17]
|
||||
_ = x[stateCSSDqStr-18]
|
||||
_ = x[stateCSSSqStr-19]
|
||||
_ = x[stateCSSDqURL-20]
|
||||
_ = x[stateCSSSqURL-21]
|
||||
_ = x[stateCSSURL-22]
|
||||
_ = x[stateCSSBlockCmt-23]
|
||||
_ = x[stateCSSLineCmt-24]
|
||||
_ = x[stateError-25]
|
||||
_ = x[stateDead-26]
|
||||
_ = x[stateJSHTMLOpenCmt-17]
|
||||
_ = x[stateJSHTMLCloseCmt-18]
|
||||
_ = x[stateCSS-19]
|
||||
_ = x[stateCSSDqStr-20]
|
||||
_ = x[stateCSSSqStr-21]
|
||||
_ = x[stateCSSDqURL-22]
|
||||
_ = x[stateCSSSqURL-23]
|
||||
_ = x[stateCSSURL-24]
|
||||
_ = x[stateCSSBlockCmt-25]
|
||||
_ = x[stateCSSLineCmt-26]
|
||||
_ = x[stateError-27]
|
||||
_ = x[stateDead-28]
|
||||
}
|
||||
|
||||
const _state_name = "stateTextstateTagstateAttrNamestateAfterNamestateBeforeValuestateHTMLCmtstateRCDATAstateAttrstateURLstateSrcsetstateJSstateJSDqStrstateJSSqStrstateJSBqStrstateJSRegexpstateJSBlockCmtstateJSLineCmtstateCSSstateCSSDqStrstateCSSSqStrstateCSSDqURLstateCSSSqURLstateCSSURLstateCSSBlockCmtstateCSSLineCmtstateErrorstateDead"
|
||||
const _state_name = "stateTextstateTagstateAttrNamestateAfterNamestateBeforeValuestateHTMLCmtstateRCDATAstateAttrstateURLstateSrcsetstateJSstateJSDqStrstateJSSqStrstateJSBqStrstateJSRegexpstateJSBlockCmtstateJSLineCmtstateJSHTMLOpenCmtstateJSHTMLCloseCmtstateCSSstateCSSDqStrstateCSSSqStrstateCSSDqURLstateCSSSqURLstateCSSURLstateCSSBlockCmtstateCSSLineCmtstateErrorstateDead"
|
||||
|
||||
var _state_index = [...]uint16{0, 9, 17, 30, 44, 60, 72, 83, 92, 100, 111, 118, 130, 142, 154, 167, 182, 196, 204, 217, 230, 243, 256, 267, 283, 298, 308, 317}
|
||||
var _state_index = [...]uint16{0, 9, 17, 30, 44, 60, 72, 83, 92, 100, 111, 118, 130, 142, 154, 167, 182, 196, 214, 233, 241, 254, 267, 280, 293, 304, 320, 335, 345, 354}
|
||||
|
||||
func (i state) String() string {
|
||||
if i >= state(len(_state_index)-1) {
|
||||
|
||||
@@ -14,32 +14,34 @@ import (
|
||||
// the updated context and the number of bytes consumed from the front of the
|
||||
// input.
|
||||
var transitionFunc = [...]func(context, []byte) (context, int){
|
||||
stateText: tText,
|
||||
stateTag: tTag,
|
||||
stateAttrName: tAttrName,
|
||||
stateAfterName: tAfterName,
|
||||
stateBeforeValue: tBeforeValue,
|
||||
stateHTMLCmt: tHTMLCmt,
|
||||
stateRCDATA: tSpecialTagEnd,
|
||||
stateAttr: tAttr,
|
||||
stateURL: tURL,
|
||||
stateSrcset: tURL,
|
||||
stateJS: tJS,
|
||||
stateJSDqStr: tJSDelimited,
|
||||
stateJSSqStr: tJSDelimited,
|
||||
stateJSBqStr: tJSDelimited,
|
||||
stateJSRegexp: tJSDelimited,
|
||||
stateJSBlockCmt: tBlockCmt,
|
||||
stateJSLineCmt: tLineCmt,
|
||||
stateCSS: tCSS,
|
||||
stateCSSDqStr: tCSSStr,
|
||||
stateCSSSqStr: tCSSStr,
|
||||
stateCSSDqURL: tCSSStr,
|
||||
stateCSSSqURL: tCSSStr,
|
||||
stateCSSURL: tCSSStr,
|
||||
stateCSSBlockCmt: tBlockCmt,
|
||||
stateCSSLineCmt: tLineCmt,
|
||||
stateError: tError,
|
||||
stateText: tText,
|
||||
stateTag: tTag,
|
||||
stateAttrName: tAttrName,
|
||||
stateAfterName: tAfterName,
|
||||
stateBeforeValue: tBeforeValue,
|
||||
stateHTMLCmt: tHTMLCmt,
|
||||
stateRCDATA: tSpecialTagEnd,
|
||||
stateAttr: tAttr,
|
||||
stateURL: tURL,
|
||||
stateSrcset: tURL,
|
||||
stateJS: tJS,
|
||||
stateJSDqStr: tJSDelimited,
|
||||
stateJSSqStr: tJSDelimited,
|
||||
stateJSBqStr: tJSDelimited,
|
||||
stateJSRegexp: tJSDelimited,
|
||||
stateJSBlockCmt: tBlockCmt,
|
||||
stateJSLineCmt: tLineCmt,
|
||||
stateJSHTMLOpenCmt: tLineCmt,
|
||||
stateJSHTMLCloseCmt: tLineCmt,
|
||||
stateCSS: tCSS,
|
||||
stateCSSDqStr: tCSSStr,
|
||||
stateCSSSqStr: tCSSStr,
|
||||
stateCSSDqURL: tCSSStr,
|
||||
stateCSSSqURL: tCSSStr,
|
||||
stateCSSURL: tCSSStr,
|
||||
stateCSSBlockCmt: tBlockCmt,
|
||||
stateCSSLineCmt: tLineCmt,
|
||||
stateError: tError,
|
||||
}
|
||||
|
||||
var commentStart = []byte("<!--")
|
||||
@@ -212,6 +214,11 @@ var (
|
||||
// element states.
|
||||
func tSpecialTagEnd(c context, s []byte) (context, int) {
|
||||
if c.element != elementNone {
|
||||
// script end tags ("</script") within script literals are ignored, so that
|
||||
// we can properly escape them.
|
||||
if c.element == elementScript && (isInScriptLiteral(c.state) || isComment(c.state)) {
|
||||
return c, len(s)
|
||||
}
|
||||
if i := indexTagEnd(s, specialTagEndMarkers[c.element]); i != -1 {
|
||||
return context{}, i
|
||||
}
|
||||
@@ -263,7 +270,7 @@ func tURL(c context, s []byte) (context, int) {
|
||||
|
||||
// tJS is the context transition function for the JS state.
|
||||
func tJS(c context, s []byte) (context, int) {
|
||||
i := bytes.IndexAny(s, "\"`'/")
|
||||
i := bytes.IndexAny(s, "\"`'/<-#")
|
||||
if i == -1 {
|
||||
// Entire input is non string, comment, regexp tokens.
|
||||
c.jsCtx = nextJSCtx(s, c.jsCtx)
|
||||
@@ -293,6 +300,26 @@ func tJS(c context, s []byte) (context, int) {
|
||||
err: errorf(ErrSlashAmbig, nil, 0, "'/' could start a division or regexp: %.32q", s[i:]),
|
||||
}, len(s)
|
||||
}
|
||||
// ECMAScript supports HTML style comments for legacy reasons, see Appendix
|
||||
// B.1.1 "HTML-like Comments". The handling of these comments is somewhat
|
||||
// confusing. Multi-line comments are not supported, i.e. anything on lines
|
||||
// between the opening and closing tokens is not considered a comment, but
|
||||
// anything following the opening or closing token, on the same line, is
|
||||
// ignored. As such we simply treat any line prefixed with "<!--" or "-->"
|
||||
// as if it were actually prefixed with "//" and move on.
|
||||
case '<':
|
||||
if i+3 < len(s) && bytes.Equal(commentStart, s[i:i+4]) {
|
||||
c.state, i = stateJSHTMLOpenCmt, i+3
|
||||
}
|
||||
case '-':
|
||||
if i+2 < len(s) && bytes.Equal(commentEnd, s[i:i+3]) {
|
||||
c.state, i = stateJSHTMLCloseCmt, i+2
|
||||
}
|
||||
// ECMAScript also supports "hashbang" comment lines, see Section 12.5.
|
||||
case '#':
|
||||
if i+1 < len(s) && s[i+1] == '!' {
|
||||
c.state, i = stateJSLineCmt, i+1
|
||||
}
|
||||
default:
|
||||
panic("unreachable")
|
||||
}
|
||||
@@ -331,6 +358,16 @@ func tJSDelimited(c context, s []byte) (context, int) {
|
||||
inCharset = true
|
||||
case ']':
|
||||
inCharset = false
|
||||
case '/':
|
||||
// If "</script" appears in a regex literal, the '/' should not
|
||||
// close the regex literal, and it will later be escaped to
|
||||
// "\x3C/script" in escapeText.
|
||||
if i > 0 && i+7 <= len(s) && bytes.Compare(bytes.ToLower(s[i-1:i+7]), []byte("</script")) == 0 {
|
||||
i++
|
||||
} else if !inCharset {
|
||||
c.state, c.jsCtx = stateJS, jsCtxDivOp
|
||||
return c, i + 1
|
||||
}
|
||||
default:
|
||||
// end delimiter
|
||||
if !inCharset {
|
||||
@@ -372,12 +409,12 @@ func tBlockCmt(c context, s []byte) (context, int) {
|
||||
return c, i + 2
|
||||
}
|
||||
|
||||
// tLineCmt is the context transition function for //comment states.
|
||||
// tLineCmt is the context transition function for //comment states, and the JS HTML-like comment state.
|
||||
func tLineCmt(c context, s []byte) (context, int) {
|
||||
var lineTerminators string
|
||||
var endState state
|
||||
switch c.state {
|
||||
case stateJSLineCmt:
|
||||
case stateJSLineCmt, stateJSHTMLOpenCmt, stateJSHTMLCloseCmt:
|
||||
lineTerminators, endState = "\n\r\u2028\u2029", stateJS
|
||||
case stateCSSLineCmt:
|
||||
lineTerminators, endState = "\n\f\r", stateCSS
|
||||
|
||||
@@ -42,6 +42,7 @@ var All = []Info{
	{Name: "panicnil", Package: "runtime", Changed: 21, Old: "1"},
	{Name: "randautoseed", Package: "math/rand"},
	{Name: "tarinsecurepath", Package: "archive/tar"},
	{Name: "tlsmaxrsasize", Package: "crypto/tls"},
	{Name: "x509sha1", Package: "crypto/x509"},
	{Name: "x509usefallbackroots", Package: "crypto/x509"},
	{Name: "zipinsecurepath", Package: "archive/zip"},
|
||||
|
||||
@@ -375,25 +375,5 @@ func ErrorLoadingGetTempPath2() error {
|
||||
|
||||
//sys RtlGenRandom(buf []byte) (err error) = advapi32.SystemFunction036
|
||||
|
||||
type FILE_ID_BOTH_DIR_INFO struct {
|
||||
NextEntryOffset uint32
|
||||
FileIndex uint32
|
||||
CreationTime syscall.Filetime
|
||||
LastAccessTime syscall.Filetime
|
||||
LastWriteTime syscall.Filetime
|
||||
ChangeTime syscall.Filetime
|
||||
EndOfFile uint64
|
||||
AllocationSize uint64
|
||||
FileAttributes uint32
|
||||
FileNameLength uint32
|
||||
EaSize uint32
|
||||
ShortNameLength uint32
|
||||
ShortName [12]uint16
|
||||
FileID uint64
|
||||
FileName [1]uint16
|
||||
}
|
||||
|
||||
//sys GetVolumeInformationByHandle(file syscall.Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) = GetVolumeInformationByHandleW
|
||||
|
||||
//sys RtlLookupFunctionEntry(pc uintptr, baseAddress *uintptr, table *byte) (ret uintptr) = kernel32.RtlLookupFunctionEntry
|
||||
//sys RtlVirtualUnwind(handlerType uint32, baseAddress uintptr, pc uintptr, entry uintptr, ctxt uintptr, data *uintptr, frame *uintptr, ctxptrs *byte) (ret uintptr) = kernel32.RtlVirtualUnwind
|
||||
|
||||
@@ -45,43 +45,42 @@ var (
moduserenv = syscall.NewLazyDLL(sysdll.Add("userenv.dll"))
modws2_32 = syscall.NewLazyDLL(sysdll.Add("ws2_32.dll"))

procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges")
procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx")
procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf")
procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken")
procRevertToSelf = modadvapi32.NewProc("RevertToSelf")
procSetTokenInformation = modadvapi32.NewProc("SetTokenInformation")
procSystemFunction036 = modadvapi32.NewProc("SystemFunction036")
procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses")
procCreateEventW = modkernel32.NewProc("CreateEventW")
procGetACP = modkernel32.NewProc("GetACP")
procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW")
procGetConsoleCP = modkernel32.NewProc("GetConsoleCP")
procGetCurrentThread = modkernel32.NewProc("GetCurrentThread")
procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx")
procGetFinalPathNameByHandleW = modkernel32.NewProc("GetFinalPathNameByHandleW")
procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW")
procGetTempPath2W = modkernel32.NewProc("GetTempPath2W")
procGetVolumeInformationByHandleW = modkernel32.NewProc("GetVolumeInformationByHandleW")
procLockFileEx = modkernel32.NewProc("LockFileEx")
procModule32FirstW = modkernel32.NewProc("Module32FirstW")
procModule32NextW = modkernel32.NewProc("Module32NextW")
procMoveFileExW = modkernel32.NewProc("MoveFileExW")
procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar")
procRtlLookupFunctionEntry = modkernel32.NewProc("RtlLookupFunctionEntry")
procRtlVirtualUnwind = modkernel32.NewProc("RtlVirtualUnwind")
procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle")
procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
procVirtualQuery = modkernel32.NewProc("VirtualQuery")
procNetShareAdd = modnetapi32.NewProc("NetShareAdd")
procNetShareDel = modnetapi32.NewProc("NetShareDel")
procNetUserGetLocalGroups = modnetapi32.NewProc("NetUserGetLocalGroups")
procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo")
procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock")
procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock")
procGetProfilesDirectoryW = moduserenv.NewProc("GetProfilesDirectoryW")
procWSASocketW = modws2_32.NewProc("WSASocketW")

procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges")
procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx")
procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf")
procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken")
procRevertToSelf = modadvapi32.NewProc("RevertToSelf")
procSetTokenInformation = modadvapi32.NewProc("SetTokenInformation")
procSystemFunction036 = modadvapi32.NewProc("SystemFunction036")
procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses")
procCreateEventW = modkernel32.NewProc("CreateEventW")
procGetACP = modkernel32.NewProc("GetACP")
procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW")
procGetConsoleCP = modkernel32.NewProc("GetConsoleCP")
procGetCurrentThread = modkernel32.NewProc("GetCurrentThread")
procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx")
procGetFinalPathNameByHandleW = modkernel32.NewProc("GetFinalPathNameByHandleW")
procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW")
procGetTempPath2W = modkernel32.NewProc("GetTempPath2W")
procLockFileEx = modkernel32.NewProc("LockFileEx")
procModule32FirstW = modkernel32.NewProc("Module32FirstW")
procModule32NextW = modkernel32.NewProc("Module32NextW")
procMoveFileExW = modkernel32.NewProc("MoveFileExW")
procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar")
procRtlLookupFunctionEntry = modkernel32.NewProc("RtlLookupFunctionEntry")
procRtlVirtualUnwind = modkernel32.NewProc("RtlVirtualUnwind")
procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle")
procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
procVirtualQuery = modkernel32.NewProc("VirtualQuery")
procNetShareAdd = modnetapi32.NewProc("NetShareAdd")
procNetShareDel = modnetapi32.NewProc("NetShareDel")
procNetUserGetLocalGroups = modnetapi32.NewProc("NetUserGetLocalGroups")
procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo")
procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock")
procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock")
procGetProfilesDirectoryW = moduserenv.NewProc("GetProfilesDirectoryW")
procWSASocketW = modws2_32.NewProc("WSASocketW")
)

func adjustTokenPrivileges(token syscall.Token, disableAllPrivileges bool, newstate *TOKEN_PRIVILEGES, buflen uint32, prevstate *TOKEN_PRIVILEGES, returnlen *uint32) (ret uint32, err error) {
@@ -242,14 +241,6 @@ func GetTempPath2(buflen uint32, buf *uint16) (n uint32, err error) {
return
}

func GetVolumeInformationByHandle(file syscall.Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) {
r1, _, e1 := syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0)
if r1 == 0 {
err = errnoErr(e1)
}
return
}

func LockFileEx(file syscall.Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *syscall.Overlapped) (err error) {
r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
src/internal/types/testdata/fixedbugs/issue61486.go (vendored, new file)
@@ -0,0 +1,9 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package p

func _(s uint) {
	_ = min(1 << s)
}
src/internal/types/testdata/fixedbugs/issue61879.go (vendored, new file)
@@ -0,0 +1,57 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package p

import "fmt"

type Interface[T any] interface {
	m(Interface[T])
}

func f[S []Interface[T], T any](S) {}

func _() {
	var s []Interface[int]
	f(s) // panic here
}

// Larger example from issue

type InterfaceA[T comparable] interface {
	setData(string) InterfaceA[T]
}

type ImplA[T comparable] struct {
	data string
	args []any
}

func NewInterfaceA[T comparable](args ...any) InterfaceA[T] {
	return &ImplA[T]{
		data: fmt.Sprintf("%v", args...),
		args: args,
	}
}

func (k *ImplA[T]) setData(data string) InterfaceA[T] {
	k.data = data
	return k
}

func Foo[M ~map[InterfaceA[T]]V, T comparable, V any](m M) {
	// DO SOMETHING HERE
	return
}

func Bar() {
	keys := make([]InterfaceA[int], 0, 10)
	m := make(map[InterfaceA[int]]int)
	for i := 0; i < 10; i++ {
		keys = append(keys, NewInterfaceA[int](i))
		m[keys[i]] = i
	}

	Foo(m) // panic here
}
src/internal/types/testdata/fixedbugs/issue61903.go (vendored, new file)
@@ -0,0 +1,20 @@
// -lang=go1.20

// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package p

type T[P any] interface{}

func f1[P any](T[P]) {}
func f2[P any](T[P], P) {}

func _() {
	var t T[int]
	f1(t)

	var s string
	f2(t, s /* ERROR "type string of s does not match inferred type int for P" */)
}
src/internal/types/testdata/fixedbugs/issue62157.go (vendored, new file)
@@ -0,0 +1,128 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package p

func f[T any](...T) T { var x T; return x }

// Test case 1

func _() {
	var a chan string
	var b <-chan string
	f(a, b)
	f(b, a)
}

// Test case 2

type F[T any] func(T) bool

func g[T any](T) F[<-chan T] { return nil }

func f1[T any](T, F[T]) {}
func f2[T any](F[T], T) {}

func _() {
	var ch chan string
	f1(ch, g(""))
	f2(g(""), ch)
}

// Test case 3: named and directional types combined

func _() {
	type namedA chan int
	type namedB chan<- int

	var a chan int
	var A namedA
	var b chan<- int
	var B namedB

	// Defined types win over channel types irrespective of channel direction.
	f(A, b /* ERROR "cannot use b (variable of type chan<- int) as namedA value in argument to f" */)
	f(b /* ERROR "cannot use b (variable of type chan<- int) as namedA value in argument to f" */, A)

	f(a, b /* ERROR "cannot use b (variable of type chan<- int) as namedA value in argument to f" */, A)
	f(a, A, b /* ERROR "cannot use b (variable of type chan<- int) as namedA value in argument to f" */)
	f(b /* ERROR "cannot use b (variable of type chan<- int) as namedA value in argument to f" */, A, a)
	f(b /* ERROR "cannot use b (variable of type chan<- int) as namedA value in argument to f" */, a, A)
	f(A, a, b /* ERROR "cannot use b (variable of type chan<- int) as namedA value in argument to f" */)
	f(A, b /* ERROR "cannot use b (variable of type chan<- int) as namedA value in argument to f" */, a)

	// Unnamed directed channels win over bidirectional channels.
	b = f(a, b)
	b = f(b, a)

	// Defined directed channels win over defined bidirectional channels.
	A = f(A, a)
	A = f(a, A)
	B = f(B, b)
	B = f(b, B)

	f(a, b, B)
	f(a, B, b)
	f(b, B, a)
	f(b, a, B)
	f(B, a, b)
	f(B, b, a)

	// Differently named channel types conflict irrespective of channel direction.
	f(A, B /* ERROR "type namedB of B does not match inferred type namedA for T" */)
	f(B, A /* ERROR "type namedA of A does not match inferred type namedB for T" */)

	// Ensure that all combinations of directional and
	// bidirectional channels with a named directional
	// channel lead to the correct (named) directional
	// channel.
	B = f(a, b)
	B = f(a, B)
	B = f(b, a)
	B = f(B, a)

	B = f(a, b, B)
	B = f(a, B, b)
	B = f(b, B, a)
	B = f(b, a, B)
	B = f(B, a, b)
	B = f(B, b, a)

	// verify type error
	A = f /* ERROR "cannot use f(B, b, a) (value of type namedB) as namedA value in assignment" */ (B, b, a)
}

// Test case 4: some more combinations

func _() {
	type A chan int
	type B chan int
	type C = chan int
	type D = chan<- int

	var a A
	var b B
	var c C
	var d D

	f(a, b /* ERROR "type B of b does not match inferred type A for T" */, c)
	f(c, a, b /* ERROR "type B of b does not match inferred type A for T" */)
	f(a, b /* ERROR "type B of b does not match inferred type A for T" */, d)
	f(d, a, b /* ERROR "type B of b does not match inferred type A for T" */)
}

// Simplified test case from issue

type Matcher[T any] func(T) bool

func Produces[T any](T) Matcher[<-chan T] { return nil }

func Assert1[T any](Matcher[T], T) {}
func Assert2[T any](T, Matcher[T]) {}

func _() {
	var ch chan string
	Assert1(Produces(""), ch)
	Assert2(ch, Produces(""))
}
@@ -23,7 +23,6 @@ type Level int
// First, we wanted the default level to be Info, Since Levels are ints, Info is
// the default value for int, zero.
//

// Second, we wanted to make it easy to use levels to specify logger verbosity.
// Since a larger level means a more severe event, a logger that accepts events
// with smaller (or more negative) level means a more verbose logger. Logger
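As background for the hunk above: a handler whose configured level is lower accepts more events and is therefore more verbose. A minimal illustrative sketch, not part of this diff, using the standard log/slog API:

package main

import (
	"log/slog"
	"os"
)

func main() {
	// A handler that accepts events at LevelDebug and above is more
	// verbose than the default, which only accepts LevelInfo and above.
	verbose := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{
		Level: slog.LevelDebug,
	}))
	verbose.Debug("shown because the handler level is Debug")

	quiet := slog.New(slog.NewTextHandler(os.Stderr, nil))
	quiet.Debug("dropped: below the default Info level")
}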
@@ -5,34 +5,6 @@
// Package maps defines various functions useful with maps of any type.
package maps

import "unsafe"

// keys is implemented in the runtime package.
//
//go:noescape
func keys(m any, slice unsafe.Pointer)

// Keys returns the keys of the map m.
// The keys will be in an indeterminate order.
func Keys[M ~map[K]V, K comparable, V any](m M) []K {
r := make([]K, 0, len(m))
keys(m, unsafe.Pointer(&r))
return r
}

// values is implemented in the runtime package.
//
//go:noescape
func values(m any, slice unsafe.Pointer)

// Values returns the values of the map m.
// The values will be in an indeterminate order.
func Values[M ~map[K]V, K comparable, V any](m M) []V {
r := make([]V, 0, len(m))
values(m, unsafe.Pointer(&r))
return r
}

// Equal reports whether two maps contain the same key/value pairs.
// Values are compared using ==.
func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool {
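For readers tracking this removal: callers that relied on maps.Keys can get the same result with a plain range loop. The sketch below is illustrative only and not part of the diff; keysOf is a hypothetical helper name.

package main

import "fmt"

// keysOf mirrors what the removed maps.Keys did for callers: it copies the
// keys of m into a new slice, in indeterminate order.
func keysOf[M ~map[K]V, K comparable, V any](m M) []K {
	r := make([]K, 0, len(m))
	for k := range m {
		r = append(r, k)
	}
	return r
}

func main() {
	m := map[string]int{"a": 1, "b": 2}
	fmt.Println(len(keysOf(m))) // 2; key order is not specified
}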
@@ -6,87 +6,13 @@ package maps

import (
"math"
"slices"
"sort"
"strconv"
"testing"
"unsafe"
)

var m1 = map[int]int{1: 2, 2: 4, 4: 8, 8: 16}
var m2 = map[int]string{1: "2", 2: "4", 4: "8", 8: "16"}

func keysForBenchmarking[M ~map[K]V, K comparable, V any](m M, s []K) {
keys(m, unsafe.Pointer(&s))
}

func TestKeys(t *testing.T) {
want := []int{1, 2, 4, 8}

got1 := Keys(m1)
sort.Ints(got1)
if !slices.Equal(got1, want) {
t.Errorf("Keys(%v) = %v, want %v", m1, got1, want)
}

got2 := Keys(m2)
sort.Ints(got2)
if !slices.Equal(got2, want) {
t.Errorf("Keys(%v) = %v, want %v", m2, got2, want)
}

// test for oldbucket code path
// We grow from 128 to 256 buckets at size 832 (6.5 * 128).
// Then we have to evacuate 128 buckets, which means we'll be done evacuation at 832+128=960 elements inserted.
// so 840 is a good number to test for oldbucket code path.
var want3 []int
var m = make(map[int]int)
for i := 0; i < 840; i++ {
want3 = append(want3, i)
m[i] = i * i
}

got3 := Keys(m)
sort.Ints(got3)
if !slices.Equal(got3, want3) {
t.Errorf("Keys(%v) = %v, want %v", m, got3, want3)
}
}

func valuesForBenchmarking[M ~map[K]V, K comparable, V any](m M, s []V) {
values(m, unsafe.Pointer(&s))
}

func TestValues(t *testing.T) {
got1 := Values(m1)
want1 := []int{2, 4, 8, 16}
sort.Ints(got1)
if !slices.Equal(got1, want1) {
t.Errorf("Values(%v) = %v, want %v", m1, got1, want1)
}

got2 := Values(m2)
want2 := []string{"16", "2", "4", "8"}
sort.Strings(got2)
if !slices.Equal(got2, want2) {
t.Errorf("Values(%v) = %v, want %v", m2, got2, want2)
}

//test for oldbucket code path
var want3 []int
var m = make(map[int]int)
for i := 0; i < 840; i++ {
want3 = append(want3, i*i)
m[i] = i * i
}

got3 := Values(m)
sort.Ints(got3)
if !slices.Equal(got3, want3) {
t.Errorf("Values(%v) = %v, want %v", m, got3, want3)
}
}

func TestEqual(t *testing.T) {
if !Equal(m1, m1) {
t.Errorf("Equal(%v, %v) = false, want true", m1, m1)
@@ -256,29 +182,3 @@ func TestCloneWithMapAssign(t *testing.T) {
}
}
}

func BenchmarkKeys(b *testing.B) {
m := make(map[int]int, 1000000)
for i := 0; i < 1000000; i++ {
m[i] = i
}
b.ResetTimer()

slice := make([]int, 0, len(m))
for i := 0; i < b.N; i++ {
keysForBenchmarking(m, slice)
}
}

func BenchmarkValues(b *testing.B) {
m := make(map[int]int, 1000000)
for i := 0; i < 1000000; i++ {
m[i] = i
}
b.ResetTimer()

slice := make([]int, 0, len(m))
for i := 0; i < b.N; i++ {
valuesForBenchmarking(m, slice)
}
}
@@ -349,13 +349,8 @@ func serveContent(w ResponseWriter, r *Request, name string, modtime time.Time,

w.WriteHeader(code)

if r.Method != MethodHead {
if sendSize == size {
// use Copy in the non-range case to make use of WriterTo if available
io.Copy(w, sendContent)
} else {
io.CopyN(w, sendContent, sendSize)
}
if r.Method != "HEAD" {
io.CopyN(w, sendContent, sendSize)
}
}
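Background for the comment in the hunk above: io.Copy delegates to the source's WriteTo method when the source implements io.WriterTo, while io.CopyN (which wraps the source in a LimitReader) does not. A small illustrative probe, with made-up names, not part of this diff:

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// loggingWriterTo wraps a strings.Reader (which implements io.WriterTo)
// and records whether WriteTo was actually used.
type loggingWriterTo struct {
	*strings.Reader
	usedWriteTo bool
}

func (l *loggingWriterTo) WriteTo(w io.Writer) (int64, error) {
	l.usedWriteTo = true
	return l.Reader.WriteTo(w)
}

func main() {
	src := &loggingWriterTo{Reader: strings.NewReader("hello")}
	io.Copy(os.Stdout, src) // io.Copy uses src.WriteTo because it is available
	fmt.Println("\nWriteTo used:", src.usedWriteTo)
}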
@@ -924,7 +924,6 @@ func testServeContent(t *testing.T, mode testMode) {
wantContentType string
wantContentRange string
wantStatus int
wantContent []byte
}
htmlModTime := mustStat(t, "testdata/index.html").ModTime()
tests := map[string]testCase{
@@ -1140,24 +1139,6 @@ func testServeContent(t *testing.T, mode testMode) {
wantStatus: 412,
wantLastMod: htmlModTime.UTC().Format(TimeFormat),
},
"uses_writeTo_if_available_and_non-range": {
content: &panicOnNonWriterTo{seekWriterTo: strings.NewReader("foobar")},
serveContentType: "text/plain; charset=utf-8",
wantContentType: "text/plain; charset=utf-8",
wantStatus: StatusOK,
wantContent: []byte("foobar"),
},
"do_not_use_writeTo_for_range_requests": {
content: &panicOnWriterTo{ReadSeeker: strings.NewReader("foobar")},
serveContentType: "text/plain; charset=utf-8",
reqHeader: map[string]string{
"Range": "bytes=0-4",
},
wantContentType: "text/plain; charset=utf-8",
wantContentRange: "bytes 0-4/6",
wantStatus: StatusPartialContent,
wantContent: []byte("fooba"),
},
}
for testName, tt := range tests {
var content io.ReadSeeker
@@ -1171,8 +1152,7 @@ func testServeContent(t *testing.T, mode testMode) {
} else {
content = tt.content
}
contentOut := &strings.Builder{}
for _, method := range []string{MethodGet, MethodHead} {
for _, method := range []string{"GET", "HEAD"} {
//restore content in case it is consumed by previous method
if content, ok := content.(*strings.Reader); ok {
content.Seek(0, io.SeekStart)
@@ -1198,8 +1178,7 @@ func testServeContent(t *testing.T, mode testMode) {
if err != nil {
t.Fatal(err)
}
contentOut.Reset()
io.Copy(contentOut, res.Body)
io.Copy(io.Discard, res.Body)
res.Body.Close()
if res.StatusCode != tt.wantStatus {
t.Errorf("test %q using %q: got status = %d; want %d", testName, method, res.StatusCode, tt.wantStatus)
@@ -1213,28 +1192,10 @@ func testServeContent(t *testing.T, mode testMode) {
if g, e := res.Header.Get("Last-Modified"), tt.wantLastMod; g != e {
t.Errorf("test %q using %q: got last-modified = %q, want %q", testName, method, g, e)
}
if g, e := contentOut.String(), tt.wantContent; e != nil && method == MethodGet && g != string(e) {
t.Errorf("test %q using %q: got unexpected content %q, want %q", testName, method, g, e)
}
}
}
}

type seekWriterTo interface {
io.Seeker
io.WriterTo
}

type panicOnNonWriterTo struct {
io.Reader
seekWriterTo
}

type panicOnWriterTo struct {
io.ReadSeeker
io.WriterTo
}

// Issue 12991
func TestServerFileStatError(t *testing.T) {
rec := httptest.NewRecorder()
@@ -591,8 +591,29 @@ func (r *Request) write(w io.Writer, usingProxy bool, extraHeaders Header, waitF
if err != nil {
return err
}
// Validate that the Host header is a valid header in general,
// but don't validate the host itself. This is sufficient to avoid
// header or request smuggling via the Host field.
// The server can (and will, if it's a net/http server) reject
// the request if it doesn't consider the host valid.
if !httpguts.ValidHostHeader(host) {
return errors.New("http: invalid Host header")
// Historically, we would truncate the Host header after '/' or ' '.
// Some users have relied on this truncation to convert a network
// address such as Unix domain socket path into a valid, ignored
// Host header (see https://go.dev/issue/61431).
//
// We don't preserve the truncation, because sending an altered
// header field opens a smuggling vector. Instead, zero out the
// Host header entirely if it isn't valid. (An empty Host is valid;
// see RFC 9112 Section 3.2.)
//
// Return an error if we're sending to a proxy, since the proxy
// probably can't do anything useful with an empty Host header.
if !usingProxy {
host = ""
} else {
return errors.New("http: invalid Host header")
}
}

// According to RFC 6874, an HTTP client, proxy, or other
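The comments above reference clients that route requests over a Unix domain socket and no longer get the old Host truncation. A hedged sketch of that pattern with a custom dialer and a harmless placeholder Host; the socket path and host name below are made-up for illustration and are not part of this change:

package main

import (
	"context"
	"net"
	"net/http"
)

// newUnixSocketClient sends all requests over the given Unix socket,
// regardless of the Host in the request URL.
func newUnixSocketClient(socketPath string) *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return (&net.Dialer{}).DialContext(ctx, "unix", socketPath)
			},
		},
	}
}

func main() {
	client := newUnixSocketClient("/tmp/example.sock")
	// "unix" is only a syntactically valid placeholder Host; the dialer
	// above ignores it and always connects to the socket.
	resp, err := client.Get("http://unix/status")
	if err == nil {
		resp.Body.Close()
	}
}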
Some files were not shown because too many files have changed in this diff.