mirror of
https://github.com/golang/go.git
synced 2026-02-04 18:05:03 +03:00
Compare commits
82 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
69801b25b9 | ||
|
|
9d497df196 | ||
|
|
afa9b66ac0 | ||
|
|
2526187481 | ||
|
|
082365aa55 | ||
|
|
4be38528a6 | ||
|
|
525dd85363 | ||
|
|
ddcf27fc8c | ||
|
|
14f50f6e3e | ||
|
|
4e531b2f14 | ||
|
|
6f07a57145 | ||
|
|
ea603eea37 | ||
|
|
93f5d1c27e | ||
|
|
d5bfdcbc47 | ||
|
|
fefb02adf4 | ||
|
|
f7bce4bd6f | ||
|
|
287017aceb | ||
|
|
e1ce1bfa7f | ||
|
|
433c01e94e | ||
|
|
f2cd93aa05 | ||
|
|
83885f3c22 | ||
|
|
5ba37a3677 | ||
|
|
8097b1915f | ||
|
|
4942c74d04 | ||
|
|
cd21a7b31b | ||
|
|
bf95b76739 | ||
|
|
bbb7627123 | ||
|
|
7e049e5c31 | ||
|
|
f6db7350e1 | ||
|
|
28622c1959 | ||
|
|
e05b2c92d9 | ||
|
|
79ec0c94f3 | ||
|
|
bed6c81c2d | ||
|
|
2612dcfd3c | ||
|
|
90f72bd500 | ||
|
|
e0f655bf3f | ||
|
|
100c5a6680 | ||
|
|
f0c69db15a | ||
|
|
9fd3ac8a10 | ||
|
|
5d7a787aa2 | ||
|
|
930ce220d0 | ||
|
|
6a057327cf | ||
|
|
66f6feaa53 | ||
|
|
d6f2741248 | ||
|
|
28ac8d2104 | ||
|
|
06993c7721 | ||
|
|
0b53e410f8 | ||
|
|
7735dc90ed | ||
|
|
205d086595 | ||
|
|
16fdaac4b1 | ||
|
|
f3dc4aac0b | ||
|
|
79c3081b4b | ||
|
|
b816c79658 | ||
|
|
90de3b3399 | ||
|
|
bec452a3a2 | ||
|
|
57bd28ab7f | ||
|
|
f75bcffa4a | ||
|
|
7d570090a9 | ||
|
|
be61132165 | ||
|
|
a86792b169 | ||
|
|
879e3cb5f7 | ||
|
|
56ebf80e57 | ||
|
|
b1959cf6f7 | ||
|
|
cdd8cf4988 | ||
|
|
8995e84ac6 | ||
|
|
749dff880a | ||
|
|
21ac81c1e1 | ||
|
|
c72fcab6d6 | ||
|
|
6e676ab2b8 | ||
|
|
ac94297758 | ||
|
|
6961c3775f | ||
|
|
ebee011a54 | ||
|
|
84fb1b8253 | ||
|
|
c95d3093ca | ||
|
|
561964c9a8 | ||
|
|
e73dadc758 | ||
|
|
2899144b8d | ||
|
|
b062eb46e8 | ||
|
|
8ac5714ef2 | ||
|
|
9546293d22 | ||
|
|
4b3a0b9785 | ||
|
|
5abb1d84f8 |
@@ -1 +1,2 @@
|
||||
branch: master
|
||||
branch: release-branch.go1.25
|
||||
parent-branch: master
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
<!--{
|
||||
"Title": "The Go Programming Language Specification",
|
||||
"Subtitle": "Language version go1.25 (Feb 25, 2025)",
|
||||
"Subtitle": "Language version go1.25 (Aug 12, 2025)",
|
||||
"Path": "/ref/spec"
|
||||
}-->
|
||||
|
||||
|
||||
@@ -153,6 +153,23 @@ for example,
|
||||
see the [runtime documentation](/pkg/runtime#hdr-Environment_Variables)
|
||||
and the [go command documentation](/cmd/go#hdr-Build_and_test_caching).
|
||||
|
||||
### Go 1.26
|
||||
|
||||
Go 1.26 added a new `httpcookiemaxnum` setting that controls the maximum number
|
||||
of cookies that net/http will accept when parsing HTTP headers. If the number of
|
||||
cookies in a header exceeds the number set in `httpcookiemaxnum`, cookie parsing
|
||||
will fail early. The default value is `httpcookiemaxnum=3000`. Setting
|
||||
`httpcookiemaxnum=0` will allow the cookie parsing to accept an indefinite
|
||||
number of cookies. To avoid denial of service attacks, this setting and default
|
||||
was backported to Go 1.25.2 and Go 1.24.8.
|
||||
|
||||
Go 1.26 added a new `urlmaxqueryparams` setting that controls the maximum number
|
||||
of query parameters that net/url will accept when parsing a URL-encoded query string.
|
||||
If the number of parameters exceeds the number set in `urlmaxqueryparams`,
|
||||
parsing will fail early. The default value is `urlmaxqueryparams=10000`.
|
||||
Setting `urlmaxqueryparams=0` disables the limit. To avoid denial of service attacks,
|
||||
this setting and default was backported to Go 1.25.4 and Go 1.24.10.
|
||||
|
||||
### Go 1.25
|
||||
|
||||
Go 1.25 added a new `decoratemappings` setting that controls whether the Go
|
||||
|
||||
@@ -9,4 +9,4 @@
|
||||
#
|
||||
# go test cmd/go/internal/fips140 -update
|
||||
#
|
||||
v1.0.0.zip b50508feaeff05d22516b21e1fd210bbf5d6a1e422eaf2cfa23fe379342713b8
|
||||
v1.0.0-c2097c7c.zip daf3614e0406f67ae6323c902db3f953a1effb199142362a039e7526dfb9368b
|
||||
|
||||
@@ -1 +1 @@
|
||||
v1.0.0
|
||||
v1.0.0-c2097c7c
|
||||
|
||||
Binary file not shown.
1
lib/fips140/v1.0.0.txt
Normal file
1
lib/fips140/v1.0.0.txt
Normal file
@@ -0,0 +1 @@
|
||||
v1.0.0-c2097c7c
|
||||
@@ -39,6 +39,7 @@ var (
|
||||
errMissData = errors.New("archive/tar: sparse file references non-existent data")
|
||||
errUnrefData = errors.New("archive/tar: sparse file contains unreferenced data")
|
||||
errWriteHole = errors.New("archive/tar: write non-NUL byte in sparse hole")
|
||||
errSparseTooLong = errors.New("archive/tar: sparse map too long")
|
||||
)
|
||||
|
||||
type headerError []string
|
||||
|
||||
@@ -531,12 +531,17 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) {
|
||||
cntNewline int64
|
||||
buf bytes.Buffer
|
||||
blk block
|
||||
totalSize int
|
||||
)
|
||||
|
||||
// feedTokens copies data in blocks from r into buf until there are
|
||||
// at least cnt newlines in buf. It will not read more blocks than needed.
|
||||
feedTokens := func(n int64) error {
|
||||
for cntNewline < n {
|
||||
totalSize += len(blk)
|
||||
if totalSize > maxSpecialFileSize {
|
||||
return errSparseTooLong
|
||||
}
|
||||
if _, err := mustReadFull(r, blk[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -569,8 +574,8 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) {
|
||||
}
|
||||
|
||||
// Parse for all member entries.
|
||||
// numEntries is trusted after this since a potential attacker must have
|
||||
// committed resources proportional to what this library used.
|
||||
// numEntries is trusted after this since feedTokens limits the number of
|
||||
// tokens based on maxSpecialFileSize.
|
||||
if err := feedTokens(2 * numEntries); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -621,6 +621,11 @@ func TestReader(t *testing.T) {
|
||||
},
|
||||
Format: FormatPAX,
|
||||
}},
|
||||
}, {
|
||||
// Small compressed file that uncompresses to
|
||||
// a file with a very large GNU 1.0 sparse map.
|
||||
file: "testdata/gnu-sparse-many-zeros.tar.bz2",
|
||||
err: errSparseTooLong,
|
||||
}}
|
||||
|
||||
for _, v := range vectors {
|
||||
|
||||
BIN
src/archive/tar/testdata/gnu-sparse-many-zeros.tar.bz2
vendored
Normal file
BIN
src/archive/tar/testdata/gnu-sparse-many-zeros.tar.bz2
vendored
Normal file
Binary file not shown.
@@ -834,7 +834,16 @@ func (r *Reader) initFileList() {
|
||||
continue
|
||||
}
|
||||
|
||||
for dir := path.Dir(name); dir != "."; dir = path.Dir(dir) {
|
||||
dir := name
|
||||
for {
|
||||
if idx := strings.LastIndex(dir, "/"); idx < 0 {
|
||||
break
|
||||
} else {
|
||||
dir = dir[:idx]
|
||||
}
|
||||
if dirs[dir] {
|
||||
break
|
||||
}
|
||||
dirs[dir] = true
|
||||
}
|
||||
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"internal/obscuretestdata"
|
||||
"io"
|
||||
"io/fs"
|
||||
@@ -1876,3 +1877,83 @@ func TestBaseOffsetPlusOverflow(t *testing.T) {
|
||||
// as the section reader offset & size were < 0.
|
||||
NewReader(bytes.NewReader(data), int64(len(data))+1875)
|
||||
}
|
||||
|
||||
func BenchmarkReaderOneDeepDir(b *testing.B) {
|
||||
var buf bytes.Buffer
|
||||
zw := NewWriter(&buf)
|
||||
|
||||
for i := range 4000 {
|
||||
name := strings.Repeat("a/", i) + "data"
|
||||
zw.CreateHeader(&FileHeader{
|
||||
Name: name,
|
||||
Method: Store,
|
||||
})
|
||||
}
|
||||
|
||||
if err := zw.Close(); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
data := buf.Bytes()
|
||||
|
||||
for b.Loop() {
|
||||
zr, err := NewReader(bytes.NewReader(data), int64(len(data)))
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
zr.Open("does-not-exist")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReaderManyDeepDirs(b *testing.B) {
|
||||
var buf bytes.Buffer
|
||||
zw := NewWriter(&buf)
|
||||
|
||||
for i := range 2850 {
|
||||
name := fmt.Sprintf("%x", i)
|
||||
name = strings.Repeat("/"+name, i+1)[1:]
|
||||
|
||||
zw.CreateHeader(&FileHeader{
|
||||
Name: name,
|
||||
Method: Store,
|
||||
})
|
||||
}
|
||||
|
||||
if err := zw.Close(); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
data := buf.Bytes()
|
||||
|
||||
for b.Loop() {
|
||||
zr, err := NewReader(bytes.NewReader(data), int64(len(data)))
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
zr.Open("does-not-exist")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReaderManyShallowFiles(b *testing.B) {
|
||||
var buf bytes.Buffer
|
||||
zw := NewWriter(&buf)
|
||||
|
||||
for i := range 310000 {
|
||||
name := fmt.Sprintf("%v", i)
|
||||
zw.CreateHeader(&FileHeader{
|
||||
Name: name,
|
||||
Method: Store,
|
||||
})
|
||||
}
|
||||
|
||||
if err := zw.Close(); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
data := buf.Bytes()
|
||||
|
||||
for b.Loop() {
|
||||
zr, err := NewReader(bytes.NewReader(data), int64(len(data)))
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
zr.Open("does-not-exist")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -128,14 +128,29 @@ func Info(ctxt *obj.Link, fnsym *obj.LSym, infosym *obj.LSym, curfn obj.Func) (s
|
||||
// already referenced by a dwarf var, attach an R_USETYPE relocation to
|
||||
// the function symbol to insure that the type included in DWARF
|
||||
// processing during linking.
|
||||
// Do the same with R_USEIFACE relocations from the function symbol for the
|
||||
// same reason.
|
||||
// All these R_USETYPE relocations are only looked at if the function
|
||||
// survives deadcode elimination in the linker.
|
||||
typesyms := []*obj.LSym{}
|
||||
for t := range fnsym.Func().Autot {
|
||||
typesyms = append(typesyms, t)
|
||||
}
|
||||
for i := range fnsym.R {
|
||||
if fnsym.R[i].Type == objabi.R_USEIFACE && !strings.HasPrefix(fnsym.R[i].Sym.Name, "go:itab.") {
|
||||
// Types referenced through itab will be referenced from somewhere else
|
||||
typesyms = append(typesyms, fnsym.R[i].Sym)
|
||||
}
|
||||
}
|
||||
slices.SortFunc(typesyms, func(a, b *obj.LSym) int {
|
||||
return strings.Compare(a.Name, b.Name)
|
||||
})
|
||||
var lastsym *obj.LSym
|
||||
for _, sym := range typesyms {
|
||||
if sym == lastsym {
|
||||
continue
|
||||
}
|
||||
lastsym = sym
|
||||
infosym.AddRel(ctxt, obj.Reloc{Type: objabi.R_USETYPE, Sym: sym})
|
||||
}
|
||||
fnsym.Func().Autot = nil
|
||||
|
||||
@@ -49,9 +49,6 @@ type pkgReader struct {
|
||||
// but bitwise inverted so we can detect if we're missing the entry
|
||||
// or not.
|
||||
newindex []index
|
||||
|
||||
// indicates whether the data is reading during reshaping.
|
||||
reshaping bool
|
||||
}
|
||||
|
||||
func newPkgReader(pr pkgbits.PkgDecoder) *pkgReader {
|
||||
@@ -119,10 +116,6 @@ type reader struct {
|
||||
// find parameters/results.
|
||||
funarghack bool
|
||||
|
||||
// reshaping is used during reading exprReshape code, preventing
|
||||
// the reader from shapifying the re-shaped type.
|
||||
reshaping bool
|
||||
|
||||
// methodSym is the name of method's name, if reading a method.
|
||||
// It's nil if reading a normal function or closure body.
|
||||
methodSym *types.Sym
|
||||
@@ -937,8 +930,19 @@ func shapify(targ *types.Type, basic bool) *types.Type {
|
||||
// types, and discarding struct field names and tags. However, we'll
|
||||
// need to start tracking how type parameters are actually used to
|
||||
// implement some of these optimizations.
|
||||
pointerShaping := basic && targ.IsPtr() && !targ.Elem().NotInHeap()
|
||||
// The exception is when the type parameter is a pointer to a type
|
||||
// which `Type.HasShape()` returns true, but `Type.IsShape()` returns
|
||||
// false, like `*[]go.shape.T`. This is because the type parameter is
|
||||
// used to instantiate a generic function inside another generic function.
|
||||
// In this case, we want to keep the targ as-is, otherwise, we may lose the
|
||||
// original type after `*[]go.shape.T` is shapified to `*go.shape.uint8`.
|
||||
// See issue #54535, #71184.
|
||||
if pointerShaping && !targ.Elem().IsShape() && targ.Elem().HasShape() {
|
||||
return targ
|
||||
}
|
||||
under := targ.Underlying()
|
||||
if basic && targ.IsPtr() && !targ.Elem().NotInHeap() {
|
||||
if pointerShaping {
|
||||
under = types.NewPtr(types.Types[types.TUINT8])
|
||||
}
|
||||
|
||||
@@ -1014,25 +1018,7 @@ func (pr *pkgReader) objDictIdx(sym *types.Sym, idx index, implicits, explicits
|
||||
// arguments.
|
||||
for i, targ := range dict.targs {
|
||||
basic := r.Bool()
|
||||
isPointerShape := basic && targ.IsPtr() && !targ.Elem().NotInHeap()
|
||||
// We should not do shapify during the reshaping process, see #71184.
|
||||
// However, this only matters for shapify a pointer type, which will
|
||||
// lose the original underlying type.
|
||||
//
|
||||
// Example with a pointer type:
|
||||
//
|
||||
// - First, shapifying *[]T -> *uint8
|
||||
// - During the reshaping process, *uint8 is shapified to *go.shape.uint8
|
||||
// - This ends up with a different type with the original *[]T
|
||||
//
|
||||
// For a non-pointer type:
|
||||
//
|
||||
// - int -> go.shape.int
|
||||
// - go.shape.int -> go.shape.int
|
||||
//
|
||||
// We always end up with the identical type.
|
||||
canShapify := !pr.reshaping || !isPointerShape
|
||||
if dict.shaped && canShapify {
|
||||
if dict.shaped {
|
||||
dict.targs[i] = shapify(targ, basic)
|
||||
}
|
||||
}
|
||||
@@ -2470,10 +2456,7 @@ func (r *reader) expr() (res ir.Node) {
|
||||
|
||||
case exprReshape:
|
||||
typ := r.typ()
|
||||
old := r.reshaping
|
||||
r.reshaping = true
|
||||
x := r.expr()
|
||||
r.reshaping = old
|
||||
|
||||
if types.IdenticalStrict(x.Type(), typ) {
|
||||
return x
|
||||
@@ -2596,10 +2579,7 @@ func (r *reader) funcInst(pos src.XPos) (wrapperFn, baseFn, dictPtr ir.Node) {
|
||||
info := r.dict.subdicts[idx]
|
||||
explicits := r.p.typListIdx(info.explicits, r.dict)
|
||||
|
||||
old := r.p.reshaping
|
||||
r.p.reshaping = r.reshaping
|
||||
baseFn = r.p.objIdx(info.idx, implicits, explicits, true).(*ir.Name)
|
||||
r.p.reshaping = old
|
||||
|
||||
// TODO(mdempsky): Is there a more robust way to get the
|
||||
// dictionary pointer type here?
|
||||
|
||||
@@ -2555,7 +2555,7 @@ func rewriteStructStore(v *Value) *Value {
|
||||
|
||||
// isDirectType reports whether v represents a type
|
||||
// (a *runtime._type) whose value is stored directly in an
|
||||
// interface (i.e., is pointer or pointer-like).
|
||||
// interface (i.e., is pointer or pointer-like) and is comparable.
|
||||
func isDirectType(v *Value) bool {
|
||||
return isDirectType1(v)
|
||||
}
|
||||
@@ -2571,7 +2571,8 @@ func isDirectType1(v *Value) bool {
|
||||
return false
|
||||
}
|
||||
if ti, ok := (*lsym.Extra).(*obj.TypeInfo); ok {
|
||||
return types.IsDirectIface(ti.Type.(*types.Type))
|
||||
t := ti.Type.(*types.Type)
|
||||
return types.IsDirectIface(t) && types.IsComparable(t)
|
||||
}
|
||||
}
|
||||
return false
|
||||
@@ -2588,7 +2589,7 @@ func isDirectType2(v *Value) bool {
|
||||
|
||||
// isDirectIface reports whether v represents an itab
|
||||
// (a *runtime._itab) for a type whose value is stored directly
|
||||
// in an interface (i.e., is pointer or pointer-like).
|
||||
// in an interface (i.e., is pointer or pointer-like) and is comparable.
|
||||
func isDirectIface(v *Value) bool {
|
||||
return isDirectIface1(v, 9)
|
||||
}
|
||||
@@ -2607,7 +2608,8 @@ func isDirectIface1(v *Value, depth int) bool {
|
||||
return false
|
||||
}
|
||||
if ii, ok := (*lsym.Extra).(*obj.ItabInfo); ok {
|
||||
return types.IsDirectIface(ii.Type.(*types.Type))
|
||||
t := ii.Type.(*types.Type)
|
||||
return types.IsDirectIface(t) && types.IsComparable(t)
|
||||
}
|
||||
case OpConstNil:
|
||||
// We can treat this as direct, because if the itab is
|
||||
|
||||
@@ -511,6 +511,10 @@ func (t *worklist) propagate(block *Block) {
|
||||
branchIdx = 1 - condLattice.val.AuxInt
|
||||
} else {
|
||||
branchIdx = condLattice.val.AuxInt
|
||||
if branchIdx < 0 || branchIdx >= int64(len(block.Succs)) {
|
||||
// unreachable code, do nothing then
|
||||
break
|
||||
}
|
||||
}
|
||||
t.edges = append(t.edges, block.Succs[branchIdx])
|
||||
} else {
|
||||
|
||||
@@ -124,18 +124,21 @@ func tighten(f *Func) {
|
||||
|
||||
// If the target location is inside a loop,
|
||||
// move the target location up to just before the loop head.
|
||||
for _, b := range f.Blocks {
|
||||
origloop := loops.b2l[b.ID]
|
||||
for _, v := range b.Values {
|
||||
t := target[v.ID]
|
||||
if t == nil {
|
||||
continue
|
||||
}
|
||||
targetloop := loops.b2l[t.ID]
|
||||
for targetloop != nil && (origloop == nil || targetloop.depth > origloop.depth) {
|
||||
t = idom[targetloop.header.ID]
|
||||
target[v.ID] = t
|
||||
targetloop = loops.b2l[t.ID]
|
||||
if !loops.hasIrreducible {
|
||||
// Loop info might not be correct for irreducible loops. See issue 75569.
|
||||
for _, b := range f.Blocks {
|
||||
origloop := loops.b2l[b.ID]
|
||||
for _, v := range b.Values {
|
||||
t := target[v.ID]
|
||||
if t == nil {
|
||||
continue
|
||||
}
|
||||
targetloop := loops.b2l[t.ID]
|
||||
for targetloop != nil && (origloop == nil || targetloop.depth > origloop.depth) {
|
||||
t = idom[targetloop.header.ID]
|
||||
target[v.ID] = t
|
||||
targetloop = loops.b2l[t.ID]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
78
src/cmd/compile/testdata/script/issue75461.txt
vendored
Normal file
78
src/cmd/compile/testdata/script/issue75461.txt
vendored
Normal file
@@ -0,0 +1,78 @@
|
||||
go build main.go
|
||||
! stdout .
|
||||
! stderr .
|
||||
|
||||
-- main.go --
|
||||
package main
|
||||
|
||||
import (
|
||||
"demo/registry"
|
||||
)
|
||||
|
||||
func main() {
|
||||
_ = registry.NewUserRegistry()
|
||||
}
|
||||
|
||||
-- go.mod --
|
||||
module demo
|
||||
|
||||
go 1.24
|
||||
|
||||
-- model/user.go --
|
||||
package model
|
||||
|
||||
type User struct {
|
||||
ID int
|
||||
}
|
||||
|
||||
func (c *User) String() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
-- ordered/map.go --
|
||||
package ordered
|
||||
|
||||
type OrderedMap[K comparable, V any] struct {
|
||||
m map[K]V
|
||||
}
|
||||
|
||||
func New[K comparable, V any](options ...any) *OrderedMap[K, V] {
|
||||
orderedMap := &OrderedMap[K, V]{}
|
||||
return orderedMap
|
||||
}
|
||||
|
||||
-- registry/user.go --
|
||||
package registry
|
||||
|
||||
import (
|
||||
"demo/model"
|
||||
"demo/ordered"
|
||||
)
|
||||
|
||||
type baseRegistry = Registry[model.User, *model.User]
|
||||
|
||||
type UserRegistry struct {
|
||||
*baseRegistry
|
||||
}
|
||||
|
||||
type Registry[T any, P PStringer[T]] struct {
|
||||
m *ordered.OrderedMap[string, P]
|
||||
}
|
||||
|
||||
type PStringer[T any] interface {
|
||||
*T
|
||||
String() string
|
||||
}
|
||||
|
||||
func NewRegistry[T any, P PStringer[T]]() *Registry[T, P] {
|
||||
r := &Registry[T, P]{
|
||||
m: ordered.New[string, P](),
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func NewUserRegistry() *UserRegistry {
|
||||
return &UserRegistry{
|
||||
baseRegistry: NewRegistry[model.User](),
|
||||
}
|
||||
}
|
||||
@@ -27,10 +27,10 @@ import (
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/mod/module"
|
||||
"golang.org/x/mod/semver"
|
||||
modzip "golang.org/x/mod/zip"
|
||||
)
|
||||
|
||||
@@ -61,7 +61,7 @@ func main() {
|
||||
|
||||
// Must have valid version, and must not overwrite existing file.
|
||||
version := flag.Arg(0)
|
||||
if !regexp.MustCompile(`^v\d+\.\d+\.\d+$`).MatchString(version) {
|
||||
if semver.Canonical(version) != version {
|
||||
log.Fatalf("invalid version %q; must be vX.Y.Z", version)
|
||||
}
|
||||
if _, err := os.Stat(version + ".zip"); err == nil {
|
||||
@@ -117,7 +117,9 @@ func main() {
|
||||
if !bytes.Contains(contents, []byte(returnLine)) {
|
||||
log.Fatalf("did not find %q in fips140.go", returnLine)
|
||||
}
|
||||
newLine := `return "` + version + `"`
|
||||
// Use only the vX.Y.Z part of a possible vX.Y.Z-hash version.
|
||||
v, _, _ := strings.Cut(version, "-")
|
||||
newLine := `return "` + v + `"`
|
||||
contents = bytes.ReplaceAll(contents, []byte(returnLine), []byte(newLine))
|
||||
wf, err := zw.Create(f.Name)
|
||||
if err != nil {
|
||||
|
||||
@@ -109,6 +109,9 @@ func ModIsPrefix(path, vers string) bool {
|
||||
// The caller is assumed to have checked that ModIsValid(path, vers) is true.
|
||||
func ModIsPrerelease(path, vers string) bool {
|
||||
if IsToolchain(path) {
|
||||
if path == "toolchain" {
|
||||
return IsPrerelease(FromToolchain(vers))
|
||||
}
|
||||
return IsPrerelease(vers)
|
||||
}
|
||||
return semver.Prerelease(vers) != ""
|
||||
|
||||
@@ -321,7 +321,10 @@ func runEdit(ctx context.Context, cmd *base.Command, args []string) {
|
||||
|
||||
// parsePathVersion parses -flag=arg expecting arg to be path@version.
|
||||
func parsePathVersion(flag, arg string) (path, version string) {
|
||||
before, after, found := strings.Cut(arg, "@")
|
||||
before, after, found, err := modload.ParsePathVersion(arg)
|
||||
if err != nil {
|
||||
base.Fatalf("go: -%s=%s: %v", flag, arg, err)
|
||||
}
|
||||
if !found {
|
||||
base.Fatalf("go: -%s=%s: need path@version", flag, arg)
|
||||
}
|
||||
@@ -355,7 +358,10 @@ func parsePathVersionOptional(adj, arg string, allowDirPath bool) (path, version
|
||||
if allowDirPath && modfile.IsDirectoryPath(arg) {
|
||||
return arg, "", nil
|
||||
}
|
||||
before, after, found := strings.Cut(arg, "@")
|
||||
before, after, found, err := modload.ParsePathVersion(arg)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
if !found {
|
||||
path = arg
|
||||
} else {
|
||||
|
||||
@@ -248,7 +248,7 @@ func (r *gitRepo) loadRefs(ctx context.Context) (map[string]string, error) {
|
||||
r.refsErr = err
|
||||
return
|
||||
}
|
||||
out, gitErr := r.runGit(ctx, "git", "ls-remote", "-q", r.remote)
|
||||
out, gitErr := r.runGit(ctx, "git", "ls-remote", "-q", "--end-of-options", r.remote)
|
||||
release()
|
||||
|
||||
if gitErr != nil {
|
||||
@@ -534,7 +534,7 @@ func (r *gitRepo) stat(ctx context.Context, rev string) (info *RevInfo, err erro
|
||||
if fromTag && !slices.Contains(info.Tags, tag) {
|
||||
// The local repo includes the commit hash we want, but it is missing
|
||||
// the corresponding tag. Add that tag and try again.
|
||||
_, err := r.runGit(ctx, "git", "tag", tag, hash)
|
||||
_, err := r.runGit(ctx, "git", "tag", "--end-of-options", tag, hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -583,7 +583,7 @@ func (r *gitRepo) stat(ctx context.Context, rev string) (info *RevInfo, err erro
|
||||
// an apparent Git bug introduced in Git 2.21 (commit 61c771),
|
||||
// which causes the handler for protocol version 1 to sometimes miss
|
||||
// tags that point to the requested commit (see https://go.dev/issue/56881).
|
||||
_, err = r.runGit(ctx, "git", "-c", "protocol.version=2", "fetch", "-f", "--depth=1", r.remote, refspec)
|
||||
_, err = r.runGit(ctx, "git", "-c", "protocol.version=2", "fetch", "-f", "--depth=1", "--end-of-options", r.remote, refspec)
|
||||
release()
|
||||
|
||||
if err == nil {
|
||||
@@ -629,12 +629,12 @@ func (r *gitRepo) fetchRefsLocked(ctx context.Context) error {
|
||||
}
|
||||
defer release()
|
||||
|
||||
if _, err := r.runGit(ctx, "git", "fetch", "-f", r.remote, "refs/heads/*:refs/heads/*", "refs/tags/*:refs/tags/*"); err != nil {
|
||||
if _, err := r.runGit(ctx, "git", "fetch", "-f", "--end-of-options", r.remote, "refs/heads/*:refs/heads/*", "refs/tags/*:refs/tags/*"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := os.Stat(filepath.Join(r.dir, "shallow")); err == nil {
|
||||
if _, err := r.runGit(ctx, "git", "fetch", "--unshallow", "-f", r.remote); err != nil {
|
||||
if _, err := r.runGit(ctx, "git", "fetch", "--unshallow", "-f", "--end-of-options", r.remote); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -647,7 +647,7 @@ func (r *gitRepo) fetchRefsLocked(ctx context.Context) error {
|
||||
// statLocal returns a new RevInfo describing rev in the local git repository.
|
||||
// It uses version as info.Version.
|
||||
func (r *gitRepo) statLocal(ctx context.Context, version, rev string) (*RevInfo, error) {
|
||||
out, err := r.runGit(ctx, "git", "-c", "log.showsignature=false", "log", "--no-decorate", "-n1", "--format=format:%H %ct %D", rev, "--")
|
||||
out, err := r.runGit(ctx, "git", "-c", "log.showsignature=false", "log", "--no-decorate", "-n1", "--format=format:%H %ct %D", "--end-of-options", rev, "--")
|
||||
if err != nil {
|
||||
// Return info with Origin.RepoSum if possible to allow caching of negative lookup.
|
||||
var info *RevInfo
|
||||
@@ -737,7 +737,7 @@ func (r *gitRepo) ReadFile(ctx context.Context, rev, file string, maxSize int64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out, err := r.runGit(ctx, "git", "cat-file", "blob", info.Name+":"+file)
|
||||
out, err := r.runGit(ctx, "git", "cat-file", "--end-of-options", "blob", info.Name+":"+file)
|
||||
if err != nil {
|
||||
return nil, fs.ErrNotExist
|
||||
}
|
||||
@@ -755,7 +755,7 @@ func (r *gitRepo) RecentTag(ctx context.Context, rev, prefix string, allowed fun
|
||||
// result is definitive.
|
||||
describe := func() (definitive bool) {
|
||||
var out []byte
|
||||
out, err = r.runGit(ctx, "git", "for-each-ref", "--format", "%(refname)", "refs/tags", "--merged", rev)
|
||||
out, err = r.runGit(ctx, "git", "for-each-ref", "--format=%(refname)", "--merged="+rev)
|
||||
if err != nil {
|
||||
return true
|
||||
}
|
||||
@@ -904,7 +904,7 @@ func (r *gitRepo) ReadZip(ctx context.Context, rev, subdir string, maxSize int64
|
||||
// TODO: Use maxSize or drop it.
|
||||
args := []string{}
|
||||
if subdir != "" {
|
||||
args = append(args, "--", subdir)
|
||||
args = append(args, subdir)
|
||||
}
|
||||
info, err := r.Stat(ctx, rev) // download rev into local git repo
|
||||
if err != nil {
|
||||
@@ -926,7 +926,7 @@ func (r *gitRepo) ReadZip(ctx context.Context, rev, subdir string, maxSize int64
|
||||
// text file line endings. Setting -c core.autocrlf=input means only
|
||||
// translate files on the way into the repo, not on the way out (archive).
|
||||
// The -c core.eol=lf should be unnecessary but set it anyway.
|
||||
archive, err := r.runGit(ctx, "git", "-c", "core.autocrlf=input", "-c", "core.eol=lf", "archive", "--format=zip", "--prefix=prefix/", info.Name, args)
|
||||
archive, err := r.runGit(ctx, "git", "-c", "core.autocrlf=input", "-c", "core.eol=lf", "archive", "--format=zip", "--prefix=prefix/", "--end-of-options", info.Name, args)
|
||||
if err != nil {
|
||||
if bytes.Contains(err.(*RunError).Stderr, []byte("did not match any files")) {
|
||||
return nil, fs.ErrNotExist
|
||||
|
||||
@@ -176,20 +176,20 @@ var vcsCmds = map[string]*vcsCmd{
|
||||
branchRE: re(`(?m)^[^\n]+$`),
|
||||
badLocalRevRE: re(`(?m)^(tip)$`),
|
||||
statLocal: func(rev, remote string) []string {
|
||||
return []string{"hg", "log", "-l1", "-r", rev, "--template", "{node} {date|hgdate} {tags}"}
|
||||
return []string{"hg", "log", "-l1", fmt.Sprintf("--rev=%s", rev), "--template", "{node} {date|hgdate} {tags}"}
|
||||
},
|
||||
parseStat: hgParseStat,
|
||||
fetch: []string{"hg", "pull", "-f"},
|
||||
latest: "tip",
|
||||
readFile: func(rev, file, remote string) []string {
|
||||
return []string{"hg", "cat", "-r", rev, file}
|
||||
return []string{"hg", "cat", fmt.Sprintf("--rev=%s", rev), "--", file}
|
||||
},
|
||||
readZip: func(rev, subdir, remote, target string) []string {
|
||||
pattern := []string{}
|
||||
if subdir != "" {
|
||||
pattern = []string{"-I", subdir + "/**"}
|
||||
pattern = []string{fmt.Sprintf("--include=%s", subdir+"/**")}
|
||||
}
|
||||
return str.StringList("hg", "archive", "-t", "zip", "--no-decode", "-r", rev, "--prefix=prefix/", pattern, "--", target)
|
||||
return str.StringList("hg", "archive", "-t", "zip", "--no-decode", fmt.Sprintf("--rev=%s", rev), "--prefix=prefix/", pattern, "--", target)
|
||||
},
|
||||
},
|
||||
|
||||
@@ -229,19 +229,19 @@ var vcsCmds = map[string]*vcsCmd{
|
||||
tagRE: re(`(?m)^\S+`),
|
||||
badLocalRevRE: re(`^revno:-`),
|
||||
statLocal: func(rev, remote string) []string {
|
||||
return []string{"bzr", "log", "-l1", "--long", "--show-ids", "-r", rev}
|
||||
return []string{"bzr", "log", "-l1", "--long", "--show-ids", fmt.Sprintf("--revision=%s", rev)}
|
||||
},
|
||||
parseStat: bzrParseStat,
|
||||
latest: "revno:-1",
|
||||
readFile: func(rev, file, remote string) []string {
|
||||
return []string{"bzr", "cat", "-r", rev, file}
|
||||
return []string{"bzr", "cat", fmt.Sprintf("--revision=%s", rev), "--", file}
|
||||
},
|
||||
readZip: func(rev, subdir, remote, target string) []string {
|
||||
extra := []string{}
|
||||
if subdir != "" {
|
||||
extra = []string{"./" + subdir}
|
||||
}
|
||||
return str.StringList("bzr", "export", "--format=zip", "-r", rev, "--root=prefix/", "--", target, extra)
|
||||
return str.StringList("bzr", "export", "--format=zip", fmt.Sprintf("--revision=%s", rev), "--root=prefix/", "--", target, extra)
|
||||
},
|
||||
},
|
||||
|
||||
@@ -256,17 +256,17 @@ var vcsCmds = map[string]*vcsCmd{
|
||||
},
|
||||
tagRE: re(`XXXTODO`),
|
||||
statLocal: func(rev, remote string) []string {
|
||||
return []string{"fossil", "info", "-R", ".fossil", rev}
|
||||
return []string{"fossil", "info", "-R", ".fossil", "--", rev}
|
||||
},
|
||||
parseStat: fossilParseStat,
|
||||
latest: "trunk",
|
||||
readFile: func(rev, file, remote string) []string {
|
||||
return []string{"fossil", "cat", "-R", ".fossil", "-r", rev, file}
|
||||
return []string{"fossil", "cat", "-R", ".fossil", fmt.Sprintf("-r=%s", rev), "--", file}
|
||||
},
|
||||
readZip: func(rev, subdir, remote, target string) []string {
|
||||
extra := []string{}
|
||||
if subdir != "" && !strings.ContainsAny(subdir, "*?[],") {
|
||||
extra = []string{"--include", subdir}
|
||||
extra = []string{fmt.Sprintf("--include=%s", subdir)}
|
||||
}
|
||||
// Note that vcsRepo.ReadZip below rewrites this command
|
||||
// to run in a different directory, to work around a fossil bug.
|
||||
|
||||
@@ -139,7 +139,10 @@ func errSet(err error) pathSet { return pathSet{err: err} }
|
||||
// newQuery returns a new query parsed from the raw argument,
|
||||
// which must be either path or path@version.
|
||||
func newQuery(raw string) (*query, error) {
|
||||
pattern, rawVers, found := strings.Cut(raw, "@")
|
||||
pattern, rawVers, found, err := modload.ParsePathVersion(raw)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if found && (strings.Contains(rawVers, "@") || rawVers == "") {
|
||||
return nil, fmt.Errorf("invalid module version syntax %q", raw)
|
||||
}
|
||||
|
||||
@@ -12,7 +12,6 @@ import (
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"cmd/go/internal/base"
|
||||
"cmd/go/internal/cfg"
|
||||
@@ -88,7 +87,16 @@ func ModuleInfo(ctx context.Context, path string) *modinfo.ModulePublic {
|
||||
return nil
|
||||
}
|
||||
|
||||
if path, vers, found := strings.Cut(path, "@"); found {
|
||||
path, vers, found, err := ParsePathVersion(path)
|
||||
if err != nil {
|
||||
return &modinfo.ModulePublic{
|
||||
Path: path,
|
||||
Error: &modinfo.ModuleError{
|
||||
Err: err.Error(),
|
||||
},
|
||||
}
|
||||
}
|
||||
if found {
|
||||
m := module.Version{Path: path, Version: vers}
|
||||
return moduleInfo(ctx, nil, m, 0, nil)
|
||||
}
|
||||
|
||||
@@ -150,7 +150,11 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List
|
||||
}
|
||||
continue
|
||||
}
|
||||
if path, vers, found := strings.Cut(arg, "@"); found {
|
||||
path, vers, found, err := ParsePathVersion(arg)
|
||||
if err != nil {
|
||||
base.Fatalf("go: %v", err)
|
||||
}
|
||||
if found {
|
||||
if vers == "upgrade" || vers == "patch" {
|
||||
if _, ok := rs.rootSelected(path); !ok || rs.pruning == unpruned {
|
||||
needFullGraph = true
|
||||
@@ -176,7 +180,11 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List
|
||||
|
||||
matchedModule := map[module.Version]bool{}
|
||||
for _, arg := range args {
|
||||
if path, vers, found := strings.Cut(arg, "@"); found {
|
||||
path, vers, found, err := ParsePathVersion(arg)
|
||||
if err != nil {
|
||||
base.Fatalf("go: %v", err)
|
||||
}
|
||||
if found {
|
||||
var current string
|
||||
if mg == nil {
|
||||
current, _ = rs.rootSelected(path)
|
||||
@@ -319,3 +327,21 @@ func modinfoError(path, vers string, err error) *modinfo.ModuleError {
|
||||
|
||||
return &modinfo.ModuleError{Err: err.Error()}
|
||||
}
|
||||
|
||||
// ParsePathVersion parses arg expecting arg to be path@version. If there is no
|
||||
// '@' in arg, found is false, vers is "", and path is arg. This mirrors the
|
||||
// typical usage of strings.Cut. ParsePathVersion is meant to be a general
|
||||
// replacement for strings.Cut in module version parsing. If the version is
|
||||
// invalid, an error is returned. The version is considered invalid if it is
|
||||
// prefixed with '-' or '/', which can cause security problems when constructing
|
||||
// commands to execute that use the version.
|
||||
func ParsePathVersion(arg string) (path, vers string, found bool, err error) {
|
||||
path, vers, found = strings.Cut(arg, "@")
|
||||
if !found {
|
||||
return arg, "", false, nil
|
||||
}
|
||||
if len(vers) > 0 && (vers[0] == '-' || vers[0] == '/') {
|
||||
return "", "", false, fmt.Errorf("invalid module version %q", vers)
|
||||
}
|
||||
return path, vers, true, nil
|
||||
}
|
||||
|
||||
@@ -277,6 +277,29 @@ func loadModTool(ctx context.Context, name string) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func builtTool(runAction *work.Action) string {
|
||||
linkAction := runAction.Deps[0]
|
||||
if toolN {
|
||||
// #72824: If -n is set, use the cached path if we can.
|
||||
// This is only necessary if the binary wasn't cached
|
||||
// before this invocation of the go command: if the binary
|
||||
// was cached, BuiltTarget() will be the cached executable.
|
||||
// It's only in the "first run", where we actually do the build
|
||||
// and save the result to the cache that BuiltTarget is not
|
||||
// the cached binary. Ideally, we would set BuiltTarget
|
||||
// to the cached path even in the first run, but if we
|
||||
// copy the binary to the cached path, and try to run it
|
||||
// in the same process, we'll run into the dreaded #22315
|
||||
// resulting in occasional ETXTBSYs. Instead of getting the
|
||||
// ETXTBSY and then retrying just don't use the cached path
|
||||
// on the first run if we're going to actually run the binary.
|
||||
if cached := linkAction.CachedExecutable(); cached != "" {
|
||||
return cached
|
||||
}
|
||||
}
|
||||
return linkAction.BuiltTarget()
|
||||
}
|
||||
|
||||
func buildAndRunBuiltinTool(ctx context.Context, toolName, tool string, args []string) {
|
||||
// Override GOOS and GOARCH for the build to build the tool using
|
||||
// the same GOOS and GOARCH as this go command.
|
||||
@@ -288,7 +311,7 @@ func buildAndRunBuiltinTool(ctx context.Context, toolName, tool string, args []s
|
||||
modload.RootMode = modload.NoRoot
|
||||
|
||||
runFunc := func(b *work.Builder, ctx context.Context, a *work.Action) error {
|
||||
cmdline := str.StringList(a.Deps[0].BuiltTarget(), a.Args)
|
||||
cmdline := str.StringList(builtTool(a), a.Args)
|
||||
return runBuiltTool(toolName, nil, cmdline)
|
||||
}
|
||||
|
||||
@@ -300,7 +323,7 @@ func buildAndRunModtool(ctx context.Context, toolName, tool string, args []strin
|
||||
// Use the ExecCmd to run the binary, as go run does. ExecCmd allows users
|
||||
// to provide a runner to run the binary, for example a simulator for binaries
|
||||
// that are cross-compiled to a different platform.
|
||||
cmdline := str.StringList(work.FindExecCmd(), a.Deps[0].BuiltTarget(), a.Args)
|
||||
cmdline := str.StringList(work.FindExecCmd(), builtTool(a), a.Args)
|
||||
// Use same environment go run uses to start the executable:
|
||||
// the original environment with cfg.GOROOTbin added to the path.
|
||||
env := slices.Clip(cfg.OrigEnv)
|
||||
|
||||
@@ -670,7 +670,10 @@ func maybeSwitchForGoInstallVersion(minVers string) {
|
||||
if !strings.Contains(pkgArg, "@") || build.IsLocalImport(pkgArg) || filepath.IsAbs(pkgArg) {
|
||||
return
|
||||
}
|
||||
path, version, _ := strings.Cut(pkgArg, "@")
|
||||
path, version, _, err := modload.ParsePathVersion(pkgArg)
|
||||
if err != nil {
|
||||
base.Fatalf("go: %v", err)
|
||||
}
|
||||
if path == "" || version == "" || gover.IsToolchain(path) {
|
||||
return
|
||||
}
|
||||
@@ -705,7 +708,7 @@ func maybeSwitchForGoInstallVersion(minVers string) {
|
||||
allowed = nil
|
||||
}
|
||||
noneSelected := func(path string) (version string) { return "none" }
|
||||
_, err := modload.QueryPackages(ctx, path, version, noneSelected, allowed)
|
||||
_, err = modload.QueryPackages(ctx, path, version, noneSelected, allowed)
|
||||
if errors.Is(err, gover.ErrTooNew) {
|
||||
// Run early switch, same one go install or go run would eventually do,
|
||||
// if it understood all the command-line flags.
|
||||
|
||||
@@ -17,7 +17,6 @@ import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -41,20 +40,10 @@ type Cmd struct {
|
||||
Env []string // any environment values to set/override
|
||||
RootNames []rootName // filename and mode indicating the root of a checkout directory
|
||||
|
||||
CreateCmd []string // commands to download a fresh copy of a repository
|
||||
DownloadCmd []string // commands to download updates into an existing repository
|
||||
|
||||
TagCmd []tagCmd // commands to list tags
|
||||
TagLookupCmd []tagCmd // commands to lookup tags before running tagSyncCmd
|
||||
TagSyncCmd []string // commands to sync to specific tag
|
||||
TagSyncDefault []string // commands to sync to default tag
|
||||
|
||||
Scheme []string
|
||||
PingCmd string
|
||||
|
||||
RemoteRepo func(v *Cmd, rootDir string) (remoteRepo string, err error)
|
||||
ResolveRepo func(v *Cmd, rootDir, remoteRepo string) (realRepo string, err error)
|
||||
Status func(v *Cmd, rootDir string) (Status, error)
|
||||
Status func(v *Cmd, rootDir string) (Status, error)
|
||||
}
|
||||
|
||||
// Status is the current state of a local repository.
|
||||
@@ -157,40 +146,16 @@ var vcsHg = &Cmd{
|
||||
Name: "Mercurial",
|
||||
Cmd: "hg",
|
||||
|
||||
// HGPLAIN=1 turns off additional output that a user may have enabled via
|
||||
// config options or certain extensions.
|
||||
Env: []string{"HGPLAIN=1"},
|
||||
// HGPLAIN=+strictflags turns off additional output that a user may have
|
||||
// enabled via config options or certain extensions.
|
||||
Env: []string{"HGPLAIN=+strictflags"},
|
||||
RootNames: []rootName{
|
||||
{filename: ".hg", isDir: true},
|
||||
},
|
||||
|
||||
CreateCmd: []string{"clone -U -- {repo} {dir}"},
|
||||
DownloadCmd: []string{"pull"},
|
||||
|
||||
// We allow both tag and branch names as 'tags'
|
||||
// for selecting a version. This lets people have
|
||||
// a go.release.r60 branch and a go1 branch
|
||||
// and make changes in both, without constantly
|
||||
// editing .hgtags.
|
||||
TagCmd: []tagCmd{
|
||||
{"tags", `^(\S+)`},
|
||||
{"branches", `^(\S+)`},
|
||||
},
|
||||
TagSyncCmd: []string{"update -r {tag}"},
|
||||
TagSyncDefault: []string{"update default"},
|
||||
|
||||
Scheme: []string{"https", "http", "ssh"},
|
||||
PingCmd: "identify -- {scheme}://{repo}",
|
||||
RemoteRepo: hgRemoteRepo,
|
||||
Status: hgStatus,
|
||||
}
|
||||
|
||||
func hgRemoteRepo(vcsHg *Cmd, rootDir string) (remoteRepo string, err error) {
|
||||
out, err := vcsHg.runOutput(rootDir, "paths default")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(string(out)), nil
|
||||
Scheme: []string{"https", "http", "ssh"},
|
||||
PingCmd: "identify -- {scheme}://{repo}",
|
||||
Status: hgStatus,
|
||||
}
|
||||
|
||||
func hgStatus(vcsHg *Cmd, rootDir string) (Status, error) {
|
||||
@@ -253,25 +218,6 @@ var vcsGit = &Cmd{
|
||||
{filename: ".git", isDir: true},
|
||||
},
|
||||
|
||||
CreateCmd: []string{"clone -- {repo} {dir}", "-go-internal-cd {dir} submodule update --init --recursive"},
|
||||
DownloadCmd: []string{"pull --ff-only", "submodule update --init --recursive"},
|
||||
|
||||
TagCmd: []tagCmd{
|
||||
// tags/xxx matches a git tag named xxx
|
||||
// origin/xxx matches a git branch named xxx on the default remote repository
|
||||
{"show-ref", `(?:tags|origin)/(\S+)$`},
|
||||
},
|
||||
TagLookupCmd: []tagCmd{
|
||||
{"show-ref tags/{tag} origin/{tag}", `((?:tags|origin)/\S+)$`},
|
||||
},
|
||||
TagSyncCmd: []string{"checkout {tag}", "submodule update --init --recursive"},
|
||||
// both createCmd and downloadCmd update the working dir.
|
||||
// No need to do more here. We used to 'checkout master'
|
||||
// but that doesn't work if the default branch is not named master.
|
||||
// DO NOT add 'checkout master' here.
|
||||
// See golang.org/issue/9032.
|
||||
TagSyncDefault: []string{"submodule update --init --recursive"},
|
||||
|
||||
Scheme: []string{"git", "https", "http", "git+ssh", "ssh"},
|
||||
|
||||
// Leave out the '--' separator in the ls-remote command: git 2.7.4 does not
|
||||
@@ -280,54 +226,7 @@ var vcsGit = &Cmd{
|
||||
// See golang.org/issue/33836.
|
||||
PingCmd: "ls-remote {scheme}://{repo}",
|
||||
|
||||
RemoteRepo: gitRemoteRepo,
|
||||
Status: gitStatus,
|
||||
}
|
||||
|
||||
// scpSyntaxRe matches the SCP-like addresses used by Git to access
|
||||
// repositories by SSH.
|
||||
var scpSyntaxRe = lazyregexp.New(`^(\w+)@([\w.-]+):(.*)$`)
|
||||
|
||||
func gitRemoteRepo(vcsGit *Cmd, rootDir string) (remoteRepo string, err error) {
|
||||
const cmd = "config remote.origin.url"
|
||||
outb, err := vcsGit.run1(rootDir, cmd, nil, false)
|
||||
if err != nil {
|
||||
// if it doesn't output any message, it means the config argument is correct,
|
||||
// but the config value itself doesn't exist
|
||||
if outb != nil && len(outb) == 0 {
|
||||
return "", errors.New("remote origin not found")
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
out := strings.TrimSpace(string(outb))
|
||||
|
||||
var repoURL *urlpkg.URL
|
||||
if m := scpSyntaxRe.FindStringSubmatch(out); m != nil {
|
||||
// Match SCP-like syntax and convert it to a URL.
|
||||
// Eg, "git@github.com:user/repo" becomes
|
||||
// "ssh://git@github.com/user/repo".
|
||||
repoURL = &urlpkg.URL{
|
||||
Scheme: "ssh",
|
||||
User: urlpkg.User(m[1]),
|
||||
Host: m[2],
|
||||
Path: m[3],
|
||||
}
|
||||
} else {
|
||||
repoURL, err = urlpkg.Parse(out)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
// Iterate over insecure schemes too, because this function simply
|
||||
// reports the state of the repo. If we can't see insecure schemes then
|
||||
// we can't report the actual repo URL.
|
||||
for _, s := range vcsGit.Scheme {
|
||||
if repoURL.Scheme == s {
|
||||
return repoURL.String(), nil
|
||||
}
|
||||
}
|
||||
return "", errors.New("unable to parse output of git " + cmd)
|
||||
Status: gitStatus,
|
||||
}
|
||||
|
||||
func gitStatus(vcsGit *Cmd, rootDir string) (Status, error) {
|
||||
@@ -367,62 +266,9 @@ var vcsBzr = &Cmd{
|
||||
{filename: ".bzr", isDir: true},
|
||||
},
|
||||
|
||||
CreateCmd: []string{"branch -- {repo} {dir}"},
|
||||
|
||||
// Without --overwrite bzr will not pull tags that changed.
|
||||
// Replace by --overwrite-tags after http://pad.lv/681792 goes in.
|
||||
DownloadCmd: []string{"pull --overwrite"},
|
||||
|
||||
TagCmd: []tagCmd{{"tags", `^(\S+)`}},
|
||||
TagSyncCmd: []string{"update -r {tag}"},
|
||||
TagSyncDefault: []string{"update -r revno:-1"},
|
||||
|
||||
Scheme: []string{"https", "http", "bzr", "bzr+ssh"},
|
||||
PingCmd: "info -- {scheme}://{repo}",
|
||||
RemoteRepo: bzrRemoteRepo,
|
||||
ResolveRepo: bzrResolveRepo,
|
||||
Status: bzrStatus,
|
||||
}
|
||||
|
||||
func bzrRemoteRepo(vcsBzr *Cmd, rootDir string) (remoteRepo string, err error) {
|
||||
outb, err := vcsBzr.runOutput(rootDir, "config parent_location")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(string(outb)), nil
|
||||
}
|
||||
|
||||
func bzrResolveRepo(vcsBzr *Cmd, rootDir, remoteRepo string) (realRepo string, err error) {
|
||||
outb, err := vcsBzr.runOutput(rootDir, "info "+remoteRepo)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
out := string(outb)
|
||||
|
||||
// Expect:
|
||||
// ...
|
||||
// (branch root|repository branch): <URL>
|
||||
// ...
|
||||
|
||||
found := false
|
||||
for _, prefix := range []string{"\n branch root: ", "\n repository branch: "} {
|
||||
i := strings.Index(out, prefix)
|
||||
if i >= 0 {
|
||||
out = out[i+len(prefix):]
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return "", fmt.Errorf("unable to parse output of bzr info")
|
||||
}
|
||||
|
||||
i := strings.Index(out, "\n")
|
||||
if i < 0 {
|
||||
return "", fmt.Errorf("unable to parse output of bzr info")
|
||||
}
|
||||
out = out[:i]
|
||||
return strings.TrimSpace(out), nil
|
||||
Scheme: []string{"https", "http", "bzr", "bzr+ssh"},
|
||||
PingCmd: "info -- {scheme}://{repo}",
|
||||
Status: bzrStatus,
|
||||
}
|
||||
|
||||
func bzrStatus(vcsBzr *Cmd, rootDir string) (Status, error) {
|
||||
@@ -490,46 +336,12 @@ var vcsSvn = &Cmd{
|
||||
{filename: ".svn", isDir: true},
|
||||
},
|
||||
|
||||
CreateCmd: []string{"checkout -- {repo} {dir}"},
|
||||
DownloadCmd: []string{"update"},
|
||||
|
||||
// There is no tag command in subversion.
|
||||
// The branch information is all in the path names.
|
||||
|
||||
Scheme: []string{"https", "http", "svn", "svn+ssh"},
|
||||
PingCmd: "info -- {scheme}://{repo}",
|
||||
RemoteRepo: svnRemoteRepo,
|
||||
Status: svnStatus,
|
||||
}
|
||||
|
||||
func svnRemoteRepo(vcsSvn *Cmd, rootDir string) (remoteRepo string, err error) {
|
||||
outb, err := vcsSvn.runOutput(rootDir, "info")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
out := string(outb)
|
||||
|
||||
// Expect:
|
||||
//
|
||||
// ...
|
||||
// URL: <URL>
|
||||
// ...
|
||||
//
|
||||
// Note that we're not using the Repository Root line,
|
||||
// because svn allows checking out subtrees.
|
||||
// The URL will be the URL of the subtree (what we used with 'svn co')
|
||||
// while the Repository Root may be a much higher parent.
|
||||
i := strings.Index(out, "\nURL: ")
|
||||
if i < 0 {
|
||||
return "", fmt.Errorf("unable to parse output of svn info")
|
||||
}
|
||||
out = out[i+len("\nURL: "):]
|
||||
i = strings.Index(out, "\n")
|
||||
if i < 0 {
|
||||
return "", fmt.Errorf("unable to parse output of svn info")
|
||||
}
|
||||
out = out[:i]
|
||||
return strings.TrimSpace(out), nil
|
||||
Scheme: []string{"https", "http", "svn", "svn+ssh"},
|
||||
PingCmd: "info -- {scheme}://{repo}",
|
||||
Status: svnStatus,
|
||||
}
|
||||
|
||||
func svnStatus(vcsSvn *Cmd, rootDir string) (Status, error) {
|
||||
@@ -574,24 +386,8 @@ var vcsFossil = &Cmd{
|
||||
{filename: "_FOSSIL_", isDir: false},
|
||||
},
|
||||
|
||||
CreateCmd: []string{"-go-internal-mkdir {dir} clone -- {repo} " + filepath.Join("{dir}", fossilRepoName), "-go-internal-cd {dir} open .fossil"},
|
||||
DownloadCmd: []string{"up"},
|
||||
|
||||
TagCmd: []tagCmd{{"tag ls", `(.*)`}},
|
||||
TagSyncCmd: []string{"up tag:{tag}"},
|
||||
TagSyncDefault: []string{"up trunk"},
|
||||
|
||||
Scheme: []string{"https", "http"},
|
||||
RemoteRepo: fossilRemoteRepo,
|
||||
Status: fossilStatus,
|
||||
}
|
||||
|
||||
func fossilRemoteRepo(vcsFossil *Cmd, rootDir string) (remoteRepo string, err error) {
|
||||
out, err := vcsFossil.runOutput(rootDir, "remote-url")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(string(out)), nil
|
||||
Scheme: []string{"https", "http"},
|
||||
Status: fossilStatus,
|
||||
}
|
||||
|
||||
var errFossilInfo = errors.New("unable to parse output of fossil info")
|
||||
@@ -692,7 +488,7 @@ func (v *Cmd) run1(dir string, cmdline string, keyval []string, verbose bool) ([
|
||||
args[i] = expand(m, arg)
|
||||
}
|
||||
|
||||
if len(args) >= 2 && args[0] == "-go-internal-mkdir" {
|
||||
if len(args) >= 2 && args[0] == "--go-internal-mkdir" {
|
||||
var err error
|
||||
if filepath.IsAbs(args[1]) {
|
||||
err = os.Mkdir(args[1], fs.ModePerm)
|
||||
@@ -705,7 +501,7 @@ func (v *Cmd) run1(dir string, cmdline string, keyval []string, verbose bool) ([
|
||||
args = args[2:]
|
||||
}
|
||||
|
||||
if len(args) >= 2 && args[0] == "-go-internal-cd" {
|
||||
if len(args) >= 2 && args[0] == "--go-internal-cd" {
|
||||
if filepath.IsAbs(args[1]) {
|
||||
dir = args[1]
|
||||
} else {
|
||||
@@ -766,99 +562,6 @@ func (v *Cmd) Ping(scheme, repo string) error {
|
||||
return v.runVerboseOnly(dir, v.PingCmd, "scheme", scheme, "repo", repo)
|
||||
}
|
||||
|
||||
// Create creates a new copy of repo in dir.
|
||||
// The parent of dir must exist; dir must not.
|
||||
func (v *Cmd) Create(dir, repo string) error {
|
||||
release, err := base.AcquireNet()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer release()
|
||||
|
||||
for _, cmd := range v.CreateCmd {
|
||||
if err := v.run(filepath.Dir(dir), cmd, "dir", dir, "repo", repo); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Download downloads any new changes for the repo in dir.
|
||||
func (v *Cmd) Download(dir string) error {
|
||||
release, err := base.AcquireNet()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer release()
|
||||
|
||||
for _, cmd := range v.DownloadCmd {
|
||||
if err := v.run(dir, cmd); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Tags returns the list of available tags for the repo in dir.
|
||||
func (v *Cmd) Tags(dir string) ([]string, error) {
|
||||
var tags []string
|
||||
for _, tc := range v.TagCmd {
|
||||
out, err := v.runOutput(dir, tc.cmd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
re := regexp.MustCompile(`(?m-s)` + tc.pattern)
|
||||
for _, m := range re.FindAllStringSubmatch(string(out), -1) {
|
||||
tags = append(tags, m[1])
|
||||
}
|
||||
}
|
||||
return tags, nil
|
||||
}
|
||||
|
||||
// TagSync syncs the repo in dir to the named tag,
|
||||
// which either is a tag returned by tags or is v.tagDefault.
|
||||
func (v *Cmd) TagSync(dir, tag string) error {
|
||||
if v.TagSyncCmd == nil {
|
||||
return nil
|
||||
}
|
||||
if tag != "" {
|
||||
for _, tc := range v.TagLookupCmd {
|
||||
out, err := v.runOutput(dir, tc.cmd, "tag", tag)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
re := regexp.MustCompile(`(?m-s)` + tc.pattern)
|
||||
m := re.FindStringSubmatch(string(out))
|
||||
if len(m) > 1 {
|
||||
tag = m[1]
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
release, err := base.AcquireNet()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer release()
|
||||
|
||||
if tag == "" && v.TagSyncDefault != nil {
|
||||
for _, cmd := range v.TagSyncDefault {
|
||||
if err := v.run(dir, cmd); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, cmd := range v.TagSyncCmd {
|
||||
if err := v.run(dir, cmd, "tag", tag); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// A vcsPath describes how to convert an import path into a
|
||||
// version control system and repository name.
|
||||
type vcsPath struct {
|
||||
@@ -1385,6 +1088,10 @@ func repoRootForImportDynamic(importPath string, mod ModuleMode, security web.Se
|
||||
}
|
||||
}
|
||||
|
||||
if err := validateRepoSubDir(mmi.SubDir); err != nil {
|
||||
return nil, fmt.Errorf("%s: invalid subdirectory %q: %v", resp.URL, mmi.SubDir, err)
|
||||
}
|
||||
|
||||
if err := validateRepoRoot(mmi.RepoRoot); err != nil {
|
||||
return nil, fmt.Errorf("%s: invalid repo root %q: %v", resp.URL, mmi.RepoRoot, err)
|
||||
}
|
||||
@@ -1416,6 +1123,22 @@ func repoRootForImportDynamic(importPath string, mod ModuleMode, security web.Se
|
||||
return rr, nil
|
||||
}
|
||||
|
||||
// validateRepoSubDir returns an error if subdir is not a valid subdirectory path.
|
||||
// We consider a subdirectory path to be valid as long as it doesn't have a leading
|
||||
// slash (/) or hyphen (-).
|
||||
func validateRepoSubDir(subdir string) error {
|
||||
if subdir == "" {
|
||||
return nil
|
||||
}
|
||||
if subdir[0] == '/' {
|
||||
return errors.New("leading slash")
|
||||
}
|
||||
if subdir[0] == '-' {
|
||||
return errors.New("leading hyphen")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateRepoRoot returns an error if repoRoot does not seem to be
|
||||
// a valid URL with scheme.
|
||||
func validateRepoRoot(repoRoot string) error {
|
||||
|
||||
@@ -507,6 +507,42 @@ func TestValidateRepoRoot(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateRepoSubDir(t *testing.T) {
|
||||
tests := []struct {
|
||||
subdir string
|
||||
ok bool
|
||||
}{
|
||||
{
|
||||
subdir: "",
|
||||
ok: true,
|
||||
},
|
||||
{
|
||||
subdir: "sub/dir",
|
||||
ok: true,
|
||||
},
|
||||
{
|
||||
subdir: "/leading/slash",
|
||||
ok: false,
|
||||
},
|
||||
{
|
||||
subdir: "-leading/hyphen",
|
||||
ok: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
err := validateRepoSubDir(test.subdir)
|
||||
ok := err == nil
|
||||
if ok != test.ok {
|
||||
want := "error"
|
||||
if test.ok {
|
||||
want = "nil"
|
||||
}
|
||||
t.Errorf("validateRepoSubDir(%q) = %q, want %s", test.subdir, err, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var govcsTests = []struct {
|
||||
govcs string
|
||||
path string
|
||||
|
||||
@@ -97,11 +97,12 @@ type Action struct {
|
||||
CacheExecutable bool // Whether to cache executables produced by link steps
|
||||
|
||||
// Generated files, directories.
|
||||
Objdir string // directory for intermediate objects
|
||||
Target string // goal of the action: the created package or executable
|
||||
built string // the actual created package or executable
|
||||
actionID cache.ActionID // cache ID of action input
|
||||
buildID string // build ID of action output
|
||||
Objdir string // directory for intermediate objects
|
||||
Target string // goal of the action: the created package or executable
|
||||
built string // the actual created package or executable
|
||||
cachedExecutable string // the cached executable, if CacheExecutable was set
|
||||
actionID cache.ActionID // cache ID of action input
|
||||
buildID string // build ID of action output
|
||||
|
||||
VetxOnly bool // Mode=="vet": only being called to supply info about dependencies
|
||||
needVet bool // Mode=="build": need to fill in vet config
|
||||
@@ -133,6 +134,10 @@ func (a *Action) BuildID() string { return a.buildID }
|
||||
// from Target when the result was cached.
|
||||
func (a *Action) BuiltTarget() string { return a.built }
|
||||
|
||||
// CachedExecutable returns the cached executable, if CacheExecutable
|
||||
// was set and the executable could be cached, and "" otherwise.
|
||||
func (a *Action) CachedExecutable() string { return a.cachedExecutable }
|
||||
|
||||
// An actionQueue is a priority queue of actions.
|
||||
type actionQueue []*Action
|
||||
|
||||
|
||||
@@ -745,8 +745,9 @@ func (b *Builder) updateBuildID(a *Action, target string) error {
|
||||
}
|
||||
outputID, _, err := c.PutExecutable(a.actionID, name+cfg.ExeSuffix, r)
|
||||
r.Close()
|
||||
a.cachedExecutable = c.OutputFile(outputID)
|
||||
if err == nil && cfg.BuildX {
|
||||
sh.ShowCmd("", "%s # internal", joinUnambiguously(str.StringList("cp", target, c.OutputFile(outputID))))
|
||||
sh.ShowCmd("", "%s # internal", joinUnambiguously(str.StringList("cp", target, a.cachedExecutable)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1636,6 +1636,14 @@ func (b *Builder) getPkgConfigFlags(a *Action) (cflags, ldflags []string, err er
|
||||
return nil, nil, fmt.Errorf("invalid pkg-config package name: %s", pkg)
|
||||
}
|
||||
}
|
||||
|
||||
// Running 'pkg-config' can cause execution of
|
||||
// arbitrary code using flags that are not in
|
||||
// the safelist.
|
||||
if err := checkCompilerFlags("CFLAGS", "pkg-config --cflags", pcflags); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
var out []byte
|
||||
out, err = sh.runOut(p.Dir, nil, b.PkgconfigCmd(), "--cflags", pcflags, "--", pkgs)
|
||||
if err != nil {
|
||||
|
||||
@@ -130,6 +130,7 @@ var validCompilerFlags = []*lazyregexp.Regexp{
|
||||
re(`-pedantic(-errors)?`),
|
||||
re(`-pipe`),
|
||||
re(`-pthread`),
|
||||
re(`--static`),
|
||||
re(`-?-std=([^@\-].*)`),
|
||||
re(`-?-stdlib=([^@\-].*)`),
|
||||
re(`--sysroot=([^@\-].*)`),
|
||||
|
||||
@@ -278,7 +278,10 @@ func allowedVersionArg(arg string) bool {
|
||||
// parsePathVersionOptional parses path[@version], using adj to
|
||||
// describe any errors.
|
||||
func parsePathVersionOptional(adj, arg string, allowDirPath bool) (path, version string, err error) {
|
||||
before, after, found := strings.Cut(arg, "@")
|
||||
before, after, found, err := modload.ParsePathVersion(arg)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
if !found {
|
||||
path = arg
|
||||
} else {
|
||||
|
||||
5
src/cmd/go/testdata/script/fipssnap.txt
vendored
5
src/cmd/go/testdata/script/fipssnap.txt
vendored
@@ -1,4 +1,4 @@
|
||||
env snap=v1.0.0
|
||||
env snap=v1.0.0-c2097c7c
|
||||
env alias=inprocess
|
||||
|
||||
env GOFIPS140=$snap
|
||||
@@ -23,8 +23,7 @@ stdout crypto/internal/fips140/$snap/sha256
|
||||
! stdout crypto/internal/fips140/check
|
||||
|
||||
# again with GOFIPS140=$alias
|
||||
# TODO: enable when we add inprocess.txt
|
||||
# env GOFIPS140=$alias
|
||||
env GOFIPS140=$alias
|
||||
|
||||
# default GODEBUG includes fips140=on
|
||||
go list -f '{{.DefaultGODEBUG}}'
|
||||
|
||||
10
src/cmd/go/testdata/script/mod_get_toolchain.txt
vendored
10
src/cmd/go/testdata/script/mod_get_toolchain.txt
vendored
@@ -94,12 +94,14 @@ stderr '^go: added toolchain go1.24rc1$'
|
||||
grep 'go 1.22.9' go.mod # no longer implied
|
||||
grep 'toolchain go1.24rc1' go.mod
|
||||
|
||||
# go get toolchain@latest finds go1.999testmod.
|
||||
# go get toolchain@latest finds go1.23.9.
|
||||
cp go.mod.orig go.mod
|
||||
go get toolchain@latest
|
||||
stderr '^go: added toolchain go1.999testmod$'
|
||||
stderr '^go: added toolchain go1.23.9$'
|
||||
grep 'go 1.21' go.mod
|
||||
grep 'toolchain go1.999testmod' go.mod
|
||||
grep 'toolchain go1.23.9' go.mod
|
||||
|
||||
|
||||
|
||||
# Bug fixes.
|
||||
|
||||
@@ -115,7 +117,7 @@ stderr '^go: upgraded go 1.19 => 1.21.0'
|
||||
|
||||
# go get toolchain@1.24rc1 is OK too.
|
||||
go get toolchain@1.24rc1
|
||||
stderr '^go: downgraded toolchain go1.999testmod => go1.24rc1$'
|
||||
stderr '^go: upgraded toolchain go1.23.9 => go1.24rc1$'
|
||||
|
||||
# go get go@1.21 should work if we are the Go 1.21 language version,
|
||||
# even though there's no toolchain for it.
|
||||
|
||||
27
src/cmd/go/testdata/script/tool_n_issue72824.txt
vendored
Normal file
27
src/cmd/go/testdata/script/tool_n_issue72824.txt
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
[short] skip 'does a build in using an empty cache'
|
||||
|
||||
# Start with a fresh cache because we want to verify the behavior
|
||||
# when the tool hasn't been cached previously.
|
||||
env GOCACHE=$WORK${/}cache
|
||||
|
||||
# Even when the tool hasn't been previously cached but was built and
|
||||
# saved to the cache in the invocation of 'go tool -n' we should return
|
||||
# its cached location.
|
||||
go tool -n foo
|
||||
stdout $GOCACHE
|
||||
|
||||
# And of course we should also return the cached location on subsequent
|
||||
# runs.
|
||||
go tool -n foo
|
||||
stdout $GOCACHE
|
||||
|
||||
-- go.mod --
|
||||
module example.com/foo
|
||||
|
||||
go 1.25
|
||||
|
||||
tool example.com/foo
|
||||
-- main.go --
|
||||
package main
|
||||
|
||||
func main() {}
|
||||
@@ -463,6 +463,8 @@ func (c *cancelCtx) Done() <-chan struct{} {
|
||||
func (c *cancelCtx) Err() error {
|
||||
// An atomic load is ~5x faster than a mutex, which can matter in tight loops.
|
||||
if err := c.err.Load(); err != nil {
|
||||
// Ensure the done channel has been closed before returning a non-nil error.
|
||||
<-c.Done()
|
||||
return err.(error)
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -1177,3 +1177,23 @@ func (c *customContext) Err() error {
|
||||
func (c *customContext) Value(key any) any {
|
||||
return c.parent.Value(key)
|
||||
}
|
||||
|
||||
// Issue #75533.
|
||||
func TestContextErrDoneRace(t *testing.T) {
|
||||
// 4 iterations reliably reproduced #75533.
|
||||
for range 10 {
|
||||
ctx, cancel := WithCancel(Background())
|
||||
donec := ctx.Done()
|
||||
go cancel()
|
||||
for ctx.Err() == nil {
|
||||
if runtime.GOARCH == "wasm" {
|
||||
runtime.Gosched() // need to explicitly yield
|
||||
}
|
||||
}
|
||||
select {
|
||||
case <-donec:
|
||||
default:
|
||||
t.Fatalf("ctx.Err is non-nil, but ctx.Done is not closed")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -20,7 +20,7 @@ type MakeHash func() hash.Hash
|
||||
// TestHash performs a set of tests on hash.Hash implementations, checking the
|
||||
// documented requirements of Write, Sum, Reset, Size, and BlockSize.
|
||||
func TestHash(t *testing.T, mh MakeHash) {
|
||||
if boring.Enabled || fips140.Version() == "v1.0" {
|
||||
if boring.Enabled || fips140.Version() == "v1.0.0" {
|
||||
testhash.TestHashWithoutClone(t, testhash.MakeHash(mh))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -56,9 +56,10 @@ func CAST(name string, f func() error) {
|
||||
}
|
||||
|
||||
// PCT runs the named Pairwise Consistency Test (if operated in FIPS mode) and
|
||||
// returns any errors. If an error is returned, the key must not be used.
|
||||
// aborts the program (stopping the module input/output and entering the "error
|
||||
// state") if the test fails.
|
||||
//
|
||||
// PCTs are mandatory for every key pair that is generated/imported, including
|
||||
// PCTs are mandatory for every generated (but not imported) key pair, including
|
||||
// ephemeral keys (which effectively doubles the cost of key establishment). See
|
||||
// Implementation Guidance 10.3.A Additional Comment 1.
|
||||
//
|
||||
@@ -66,17 +67,23 @@ func CAST(name string, f func() error) {
|
||||
//
|
||||
// If a package p calls PCT during key generation, an invocation of that
|
||||
// function should be added to fipstest.TestConditionals.
|
||||
func PCT(name string, f func() error) error {
|
||||
func PCT(name string, f func() error) {
|
||||
if strings.ContainsAny(name, ",#=:") {
|
||||
panic("fips: invalid self-test name: " + name)
|
||||
}
|
||||
if !Enabled {
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
err := f()
|
||||
if name == failfipscast {
|
||||
err = errors.New("simulated PCT failure")
|
||||
}
|
||||
return err
|
||||
if err != nil {
|
||||
fatal("FIPS 140-3 self-test failed: " + name + ": " + err.Error())
|
||||
panic("unreachable")
|
||||
}
|
||||
if debug {
|
||||
println("FIPS 140-3 PCT passed:", name)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -161,6 +161,27 @@ func GenerateKey[P Point[P]](c *Curve[P], rand io.Reader) (*PrivateKey, error) {
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// A "Pairwise Consistency Test" makes no sense if we just generated the
|
||||
// public key from an ephemeral private key. Moreover, there is no way to
|
||||
// check it aside from redoing the exact same computation again. SP 800-56A
|
||||
// Rev. 3, Section 5.6.2.1.4 acknowledges that, and doesn't require it.
|
||||
// However, ISO 19790:2012, Section 7.10.3.3 has a blanket requirement for a
|
||||
// PCT for all generated keys (AS10.35) and FIPS 140-3 IG 10.3.A, Additional
|
||||
// Comment 1 goes out of its way to say that "the PCT shall be performed
|
||||
// consistent [...], even if the underlying standard does not require a
|
||||
// PCT". So we do it. And make ECDH nearly 50% slower (only) in FIPS mode.
|
||||
fips140.PCT("ECDH PCT", func() error {
|
||||
p1, err := c.newPoint().ScalarBaseMult(privateKey.d)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !bytes.Equal(p1.Bytes(), privateKey.pub.q) {
|
||||
return errors.New("crypto/ecdh: public key does not match private key")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
return privateKey, nil
|
||||
}
|
||||
}
|
||||
@@ -188,28 +209,6 @@ func NewPrivateKey[P Point[P]](c *Curve[P], key []byte) (*PrivateKey, error) {
|
||||
panic("crypto/ecdh: internal error: public key is the identity element")
|
||||
}
|
||||
|
||||
// A "Pairwise Consistency Test" makes no sense if we just generated the
|
||||
// public key from an ephemeral private key. Moreover, there is no way to
|
||||
// check it aside from redoing the exact same computation again. SP 800-56A
|
||||
// Rev. 3, Section 5.6.2.1.4 acknowledges that, and doesn't require it.
|
||||
// However, ISO 19790:2012, Section 7.10.3.3 has a blanket requirement for a
|
||||
// PCT for all generated keys (AS10.35) and FIPS 140-3 IG 10.3.A, Additional
|
||||
// Comment 1 goes out of its way to say that "the PCT shall be performed
|
||||
// consistent [...], even if the underlying standard does not require a
|
||||
// PCT". So we do it. And make ECDH nearly 50% slower (only) in FIPS mode.
|
||||
if err := fips140.PCT("ECDH PCT", func() error {
|
||||
p1, err := c.newPoint().ScalarBaseMult(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !bytes.Equal(p1.Bytes(), publicKey) {
|
||||
return errors.New("crypto/ecdh: public key does not match private key")
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
k := &PrivateKey{d: bytes.Clone(key), pub: PublicKey{curve: c.curve, q: publicKey}}
|
||||
return k, nil
|
||||
}
|
||||
|
||||
@@ -51,8 +51,8 @@ func testHash() []byte {
|
||||
}
|
||||
}
|
||||
|
||||
func fipsPCT[P Point[P]](c *Curve[P], k *PrivateKey) error {
|
||||
return fips140.PCT("ECDSA PCT", func() error {
|
||||
func fipsPCT[P Point[P]](c *Curve[P], k *PrivateKey) {
|
||||
fips140.PCT("ECDSA PCT", func() error {
|
||||
hash := testHash()
|
||||
drbg := newDRBG(sha512.New, k.d, bits2octets(P256(), hash), nil)
|
||||
sig, err := sign(c, k, drbg, hash)
|
||||
|
||||
@@ -167,11 +167,6 @@ func NewPrivateKey[P Point[P]](c *Curve[P], D, Q []byte) (*PrivateKey, error) {
|
||||
return nil, err
|
||||
}
|
||||
priv := &PrivateKey{pub: *pub, d: d.Bytes(c.N)}
|
||||
if err := fipsPCT(c, priv); err != nil {
|
||||
// This can happen if the application went out of its way to make an
|
||||
// ecdsa.PrivateKey with a mismatching PublicKey.
|
||||
return nil, err
|
||||
}
|
||||
return priv, nil
|
||||
}
|
||||
|
||||
@@ -204,10 +199,7 @@ func GenerateKey[P Point[P]](c *Curve[P], rand io.Reader) (*PrivateKey, error) {
|
||||
},
|
||||
d: k.Bytes(c.N),
|
||||
}
|
||||
if err := fipsPCT(c, priv); err != nil {
|
||||
// This clearly can't happen, but FIPS 140-3 mandates that we check it.
|
||||
panic(err)
|
||||
}
|
||||
fipsPCT(c, priv)
|
||||
return priv, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -122,7 +122,7 @@ func newDRBG[H hash.Hash](hash func() H, entropy, nonce []byte, s personalizatio
|
||||
//
|
||||
// This should only be used for ACVP testing. hmacDRBG is not intended to be
|
||||
// used directly.
|
||||
func TestingOnlyNewDRBG(hash func() hash.Hash, entropy, nonce []byte, s []byte) *hmacDRBG {
|
||||
func TestingOnlyNewDRBG[H hash.Hash](hash func() H, entropy, nonce []byte, s []byte) *hmacDRBG {
|
||||
return newDRBG(hash, entropy, nonce, plainPersonalizationString(s))
|
||||
}
|
||||
|
||||
|
||||
@@ -12,8 +12,8 @@ import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
func fipsPCT(k *PrivateKey) error {
|
||||
return fips140.PCT("Ed25519 sign and verify PCT", func() error {
|
||||
func fipsPCT(k *PrivateKey) {
|
||||
fips140.PCT("Ed25519 sign and verify PCT", func() error {
|
||||
return pairwiseTest(k)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -69,10 +69,7 @@ func generateKey(priv *PrivateKey) (*PrivateKey, error) {
|
||||
fips140.RecordApproved()
|
||||
drbg.Read(priv.seed[:])
|
||||
precomputePrivateKey(priv)
|
||||
if err := fipsPCT(priv); err != nil {
|
||||
// This clearly can't happen, but FIPS 140-3 requires that we check.
|
||||
panic(err)
|
||||
}
|
||||
fipsPCT(priv)
|
||||
return priv, nil
|
||||
}
|
||||
|
||||
@@ -88,10 +85,6 @@ func newPrivateKeyFromSeed(priv *PrivateKey, seed []byte) (*PrivateKey, error) {
|
||||
}
|
||||
copy(priv.seed[:], seed)
|
||||
precomputePrivateKey(priv)
|
||||
if err := fipsPCT(priv); err != nil {
|
||||
// This clearly can't happen, but FIPS 140-3 requires that we check.
|
||||
panic(err)
|
||||
}
|
||||
return priv, nil
|
||||
}
|
||||
|
||||
@@ -137,12 +130,6 @@ func newPrivateKey(priv *PrivateKey, privBytes []byte) (*PrivateKey, error) {
|
||||
|
||||
copy(priv.prefix[:], h[32:])
|
||||
|
||||
if err := fipsPCT(priv); err != nil {
|
||||
// This can happen if the application messed with the private key
|
||||
// encoding, and the public key doesn't match the seed anymore.
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return priv, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -7,7 +7,6 @@ package fips140
|
||||
import (
|
||||
"crypto/internal/fips140deps/godebug"
|
||||
"errors"
|
||||
"hash"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
@@ -63,16 +62,10 @@ func Name() string {
|
||||
return "Go Cryptographic Module"
|
||||
}
|
||||
|
||||
// Version returns the formal version (such as "v1.0") if building against a
|
||||
// Version returns the formal version (such as "v1.0.0") if building against a
|
||||
// frozen module with GOFIPS140. Otherwise, it returns "latest".
|
||||
func Version() string {
|
||||
// This return value is replaced by mkzip.go, it must not be changed or
|
||||
// moved to a different file.
|
||||
return "latest" //mkzip:version
|
||||
}
|
||||
|
||||
// Hash is a legacy compatibility alias for hash.Hash.
|
||||
//
|
||||
// It's only here because [crypto/internal/fips140/ecdsa.TestingOnlyNewDRBG]
|
||||
// takes a "func() fips140.Hash" in v1.0.0, instead of being generic.
|
||||
type Hash = hash.Hash
|
||||
|
||||
@@ -118,10 +118,7 @@ func generateKey1024(dk *DecapsulationKey1024) (*DecapsulationKey1024, error) {
|
||||
var z [32]byte
|
||||
drbg.Read(z[:])
|
||||
kemKeyGen1024(dk, &d, &z)
|
||||
if err := fips140.PCT("ML-KEM PCT", func() error { return kemPCT1024(dk) }); err != nil {
|
||||
// This clearly can't happen, but FIPS 140-3 requires us to check.
|
||||
panic(err)
|
||||
}
|
||||
fips140.PCT("ML-KEM PCT", func() error { return kemPCT1024(dk) })
|
||||
fips140.RecordApproved()
|
||||
return dk, nil
|
||||
}
|
||||
@@ -149,10 +146,6 @@ func newKeyFromSeed1024(dk *DecapsulationKey1024, seed []byte) (*DecapsulationKe
|
||||
d := (*[32]byte)(seed[:32])
|
||||
z := (*[32]byte)(seed[32:])
|
||||
kemKeyGen1024(dk, d, z)
|
||||
if err := fips140.PCT("ML-KEM PCT", func() error { return kemPCT1024(dk) }); err != nil {
|
||||
// This clearly can't happen, but FIPS 140-3 requires us to check.
|
||||
panic(err)
|
||||
}
|
||||
fips140.RecordApproved()
|
||||
return dk, nil
|
||||
}
|
||||
|
||||
@@ -177,10 +177,7 @@ func generateKey(dk *DecapsulationKey768) (*DecapsulationKey768, error) {
|
||||
var z [32]byte
|
||||
drbg.Read(z[:])
|
||||
kemKeyGen(dk, &d, &z)
|
||||
if err := fips140.PCT("ML-KEM PCT", func() error { return kemPCT(dk) }); err != nil {
|
||||
// This clearly can't happen, but FIPS 140-3 requires us to check.
|
||||
panic(err)
|
||||
}
|
||||
fips140.PCT("ML-KEM PCT", func() error { return kemPCT(dk) })
|
||||
fips140.RecordApproved()
|
||||
return dk, nil
|
||||
}
|
||||
@@ -208,10 +205,6 @@ func newKeyFromSeed(dk *DecapsulationKey768, seed []byte) (*DecapsulationKey768,
|
||||
d := (*[32]byte)(seed[:32])
|
||||
z := (*[32]byte)(seed[32:])
|
||||
kemKeyGen(dk, d, z)
|
||||
if err := fips140.PCT("ML-KEM PCT", func() error { return kemPCT(dk) }); err != nil {
|
||||
// This clearly can't happen, but FIPS 140-3 requires us to check.
|
||||
panic(err)
|
||||
}
|
||||
fips140.RecordApproved()
|
||||
return dk, nil
|
||||
}
|
||||
|
||||
@@ -105,7 +105,28 @@ func GenerateKey(rand io.Reader, bits int) (*PrivateKey, error) {
|
||||
// negligible chance of failure we can defer the check to the end of key
|
||||
// generation and return an error if it fails. See [checkPrivateKey].
|
||||
|
||||
return newPrivateKey(N, 65537, d, P, Q)
|
||||
k, err := newPrivateKey(N, 65537, d, P, Q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if k.fipsApproved {
|
||||
fips140.PCT("RSA sign and verify PCT", func() error {
|
||||
hash := []byte{
|
||||
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
|
||||
0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
|
||||
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
|
||||
0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
|
||||
}
|
||||
sig, err := signPKCS1v15(k, "SHA-256", hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return verifyPKCS1v15(k.PublicKey(), "SHA-256", hash, sig)
|
||||
})
|
||||
}
|
||||
|
||||
return k, nil
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -310,26 +310,6 @@ func checkPrivateKey(priv *PrivateKey) error {
|
||||
return errors.New("crypto/rsa: d too small")
|
||||
}
|
||||
|
||||
// If the key is still in scope for FIPS mode, perform a Pairwise
|
||||
// Consistency Test.
|
||||
if priv.fipsApproved {
|
||||
if err := fips140.PCT("RSA sign and verify PCT", func() error {
|
||||
hash := []byte{
|
||||
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
|
||||
0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
|
||||
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
|
||||
0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
|
||||
}
|
||||
sig, err := signPKCS1v15(priv, "SHA-256", hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return verifyPKCS1v15(priv.PublicKey(), "SHA-256", hash, sig)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build (amd64 || arm64 || mips || mipsle || mips64 || mips64le || ppc64 || ppc64le || riscv64) && !purego
|
||||
//go:build (amd64 || arm64 || ppc64 || ppc64le || riscv64) && !purego
|
||||
|
||||
package subtle
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build (!amd64 && !arm64 && !loong64 && !mips && !mipsle && !mips64 && !mips64le && !ppc64 && !ppc64le && !riscv64) || purego
|
||||
//go:build (!amd64 && !arm64 && !loong64 && !ppc64 && !ppc64le && !riscv64) || purego
|
||||
|
||||
package subtle
|
||||
|
||||
|
||||
@@ -1,153 +0,0 @@
|
||||
// Copyright 2025 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build (mips64 || mips64le) && !purego
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// func xorBytes(dst, a, b *byte, n int)
|
||||
TEXT ·xorBytes(SB), NOSPLIT|NOFRAME, $0
|
||||
MOVV dst+0(FP), R1
|
||||
MOVV a+8(FP), R2
|
||||
MOVV b+16(FP), R3
|
||||
MOVV n+24(FP), R4
|
||||
|
||||
xor_64_check:
|
||||
SGTU $64, R4, R5 // R5 = 1 if (64 > R4)
|
||||
BNE R5, xor_32_check
|
||||
xor_64:
|
||||
MOVV (R2), R6
|
||||
MOVV 8(R2), R7
|
||||
MOVV 16(R2), R8
|
||||
MOVV 24(R2), R9
|
||||
MOVV (R3), R10
|
||||
MOVV 8(R3), R11
|
||||
MOVV 16(R3), R12
|
||||
MOVV 24(R3), R13
|
||||
XOR R6, R10
|
||||
XOR R7, R11
|
||||
XOR R8, R12
|
||||
XOR R9, R13
|
||||
MOVV R10, (R1)
|
||||
MOVV R11, 8(R1)
|
||||
MOVV R12, 16(R1)
|
||||
MOVV R13, 24(R1)
|
||||
MOVV 32(R2), R6
|
||||
MOVV 40(R2), R7
|
||||
MOVV 48(R2), R8
|
||||
MOVV 56(R2), R9
|
||||
MOVV 32(R3), R10
|
||||
MOVV 40(R3), R11
|
||||
MOVV 48(R3), R12
|
||||
MOVV 56(R3), R13
|
||||
XOR R6, R10
|
||||
XOR R7, R11
|
||||
XOR R8, R12
|
||||
XOR R9, R13
|
||||
MOVV R10, 32(R1)
|
||||
MOVV R11, 40(R1)
|
||||
MOVV R12, 48(R1)
|
||||
MOVV R13, 56(R1)
|
||||
ADDV $64, R2
|
||||
ADDV $64, R3
|
||||
ADDV $64, R1
|
||||
SUBV $64, R4
|
||||
SGTU $64, R4, R5
|
||||
BEQ R0, R5, xor_64
|
||||
BEQ R0, R4, end
|
||||
|
||||
xor_32_check:
|
||||
SGTU $32, R4, R5
|
||||
BNE R5, xor_16_check
|
||||
xor_32:
|
||||
MOVV (R2), R6
|
||||
MOVV 8(R2), R7
|
||||
MOVV 16(R2), R8
|
||||
MOVV 24(R2), R9
|
||||
MOVV (R3), R10
|
||||
MOVV 8(R3), R11
|
||||
MOVV 16(R3), R12
|
||||
MOVV 24(R3), R13
|
||||
XOR R6, R10
|
||||
XOR R7, R11
|
||||
XOR R8, R12
|
||||
XOR R9, R13
|
||||
MOVV R10, (R1)
|
||||
MOVV R11, 8(R1)
|
||||
MOVV R12, 16(R1)
|
||||
MOVV R13, 24(R1)
|
||||
ADDV $32, R2
|
||||
ADDV $32, R3
|
||||
ADDV $32, R1
|
||||
SUBV $32, R4
|
||||
BEQ R0, R4, end
|
||||
|
||||
xor_16_check:
|
||||
SGTU $16, R4, R5
|
||||
BNE R5, xor_8_check
|
||||
xor_16:
|
||||
MOVV (R2), R6
|
||||
MOVV 8(R2), R7
|
||||
MOVV (R3), R8
|
||||
MOVV 8(R3), R9
|
||||
XOR R6, R8
|
||||
XOR R7, R9
|
||||
MOVV R8, (R1)
|
||||
MOVV R9, 8(R1)
|
||||
ADDV $16, R2
|
||||
ADDV $16, R3
|
||||
ADDV $16, R1
|
||||
SUBV $16, R4
|
||||
BEQ R0, R4, end
|
||||
|
||||
xor_8_check:
|
||||
SGTU $8, R4, R5
|
||||
BNE R5, xor_4_check
|
||||
xor_8:
|
||||
MOVV (R2), R6
|
||||
MOVV (R3), R7
|
||||
XOR R6, R7
|
||||
MOVV R7, (R1)
|
||||
ADDV $8, R1
|
||||
ADDV $8, R2
|
||||
ADDV $8, R3
|
||||
SUBV $8, R4
|
||||
BEQ R0, R4, end
|
||||
|
||||
xor_4_check:
|
||||
SGTU $4, R4, R5
|
||||
BNE R5, xor_2_check
|
||||
xor_4:
|
||||
MOVW (R2), R6
|
||||
MOVW (R3), R7
|
||||
XOR R6, R7
|
||||
MOVW R7, (R1)
|
||||
ADDV $4, R2
|
||||
ADDV $4, R3
|
||||
ADDV $4, R1
|
||||
SUBV $4, R4
|
||||
BEQ R0, R4, end
|
||||
|
||||
xor_2_check:
|
||||
SGTU $2, R4, R5
|
||||
BNE R5, xor_1
|
||||
xor_2:
|
||||
MOVH (R2), R6
|
||||
MOVH (R3), R7
|
||||
XOR R6, R7
|
||||
MOVH R7, (R1)
|
||||
ADDV $2, R2
|
||||
ADDV $2, R3
|
||||
ADDV $2, R1
|
||||
SUBV $2, R4
|
||||
BEQ R0, R4, end
|
||||
|
||||
xor_1:
|
||||
MOVB (R2), R6
|
||||
MOVB (R3), R7
|
||||
XOR R6, R7
|
||||
MOVB R7, (R1)
|
||||
|
||||
end:
|
||||
RET
|
||||
@@ -1,212 +0,0 @@
|
||||
// Copyright 2025 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build (mips || mipsle) && !purego
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// func xorBytes(dst, a, b *byte, n int)
|
||||
TEXT ·xorBytes(SB), NOSPLIT|NOFRAME, $0
|
||||
MOVW dst+0(FP), R1
|
||||
MOVW a+4(FP), R2
|
||||
MOVW b+8(FP), R3
|
||||
MOVW n+12(FP), R4
|
||||
|
||||
SGTU $64, R4, R5 // R5 = 1 if (64 > R4)
|
||||
BNE R5, xor_32_check
|
||||
xor_64:
|
||||
MOVW (R2), R6
|
||||
MOVW 4(R2), R7
|
||||
MOVW 8(R2), R8
|
||||
MOVW 12(R2), R9
|
||||
MOVW (R3), R10
|
||||
MOVW 4(R3), R11
|
||||
MOVW 8(R3), R12
|
||||
MOVW 12(R3), R13
|
||||
XOR R6, R10
|
||||
XOR R7, R11
|
||||
XOR R8, R12
|
||||
XOR R9, R13
|
||||
MOVW R10, (R1)
|
||||
MOVW R11, 4(R1)
|
||||
MOVW R12, 8(R1)
|
||||
MOVW R13, 12(R1)
|
||||
MOVW 16(R2), R6
|
||||
MOVW 20(R2), R7
|
||||
MOVW 24(R2), R8
|
||||
MOVW 28(R2), R9
|
||||
MOVW 16(R3), R10
|
||||
MOVW 20(R3), R11
|
||||
MOVW 24(R3), R12
|
||||
MOVW 28(R3), R13
|
||||
XOR R6, R10
|
||||
XOR R7, R11
|
||||
XOR R8, R12
|
||||
XOR R9, R13
|
||||
MOVW R10, 16(R1)
|
||||
MOVW R11, 20(R1)
|
||||
MOVW R12, 24(R1)
|
||||
MOVW R13, 28(R1)
|
||||
MOVW 32(R2), R6
|
||||
MOVW 36(R2), R7
|
||||
MOVW 40(R2), R8
|
||||
MOVW 44(R2), R9
|
||||
MOVW 32(R3), R10
|
||||
MOVW 36(R3), R11
|
||||
MOVW 40(R3), R12
|
||||
MOVW 44(R3), R13
|
||||
XOR R6, R10
|
||||
XOR R7, R11
|
||||
XOR R8, R12
|
||||
XOR R9, R13
|
||||
MOVW R10, 32(R1)
|
||||
MOVW R11, 36(R1)
|
||||
MOVW R12, 40(R1)
|
||||
MOVW R13, 44(R1)
|
||||
MOVW 48(R2), R6
|
||||
MOVW 52(R2), R7
|
||||
MOVW 56(R2), R8
|
||||
MOVW 60(R2), R9
|
||||
MOVW 48(R3), R10
|
||||
MOVW 52(R3), R11
|
||||
MOVW 56(R3), R12
|
||||
MOVW 60(R3), R13
|
||||
XOR R6, R10
|
||||
XOR R7, R11
|
||||
XOR R8, R12
|
||||
XOR R9, R13
|
||||
MOVW R10, 48(R1)
|
||||
MOVW R11, 52(R1)
|
||||
MOVW R12, 56(R1)
|
||||
MOVW R13, 60(R1)
|
||||
ADD $64, R2
|
||||
ADD $64, R3
|
||||
ADD $64, R1
|
||||
SUB $64, R4
|
||||
SGTU $64, R4, R5
|
||||
BEQ R0, R5, xor_64
|
||||
BEQ R0, R4, end
|
||||
|
||||
xor_32_check:
|
||||
SGTU $32, R4, R5
|
||||
BNE R5, xor_16_check
|
||||
xor_32:
|
||||
MOVW (R2), R6
|
||||
MOVW 4(R2), R7
|
||||
MOVW 8(R2), R8
|
||||
MOVW 12(R2), R9
|
||||
MOVW (R3), R10
|
||||
MOVW 4(R3), R11
|
||||
MOVW 8(R3), R12
|
||||
MOVW 12(R3), R13
|
||||
XOR R6, R10
|
||||
XOR R7, R11
|
||||
XOR R8, R12
|
||||
XOR R9, R13
|
||||
MOVW R10, (R1)
|
||||
MOVW R11, 4(R1)
|
||||
MOVW R12, 8(R1)
|
||||
MOVW R13, 12(R1)
|
||||
MOVW 16(R2), R6
|
||||
MOVW 20(R2), R7
|
||||
MOVW 24(R2), R8
|
||||
MOVW 28(R2), R9
|
||||
MOVW 16(R3), R10
|
||||
MOVW 20(R3), R11
|
||||
MOVW 24(R3), R12
|
||||
MOVW 28(R3), R13
|
||||
XOR R6, R10
|
||||
XOR R7, R11
|
||||
XOR R8, R12
|
||||
XOR R9, R13
|
||||
MOVW R10, 16(R1)
|
||||
MOVW R11, 20(R1)
|
||||
MOVW R12, 24(R1)
|
||||
MOVW R13, 28(R1)
|
||||
ADD $32, R2
|
||||
ADD $32, R3
|
||||
ADD $32, R1
|
||||
SUB $32, R4
|
||||
BEQ R0, R4, end
|
||||
|
||||
xor_16_check:
|
||||
SGTU $16, R4, R5
|
||||
BNE R5, xor_8_check
|
||||
xor_16:
|
||||
MOVW (R2), R6
|
||||
MOVW 4(R2), R7
|
||||
MOVW 8(R2), R8
|
||||
MOVW 12(R2), R9
|
||||
MOVW (R3), R10
|
||||
MOVW 4(R3), R11
|
||||
MOVW 8(R3), R12
|
||||
MOVW 12(R3), R13
|
||||
XOR R6, R10
|
||||
XOR R7, R11
|
||||
XOR R8, R12
|
||||
XOR R9, R13
|
||||
MOVW R10, (R1)
|
||||
MOVW R11, 4(R1)
|
||||
MOVW R12, 8(R1)
|
||||
MOVW R13, 12(R1)
|
||||
ADD $16, R2
|
||||
ADD $16, R3
|
||||
ADD $16, R1
|
||||
SUB $16, R4
|
||||
BEQ R0, R4, end
|
||||
|
||||
xor_8_check:
|
||||
SGTU $8, R4, R5
|
||||
BNE R5, xor_4_check
|
||||
xor_8:
|
||||
MOVW (R2), R6
|
||||
MOVW 4(R2), R7
|
||||
MOVW (R3), R8
|
||||
MOVW 4(R3), R9
|
||||
XOR R6, R8
|
||||
XOR R7, R9
|
||||
MOVW R8, (R1)
|
||||
MOVW R9, 4(R1)
|
||||
ADD $8, R1
|
||||
ADD $8, R2
|
||||
ADD $8, R3
|
||||
SUB $8, R4
|
||||
BEQ R0, R4, end
|
||||
|
||||
xor_4_check:
|
||||
SGTU $4, R4, R5
|
||||
BNE R5, xor_2_check
|
||||
xor_4:
|
||||
MOVW (R2), R6
|
||||
MOVW (R3), R7
|
||||
XOR R6, R7
|
||||
MOVW R7, (R1)
|
||||
ADD $4, R2
|
||||
ADD $4, R3
|
||||
ADD $4, R1
|
||||
SUB $4, R4
|
||||
BEQ R0, R4, end
|
||||
|
||||
xor_2_check:
|
||||
SGTU $2, R4, R5
|
||||
BNE R5, xor_1
|
||||
xor_2:
|
||||
MOVH (R2), R6
|
||||
MOVH (R3), R7
|
||||
XOR R6, R7
|
||||
MOVH R7, (R1)
|
||||
ADD $2, R2
|
||||
ADD $2, R3
|
||||
ADD $2, R1
|
||||
SUB $2, R4
|
||||
BEQ R0, R4, end
|
||||
|
||||
xor_1:
|
||||
MOVB (R2), R6
|
||||
MOVB (R3), R7
|
||||
XOR R6, R7
|
||||
MOVB R7, (R1)
|
||||
|
||||
end:
|
||||
RET
|
||||
@@ -1624,7 +1624,7 @@ func cmdHmacDrbgAft(h func() hash.Hash) command {
|
||||
// * Uninstantiate
|
||||
// See Table 7 in draft-vassilev-acvp-drbg
|
||||
out := make([]byte, outLen)
|
||||
drbg := ecdsa.TestingOnlyNewDRBG(func() fips140.Hash { return h() }, entropy, nonce, personalization)
|
||||
drbg := ecdsa.TestingOnlyNewDRBG(h, entropy, nonce, personalization)
|
||||
drbg.Generate(out)
|
||||
drbg.Generate(out)
|
||||
|
||||
|
||||
@@ -5,9 +5,9 @@
|
||||
package fipstest
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/internal/fips140"
|
||||
"crypto/rand"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"internal/testenv"
|
||||
"io/fs"
|
||||
@@ -50,8 +50,6 @@ var allCASTs = []string{
|
||||
"KAS-ECC-SSC P-256",
|
||||
"ML-KEM PCT",
|
||||
"ML-KEM PCT",
|
||||
"ML-KEM PCT",
|
||||
"ML-KEM PCT",
|
||||
"ML-KEM-768",
|
||||
"PBKDF2",
|
||||
"RSA sign and verify PCT",
|
||||
@@ -107,60 +105,65 @@ func TestAllCASTs(t *testing.T) {
|
||||
// TestConditionals causes the conditional CASTs and PCTs to be invoked.
|
||||
func TestConditionals(t *testing.T) {
|
||||
mlkem.GenerateKey768()
|
||||
k, err := ecdh.GenerateKey(ecdh.P256(), rand.Reader)
|
||||
kDH, err := ecdh.GenerateKey(ecdh.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
t.Error(err)
|
||||
} else {
|
||||
ecdh.ECDH(ecdh.P256(), kDH, kDH.PublicKey())
|
||||
}
|
||||
ecdh.ECDH(ecdh.P256(), k, k.PublicKey())
|
||||
kDSA, err := ecdsa.GenerateKey(ecdsa.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
t.Error(err)
|
||||
} else {
|
||||
ecdsa.SignDeterministic(ecdsa.P256(), sha256.New, kDSA, make([]byte, 32))
|
||||
}
|
||||
ecdsa.SignDeterministic(ecdsa.P256(), sha256.New, kDSA, make([]byte, 32))
|
||||
k25519, err := ed25519.GenerateKey()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
t.Error(err)
|
||||
} else {
|
||||
ed25519.Sign(k25519, make([]byte, 32))
|
||||
}
|
||||
ed25519.Sign(k25519, make([]byte, 32))
|
||||
rsa.VerifyPKCS1v15(&rsa.PublicKey{}, "", nil, nil)
|
||||
// Parse an RSA key to hit the PCT rather than generating one (which is slow).
|
||||
block, _ := pem.Decode([]byte(strings.ReplaceAll(
|
||||
`-----BEGIN RSA TESTING KEY-----
|
||||
MIIEowIBAAKCAQEAsPnoGUOnrpiSqt4XynxA+HRP7S+BSObI6qJ7fQAVSPtRkqso
|
||||
tWxQYLEYzNEx5ZSHTGypibVsJylvCfuToDTfMul8b/CZjP2Ob0LdpYrNH6l5hvFE
|
||||
89FU1nZQF15oVLOpUgA7wGiHuEVawrGfey92UE68mOyUVXGweJIVDdxqdMoPvNNU
|
||||
l86BU02vlBiESxOuox+dWmuVV7vfYZ79Toh/LUK43YvJh+rhv4nKuF7iHjVjBd9s
|
||||
B6iDjj70HFldzOQ9r8SRI+9NirupPTkF5AKNe6kUhKJ1luB7S27ZkvB3tSTT3P59
|
||||
3VVJvnzOjaA1z6Cz+4+eRvcysqhrRgFlwI9TEwIDAQABAoIBAEEYiyDP29vCzx/+
|
||||
dS3LqnI5BjUuJhXUnc6AWX/PCgVAO+8A+gZRgvct7PtZb0sM6P9ZcLrweomlGezI
|
||||
FrL0/6xQaa8bBr/ve/a8155OgcjFo6fZEw3Dz7ra5fbSiPmu4/b/kvrg+Br1l77J
|
||||
aun6uUAs1f5B9wW+vbR7tzbT/mxaUeDiBzKpe15GwcvbJtdIVMa2YErtRjc1/5B2
|
||||
BGVXyvlJv0SIlcIEMsHgnAFOp1ZgQ08aDzvilLq8XVMOahAhP1O2A3X8hKdXPyrx
|
||||
IVWE9bS9ptTo+eF6eNl+d7htpKGEZHUxinoQpWEBTv+iOoHsVunkEJ3vjLP3lyI/
|
||||
fY0NQ1ECgYEA3RBXAjgvIys2gfU3keImF8e/TprLge1I2vbWmV2j6rZCg5r/AS0u
|
||||
pii5CvJ5/T5vfJPNgPBy8B/yRDs+6PJO1GmnlhOkG9JAIPkv0RBZvR0PMBtbp6nT
|
||||
Y3yo1lwamBVBfY6rc0sLTzosZh2aGoLzrHNMQFMGaauORzBFpY5lU50CgYEAzPHl
|
||||
u5DI6Xgep1vr8QvCUuEesCOgJg8Yh1UqVoY/SmQh6MYAv1I9bLGwrb3WW/7kqIoD
|
||||
fj0aQV5buVZI2loMomtU9KY5SFIsPV+JuUpy7/+VE01ZQM5FdY8wiYCQiVZYju9X
|
||||
Wz5LxMNoz+gT7pwlLCsC4N+R8aoBk404aF1gum8CgYAJ7VTq7Zj4TFV7Soa/T1eE
|
||||
k9y8a+kdoYk3BASpCHJ29M5R2KEA7YV9wrBklHTz8VzSTFTbKHEQ5W5csAhoL5Fo
|
||||
qoHzFFi3Qx7MHESQb9qHyolHEMNx6QdsHUn7rlEnaTTyrXh3ifQtD6C0yTmFXUIS
|
||||
CW9wKApOrnyKJ9nI0HcuZQKBgQCMtoV6e9VGX4AEfpuHvAAnMYQFgeBiYTkBKltQ
|
||||
XwozhH63uMMomUmtSG87Sz1TmrXadjAhy8gsG6I0pWaN7QgBuFnzQ/HOkwTm+qKw
|
||||
AsrZt4zeXNwsH7QXHEJCFnCmqw9QzEoZTrNtHJHpNboBuVnYcoueZEJrP8OnUG3r
|
||||
UjmopwKBgAqB2KYYMUqAOvYcBnEfLDmyZv9BTVNHbR2lKkMYqv5LlvDaBxVfilE0
|
||||
2riO4p6BaAdvzXjKeRrGNEKoHNBpOSfYCOM16NjL8hIZB1CaV3WbT5oY+jp7Mzd5
|
||||
7d56RZOE+ERK2uz/7JX9VSsM/LbH9pJibd4e8mikDS9ntciqOH/3
|
||||
-----END RSA TESTING KEY-----`, "TESTING KEY", "PRIVATE KEY")))
|
||||
if _, err := x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
|
||||
t.Fatal(err)
|
||||
kRSA, err := rsa.GenerateKey(rand.Reader, 2048)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else {
|
||||
rsa.SignPKCS1v15(kRSA, crypto.SHA256.String(), make([]byte, 32))
|
||||
}
|
||||
t.Log("completed successfully")
|
||||
}
|
||||
|
||||
func TestCASTPasses(t *testing.T) {
|
||||
moduleStatus(t)
|
||||
testenv.MustHaveExec(t)
|
||||
if err := fips140.Supported(); err != nil {
|
||||
t.Skipf("test requires FIPS 140 mode: %v", err)
|
||||
}
|
||||
|
||||
cmd := testenv.Command(t, testenv.Executable(t), "-test.run=^TestConditionals$", "-test.v")
|
||||
cmd.Env = append(cmd.Env, "GODEBUG=fips140=debug")
|
||||
out, err := cmd.CombinedOutput()
|
||||
t.Logf("%s", out)
|
||||
if err != nil || !strings.Contains(string(out), "completed successfully") {
|
||||
t.Errorf("TestConditionals did not complete successfully")
|
||||
}
|
||||
|
||||
for _, name := range allCASTs {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
if !strings.Contains(string(out), fmt.Sprintf("passed: %s\n", name)) {
|
||||
t.Errorf("CAST/PCT %s success was not logged", name)
|
||||
} else {
|
||||
t.Logf("CAST/PCT succeeded: %s", name)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCASTFailures(t *testing.T) {
|
||||
moduleStatus(t)
|
||||
testenv.MustHaveExec(t)
|
||||
if err := fips140.Supported(); err != nil {
|
||||
t.Skipf("test requires FIPS 140 mode: %v", err)
|
||||
}
|
||||
|
||||
for _, name := range allCASTs {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
@@ -169,7 +172,6 @@ func TestCASTFailures(t *testing.T) {
|
||||
if !testing.Verbose() {
|
||||
t.Parallel()
|
||||
}
|
||||
t.Logf("CAST/PCT succeeded: %s", name)
|
||||
t.Logf("Testing CAST/PCT failure...")
|
||||
cmd := testenv.Command(t, testenv.Executable(t), "-test.run=^TestConditionals$", "-test.v")
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf("GODEBUG=failfipscast=%s,fips140=on", name))
|
||||
@@ -180,6 +182,8 @@ func TestCASTFailures(t *testing.T) {
|
||||
}
|
||||
if strings.Contains(string(out), "completed successfully") {
|
||||
t.Errorf("CAST/PCT %s failure did not stop the program", name)
|
||||
} else if !strings.Contains(string(out), "self-test failed: "+name) {
|
||||
t.Errorf("CAST/PCT %s failure did not log the expected message", name)
|
||||
} else {
|
||||
t.Logf("CAST/PCT %s failed as expected and caused the program to exit", name)
|
||||
}
|
||||
|
||||
@@ -74,11 +74,9 @@ func TestVersion(t *testing.T) {
|
||||
continue
|
||||
}
|
||||
exp := setting.Value
|
||||
if exp == "v1.0.0" {
|
||||
// Unfortunately we enshrined the version of the first module as
|
||||
// v1.0 before deciding to go for full versions.
|
||||
exp = "v1.0"
|
||||
}
|
||||
// Remove the -hash suffix, if any.
|
||||
// The version from fips140.Version omits it.
|
||||
exp, _, _ = strings.Cut(exp, "-")
|
||||
if v := fips140.Version(); v != exp {
|
||||
t.Errorf("Version is %q, expected %q", v, exp)
|
||||
}
|
||||
|
||||
@@ -942,6 +942,10 @@ const maxSessionTicketLifetime = 7 * 24 * time.Hour
|
||||
|
||||
// Clone returns a shallow clone of c or nil if c is nil. It is safe to clone a [Config] that is
|
||||
// being used concurrently by a TLS client or server.
|
||||
//
|
||||
// If Config.SessionTicketKey is unpopulated, and Config.SetSessionTicketKeys has not been
|
||||
// called, the clone will not share the same auto-rotated session ticket keys as the original
|
||||
// Config in order to prevent sessions from being resumed across Configs.
|
||||
func (c *Config) Clone() *Config {
|
||||
if c == nil {
|
||||
return nil
|
||||
@@ -982,7 +986,8 @@ func (c *Config) Clone() *Config {
|
||||
EncryptedClientHelloRejectionVerify: c.EncryptedClientHelloRejectionVerify,
|
||||
EncryptedClientHelloKeys: c.EncryptedClientHelloKeys,
|
||||
sessionTicketKeys: c.sessionTicketKeys,
|
||||
autoSessionTicketKeys: c.autoSessionTicketKeys,
|
||||
// We explicitly do not copy autoSessionTicketKeys, so that Configs do
|
||||
// not share the same auto-rotated keys.
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -226,6 +226,9 @@ func (hc *halfConn) changeCipherSpec() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// setTrafficSecret sets the traffic secret for the given encryption level. setTrafficSecret
|
||||
// should not be called directly, but rather through the Conn setWriteTrafficSecret and
|
||||
// setReadTrafficSecret wrapper methods.
|
||||
func (hc *halfConn) setTrafficSecret(suite *cipherSuiteTLS13, level QUICEncryptionLevel, secret []byte) {
|
||||
hc.trafficSecret = secret
|
||||
hc.level = level
|
||||
@@ -1343,9 +1346,6 @@ func (c *Conn) handleKeyUpdate(keyUpdate *keyUpdateMsg) error {
|
||||
return c.in.setErrorLocked(c.sendAlert(alertInternalError))
|
||||
}
|
||||
|
||||
newSecret := cipherSuite.nextTrafficSecret(c.in.trafficSecret)
|
||||
c.in.setTrafficSecret(cipherSuite, QUICEncryptionLevelInitial, newSecret)
|
||||
|
||||
if keyUpdate.updateRequested {
|
||||
c.out.Lock()
|
||||
defer c.out.Unlock()
|
||||
@@ -1363,7 +1363,12 @@ func (c *Conn) handleKeyUpdate(keyUpdate *keyUpdateMsg) error {
|
||||
}
|
||||
|
||||
newSecret := cipherSuite.nextTrafficSecret(c.out.trafficSecret)
|
||||
c.out.setTrafficSecret(cipherSuite, QUICEncryptionLevelInitial, newSecret)
|
||||
c.setWriteTrafficSecret(cipherSuite, QUICEncryptionLevelInitial, newSecret)
|
||||
}
|
||||
|
||||
newSecret := cipherSuite.nextTrafficSecret(c.in.trafficSecret)
|
||||
if err := c.setReadTrafficSecret(cipherSuite, QUICEncryptionLevelInitial, newSecret); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -1594,7 +1599,9 @@ func (c *Conn) handshakeContext(ctx context.Context) (ret error) {
|
||||
// Provide the 1-RTT read secret now that the handshake is complete.
|
||||
// The QUIC layer MUST NOT decrypt 1-RTT packets prior to completing
|
||||
// the handshake (RFC 9001, Section 5.7).
|
||||
c.quicSetReadSecret(QUICEncryptionLevelApplication, c.cipherSuite, c.in.trafficSecret)
|
||||
if err := c.quicSetReadSecret(QUICEncryptionLevelApplication, c.cipherSuite, c.in.trafficSecret); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
var a alert
|
||||
c.out.Lock()
|
||||
@@ -1690,3 +1697,25 @@ func (c *Conn) VerifyHostname(host string) error {
|
||||
}
|
||||
return c.peerCertificates[0].VerifyHostname(host)
|
||||
}
|
||||
|
||||
// setReadTrafficSecret sets the read traffic secret for the given encryption level. If
|
||||
// being called at the same time as setWriteTrafficSecret, the caller must ensure the call
|
||||
// to setWriteTrafficSecret happens first so any alerts are sent at the write level.
|
||||
func (c *Conn) setReadTrafficSecret(suite *cipherSuiteTLS13, level QUICEncryptionLevel, secret []byte) error {
|
||||
// Ensure that there are no buffered handshake messages before changing the
|
||||
// read keys, since that can cause messages to be parsed that were encrypted
|
||||
// using old keys which are no longer appropriate.
|
||||
if c.hand.Len() != 0 {
|
||||
c.sendAlert(alertUnexpectedMessage)
|
||||
return errors.New("tls: handshake buffer not empty before setting read traffic secret")
|
||||
}
|
||||
c.in.setTrafficSecret(suite, level, secret)
|
||||
return nil
|
||||
}
|
||||
|
||||
// setWriteTrafficSecret sets the write traffic secret for the given encryption level. If
|
||||
// being called at the same time as setReadTrafficSecret, the caller must ensure the call
|
||||
// to setWriteTrafficSecret happens first so any alerts are sent at the write level.
|
||||
func (c *Conn) setWriteTrafficSecret(suite *cipherSuiteTLS13, level QUICEncryptionLevel, secret []byte) {
|
||||
c.out.setTrafficSecret(suite, level, secret)
|
||||
}
|
||||
|
||||
@@ -317,7 +317,11 @@ func (c *Conn) clientHandshake(ctx context.Context) (err error) {
|
||||
if hello.earlyData {
|
||||
suite := cipherSuiteTLS13ByID(session.cipherSuite)
|
||||
transcript := suite.hash.New()
|
||||
if err := transcriptMsg(hello, transcript); err != nil {
|
||||
transcriptHello := hello
|
||||
if ech != nil {
|
||||
transcriptHello = ech.innerHello
|
||||
}
|
||||
if err := transcriptMsg(transcriptHello, transcript); err != nil {
|
||||
return err
|
||||
}
|
||||
earlyTrafficSecret := earlySecret.ClientEarlyTrafficSecret(transcript)
|
||||
|
||||
@@ -515,16 +515,17 @@ func (hs *clientHandshakeStateTLS13) establishHandshakeKeys() error {
|
||||
handshakeSecret := earlySecret.HandshakeSecret(sharedKey)
|
||||
|
||||
clientSecret := handshakeSecret.ClientHandshakeTrafficSecret(hs.transcript)
|
||||
c.out.setTrafficSecret(hs.suite, QUICEncryptionLevelHandshake, clientSecret)
|
||||
c.setWriteTrafficSecret(hs.suite, QUICEncryptionLevelHandshake, clientSecret)
|
||||
serverSecret := handshakeSecret.ServerHandshakeTrafficSecret(hs.transcript)
|
||||
c.in.setTrafficSecret(hs.suite, QUICEncryptionLevelHandshake, serverSecret)
|
||||
if err := c.setReadTrafficSecret(hs.suite, QUICEncryptionLevelHandshake, serverSecret); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if c.quic != nil {
|
||||
if c.hand.Len() != 0 {
|
||||
c.sendAlert(alertUnexpectedMessage)
|
||||
}
|
||||
c.quicSetWriteSecret(QUICEncryptionLevelHandshake, hs.suite.id, clientSecret)
|
||||
c.quicSetReadSecret(QUICEncryptionLevelHandshake, hs.suite.id, serverSecret)
|
||||
if err := c.quicSetReadSecret(QUICEncryptionLevelHandshake, hs.suite.id, serverSecret); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err = c.config.writeKeyLog(keyLogLabelClientHandshake, hs.hello.random, clientSecret)
|
||||
@@ -735,7 +736,9 @@ func (hs *clientHandshakeStateTLS13) readServerFinished() error {
|
||||
|
||||
hs.trafficSecret = hs.masterSecret.ClientApplicationTrafficSecret(hs.transcript)
|
||||
serverSecret := hs.masterSecret.ServerApplicationTrafficSecret(hs.transcript)
|
||||
c.in.setTrafficSecret(hs.suite, QUICEncryptionLevelApplication, serverSecret)
|
||||
if err := c.setReadTrafficSecret(hs.suite, QUICEncryptionLevelApplication, serverSecret); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = c.config.writeKeyLog(keyLogLabelClientTraffic, hs.hello.random, hs.trafficSecret)
|
||||
if err != nil {
|
||||
@@ -838,16 +841,13 @@ func (hs *clientHandshakeStateTLS13) sendClientFinished() error {
|
||||
return err
|
||||
}
|
||||
|
||||
c.out.setTrafficSecret(hs.suite, QUICEncryptionLevelApplication, hs.trafficSecret)
|
||||
c.setWriteTrafficSecret(hs.suite, QUICEncryptionLevelApplication, hs.trafficSecret)
|
||||
|
||||
if !c.config.SessionTicketsDisabled && c.config.ClientSessionCache != nil {
|
||||
c.resumptionSecret = hs.masterSecret.ResumptionMasterSecret(hs.transcript)
|
||||
}
|
||||
|
||||
if c.quic != nil {
|
||||
if c.hand.Len() != 0 {
|
||||
c.sendAlert(alertUnexpectedMessage)
|
||||
}
|
||||
c.quicSetWriteSecret(QUICEncryptionLevelApplication, hs.suite.id, hs.trafficSecret)
|
||||
}
|
||||
|
||||
|
||||
@@ -357,7 +357,7 @@ func negotiateALPN(serverProtos, clientProtos []string, quic bool) (string, erro
|
||||
if http11fallback {
|
||||
return "", nil
|
||||
}
|
||||
return "", fmt.Errorf("tls: client requested unsupported application protocols (%s)", clientProtos)
|
||||
return "", fmt.Errorf("tls: client requested unsupported application protocols (%q)", clientProtos)
|
||||
}
|
||||
|
||||
// supportsECDHE returns whether ECDHE key exchanges can be used with this
|
||||
@@ -520,8 +520,13 @@ func (hs *serverHandshakeState) checkForResumption() error {
|
||||
if sessionHasClientCerts && c.config.ClientAuth == NoClientCert {
|
||||
return nil
|
||||
}
|
||||
if sessionHasClientCerts && c.config.time().After(sessionState.peerCertificates[0].NotAfter) {
|
||||
return nil
|
||||
if sessionHasClientCerts {
|
||||
now := c.config.time()
|
||||
for _, c := range sessionState.peerCertificates {
|
||||
if now.After(c.NotAfter) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if sessionHasClientCerts && c.config.ClientAuth >= VerifyClientCertIfGiven &&
|
||||
len(sessionState.verifiedChains) == 0 {
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"crypto/rand"
|
||||
"crypto/tls/internal/fips140tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
@@ -2121,3 +2122,103 @@ func TestHandshakeContextHierarchy(t *testing.T) {
|
||||
t.Errorf("Unexpected client error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandshakeChainExpiryResumptionTLS12(t *testing.T) {
|
||||
t.Run("TLS1.2", func(t *testing.T) {
|
||||
testHandshakeChainExpiryResumption(t, VersionTLS12)
|
||||
})
|
||||
t.Run("TLS1.3", func(t *testing.T) {
|
||||
testHandshakeChainExpiryResumption(t, VersionTLS13)
|
||||
})
|
||||
}
|
||||
|
||||
func testHandshakeChainExpiryResumption(t *testing.T, version uint16) {
|
||||
now := time.Now()
|
||||
createChain := func(leafNotAfter, rootNotAfter time.Time) (certDER []byte, root *x509.Certificate) {
|
||||
tmpl := &x509.Certificate{
|
||||
Subject: pkix.Name{CommonName: "root"},
|
||||
NotBefore: rootNotAfter.Add(-time.Hour * 24),
|
||||
NotAfter: rootNotAfter,
|
||||
IsCA: true,
|
||||
BasicConstraintsValid: true,
|
||||
}
|
||||
rootDER, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &testECDSAPrivateKey.PublicKey, testECDSAPrivateKey)
|
||||
if err != nil {
|
||||
t.Fatalf("CreateCertificate: %v", err)
|
||||
}
|
||||
root, err = x509.ParseCertificate(rootDER)
|
||||
if err != nil {
|
||||
t.Fatalf("ParseCertificate: %v", err)
|
||||
}
|
||||
|
||||
tmpl = &x509.Certificate{
|
||||
Subject: pkix.Name{},
|
||||
DNSNames: []string{"expired-resume.example.com"},
|
||||
NotBefore: leafNotAfter.Add(-time.Hour * 24),
|
||||
NotAfter: leafNotAfter,
|
||||
KeyUsage: x509.KeyUsageDigitalSignature,
|
||||
}
|
||||
certDER, err = x509.CreateCertificate(rand.Reader, tmpl, root, &testECDSAPrivateKey.PublicKey, testECDSAPrivateKey)
|
||||
if err != nil {
|
||||
t.Fatalf("CreateCertificate: %v", err)
|
||||
}
|
||||
|
||||
return certDER, root
|
||||
}
|
||||
|
||||
initialLeafDER, initialRoot := createChain(now.Add(time.Hour), now.Add(2*time.Hour))
|
||||
|
||||
serverConfig := testConfig.Clone()
|
||||
serverConfig.MaxVersion = version
|
||||
serverConfig.Certificates = []Certificate{{
|
||||
Certificate: [][]byte{initialLeafDER},
|
||||
PrivateKey: testECDSAPrivateKey,
|
||||
}}
|
||||
serverConfig.ClientCAs = x509.NewCertPool()
|
||||
serverConfig.ClientCAs.AddCert(initialRoot)
|
||||
serverConfig.ClientAuth = RequireAndVerifyClientCert
|
||||
serverConfig.Time = func() time.Time {
|
||||
return now
|
||||
}
|
||||
|
||||
clientConfig := testConfig.Clone()
|
||||
clientConfig.MaxVersion = version
|
||||
clientConfig.Certificates = []Certificate{{
|
||||
Certificate: [][]byte{initialLeafDER},
|
||||
PrivateKey: testECDSAPrivateKey,
|
||||
}}
|
||||
clientConfig.RootCAs = x509.NewCertPool()
|
||||
clientConfig.RootCAs.AddCert(initialRoot)
|
||||
clientConfig.ServerName = "expired-resume.example.com"
|
||||
clientConfig.ClientSessionCache = NewLRUClientSessionCache(32)
|
||||
|
||||
testResume := func(t *testing.T, sc, cc *Config, expectResume bool) {
|
||||
t.Helper()
|
||||
ss, cs, err := testHandshake(t, cc, sc)
|
||||
if err != nil {
|
||||
t.Fatalf("handshake: %v", err)
|
||||
}
|
||||
if cs.DidResume != expectResume {
|
||||
t.Fatalf("DidResume = %v; want %v", cs.DidResume, expectResume)
|
||||
}
|
||||
if ss.DidResume != expectResume {
|
||||
t.Fatalf("DidResume = %v; want %v", cs.DidResume, expectResume)
|
||||
}
|
||||
}
|
||||
|
||||
testResume(t, serverConfig, clientConfig, false)
|
||||
testResume(t, serverConfig, clientConfig, true)
|
||||
|
||||
freshLeafDER, freshRoot := createChain(now.Add(2*time.Hour), now.Add(3*time.Hour))
|
||||
clientConfig.Certificates = []Certificate{{
|
||||
Certificate: [][]byte{freshLeafDER},
|
||||
PrivateKey: testECDSAPrivateKey,
|
||||
}}
|
||||
serverConfig.Time = func() time.Time {
|
||||
return now.Add(1*time.Hour + 30*time.Minute)
|
||||
}
|
||||
serverConfig.ClientCAs = x509.NewCertPool()
|
||||
serverConfig.ClientCAs.AddCert(freshRoot)
|
||||
|
||||
testResume(t, serverConfig, clientConfig, false)
|
||||
}
|
||||
|
||||
@@ -354,6 +354,7 @@ func (hs *serverHandshakeStateTLS13) checkForResumption() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
pskIdentityLoop:
|
||||
for i, identity := range hs.clientHello.pskIdentities {
|
||||
if i >= maxClientPSKIdentities {
|
||||
break
|
||||
@@ -406,8 +407,13 @@ func (hs *serverHandshakeStateTLS13) checkForResumption() error {
|
||||
if sessionHasClientCerts && c.config.ClientAuth == NoClientCert {
|
||||
continue
|
||||
}
|
||||
if sessionHasClientCerts && c.config.time().After(sessionState.peerCertificates[0].NotAfter) {
|
||||
continue
|
||||
if sessionHasClientCerts {
|
||||
now := c.config.time()
|
||||
for _, c := range sessionState.peerCertificates {
|
||||
if now.After(c.NotAfter) {
|
||||
continue pskIdentityLoop
|
||||
}
|
||||
}
|
||||
}
|
||||
if sessionHasClientCerts && c.config.ClientAuth >= VerifyClientCertIfGiven &&
|
||||
len(sessionState.verifiedChains) == 0 {
|
||||
@@ -450,7 +456,9 @@ func (hs *serverHandshakeStateTLS13) checkForResumption() error {
|
||||
return err
|
||||
}
|
||||
earlyTrafficSecret := hs.earlySecret.ClientEarlyTrafficSecret(transcript)
|
||||
c.quicSetReadSecret(QUICEncryptionLevelEarly, hs.suite.id, earlyTrafficSecret)
|
||||
if err := c.quicSetReadSecret(QUICEncryptionLevelEarly, hs.suite.id, earlyTrafficSecret); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
c.didResume = true
|
||||
@@ -547,6 +555,14 @@ func (hs *serverHandshakeStateTLS13) sendDummyChangeCipherSpec() error {
|
||||
func (hs *serverHandshakeStateTLS13) doHelloRetryRequest(selectedGroup CurveID) (*keyShare, error) {
|
||||
c := hs.c
|
||||
|
||||
// Make sure the client didn't send extra handshake messages alongside
|
||||
// their initial client_hello. If they sent two client_hello messages,
|
||||
// we will consume the second before they respond to the server_hello.
|
||||
if c.hand.Len() != 0 {
|
||||
c.sendAlert(alertUnexpectedMessage)
|
||||
return nil, errors.New("tls: handshake buffer not empty before HelloRetryRequest")
|
||||
}
|
||||
|
||||
// The first ClientHello gets double-hashed into the transcript upon a
|
||||
// HelloRetryRequest. See RFC 8446, Section 4.4.1.
|
||||
if err := transcriptMsg(hs.clientHello, hs.transcript); err != nil {
|
||||
@@ -766,17 +782,18 @@ func (hs *serverHandshakeStateTLS13) sendServerParameters() error {
|
||||
}
|
||||
hs.handshakeSecret = earlySecret.HandshakeSecret(hs.sharedKey)
|
||||
|
||||
clientSecret := hs.handshakeSecret.ClientHandshakeTrafficSecret(hs.transcript)
|
||||
c.in.setTrafficSecret(hs.suite, QUICEncryptionLevelHandshake, clientSecret)
|
||||
serverSecret := hs.handshakeSecret.ServerHandshakeTrafficSecret(hs.transcript)
|
||||
c.out.setTrafficSecret(hs.suite, QUICEncryptionLevelHandshake, serverSecret)
|
||||
c.setWriteTrafficSecret(hs.suite, QUICEncryptionLevelHandshake, serverSecret)
|
||||
clientSecret := hs.handshakeSecret.ClientHandshakeTrafficSecret(hs.transcript)
|
||||
if err := c.setReadTrafficSecret(hs.suite, QUICEncryptionLevelHandshake, clientSecret); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if c.quic != nil {
|
||||
if c.hand.Len() != 0 {
|
||||
c.sendAlert(alertUnexpectedMessage)
|
||||
}
|
||||
c.quicSetWriteSecret(QUICEncryptionLevelHandshake, hs.suite.id, serverSecret)
|
||||
c.quicSetReadSecret(QUICEncryptionLevelHandshake, hs.suite.id, clientSecret)
|
||||
if err := c.quicSetReadSecret(QUICEncryptionLevelHandshake, hs.suite.id, clientSecret); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err := c.config.writeKeyLog(keyLogLabelClientHandshake, hs.clientHello.random, clientSecret)
|
||||
@@ -920,13 +937,9 @@ func (hs *serverHandshakeStateTLS13) sendServerFinished() error {
|
||||
|
||||
hs.trafficSecret = hs.masterSecret.ClientApplicationTrafficSecret(hs.transcript)
|
||||
serverSecret := hs.masterSecret.ServerApplicationTrafficSecret(hs.transcript)
|
||||
c.out.setTrafficSecret(hs.suite, QUICEncryptionLevelApplication, serverSecret)
|
||||
c.setWriteTrafficSecret(hs.suite, QUICEncryptionLevelApplication, serverSecret)
|
||||
|
||||
if c.quic != nil {
|
||||
if c.hand.Len() != 0 {
|
||||
// TODO: Handle this in setTrafficSecret?
|
||||
c.sendAlert(alertUnexpectedMessage)
|
||||
}
|
||||
c.quicSetWriteSecret(QUICEncryptionLevelApplication, hs.suite.id, serverSecret)
|
||||
}
|
||||
|
||||
@@ -1156,7 +1169,9 @@ func (hs *serverHandshakeStateTLS13) readClientFinished() error {
|
||||
return errors.New("tls: invalid client finished hash")
|
||||
}
|
||||
|
||||
c.in.setTrafficSecret(hs.suite, QUICEncryptionLevelApplication, hs.trafficSecret)
|
||||
if err := c.setReadTrafficSecret(hs.suite, QUICEncryptionLevelApplication, hs.trafficSecret); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ package tls
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/ed25519"
|
||||
"crypto/x509"
|
||||
"encoding/hex"
|
||||
@@ -632,3 +633,142 @@ var clientEd25519KeyPEM = testingKey(`
|
||||
-----BEGIN TESTING KEY-----
|
||||
MC4CAQAwBQYDK2VwBCIEINifzf07d9qx3d44e0FSbV4mC/xQxT644RRbpgNpin7I
|
||||
-----END TESTING KEY-----`)
|
||||
|
||||
func TestServerHelloTrailingMessage(t *testing.T) {
|
||||
// In TLS 1.3 the change cipher spec message is optional. If a CCS message
|
||||
// is not sent, after reading the ServerHello, the read traffic secret is
|
||||
// set, and all following messages must be encrypted. If the server sends
|
||||
// additional unencrypted messages in a record with the ServerHello, the
|
||||
// client must either fail or ignore the additional messages.
|
||||
|
||||
c, s := localPipe(t)
|
||||
go func() {
|
||||
ctx := context.Background()
|
||||
srv := Server(s, testConfig)
|
||||
clientHello, _, err := srv.readClientHello(ctx)
|
||||
if err != nil {
|
||||
testFatal(t, err)
|
||||
}
|
||||
|
||||
hs := serverHandshakeStateTLS13{
|
||||
c: srv,
|
||||
ctx: ctx,
|
||||
clientHello: clientHello,
|
||||
}
|
||||
if err := hs.processClientHello(); err != nil {
|
||||
testFatal(t, err)
|
||||
}
|
||||
if err := transcriptMsg(hs.clientHello, hs.transcript); err != nil {
|
||||
testFatal(t, err)
|
||||
}
|
||||
|
||||
record, err := concatHandshakeMessages(hs.hello, &encryptedExtensionsMsg{alpnProtocol: "h2"})
|
||||
if err != nil {
|
||||
testFatal(t, err)
|
||||
}
|
||||
|
||||
if _, err := s.Write(record); err != nil {
|
||||
testFatal(t, err)
|
||||
}
|
||||
srv.Close()
|
||||
}()
|
||||
|
||||
cli := Client(c, testConfig)
|
||||
expectedErr := "tls: handshake buffer not empty before setting read traffic secret"
|
||||
if err := cli.Handshake(); err == nil {
|
||||
t.Fatal("expected error from incomplete handshake, got nil")
|
||||
} else if err.Error() != expectedErr {
|
||||
t.Fatalf("expected error %q, got %q", expectedErr, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientHelloTrailingMessage(t *testing.T) {
|
||||
// Same as TestServerHelloTrailingMessage but for the client side.
|
||||
|
||||
c, s := localPipe(t)
|
||||
go func() {
|
||||
cli := Client(c, testConfig)
|
||||
|
||||
hello, _, _, err := cli.makeClientHello()
|
||||
if err != nil {
|
||||
testFatal(t, err)
|
||||
}
|
||||
|
||||
record, err := concatHandshakeMessages(hello, &certificateMsgTLS13{})
|
||||
if err != nil {
|
||||
testFatal(t, err)
|
||||
}
|
||||
|
||||
if _, err := c.Write(record); err != nil {
|
||||
testFatal(t, err)
|
||||
}
|
||||
cli.Close()
|
||||
}()
|
||||
|
||||
srv := Server(s, testConfig)
|
||||
expectedErr := "tls: handshake buffer not empty before setting read traffic secret"
|
||||
if err := srv.Handshake(); err == nil {
|
||||
t.Fatal("expected error from incomplete handshake, got nil")
|
||||
} else if err.Error() != expectedErr {
|
||||
t.Fatalf("expected error %q, got %q", expectedErr, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func TestDoubleClientHelloHRR(t *testing.T) {
|
||||
// If a client sends two ClientHello messages in a single record, and the
|
||||
// server sends a HRR after reading the first ClientHello, the server must
|
||||
// either fail or ignore the trailing ClientHello.
|
||||
|
||||
c, s := localPipe(t)
|
||||
|
||||
go func() {
|
||||
cli := Client(c, testConfig)
|
||||
|
||||
hello, _, _, err := cli.makeClientHello()
|
||||
if err != nil {
|
||||
testFatal(t, err)
|
||||
}
|
||||
hello.keyShares = nil
|
||||
|
||||
record, err := concatHandshakeMessages(hello, hello)
|
||||
if err != nil {
|
||||
testFatal(t, err)
|
||||
}
|
||||
|
||||
if _, err := c.Write(record); err != nil {
|
||||
testFatal(t, err)
|
||||
}
|
||||
cli.Close()
|
||||
}()
|
||||
|
||||
srv := Server(s, testConfig)
|
||||
expectedErr := "tls: handshake buffer not empty before HelloRetryRequest"
|
||||
if err := srv.Handshake(); err == nil {
|
||||
t.Fatal("expected error from incomplete handshake, got nil")
|
||||
} else if err.Error() != expectedErr {
|
||||
t.Fatalf("expected error %q, got %q", expectedErr, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// concatHandshakeMessages marshals and concatenates the given handshake
|
||||
// messages into a single record.
|
||||
func concatHandshakeMessages(msgs ...handshakeMessage) ([]byte, error) {
|
||||
var marshalled []byte
|
||||
for _, msg := range msgs {
|
||||
data, err := msg.marshal()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
marshalled = append(marshalled, data...)
|
||||
}
|
||||
m := len(marshalled)
|
||||
outBuf := make([]byte, recordHeaderLen)
|
||||
outBuf[0] = byte(recordTypeHandshake)
|
||||
vers := VersionTLS12
|
||||
outBuf[1] = byte(vers >> 8)
|
||||
outBuf[2] = byte(vers)
|
||||
outBuf[3] = byte(m >> 8)
|
||||
outBuf[4] = byte(m)
|
||||
outBuf = append(outBuf, marshalled...)
|
||||
return outBuf, nil
|
||||
}
|
||||
|
||||
@@ -381,13 +381,22 @@ func (c *Conn) quicReadHandshakeBytes(n int) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Conn) quicSetReadSecret(level QUICEncryptionLevel, suite uint16, secret []byte) {
|
||||
func (c *Conn) quicSetReadSecret(level QUICEncryptionLevel, suite uint16, secret []byte) error {
|
||||
// Ensure that there are no buffered handshake messages before changing the
|
||||
// read keys, since that can cause messages to be parsed that were encrypted
|
||||
// using old keys which are no longer appropriate.
|
||||
// TODO(roland): we should merge this check with the similar one in setReadTrafficSecret.
|
||||
if c.hand.Len() != 0 {
|
||||
c.sendAlert(alertUnexpectedMessage)
|
||||
return errors.New("tls: handshake buffer not empty before setting read traffic secret")
|
||||
}
|
||||
c.quic.events = append(c.quic.events, QUICEvent{
|
||||
Kind: QUICSetReadSecret,
|
||||
Level: level,
|
||||
Suite: suite,
|
||||
Data: secret,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Conn) quicSetWriteSecret(level QUICEncryptionLevel, suite uint16, secret []byte) {
|
||||
|
||||
@@ -935,8 +935,8 @@ func TestCloneNonFuncFields(t *testing.T) {
|
||||
}
|
||||
}
|
||||
// Set the unexported fields related to session ticket keys, which are copied with Clone().
|
||||
c1.autoSessionTicketKeys = []ticketKey{c1.ticketKeyFromBytes(c1.SessionTicketKey)}
|
||||
c1.sessionTicketKeys = []ticketKey{c1.ticketKeyFromBytes(c1.SessionTicketKey)}
|
||||
// We explicitly don't copy autoSessionTicketKeys in Clone, so don't set it.
|
||||
|
||||
c2 := c1.Clone()
|
||||
if !reflect.DeepEqual(&c1, c2) {
|
||||
@@ -2347,3 +2347,12 @@ func TestECH(t *testing.T) {
|
||||
|
||||
check()
|
||||
}
|
||||
|
||||
func TestConfigCloneAutoSessionTicketKeys(t *testing.T) {
|
||||
orig := &Config{}
|
||||
orig.ticketKeys(nil)
|
||||
clone := orig.Clone()
|
||||
if slices.Equal(orig.autoSessionTicketKeys, clone.autoSessionTicketKeys) {
|
||||
t.Fatal("autoSessionTicketKeys slice copied in Clone")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1624,6 +1624,40 @@ var nameConstraintsTests = []nameConstraintsTest{
|
||||
},
|
||||
expectedError: "URI with IP",
|
||||
},
|
||||
// #87: subdomain excluded constraints preclude wildcard names
|
||||
{
|
||||
roots: []constraintsSpec{
|
||||
{
|
||||
bad: []string{"dns:foo.example.com"},
|
||||
},
|
||||
},
|
||||
intermediates: [][]constraintsSpec{
|
||||
{
|
||||
{},
|
||||
},
|
||||
},
|
||||
leaf: leafSpec{
|
||||
sans: []string{"dns:*.example.com"},
|
||||
},
|
||||
expectedError: "\"*.example.com\" is excluded by constraint \"foo.example.com\"",
|
||||
},
|
||||
// #88: wildcard names are not matched by subdomain permitted constraints
|
||||
{
|
||||
roots: []constraintsSpec{
|
||||
{
|
||||
ok: []string{"dns:foo.example.com"},
|
||||
},
|
||||
},
|
||||
intermediates: [][]constraintsSpec{
|
||||
{
|
||||
{},
|
||||
},
|
||||
},
|
||||
leaf: leafSpec{
|
||||
sans: []string{"dns:*.example.com"},
|
||||
},
|
||||
expectedError: "\"*.example.com\" is not permitted",
|
||||
},
|
||||
}
|
||||
|
||||
func makeConstraintsCACert(constraints constraintsSpec, name string, key *ecdsa.PrivateKey, parent *Certificate, parentKey *ecdsa.PrivateKey) (*Certificate, error) {
|
||||
|
||||
@@ -429,10 +429,8 @@ func parseSANExtension(der cryptobyte.String) (dnsNames, emailAddresses []string
|
||||
if err != nil {
|
||||
return fmt.Errorf("x509: cannot parse URI %q: %s", uriStr, err)
|
||||
}
|
||||
if len(uri.Host) > 0 {
|
||||
if _, ok := domainToReverseLabels(uri.Host); !ok {
|
||||
return fmt.Errorf("x509: cannot parse URI %q: invalid domain", uriStr)
|
||||
}
|
||||
if len(uri.Host) > 0 && !domainNameValid(uri.Host, false) {
|
||||
return fmt.Errorf("x509: cannot parse URI %q: invalid domain", uriStr)
|
||||
}
|
||||
uris = append(uris, uri)
|
||||
case nameTypeIP:
|
||||
@@ -598,15 +596,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle
|
||||
return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error())
|
||||
}
|
||||
|
||||
trimmedDomain := domain
|
||||
if len(trimmedDomain) > 0 && trimmedDomain[0] == '.' {
|
||||
// constraints can have a leading
|
||||
// period to exclude the domain
|
||||
// itself, but that's not valid in a
|
||||
// normal domain name.
|
||||
trimmedDomain = trimmedDomain[1:]
|
||||
}
|
||||
if _, ok := domainToReverseLabels(trimmedDomain); !ok {
|
||||
if !domainNameValid(domain, true) {
|
||||
return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse dnsName constraint %q", domain)
|
||||
}
|
||||
dnsNames = append(dnsNames, domain)
|
||||
@@ -647,12 +637,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle
|
||||
return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint)
|
||||
}
|
||||
} else {
|
||||
// Otherwise it's a domain name.
|
||||
domain := constraint
|
||||
if len(domain) > 0 && domain[0] == '.' {
|
||||
domain = domain[1:]
|
||||
}
|
||||
if _, ok := domainToReverseLabels(domain); !ok {
|
||||
if !domainNameValid(constraint, true) {
|
||||
return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint)
|
||||
}
|
||||
}
|
||||
@@ -668,15 +653,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle
|
||||
return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse URI constraint %q: cannot be IP address", domain)
|
||||
}
|
||||
|
||||
trimmedDomain := domain
|
||||
if len(trimmedDomain) > 0 && trimmedDomain[0] == '.' {
|
||||
// constraints can have a leading
|
||||
// period to exclude the domain itself,
|
||||
// but that's not valid in a normal
|
||||
// domain name.
|
||||
trimmedDomain = trimmedDomain[1:]
|
||||
}
|
||||
if _, ok := domainToReverseLabels(trimmedDomain); !ok {
|
||||
if !domainNameValid(domain, true) {
|
||||
return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse URI constraint %q", domain)
|
||||
}
|
||||
uriDomains = append(uriDomains, domain)
|
||||
@@ -1317,3 +1294,62 @@ func ParseRevocationList(der []byte) (*RevocationList, error) {
|
||||
|
||||
return rl, nil
|
||||
}
|
||||
|
||||
// domainNameValid is an alloc-less version of the checks that
|
||||
// domainToReverseLabels does.
|
||||
func domainNameValid(s string, constraint bool) bool {
|
||||
// TODO(#75835): This function omits a number of checks which we
|
||||
// really should be doing to enforce that domain names are valid names per
|
||||
// RFC 1034. We previously enabled these checks, but this broke a
|
||||
// significant number of certificates we previously considered valid, and we
|
||||
// happily create via CreateCertificate (et al). We should enable these
|
||||
// checks, but will need to gate them behind a GODEBUG.
|
||||
//
|
||||
// I have left the checks we previously enabled, noted with "TODO(#75835)" so
|
||||
// that we can easily re-enable them once we unbreak everyone.
|
||||
|
||||
// TODO(#75835): this should only be true for constraints.
|
||||
if len(s) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
// Do not allow trailing period (FQDN format is not allowed in SANs or
|
||||
// constraints).
|
||||
if s[len(s)-1] == '.' {
|
||||
return false
|
||||
}
|
||||
|
||||
// TODO(#75835): domains must have at least one label, cannot have
|
||||
// a leading empty label, and cannot be longer than 253 characters.
|
||||
// if len(s) == 0 || (!constraint && s[0] == '.') || len(s) > 253 {
|
||||
// return false
|
||||
// }
|
||||
|
||||
lastDot := -1
|
||||
if constraint && s[0] == '.' {
|
||||
s = s[1:]
|
||||
}
|
||||
|
||||
for i := 0; i <= len(s); i++ {
|
||||
if i < len(s) && (s[i] < 33 || s[i] > 126) {
|
||||
// Invalid character.
|
||||
return false
|
||||
}
|
||||
if i == len(s) || s[i] == '.' {
|
||||
labelLen := i
|
||||
if lastDot >= 0 {
|
||||
labelLen -= lastDot + 1
|
||||
}
|
||||
if labelLen == 0 {
|
||||
return false
|
||||
}
|
||||
// TODO(#75835): labels cannot be longer than 63 characters.
|
||||
// if labelLen > 63 {
|
||||
// return false
|
||||
// }
|
||||
lastDot = i
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -5,9 +5,13 @@
|
||||
package x509
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"encoding/asn1"
|
||||
"encoding/pem"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1"
|
||||
@@ -251,3 +255,106 @@ d5l1tRhScKu2NBgm74nYmJxJYgvuTA38wGhRrGU=
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDomainNameValid(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
dnsName string
|
||||
constraint bool
|
||||
valid bool
|
||||
}{
|
||||
// TODO(#75835): these tests are for stricter name validation, which we
|
||||
// had to disable. Once we reenable these strict checks, behind a
|
||||
// GODEBUG, we should add them back in.
|
||||
// {"empty name, name", "", false, false},
|
||||
// {"254 char label, name", strings.Repeat("a.a", 84) + "aaa", false, false},
|
||||
// {"254 char label, constraint", strings.Repeat("a.a", 84) + "aaa", true, false},
|
||||
// {"253 char label, name", strings.Repeat("a.a", 84) + "aa", false, false},
|
||||
// {"253 char label, constraint", strings.Repeat("a.a", 84) + "aa", true, false},
|
||||
// {"64 char single label, name", strings.Repeat("a", 64), false, false},
|
||||
// {"64 char single label, constraint", strings.Repeat("a", 64), true, false},
|
||||
// {"64 char label, name", "a." + strings.Repeat("a", 64), false, false},
|
||||
// {"64 char label, constraint", "a." + strings.Repeat("a", 64), true, false},
|
||||
|
||||
// TODO(#75835): these are the inverse of the tests above, they should be removed
|
||||
// once the strict checking is enabled.
|
||||
{"254 char label, name", strings.Repeat("a.a", 84) + "aaa", false, true},
|
||||
{"254 char label, constraint", strings.Repeat("a.a", 84) + "aaa", true, true},
|
||||
{"253 char label, name", strings.Repeat("a.a", 84) + "aa", false, true},
|
||||
{"253 char label, constraint", strings.Repeat("a.a", 84) + "aa", true, true},
|
||||
{"64 char single label, name", strings.Repeat("a", 64), false, true},
|
||||
{"64 char single label, constraint", strings.Repeat("a", 64), true, true},
|
||||
{"64 char label, name", "a." + strings.Repeat("a", 64), false, true},
|
||||
{"64 char label, constraint", "a." + strings.Repeat("a", 64), true, true},
|
||||
|
||||
// Check we properly enforce properties of domain names.
|
||||
{"empty name, constraint", "", true, true},
|
||||
{"empty label, name", "a..a", false, false},
|
||||
{"empty label, constraint", "a..a", true, false},
|
||||
{"period, name", ".", false, false},
|
||||
{"period, constraint", ".", true, false}, // TODO(roland): not entirely clear if this is a valid constraint (require at least one label?)
|
||||
{"valid, name", "a.b.c", false, true},
|
||||
{"valid, constraint", "a.b.c", true, true},
|
||||
{"leading period, name", ".a.b.c", false, false},
|
||||
{"leading period, constraint", ".a.b.c", true, true},
|
||||
{"trailing period, name", "a.", false, false},
|
||||
{"trailing period, constraint", "a.", true, false},
|
||||
{"bare label, name", "a", false, true},
|
||||
{"bare label, constraint", "a", true, true},
|
||||
{"63 char single label, name", strings.Repeat("a", 63), false, true},
|
||||
{"63 char single label, constraint", strings.Repeat("a", 63), true, true},
|
||||
{"63 char label, name", "a." + strings.Repeat("a", 63), false, true},
|
||||
{"63 char label, constraint", "a." + strings.Repeat("a", 63), true, true},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
valid := domainNameValid(tc.dnsName, tc.constraint)
|
||||
if tc.valid != valid {
|
||||
t.Errorf("domainNameValid(%q, %t) = %v; want %v", tc.dnsName, tc.constraint, !tc.valid, tc.valid)
|
||||
}
|
||||
// Also check that we enforce the same properties as domainToReverseLabels
|
||||
trimmedName := tc.dnsName
|
||||
if tc.constraint && len(trimmedName) > 1 && trimmedName[0] == '.' {
|
||||
trimmedName = trimmedName[1:]
|
||||
}
|
||||
_, revValid := domainToReverseLabels(trimmedName)
|
||||
if valid != revValid {
|
||||
t.Errorf("domainNameValid(%q, %t) = %t != domainToReverseLabels(%q) = %t", tc.dnsName, tc.constraint, valid, trimmedName, revValid)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRoundtripWeirdSANs(t *testing.T) {
|
||||
// TODO(#75835): check that certificates we create with CreateCertificate that have malformed SAN values
|
||||
// can be parsed by ParseCertificate. We should eventually restrict this, but for now we have to maintain
|
||||
// this property as people have been relying on it.
|
||||
k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
badNames := []string{
|
||||
"baredomain",
|
||||
"baredomain.",
|
||||
strings.Repeat("a", 255),
|
||||
strings.Repeat("a", 65) + ".com",
|
||||
}
|
||||
tmpl := &Certificate{
|
||||
EmailAddresses: badNames,
|
||||
DNSNames: badNames,
|
||||
}
|
||||
b, err := CreateCertificate(rand.Reader, tmpl, tmpl, &k.PublicKey, k)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = ParseCertificate(b)
|
||||
if err != nil {
|
||||
t.Fatalf("Couldn't roundtrip certificate: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func FuzzDomainNameValid(f *testing.F) {
|
||||
f.Fuzz(func(t *testing.T, data string) {
|
||||
domainNameValid(data, false)
|
||||
domainNameValid(data, true)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -110,31 +110,38 @@ type HostnameError struct {
|
||||
|
||||
func (h HostnameError) Error() string {
|
||||
c := h.Certificate
|
||||
maxNamesIncluded := 100
|
||||
|
||||
if !c.hasSANExtension() && matchHostnames(c.Subject.CommonName, h.Host) {
|
||||
return "x509: certificate relies on legacy Common Name field, use SANs instead"
|
||||
}
|
||||
|
||||
var valid string
|
||||
var valid strings.Builder
|
||||
if ip := net.ParseIP(h.Host); ip != nil {
|
||||
// Trying to validate an IP
|
||||
if len(c.IPAddresses) == 0 {
|
||||
return "x509: cannot validate certificate for " + h.Host + " because it doesn't contain any IP SANs"
|
||||
}
|
||||
if len(c.IPAddresses) >= maxNamesIncluded {
|
||||
return fmt.Sprintf("x509: certificate is valid for %d IP SANs, but none matched %s", len(c.IPAddresses), h.Host)
|
||||
}
|
||||
for _, san := range c.IPAddresses {
|
||||
if len(valid) > 0 {
|
||||
valid += ", "
|
||||
if valid.Len() > 0 {
|
||||
valid.WriteString(", ")
|
||||
}
|
||||
valid += san.String()
|
||||
valid.WriteString(san.String())
|
||||
}
|
||||
} else {
|
||||
valid = strings.Join(c.DNSNames, ", ")
|
||||
if len(c.DNSNames) >= maxNamesIncluded {
|
||||
return fmt.Sprintf("x509: certificate is valid for %d names, but none matched %s", len(c.DNSNames), h.Host)
|
||||
}
|
||||
valid.WriteString(strings.Join(c.DNSNames, ", "))
|
||||
}
|
||||
|
||||
if len(valid) == 0 {
|
||||
if valid.Len() == 0 {
|
||||
return "x509: certificate is not valid for any names, but wanted to match " + h.Host
|
||||
}
|
||||
return "x509: certificate is valid for " + valid + ", not " + h.Host
|
||||
return "x509: certificate is valid for " + valid.String() + ", not " + h.Host
|
||||
}
|
||||
|
||||
// UnknownAuthorityError results when the certificate issuer is unknown
|
||||
@@ -391,6 +398,7 @@ func parseRFC2821Mailbox(in string) (mailbox rfc2821Mailbox, ok bool) {
|
||||
// domainToReverseLabels converts a textual domain name like foo.example.com to
|
||||
// the list of labels in reverse order, e.g. ["com", "example", "foo"].
|
||||
func domainToReverseLabels(domain string) (reverseLabels []string, ok bool) {
|
||||
reverseLabels = make([]string, 0, strings.Count(domain, ".")+1)
|
||||
for len(domain) > 0 {
|
||||
if i := strings.LastIndexByte(domain, '.'); i == -1 {
|
||||
reverseLabels = append(reverseLabels, domain)
|
||||
@@ -428,7 +436,7 @@ func domainToReverseLabels(domain string) (reverseLabels []string, ok bool) {
|
||||
return reverseLabels, true
|
||||
}
|
||||
|
||||
func matchEmailConstraint(mailbox rfc2821Mailbox, constraint string) (bool, error) {
|
||||
func matchEmailConstraint(mailbox rfc2821Mailbox, constraint string, excluded bool, reversedDomainsCache map[string][]string, reversedConstraintsCache map[string][]string) (bool, error) {
|
||||
// If the constraint contains an @, then it specifies an exact mailbox
|
||||
// name.
|
||||
if strings.Contains(constraint, "@") {
|
||||
@@ -441,10 +449,10 @@ func matchEmailConstraint(mailbox rfc2821Mailbox, constraint string) (bool, erro
|
||||
|
||||
// Otherwise the constraint is like a DNS constraint of the domain part
|
||||
// of the mailbox.
|
||||
return matchDomainConstraint(mailbox.domain, constraint)
|
||||
return matchDomainConstraint(mailbox.domain, constraint, excluded, reversedDomainsCache, reversedConstraintsCache)
|
||||
}
|
||||
|
||||
func matchURIConstraint(uri *url.URL, constraint string) (bool, error) {
|
||||
func matchURIConstraint(uri *url.URL, constraint string, excluded bool, reversedDomainsCache map[string][]string, reversedConstraintsCache map[string][]string) (bool, error) {
|
||||
// From RFC 5280, Section 4.2.1.10:
|
||||
// “a uniformResourceIdentifier that does not include an authority
|
||||
// component with a host name specified as a fully qualified domain
|
||||
@@ -473,7 +481,7 @@ func matchURIConstraint(uri *url.URL, constraint string) (bool, error) {
|
||||
return false, fmt.Errorf("URI with IP (%q) cannot be matched against constraints", uri.String())
|
||||
}
|
||||
|
||||
return matchDomainConstraint(host, constraint)
|
||||
return matchDomainConstraint(host, constraint, excluded, reversedDomainsCache, reversedConstraintsCache)
|
||||
}
|
||||
|
||||
func matchIPConstraint(ip net.IP, constraint *net.IPNet) (bool, error) {
|
||||
@@ -490,16 +498,26 @@ func matchIPConstraint(ip net.IP, constraint *net.IPNet) (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func matchDomainConstraint(domain, constraint string) (bool, error) {
|
||||
func matchDomainConstraint(domain, constraint string, excluded bool, reversedDomainsCache map[string][]string, reversedConstraintsCache map[string][]string) (bool, error) {
|
||||
// The meaning of zero length constraints is not specified, but this
|
||||
// code follows NSS and accepts them as matching everything.
|
||||
if len(constraint) == 0 {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
domainLabels, ok := domainToReverseLabels(domain)
|
||||
if !ok {
|
||||
return false, fmt.Errorf("x509: internal error: cannot parse domain %q", domain)
|
||||
domainLabels, found := reversedDomainsCache[domain]
|
||||
if !found {
|
||||
var ok bool
|
||||
domainLabels, ok = domainToReverseLabels(domain)
|
||||
if !ok {
|
||||
return false, fmt.Errorf("x509: internal error: cannot parse domain %q", domain)
|
||||
}
|
||||
reversedDomainsCache[domain] = domainLabels
|
||||
}
|
||||
|
||||
wildcardDomain := false
|
||||
if len(domain) > 0 && domain[0] == '*' {
|
||||
wildcardDomain = true
|
||||
}
|
||||
|
||||
// RFC 5280 says that a leading period in a domain name means that at
|
||||
@@ -513,9 +531,14 @@ func matchDomainConstraint(domain, constraint string) (bool, error) {
|
||||
constraint = constraint[1:]
|
||||
}
|
||||
|
||||
constraintLabels, ok := domainToReverseLabels(constraint)
|
||||
if !ok {
|
||||
return false, fmt.Errorf("x509: internal error: cannot parse domain %q", constraint)
|
||||
constraintLabels, found := reversedConstraintsCache[constraint]
|
||||
if !found {
|
||||
var ok bool
|
||||
constraintLabels, ok = domainToReverseLabels(constraint)
|
||||
if !ok {
|
||||
return false, fmt.Errorf("x509: internal error: cannot parse domain %q", constraint)
|
||||
}
|
||||
reversedConstraintsCache[constraint] = constraintLabels
|
||||
}
|
||||
|
||||
if len(domainLabels) < len(constraintLabels) ||
|
||||
@@ -523,6 +546,11 @@ func matchDomainConstraint(domain, constraint string) (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if excluded && wildcardDomain && len(domainLabels) > 1 && len(constraintLabels) > 0 {
|
||||
domainLabels = domainLabels[:len(domainLabels)-1]
|
||||
constraintLabels = constraintLabels[:len(constraintLabels)-1]
|
||||
}
|
||||
|
||||
for i, constraintLabel := range constraintLabels {
|
||||
if !strings.EqualFold(constraintLabel, domainLabels[i]) {
|
||||
return false, nil
|
||||
@@ -542,7 +570,7 @@ func (c *Certificate) checkNameConstraints(count *int,
|
||||
nameType string,
|
||||
name string,
|
||||
parsedName any,
|
||||
match func(parsedName, constraint any) (match bool, err error),
|
||||
match func(parsedName, constraint any, excluded bool) (match bool, err error),
|
||||
permitted, excluded any) error {
|
||||
|
||||
excludedValue := reflect.ValueOf(excluded)
|
||||
@@ -554,7 +582,7 @@ func (c *Certificate) checkNameConstraints(count *int,
|
||||
|
||||
for i := 0; i < excludedValue.Len(); i++ {
|
||||
constraint := excludedValue.Index(i).Interface()
|
||||
match, err := match(parsedName, constraint)
|
||||
match, err := match(parsedName, constraint, true)
|
||||
if err != nil {
|
||||
return CertificateInvalidError{c, CANotAuthorizedForThisName, err.Error()}
|
||||
}
|
||||
@@ -576,7 +604,7 @@ func (c *Certificate) checkNameConstraints(count *int,
|
||||
constraint := permittedValue.Index(i).Interface()
|
||||
|
||||
var err error
|
||||
if ok, err = match(parsedName, constraint); err != nil {
|
||||
if ok, err = match(parsedName, constraint, false); err != nil {
|
||||
return CertificateInvalidError{c, CANotAuthorizedForThisName, err.Error()}
|
||||
}
|
||||
|
||||
@@ -636,6 +664,19 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
|
||||
}
|
||||
}
|
||||
|
||||
// Each time we do constraint checking, we need to check the constraints in
|
||||
// the current certificate against all of the names that preceded it. We
|
||||
// reverse these names using domainToReverseLabels, which is a relatively
|
||||
// expensive operation. Since we check each name against each constraint,
|
||||
// this requires us to do N*C calls to domainToReverseLabels (where N is the
|
||||
// total number of names that preceed the certificate, and C is the total
|
||||
// number of constraints in the certificate). By caching the results of
|
||||
// calling domainToReverseLabels, we can reduce that to N+C calls at the
|
||||
// cost of keeping all of the parsed names and constraints in memory until
|
||||
// we return from isValid.
|
||||
reversedDomainsCache := map[string][]string{}
|
||||
reversedConstraintsCache := map[string][]string{}
|
||||
|
||||
if (certType == intermediateCertificate || certType == rootCertificate) &&
|
||||
c.hasNameConstraints() {
|
||||
toCheck := []*Certificate{}
|
||||
@@ -655,21 +696,21 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
|
||||
}
|
||||
|
||||
if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "email address", name, mailbox,
|
||||
func(parsedName, constraint any) (bool, error) {
|
||||
return matchEmailConstraint(parsedName.(rfc2821Mailbox), constraint.(string))
|
||||
func(parsedName, constraint any, excluded bool) (bool, error) {
|
||||
return matchEmailConstraint(parsedName.(rfc2821Mailbox), constraint.(string), excluded, reversedDomainsCache, reversedConstraintsCache)
|
||||
}, c.PermittedEmailAddresses, c.ExcludedEmailAddresses); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case nameTypeDNS:
|
||||
name := string(data)
|
||||
if _, ok := domainToReverseLabels(name); !ok {
|
||||
if !domainNameValid(name, false) {
|
||||
return fmt.Errorf("x509: cannot parse dnsName %q", name)
|
||||
}
|
||||
|
||||
if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "DNS name", name, name,
|
||||
func(parsedName, constraint any) (bool, error) {
|
||||
return matchDomainConstraint(parsedName.(string), constraint.(string))
|
||||
func(parsedName, constraint any, excluded bool) (bool, error) {
|
||||
return matchDomainConstraint(parsedName.(string), constraint.(string), excluded, reversedDomainsCache, reversedConstraintsCache)
|
||||
}, c.PermittedDNSDomains, c.ExcludedDNSDomains); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -682,8 +723,8 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
|
||||
}
|
||||
|
||||
if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "URI", name, uri,
|
||||
func(parsedName, constraint any) (bool, error) {
|
||||
return matchURIConstraint(parsedName.(*url.URL), constraint.(string))
|
||||
func(parsedName, constraint any, excluded bool) (bool, error) {
|
||||
return matchURIConstraint(parsedName.(*url.URL), constraint.(string), excluded, reversedDomainsCache, reversedConstraintsCache)
|
||||
}, c.PermittedURIDomains, c.ExcludedURIDomains); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -695,7 +736,7 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V
|
||||
}
|
||||
|
||||
if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "IP address", ip.String(), ip,
|
||||
func(parsedName, constraint any) (bool, error) {
|
||||
func(parsedName, constraint any, _ bool) (bool, error) {
|
||||
return matchIPConstraint(parsedName.(net.IP), constraint.(*net.IPNet))
|
||||
}, c.PermittedIPRanges, c.ExcludedIPRanges); err != nil {
|
||||
return err
|
||||
@@ -927,7 +968,10 @@ func alreadyInChain(candidate *Certificate, chain []*Certificate) bool {
|
||||
if !bytes.Equal(candidate.RawSubject, cert.RawSubject) {
|
||||
continue
|
||||
}
|
||||
if !candidate.PublicKey.(pubKeyEqual).Equal(cert.PublicKey) {
|
||||
// We enforce the canonical encoding of SPKI (by only allowing the
|
||||
// correct AI paremeter encodings in parseCertificate), so it's safe to
|
||||
// directly compare the raw bytes.
|
||||
if !bytes.Equal(candidate.RawSubjectPublicKeyInfo, cert.RawSubjectPublicKeyInfo) {
|
||||
continue
|
||||
}
|
||||
var certSAN *pkix.Extension
|
||||
|
||||
@@ -6,16 +6,20 @@ package x509
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/dsa"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/asn1"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"internal/testenv"
|
||||
"log"
|
||||
"math/big"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
@@ -88,6 +92,26 @@ var verifyTests = []verifyTest{
|
||||
|
||||
errorCallback: expectHostnameError("certificate is valid for"),
|
||||
},
|
||||
{
|
||||
name: "TooManyDNS",
|
||||
leaf: generatePEMCertWithRepeatSAN(1677615892, 200, "fake.dns"),
|
||||
roots: []string{generatePEMCertWithRepeatSAN(1677615892, 200, "fake.dns")},
|
||||
currentTime: 1677615892,
|
||||
dnsName: "www.example.com",
|
||||
systemSkip: true, // does not chain to a system root
|
||||
|
||||
errorCallback: expectHostnameError("certificate is valid for 200 names, but none matched"),
|
||||
},
|
||||
{
|
||||
name: "TooManyIPs",
|
||||
leaf: generatePEMCertWithRepeatSAN(1677615892, 150, "4.3.2.1"),
|
||||
roots: []string{generatePEMCertWithRepeatSAN(1677615892, 150, "4.3.2.1")},
|
||||
currentTime: 1677615892,
|
||||
dnsName: "1.2.3.4",
|
||||
systemSkip: true, // does not chain to a system root
|
||||
|
||||
errorCallback: expectHostnameError("certificate is valid for 150 IP SANs, but none matched"),
|
||||
},
|
||||
{
|
||||
name: "IPMissing",
|
||||
leaf: googleLeaf,
|
||||
@@ -551,6 +575,30 @@ func nameToKey(name *pkix.Name) string {
|
||||
return strings.Join(name.Country, ",") + "/" + strings.Join(name.Organization, ",") + "/" + strings.Join(name.OrganizationalUnit, ",") + "/" + name.CommonName
|
||||
}
|
||||
|
||||
func generatePEMCertWithRepeatSAN(currentTime int64, count int, san string) string {
|
||||
cert := Certificate{
|
||||
NotBefore: time.Unix(currentTime, 0),
|
||||
NotAfter: time.Unix(currentTime, 0),
|
||||
}
|
||||
if ip := net.ParseIP(san); ip != nil {
|
||||
cert.IPAddresses = slices.Repeat([]net.IP{ip}, count)
|
||||
} else {
|
||||
cert.DNSNames = slices.Repeat([]string{san}, count)
|
||||
}
|
||||
privKey, err := rsa.GenerateKey(rand.Reader, 4096)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
certBytes, err := CreateCertificate(rand.Reader, &cert, &cert, &privKey.PublicKey, privKey)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
return string(pem.EncodeToMemory(&pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: certBytes,
|
||||
}))
|
||||
}
|
||||
|
||||
const gtsIntermediate = `-----BEGIN CERTIFICATE-----
|
||||
MIIFljCCA36gAwIBAgINAgO8U1lrNMcY9QFQZjANBgkqhkiG9w0BAQsFADBHMQsw
|
||||
CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU
|
||||
@@ -1351,7 +1399,7 @@ var nameConstraintTests = []struct {
|
||||
|
||||
func TestNameConstraints(t *testing.T) {
|
||||
for i, test := range nameConstraintTests {
|
||||
result, err := matchDomainConstraint(test.domain, test.constraint)
|
||||
result, err := matchDomainConstraint(test.domain, test.constraint, false, map[string][]string{}, map[string][]string{})
|
||||
|
||||
if err != nil && !test.expectError {
|
||||
t.Errorf("unexpected error for test #%d: domain=%s, constraint=%s, err=%s", i, test.domain, test.constraint, err)
|
||||
@@ -3048,3 +3096,129 @@ func TestInvalidPolicyWithAnyKeyUsage(t *testing.T) {
|
||||
t.Fatalf("unexpected error, got %q, want %q", err, expectedErr)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCertificateChainSignedByECDSA(t *testing.T) {
|
||||
caKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
root := &Certificate{
|
||||
SerialNumber: big.NewInt(1),
|
||||
Subject: pkix.Name{CommonName: "X"},
|
||||
NotBefore: time.Now().Add(-time.Hour),
|
||||
NotAfter: time.Now().Add(365 * 24 * time.Hour),
|
||||
IsCA: true,
|
||||
KeyUsage: KeyUsageCertSign | KeyUsageCRLSign,
|
||||
BasicConstraintsValid: true,
|
||||
}
|
||||
caDER, err := CreateCertificate(rand.Reader, root, root, &caKey.PublicKey, caKey)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
root, err = ParseCertificate(caDER)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
leafKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
leaf := &Certificate{
|
||||
SerialNumber: big.NewInt(42),
|
||||
Subject: pkix.Name{CommonName: "leaf"},
|
||||
NotBefore: time.Now().Add(-10 * time.Minute),
|
||||
NotAfter: time.Now().Add(24 * time.Hour),
|
||||
KeyUsage: KeyUsageDigitalSignature,
|
||||
ExtKeyUsage: []ExtKeyUsage{ExtKeyUsageServerAuth},
|
||||
BasicConstraintsValid: true,
|
||||
}
|
||||
leafDER, err := CreateCertificate(rand.Reader, leaf, root, &leafKey.PublicKey, caKey)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
leaf, err = ParseCertificate(leafDER)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
inter, err := ParseCertificate(dsaSelfSignedCNX(t))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
inters := NewCertPool()
|
||||
inters.AddCert(root)
|
||||
inters.AddCert(inter)
|
||||
|
||||
wantErr := "certificate signed by unknown authority"
|
||||
_, err = leaf.Verify(VerifyOptions{Intermediates: inters, Roots: NewCertPool()})
|
||||
if !strings.Contains(err.Error(), wantErr) {
|
||||
t.Errorf("got %v, want %q", err, wantErr)
|
||||
}
|
||||
}
|
||||
|
||||
// dsaSelfSignedCNX produces DER-encoded
|
||||
// certificate with the properties:
|
||||
//
|
||||
// Subject=Issuer=CN=X
|
||||
// DSA SPKI
|
||||
// Matching inner/outer signature OIDs
|
||||
// Dummy ECDSA signature
|
||||
func dsaSelfSignedCNX(t *testing.T) []byte {
|
||||
t.Helper()
|
||||
var params dsa.Parameters
|
||||
if err := dsa.GenerateParameters(¶ms, rand.Reader, dsa.L1024N160); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var dsaPriv dsa.PrivateKey
|
||||
dsaPriv.Parameters = params
|
||||
if err := dsa.GenerateKey(&dsaPriv, rand.Reader); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
dsaPub := &dsaPriv.PublicKey
|
||||
|
||||
type dsaParams struct{ P, Q, G *big.Int }
|
||||
paramDER, err := asn1.Marshal(dsaParams{dsaPub.P, dsaPub.Q, dsaPub.G})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
yDER, err := asn1.Marshal(dsaPub.Y)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
spki := publicKeyInfo{
|
||||
Algorithm: pkix.AlgorithmIdentifier{
|
||||
Algorithm: oidPublicKeyDSA,
|
||||
Parameters: asn1.RawValue{FullBytes: paramDER},
|
||||
},
|
||||
PublicKey: asn1.BitString{Bytes: yDER, BitLength: 8 * len(yDER)},
|
||||
}
|
||||
|
||||
rdn := pkix.Name{CommonName: "X"}.ToRDNSequence()
|
||||
b, err := asn1.Marshal(rdn)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
rawName := asn1.RawValue{FullBytes: b}
|
||||
|
||||
algoIdent := pkix.AlgorithmIdentifier{Algorithm: oidSignatureDSAWithSHA256}
|
||||
tbs := tbsCertificate{
|
||||
Version: 0,
|
||||
SerialNumber: big.NewInt(1002),
|
||||
SignatureAlgorithm: algoIdent,
|
||||
Issuer: rawName,
|
||||
Validity: validity{NotBefore: time.Now().Add(-time.Hour), NotAfter: time.Now().Add(24 * time.Hour)},
|
||||
Subject: rawName,
|
||||
PublicKey: spki,
|
||||
}
|
||||
c := certificate{
|
||||
TBSCertificate: tbs,
|
||||
SignatureAlgorithm: algoIdent,
|
||||
SignatureValue: asn1.BitString{Bytes: []byte{0}, BitLength: 8},
|
||||
}
|
||||
dsaDER, err := asn1.Marshal(c)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return dsaDER
|
||||
}
|
||||
|
||||
@@ -335,7 +335,6 @@ func convertAssignRows(dest, src any, rows *Rows) error {
|
||||
if rows == nil {
|
||||
return errors.New("invalid context to convert cursor rows, missing parent *Rows")
|
||||
}
|
||||
rows.closemu.Lock()
|
||||
*d = Rows{
|
||||
dc: rows.dc,
|
||||
releaseConn: func(error) {},
|
||||
@@ -351,7 +350,6 @@ func convertAssignRows(dest, src any, rows *Rows) error {
|
||||
parentCancel()
|
||||
}
|
||||
}
|
||||
rows.closemu.Unlock()
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
package sql
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"database/sql/driver"
|
||||
"errors"
|
||||
@@ -15,7 +16,6 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
@@ -91,8 +91,6 @@ func (cc *fakeDriverCtx) OpenConnector(name string) (driver.Connector, error) {
|
||||
type fakeDB struct {
|
||||
name string
|
||||
|
||||
useRawBytes atomic.Bool
|
||||
|
||||
mu sync.Mutex
|
||||
tables map[string]*table
|
||||
badConn bool
|
||||
@@ -684,8 +682,6 @@ func (c *fakeConn) PrepareContext(ctx context.Context, query string) (driver.Stm
|
||||
switch cmd {
|
||||
case "WIPE":
|
||||
// Nothing
|
||||
case "USE_RAWBYTES":
|
||||
c.db.useRawBytes.Store(true)
|
||||
case "SELECT":
|
||||
stmt, err = c.prepareSelect(stmt, parts)
|
||||
case "CREATE":
|
||||
@@ -789,9 +785,6 @@ func (s *fakeStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (d
|
||||
case "WIPE":
|
||||
db.wipe()
|
||||
return driver.ResultNoRows, nil
|
||||
case "USE_RAWBYTES":
|
||||
s.c.db.useRawBytes.Store(true)
|
||||
return driver.ResultNoRows, nil
|
||||
case "CREATE":
|
||||
if err := db.createTable(s.table, s.colName, s.colType); err != nil {
|
||||
return nil, err
|
||||
@@ -1076,10 +1069,9 @@ type rowsCursor struct {
|
||||
errPos int
|
||||
err error
|
||||
|
||||
// a clone of slices to give out to clients, indexed by the
|
||||
// original slice's first byte address. we clone them
|
||||
// just so we're able to corrupt them on close.
|
||||
bytesClone map[*byte][]byte
|
||||
// Data returned to clients.
|
||||
// We clone and stash it here so it can be invalidated by Close and Next.
|
||||
driverOwnedMemory [][]byte
|
||||
|
||||
// Every operation writes to line to enable the race detector
|
||||
// check for data races.
|
||||
@@ -1096,9 +1088,19 @@ func (rc *rowsCursor) touchMem() {
|
||||
rc.line++
|
||||
}
|
||||
|
||||
func (rc *rowsCursor) invalidateDriverOwnedMemory() {
|
||||
for _, buf := range rc.driverOwnedMemory {
|
||||
for i := range buf {
|
||||
buf[i] = 'x'
|
||||
}
|
||||
}
|
||||
rc.driverOwnedMemory = nil
|
||||
}
|
||||
|
||||
func (rc *rowsCursor) Close() error {
|
||||
rc.touchMem()
|
||||
rc.parentMem.touchMem()
|
||||
rc.invalidateDriverOwnedMemory()
|
||||
rc.closed = true
|
||||
return rc.closeErr
|
||||
}
|
||||
@@ -1129,6 +1131,8 @@ func (rc *rowsCursor) Next(dest []driver.Value) error {
|
||||
if rc.posRow >= len(rc.rows[rc.posSet]) {
|
||||
return io.EOF // per interface spec
|
||||
}
|
||||
// Corrupt any previously returned bytes.
|
||||
rc.invalidateDriverOwnedMemory()
|
||||
for i, v := range rc.rows[rc.posSet][rc.posRow].cols {
|
||||
// TODO(bradfitz): convert to subset types? naah, I
|
||||
// think the subset types should only be input to
|
||||
@@ -1136,20 +1140,13 @@ func (rc *rowsCursor) Next(dest []driver.Value) error {
|
||||
// a wider range of types coming out of drivers. all
|
||||
// for ease of drivers, and to prevent drivers from
|
||||
// messing up conversions or doing them differently.
|
||||
dest[i] = v
|
||||
|
||||
if bs, ok := v.([]byte); ok && !rc.db.useRawBytes.Load() {
|
||||
if rc.bytesClone == nil {
|
||||
rc.bytesClone = make(map[*byte][]byte)
|
||||
}
|
||||
clone, ok := rc.bytesClone[&bs[0]]
|
||||
if !ok {
|
||||
clone = make([]byte, len(bs))
|
||||
copy(clone, bs)
|
||||
rc.bytesClone[&bs[0]] = clone
|
||||
}
|
||||
dest[i] = clone
|
||||
if bs, ok := v.([]byte); ok {
|
||||
// Clone []bytes and stash for later invalidation.
|
||||
bs = bytes.Clone(bs)
|
||||
rc.driverOwnedMemory = append(rc.driverOwnedMemory, bs)
|
||||
v = bs
|
||||
}
|
||||
dest[i] = v
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -3368,38 +3368,36 @@ func (rs *Rows) Scan(dest ...any) error {
|
||||
// without calling Next.
|
||||
return fmt.Errorf("sql: Scan called without calling Next (closemuScanHold)")
|
||||
}
|
||||
|
||||
rs.closemu.RLock()
|
||||
|
||||
if rs.lasterr != nil && rs.lasterr != io.EOF {
|
||||
rs.closemu.RUnlock()
|
||||
return rs.lasterr
|
||||
}
|
||||
if rs.closed {
|
||||
err := rs.lasterrOrErrLocked(errRowsClosed)
|
||||
rs.closemu.RUnlock()
|
||||
return err
|
||||
}
|
||||
|
||||
if scanArgsContainRawBytes(dest) {
|
||||
rs.raw = rs.raw[:0]
|
||||
err := rs.scanLocked(dest...)
|
||||
if err == nil && scanArgsContainRawBytes(dest) {
|
||||
rs.closemuScanHold = true
|
||||
rs.raw = rs.raw[:0]
|
||||
} else {
|
||||
rs.closemu.RUnlock()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (rs *Rows) scanLocked(dest ...any) error {
|
||||
if rs.lasterr != nil && rs.lasterr != io.EOF {
|
||||
return rs.lasterr
|
||||
}
|
||||
if rs.closed {
|
||||
return rs.lasterrOrErrLocked(errRowsClosed)
|
||||
}
|
||||
|
||||
if rs.lastcols == nil {
|
||||
rs.closemuRUnlockIfHeldByScan()
|
||||
return errors.New("sql: Scan called without calling Next")
|
||||
}
|
||||
if len(dest) != len(rs.lastcols) {
|
||||
rs.closemuRUnlockIfHeldByScan()
|
||||
return fmt.Errorf("sql: expected %d destination arguments in Scan, not %d", len(rs.lastcols), len(dest))
|
||||
}
|
||||
|
||||
for i, sv := range rs.lastcols {
|
||||
err := convertAssignRows(dest[i], sv, rs)
|
||||
if err != nil {
|
||||
rs.closemuRUnlockIfHeldByScan()
|
||||
return fmt.Errorf(`sql: Scan error on column index %d, name %q: %w`, i, rs.rowsi.Columns()[i], err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
package sql
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"database/sql/driver"
|
||||
"errors"
|
||||
@@ -4434,10 +4435,6 @@ func testContextCancelDuringRawBytesScan(t *testing.T, mode string) {
|
||||
db := newTestDB(t, "people")
|
||||
defer closeDB(t, db)
|
||||
|
||||
if _, err := db.Exec("USE_RAWBYTES"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// cancel used to call close asynchronously.
|
||||
// This test checks that it waits so as not to interfere with RawBytes.
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
@@ -4529,6 +4526,61 @@ func TestContextCancelBetweenNextAndErr(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
type testScanner struct {
|
||||
scanf func(src any) error
|
||||
}
|
||||
|
||||
func (ts testScanner) Scan(src any) error { return ts.scanf(src) }
|
||||
|
||||
func TestContextCancelDuringScan(t *testing.T) {
|
||||
db := newTestDB(t, "people")
|
||||
defer closeDB(t, db)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
scanStart := make(chan any)
|
||||
scanEnd := make(chan error)
|
||||
scanner := &testScanner{
|
||||
scanf: func(src any) error {
|
||||
scanStart <- src
|
||||
return <-scanEnd
|
||||
},
|
||||
}
|
||||
|
||||
// Start a query, and pause it mid-scan.
|
||||
want := []byte("Alice")
|
||||
r, err := db.QueryContext(ctx, "SELECT|people|name|name=?", string(want))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !r.Next() {
|
||||
t.Fatalf("r.Next() = false, want true")
|
||||
}
|
||||
go func() {
|
||||
r.Scan(scanner)
|
||||
}()
|
||||
got := <-scanStart
|
||||
defer close(scanEnd)
|
||||
gotBytes, ok := got.([]byte)
|
||||
if !ok {
|
||||
t.Fatalf("r.Scan returned %T, want []byte", got)
|
||||
}
|
||||
if !bytes.Equal(gotBytes, want) {
|
||||
t.Fatalf("before cancel: r.Scan returned %q, want %q", gotBytes, want)
|
||||
}
|
||||
|
||||
// Cancel the query.
|
||||
// Sleep to give it a chance to finish canceling.
|
||||
cancel()
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Cancelling the query should not have changed the result.
|
||||
if !bytes.Equal(gotBytes, want) {
|
||||
t.Fatalf("after cancel: r.Scan result is now %q, want %q", gotBytes, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNilErrorAfterClose(t *testing.T) {
|
||||
db := newTestDB(t, "people")
|
||||
defer closeDB(t, db)
|
||||
@@ -4562,10 +4614,6 @@ func TestRawBytesReuse(t *testing.T) {
|
||||
db := newTestDB(t, "people")
|
||||
defer closeDB(t, db)
|
||||
|
||||
if _, err := db.Exec("USE_RAWBYTES"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var raw RawBytes
|
||||
|
||||
// The RawBytes in this query aliases driver-owned memory.
|
||||
|
||||
@@ -98,7 +98,12 @@ func readCOFFSymbols(fh *FileHeader, r io.ReadSeeker) ([]COFFSymbol, error) {
|
||||
// isSymNameOffset checks symbol name if it is encoded as offset into string table.
|
||||
func isSymNameOffset(name [8]byte) (bool, uint32) {
|
||||
if name[0] == 0 && name[1] == 0 && name[2] == 0 && name[3] == 0 {
|
||||
return true, binary.LittleEndian.Uint32(name[4:])
|
||||
offset := binary.LittleEndian.Uint32(name[4:])
|
||||
if offset == 0 {
|
||||
// symbol has no name
|
||||
return false, 0
|
||||
}
|
||||
return true, offset
|
||||
}
|
||||
return false, 0
|
||||
}
|
||||
|
||||
@@ -22,6 +22,7 @@ package asn1
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"internal/saferio"
|
||||
"math"
|
||||
"math/big"
|
||||
"reflect"
|
||||
@@ -666,10 +667,17 @@ func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type
|
||||
offset += t.length
|
||||
numElements++
|
||||
}
|
||||
ret = reflect.MakeSlice(sliceType, numElements, numElements)
|
||||
elemSize := uint64(elemType.Size())
|
||||
safeCap := saferio.SliceCapWithSize(elemSize, uint64(numElements))
|
||||
if safeCap < 0 {
|
||||
err = SyntaxError{fmt.Sprintf("%s slice too big: %d elements of %d bytes", elemType.Kind(), numElements, elemSize)}
|
||||
return
|
||||
}
|
||||
ret = reflect.MakeSlice(sliceType, 0, safeCap)
|
||||
params := fieldParameters{}
|
||||
offset := 0
|
||||
for i := 0; i < numElements; i++ {
|
||||
ret = reflect.Append(ret, reflect.Zero(elemType))
|
||||
offset, err = parseField(ret.Index(i), bytes, offset, params)
|
||||
if err != nil {
|
||||
return
|
||||
|
||||
@@ -7,10 +7,12 @@ package asn1
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -1216,3 +1218,39 @@ func TestImplicitTypeRoundtrip(t *testing.T) {
|
||||
t.Fatalf("Unexpected diff after roundtripping struct\na: %#v\nb: %#v", a, b)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsingMemoryConsumption(t *testing.T) {
|
||||
// Craft a syntatically valid, but empty, ~10 MB DER bomb. A successful
|
||||
// unmarshal of this bomb should yield ~280 MB. However, the parsing should
|
||||
// fail due to the empty content; and, in such cases, we want to make sure
|
||||
// that we do not unnecessarily allocate memories.
|
||||
derBomb := make([]byte, 10_000_000)
|
||||
for i := range derBomb {
|
||||
derBomb[i] = 0x30
|
||||
}
|
||||
derBomb = append([]byte{0x30, 0x83, 0x98, 0x96, 0x80}, derBomb...)
|
||||
|
||||
var m runtime.MemStats
|
||||
runtime.GC()
|
||||
runtime.ReadMemStats(&m)
|
||||
memBefore := m.TotalAlloc
|
||||
|
||||
var out []struct {
|
||||
Id []int
|
||||
Critical bool `asn1:"optional"`
|
||||
Value []byte
|
||||
}
|
||||
_, err := Unmarshal(derBomb, &out)
|
||||
if !errors.As(err, &SyntaxError{}) {
|
||||
t.Fatalf("Incorrect error result: want (%v), but got (%v) instead", &SyntaxError{}, err)
|
||||
}
|
||||
|
||||
runtime.ReadMemStats(&m)
|
||||
memDiff := m.TotalAlloc - memBefore
|
||||
|
||||
// Ensure that the memory allocated does not exceed 10<<21 (~20 MB) when
|
||||
// the parsing fails.
|
||||
if memDiff > 10<<21 {
|
||||
t.Errorf("Too much memory allocated while parsing DER: %v MiB", memDiff/1024/1024)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -37,7 +37,7 @@ type Block struct {
|
||||
// line bytes. The remainder of the byte array (also not including the new line
|
||||
// bytes) is also returned and this will always be smaller than the original
|
||||
// argument.
|
||||
func getLine(data []byte) (line, rest []byte) {
|
||||
func getLine(data []byte) (line, rest []byte, consumed int) {
|
||||
i := bytes.IndexByte(data, '\n')
|
||||
var j int
|
||||
if i < 0 {
|
||||
@@ -49,7 +49,7 @@ func getLine(data []byte) (line, rest []byte) {
|
||||
i--
|
||||
}
|
||||
}
|
||||
return bytes.TrimRight(data[0:i], " \t"), data[j:]
|
||||
return bytes.TrimRight(data[0:i], " \t"), data[j:], j
|
||||
}
|
||||
|
||||
// removeSpacesAndTabs returns a copy of its input with all spaces and tabs
|
||||
@@ -90,17 +90,37 @@ func Decode(data []byte) (p *Block, rest []byte) {
|
||||
// pemStart begins with a newline. However, at the very beginning of
|
||||
// the byte array, we'll accept the start string without it.
|
||||
rest = data
|
||||
|
||||
endTrailerIndex := 0
|
||||
for {
|
||||
if bytes.HasPrefix(rest, pemStart[1:]) {
|
||||
rest = rest[len(pemStart)-1:]
|
||||
} else if _, after, ok := bytes.Cut(rest, pemStart); ok {
|
||||
rest = after
|
||||
} else {
|
||||
// If we've already tried parsing a block, skip past the END we already
|
||||
// saw.
|
||||
if endTrailerIndex < 0 || endTrailerIndex > len(rest) {
|
||||
return nil, data
|
||||
}
|
||||
rest = rest[endTrailerIndex:]
|
||||
|
||||
// Find the first END line, and then find the last BEGIN line before
|
||||
// the end line. This lets us skip any repeated BEGIN lines that don't
|
||||
// have a matching END.
|
||||
endIndex := bytes.Index(rest, pemEnd)
|
||||
if endIndex < 0 {
|
||||
return nil, data
|
||||
}
|
||||
endTrailerIndex = endIndex + len(pemEnd)
|
||||
beginIndex := bytes.LastIndex(rest[:endIndex], pemStart[1:])
|
||||
if beginIndex < 0 || (beginIndex > 0 && rest[beginIndex-1] != '\n') {
|
||||
continue
|
||||
}
|
||||
rest = rest[beginIndex+len(pemStart)-1:]
|
||||
endIndex -= beginIndex + len(pemStart) - 1
|
||||
endTrailerIndex -= beginIndex + len(pemStart) - 1
|
||||
|
||||
var typeLine []byte
|
||||
typeLine, rest = getLine(rest)
|
||||
var consumed int
|
||||
typeLine, rest, consumed = getLine(rest)
|
||||
endIndex -= consumed
|
||||
endTrailerIndex -= consumed
|
||||
if !bytes.HasSuffix(typeLine, pemEndOfLine) {
|
||||
continue
|
||||
}
|
||||
@@ -117,7 +137,7 @@ func Decode(data []byte) (p *Block, rest []byte) {
|
||||
if len(rest) == 0 {
|
||||
return nil, data
|
||||
}
|
||||
line, next := getLine(rest)
|
||||
line, next, consumed := getLine(rest)
|
||||
|
||||
key, val, ok := bytes.Cut(line, colon)
|
||||
if !ok {
|
||||
@@ -129,21 +149,13 @@ func Decode(data []byte) (p *Block, rest []byte) {
|
||||
val = bytes.TrimSpace(val)
|
||||
p.Headers[string(key)] = string(val)
|
||||
rest = next
|
||||
endIndex -= consumed
|
||||
endTrailerIndex -= consumed
|
||||
}
|
||||
|
||||
var endIndex, endTrailerIndex int
|
||||
|
||||
// If there were no headers, the END line might occur
|
||||
// immediately, without a leading newline.
|
||||
if len(p.Headers) == 0 && bytes.HasPrefix(rest, pemEnd[1:]) {
|
||||
endIndex = 0
|
||||
endTrailerIndex = len(pemEnd) - 1
|
||||
} else {
|
||||
endIndex = bytes.Index(rest, pemEnd)
|
||||
endTrailerIndex = endIndex + len(pemEnd)
|
||||
}
|
||||
|
||||
if endIndex < 0 {
|
||||
// If there were headers, there must be a newline between the headers
|
||||
// and the END line, so endIndex should be >= 0.
|
||||
if len(p.Headers) > 0 && endIndex < 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -163,21 +175,24 @@ func Decode(data []byte) (p *Block, rest []byte) {
|
||||
}
|
||||
|
||||
// The line must end with only whitespace.
|
||||
if s, _ := getLine(restOfEndLine); len(s) != 0 {
|
||||
if s, _, _ := getLine(restOfEndLine); len(s) != 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
base64Data := removeSpacesAndTabs(rest[:endIndex])
|
||||
p.Bytes = make([]byte, base64.StdEncoding.DecodedLen(len(base64Data)))
|
||||
n, err := base64.StdEncoding.Decode(p.Bytes, base64Data)
|
||||
if err != nil {
|
||||
continue
|
||||
p.Bytes = []byte{}
|
||||
if endIndex > 0 {
|
||||
base64Data := removeSpacesAndTabs(rest[:endIndex])
|
||||
p.Bytes = make([]byte, base64.StdEncoding.DecodedLen(len(base64Data)))
|
||||
n, err := base64.StdEncoding.Decode(p.Bytes, base64Data)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
p.Bytes = p.Bytes[:n]
|
||||
}
|
||||
p.Bytes = p.Bytes[:n]
|
||||
|
||||
// the -1 is because we might have only matched pemEnd without the
|
||||
// leading newline if the PEM block was empty.
|
||||
_, rest = getLine(rest[endIndex+len(pemEnd)-1:])
|
||||
_, rest, _ = getLine(rest[endIndex+len(pemEnd)-1:])
|
||||
return p, rest
|
||||
}
|
||||
}
|
||||
|
||||
@@ -34,7 +34,7 @@ var getLineTests = []GetLineTest{
|
||||
|
||||
func TestGetLine(t *testing.T) {
|
||||
for i, test := range getLineTests {
|
||||
x, y := getLine([]byte(test.in))
|
||||
x, y, _ := getLine([]byte(test.in))
|
||||
if string(x) != test.out1 || string(y) != test.out2 {
|
||||
t.Errorf("#%d got:%+v,%+v want:%s,%s", i, x, y, test.out1, test.out2)
|
||||
}
|
||||
@@ -46,6 +46,7 @@ func TestDecode(t *testing.T) {
|
||||
if !reflect.DeepEqual(result, certificate) {
|
||||
t.Errorf("#0 got:%#v want:%#v", result, certificate)
|
||||
}
|
||||
|
||||
result, remainder = Decode(remainder)
|
||||
if !reflect.DeepEqual(result, privateKey) {
|
||||
t.Errorf("#1 got:%#v want:%#v", result, privateKey)
|
||||
@@ -68,7 +69,7 @@ func TestDecode(t *testing.T) {
|
||||
}
|
||||
|
||||
result, remainder = Decode(remainder)
|
||||
if result == nil || result.Type != "HEADERS" || len(result.Headers) != 1 {
|
||||
if result == nil || result.Type != "VALID HEADERS" || len(result.Headers) != 1 {
|
||||
t.Errorf("#5 expected single header block but got :%v", result)
|
||||
}
|
||||
|
||||
@@ -381,15 +382,15 @@ ZWAaUoVtWIQ52aKS0p19G99hhb+IVANC4akkdHV4SP8i7MVNZhfUmg==
|
||||
|
||||
# This shouldn't be recognised because of the missing newline after the
|
||||
headers.
|
||||
-----BEGIN HEADERS-----
|
||||
-----BEGIN INVALID HEADERS-----
|
||||
Header: 1
|
||||
-----END HEADERS-----
|
||||
-----END INVALID HEADERS-----
|
||||
|
||||
# This should be valid, however.
|
||||
-----BEGIN HEADERS-----
|
||||
-----BEGIN VALID HEADERS-----
|
||||
Header: 1
|
||||
|
||||
-----END HEADERS-----`)
|
||||
-----END VALID HEADERS-----`)
|
||||
|
||||
var certificate = &Block{Type: "CERTIFICATE",
|
||||
Headers: map[string]string{},
|
||||
@@ -638,3 +639,104 @@ func TestBadEncode(t *testing.T) {
|
||||
}
|
||||
|
||||
func testingKey(s string) string { return strings.ReplaceAll(s, "TESTING KEY", "PRIVATE KEY") }
|
||||
|
||||
func TestDecodeStrangeCases(t *testing.T) {
|
||||
sentinelType := "TEST BLOCK"
|
||||
sentinelBytes := []byte("hello")
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
pem string
|
||||
}{
|
||||
{
|
||||
name: "invalid section (not base64)",
|
||||
pem: `-----BEGIN COMMENT-----
|
||||
foo foo foo
|
||||
-----END COMMENT-----
|
||||
-----BEGIN TEST BLOCK-----
|
||||
aGVsbG8=
|
||||
-----END TEST BLOCK-----`,
|
||||
},
|
||||
{
|
||||
name: "leading garbage on block",
|
||||
pem: `foo foo foo-----BEGIN CERTIFICATE-----
|
||||
MCowBQYDK2VwAyEApVjJeLW5MoP6uR3+OeITokM+rBDng6dgl1vvhcy+wws=
|
||||
-----END PUBLIC KEY-----
|
||||
-----BEGIN TEST BLOCK-----
|
||||
aGVsbG8=
|
||||
-----END TEST BLOCK-----`,
|
||||
},
|
||||
{
|
||||
name: "leading garbage",
|
||||
pem: `foo foo foo
|
||||
-----BEGIN TEST BLOCK-----
|
||||
aGVsbG8=
|
||||
-----END TEST BLOCK-----`,
|
||||
},
|
||||
{
|
||||
name: "leading partial block",
|
||||
pem: `foo foo foo
|
||||
-----END COMMENT-----
|
||||
-----BEGIN TEST BLOCK-----
|
||||
aGVsbG8=
|
||||
-----END TEST BLOCK-----`,
|
||||
},
|
||||
{
|
||||
name: "multiple BEGIN",
|
||||
pem: `-----BEGIN TEST BLOCK-----
|
||||
-----BEGIN TEST BLOCK-----
|
||||
-----BEGIN TEST BLOCK-----
|
||||
aGVsbG8=
|
||||
-----END TEST BLOCK-----`,
|
||||
},
|
||||
{
|
||||
name: "multiple END",
|
||||
pem: `-----BEGIN TEST BLOCK-----
|
||||
aGVsbG8=
|
||||
-----END TEST BLOCK-----
|
||||
-----END TEST BLOCK-----
|
||||
-----END TEST BLOCK-----`,
|
||||
},
|
||||
{
|
||||
name: "leading malformed BEGIN",
|
||||
pem: `-----BEGIN PUBLIC KEY
|
||||
aGVsbG8=
|
||||
-----END PUBLIC KEY-----
|
||||
-----BEGIN TEST BLOCK-----
|
||||
aGVsbG8=
|
||||
-----END TEST BLOCK-----`,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
block, _ := Decode([]byte(tc.pem))
|
||||
if block == nil {
|
||||
t.Fatal("expected valid block")
|
||||
}
|
||||
if block.Type != sentinelType {
|
||||
t.Fatalf("unexpected block returned, got type %q, want type %q", block.Type, sentinelType)
|
||||
}
|
||||
if !bytes.Equal(block.Bytes, sentinelBytes) {
|
||||
t.Fatalf("unexpected block content, got %x, want %x", block.Bytes, sentinelBytes)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestJustEnd(t *testing.T) {
|
||||
pemData := `
|
||||
-----END PUBLIC KEY-----`
|
||||
|
||||
block, _ := Decode([]byte(pemData))
|
||||
if block != nil {
|
||||
t.Fatal("unexpected block")
|
||||
}
|
||||
}
|
||||
|
||||
func FuzzDecode(f *testing.F) {
|
||||
f.Fuzz(func(t *testing.T, data []byte) {
|
||||
Decode(data)
|
||||
})
|
||||
}
|
||||
|
||||
func TestMissingEndTrailer(t *testing.T) {
|
||||
Decode([]byte{0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x42, 0x45, 0x47, 0x49, 0x4e, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0xa, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x45, 0x4e, 0x44, 0x20})
|
||||
}
|
||||
|
||||
@@ -26,18 +26,6 @@ func Join(errs ...error) error {
|
||||
if n == 0 {
|
||||
return nil
|
||||
}
|
||||
if n == 1 {
|
||||
for _, err := range errs {
|
||||
if err != nil {
|
||||
if _, ok := err.(interface {
|
||||
Unwrap() []error
|
||||
}); ok {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
e := &joinError{
|
||||
errs: make([]error, 0, n),
|
||||
}
|
||||
|
||||
@@ -70,37 +70,3 @@ func TestJoinErrorMethod(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkJoin(b *testing.B) {
|
||||
for _, bb := range []struct {
|
||||
name string
|
||||
errs []error
|
||||
}{
|
||||
{
|
||||
name: "no error",
|
||||
},
|
||||
{
|
||||
name: "single non-nil error",
|
||||
errs: []error{errors.New("err")},
|
||||
},
|
||||
{
|
||||
name: "multiple errors",
|
||||
errs: []error{errors.New("err"), errors.New("newerr"), errors.New("newerr2")},
|
||||
},
|
||||
{
|
||||
name: "unwrappable single error",
|
||||
errs: []error{errors.Join(errors.New("err"))},
|
||||
},
|
||||
{
|
||||
name: "nil first error",
|
||||
errs: []error{nil, errors.New("newerr")},
|
||||
},
|
||||
} {
|
||||
b.Run(bb.name, func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_ = errors.Join(bb.errs...)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -235,7 +235,6 @@ var depsRules = `
|
||||
internal/types/errors,
|
||||
mime/quotedprintable,
|
||||
net/internal/socktest,
|
||||
net/url,
|
||||
runtime/trace,
|
||||
text/scanner,
|
||||
text/tabwriter;
|
||||
@@ -298,6 +297,12 @@ var depsRules = `
|
||||
FMT
|
||||
< text/template/parse;
|
||||
|
||||
internal/bytealg, internal/itoa, math/bits, slices, strconv, unique
|
||||
< net/netip;
|
||||
|
||||
FMT, net/netip
|
||||
< net/url;
|
||||
|
||||
net/url, text/template/parse
|
||||
< text/template
|
||||
< internal/lazytemplate;
|
||||
@@ -412,9 +417,6 @@ var depsRules = `
|
||||
< golang.org/x/net/dns/dnsmessage,
|
||||
golang.org/x/net/lif;
|
||||
|
||||
internal/bytealg, internal/itoa, math/bits, slices, strconv, unique
|
||||
< net/netip;
|
||||
|
||||
os, net/netip
|
||||
< internal/routebsd;
|
||||
|
||||
@@ -557,7 +559,7 @@ var depsRules = `
|
||||
|
||||
# CRYPTO-MATH is crypto that exposes math/big APIs - no cgo, net; fmt now ok.
|
||||
|
||||
CRYPTO, FMT, math/big
|
||||
CRYPTO, FMT, math/big, internal/saferio
|
||||
< crypto/internal/boring/bbig
|
||||
< crypto/internal/fips140cache
|
||||
< crypto/rand
|
||||
|
||||
@@ -85,7 +85,7 @@ func gofips140() string {
|
||||
}
|
||||
|
||||
// isFIPSVersion reports whether v is a valid FIPS version,
|
||||
// of the form vX.Y.Z.
|
||||
// of the form vX.Y.Z or vX.Y.Z-hash.
|
||||
func isFIPSVersion(v string) bool {
|
||||
if !strings.HasPrefix(v, "v") {
|
||||
return false
|
||||
@@ -99,7 +99,8 @@ func isFIPSVersion(v string) bool {
|
||||
return false
|
||||
}
|
||||
v, ok = skipNum(v[len("."):])
|
||||
return ok && v == ""
|
||||
hasHash := strings.HasPrefix(v, "-") && len(v) == len("-")+8
|
||||
return ok && (v == "" || hasHash)
|
||||
}
|
||||
|
||||
// skipNum skips the leading text matching [0-9]+
|
||||
|
||||
@@ -42,6 +42,7 @@ var All = []Info{
|
||||
{Name: "http2client", Package: "net/http"},
|
||||
{Name: "http2debug", Package: "net/http", Opaque: true},
|
||||
{Name: "http2server", Package: "net/http"},
|
||||
{Name: "httpcookiemaxnum", Package: "net/http", Changed: 24, Old: "0"},
|
||||
{Name: "httplaxcontentlength", Package: "net/http", Changed: 22, Old: "1"},
|
||||
{Name: "httpmuxgo121", Package: "net/http", Changed: 22, Old: "1"},
|
||||
{Name: "httpservecontentkeepheaders", Package: "net/http", Changed: 23, Old: "1"},
|
||||
@@ -66,6 +67,7 @@ var All = []Info{
|
||||
{Name: "tlssha1", Package: "crypto/tls", Changed: 25, Old: "1"},
|
||||
{Name: "tlsunsafeekm", Package: "crypto/tls", Changed: 22, Old: "1"},
|
||||
{Name: "updatemaxprocs", Package: "runtime", Changed: 25, Old: "0"},
|
||||
{Name: "urlmaxqueryparams", Package: "net/url", Changed: 24, Old: "0"},
|
||||
{Name: "winreadlinkvolume", Package: "os", Changed: 23, Old: "0"},
|
||||
{Name: "winsymlink", Package: "os", Changed: 23, Old: "0"},
|
||||
{Name: "x509keypairleaf", Package: "crypto/tls", Changed: 23, Old: "0"},
|
||||
|
||||
@@ -402,6 +402,10 @@ func (fd *FD) Init(net string, pollable bool) error {
|
||||
fd.rop.fd = fd
|
||||
fd.wop.fd = fd
|
||||
|
||||
if !pollable {
|
||||
return nil
|
||||
}
|
||||
|
||||
// It is safe to add overlapped handles that also perform I/O
|
||||
// outside of the runtime poller. The runtime poller will ignore
|
||||
// I/O completion notifications not initiated by us.
|
||||
@@ -636,12 +640,22 @@ func (fd *FD) Pread(b []byte, off int64) (int, error) {
|
||||
|
||||
fd.l.Lock()
|
||||
defer fd.l.Unlock()
|
||||
curoffset, err := syscall.Seek(fd.Sysfd, 0, io.SeekCurrent)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
if fd.isBlocking {
|
||||
curoffset, err := syscall.Seek(fd.Sysfd, 0, io.SeekCurrent)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer syscall.Seek(fd.Sysfd, curoffset, io.SeekStart)
|
||||
defer fd.setOffset(curoffset)
|
||||
} else {
|
||||
// Overlapped handles don't have the file pointer updated
|
||||
// when performing I/O operations, so there is no need to
|
||||
// call Seek to reset the file pointer.
|
||||
// Also, some overlapped file handles don't support seeking.
|
||||
// See https://go.dev/issues/74951.
|
||||
curoffset := fd.offset
|
||||
defer fd.setOffset(curoffset)
|
||||
}
|
||||
defer syscall.Seek(fd.Sysfd, curoffset, io.SeekStart)
|
||||
defer fd.setOffset(curoffset)
|
||||
o := &fd.rop
|
||||
o.InitBuf(b)
|
||||
fd.setOffset(off)
|
||||
@@ -852,12 +866,22 @@ func (fd *FD) Pwrite(buf []byte, off int64) (int, error) {
|
||||
|
||||
fd.l.Lock()
|
||||
defer fd.l.Unlock()
|
||||
curoffset, err := syscall.Seek(fd.Sysfd, 0, io.SeekCurrent)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
if fd.isBlocking {
|
||||
curoffset, err := syscall.Seek(fd.Sysfd, 0, io.SeekCurrent)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer syscall.Seek(fd.Sysfd, curoffset, io.SeekStart)
|
||||
defer fd.setOffset(curoffset)
|
||||
} else {
|
||||
// Overlapped handles don't have the file pointer updated
|
||||
// when performing I/O operations, so there is no need to
|
||||
// call Seek to reset the file pointer.
|
||||
// Also, some overlapped file handles don't support seeking.
|
||||
// See https://go.dev/issues/74951.
|
||||
curoffset := fd.offset
|
||||
defer fd.setOffset(curoffset)
|
||||
}
|
||||
defer syscall.Seek(fd.Sysfd, curoffset, io.SeekStart)
|
||||
defer fd.setOffset(curoffset)
|
||||
|
||||
var ntotal int
|
||||
for {
|
||||
@@ -1106,6 +1130,12 @@ func (fd *FD) Seek(offset int64, whence int) (int64, error) {
|
||||
fd.l.Lock()
|
||||
defer fd.l.Unlock()
|
||||
|
||||
if !fd.isBlocking && whence == io.SeekCurrent {
|
||||
// Windows doesn't keep the file pointer for overlapped file handles.
|
||||
// We do it ourselves in case to account for any read or write
|
||||
// operations that may have occurred.
|
||||
offset += fd.offset
|
||||
}
|
||||
n, err := syscall.Seek(fd.Sysfd, offset, whence)
|
||||
fd.setOffset(n)
|
||||
return n, err
|
||||
|
||||
@@ -383,57 +383,59 @@ func TestChannelMovedOutOfBubble(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
desc string
|
||||
f func(chan struct{})
|
||||
wantPanic string
|
||||
wantFatal string
|
||||
}{{
|
||||
desc: "receive",
|
||||
f: func(ch chan struct{}) {
|
||||
<-ch
|
||||
},
|
||||
wantPanic: "receive on synctest channel from outside bubble",
|
||||
wantFatal: "receive on synctest channel from outside bubble",
|
||||
}, {
|
||||
desc: "send",
|
||||
f: func(ch chan struct{}) {
|
||||
ch <- struct{}{}
|
||||
},
|
||||
wantPanic: "send on synctest channel from outside bubble",
|
||||
wantFatal: "send on synctest channel from outside bubble",
|
||||
}, {
|
||||
desc: "close",
|
||||
f: func(ch chan struct{}) {
|
||||
close(ch)
|
||||
},
|
||||
wantPanic: "close of synctest channel from outside bubble",
|
||||
wantFatal: "close of synctest channel from outside bubble",
|
||||
}} {
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
// Bubbled channel accessed from outside any bubble.
|
||||
t.Run("outside_bubble", func(t *testing.T) {
|
||||
donec := make(chan struct{})
|
||||
ch := make(chan chan struct{})
|
||||
go func() {
|
||||
defer close(donec)
|
||||
defer wantPanic(t, test.wantPanic)
|
||||
test.f(<-ch)
|
||||
}()
|
||||
synctest.Run(func() {
|
||||
ch <- make(chan struct{})
|
||||
wantFatal(t, test.wantFatal, func() {
|
||||
donec := make(chan struct{})
|
||||
ch := make(chan chan struct{})
|
||||
go func() {
|
||||
defer close(donec)
|
||||
test.f(<-ch)
|
||||
}()
|
||||
synctest.Run(func() {
|
||||
ch <- make(chan struct{})
|
||||
})
|
||||
<-donec
|
||||
})
|
||||
<-donec
|
||||
})
|
||||
// Bubbled channel accessed from a different bubble.
|
||||
t.Run("different_bubble", func(t *testing.T) {
|
||||
donec := make(chan struct{})
|
||||
ch := make(chan chan struct{})
|
||||
go func() {
|
||||
defer close(donec)
|
||||
c := <-ch
|
||||
wantFatal(t, test.wantFatal, func() {
|
||||
donec := make(chan struct{})
|
||||
ch := make(chan chan struct{})
|
||||
go func() {
|
||||
defer close(donec)
|
||||
c := <-ch
|
||||
synctest.Run(func() {
|
||||
test.f(c)
|
||||
})
|
||||
}()
|
||||
synctest.Run(func() {
|
||||
defer wantPanic(t, test.wantPanic)
|
||||
test.f(c)
|
||||
ch <- make(chan struct{})
|
||||
})
|
||||
}()
|
||||
synctest.Run(func() {
|
||||
ch <- make(chan struct{})
|
||||
<-donec
|
||||
})
|
||||
<-donec
|
||||
})
|
||||
})
|
||||
}
|
||||
@@ -443,39 +445,40 @@ func TestTimerFromInsideBubble(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
desc string
|
||||
f func(tm *time.Timer)
|
||||
wantPanic string
|
||||
wantFatal string
|
||||
}{{
|
||||
desc: "read channel",
|
||||
f: func(tm *time.Timer) {
|
||||
<-tm.C
|
||||
},
|
||||
wantPanic: "receive on synctest channel from outside bubble",
|
||||
wantFatal: "receive on synctest channel from outside bubble",
|
||||
}, {
|
||||
desc: "Reset",
|
||||
f: func(tm *time.Timer) {
|
||||
tm.Reset(1 * time.Second)
|
||||
},
|
||||
wantPanic: "reset of synctest timer from outside bubble",
|
||||
wantFatal: "reset of synctest timer from outside bubble",
|
||||
}, {
|
||||
desc: "Stop",
|
||||
f: func(tm *time.Timer) {
|
||||
tm.Stop()
|
||||
},
|
||||
wantPanic: "stop of synctest timer from outside bubble",
|
||||
wantFatal: "stop of synctest timer from outside bubble",
|
||||
}} {
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
donec := make(chan struct{})
|
||||
ch := make(chan *time.Timer)
|
||||
go func() {
|
||||
defer close(donec)
|
||||
defer wantPanic(t, test.wantPanic)
|
||||
test.f(<-ch)
|
||||
}()
|
||||
synctest.Run(func() {
|
||||
tm := time.NewTimer(1 * time.Second)
|
||||
ch <- tm
|
||||
wantFatal(t, test.wantFatal, func() {
|
||||
donec := make(chan struct{})
|
||||
ch := make(chan *time.Timer)
|
||||
go func() {
|
||||
defer close(donec)
|
||||
test.f(<-ch)
|
||||
}()
|
||||
synctest.Run(func() {
|
||||
tm := time.NewTimer(1 * time.Second)
|
||||
ch <- tm
|
||||
})
|
||||
<-donec
|
||||
})
|
||||
<-donec
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -776,6 +779,28 @@ func TestWaitGroupHeapAllocated(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
// Issue #75134: Many racing bubble associations.
|
||||
func TestWaitGroupManyBubbles(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
for range 100 {
|
||||
wg.Go(func() {
|
||||
synctest.Run(func() {
|
||||
cancelc := make(chan struct{})
|
||||
var wg2 sync.WaitGroup
|
||||
for range 100 {
|
||||
wg2.Go(func() {
|
||||
<-cancelc
|
||||
})
|
||||
}
|
||||
synctest.Wait()
|
||||
close(cancelc)
|
||||
wg2.Wait()
|
||||
})
|
||||
})
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestHappensBefore(t *testing.T) {
|
||||
// Use two parallel goroutines accessing different vars to ensure that
|
||||
// we correctly account for multiple goroutines in the bubble.
|
||||
|
||||
@@ -204,7 +204,7 @@ func Deleteat(dirfd syscall.Handle, name string, options uint32) error {
|
||||
var h syscall.Handle
|
||||
err := NtOpenFile(
|
||||
&h,
|
||||
SYNCHRONIZE|DELETE,
|
||||
SYNCHRONIZE|FILE_READ_ATTRIBUTES|DELETE,
|
||||
objAttrs,
|
||||
&IO_STATUS_BLOCK{},
|
||||
FILE_SHARE_DELETE|FILE_SHARE_READ|FILE_SHARE_WRITE,
|
||||
@@ -215,14 +215,22 @@ func Deleteat(dirfd syscall.Handle, name string, options uint32) error {
|
||||
}
|
||||
defer syscall.CloseHandle(h)
|
||||
|
||||
const (
|
||||
FileDispositionInformation = 13
|
||||
FileDispositionInformationEx = 64
|
||||
)
|
||||
if TestDeleteatFallback {
|
||||
return deleteatFallback(h)
|
||||
}
|
||||
|
||||
const FileDispositionInformationEx = 64
|
||||
|
||||
// First, attempt to delete the file using POSIX semantics
|
||||
// (which permit a file to be deleted while it is still open).
|
||||
// This matches the behavior of DeleteFileW.
|
||||
//
|
||||
// The following call uses features available on different Windows versions:
|
||||
// - FILE_DISPOSITION_INFORMATION_EX: Windows 10, version 1607 (aka RS1)
|
||||
// - FILE_DISPOSITION_POSIX_SEMANTICS: Windows 10, version 1607 (aka RS1)
|
||||
// - FILE_DISPOSITION_IGNORE_READONLY_ATTRIBUTE: Windows 10, version 1809 (aka RS5)
|
||||
//
|
||||
// Also, some file systems, like FAT32, don't support POSIX semantics.
|
||||
err = NtSetInformationFile(
|
||||
h,
|
||||
&IO_STATUS_BLOCK{},
|
||||
@@ -241,28 +249,57 @@ func Deleteat(dirfd syscall.Handle, name string, options uint32) error {
|
||||
switch err {
|
||||
case nil:
|
||||
return nil
|
||||
case STATUS_CANNOT_DELETE, STATUS_DIRECTORY_NOT_EMPTY:
|
||||
case STATUS_INVALID_INFO_CLASS, // the operating system doesn't support FileDispositionInformationEx
|
||||
STATUS_INVALID_PARAMETER, // the operating system doesn't support one of the flags
|
||||
STATUS_NOT_SUPPORTED: // the file system doesn't support FILE_DISPOSITION_INFORMATION_EX or one of the flags
|
||||
return deleteatFallback(h)
|
||||
default:
|
||||
return err.(NTStatus).Errno()
|
||||
}
|
||||
}
|
||||
|
||||
// If the prior deletion failed, the filesystem either doesn't support
|
||||
// POSIX semantics (for example, FAT), or hasn't implemented
|
||||
// FILE_DISPOSITION_INFORMATION_EX.
|
||||
//
|
||||
// Try again.
|
||||
err = NtSetInformationFile(
|
||||
// TestDeleteatFallback should only be used for testing purposes.
|
||||
// When set, [Deleteat] uses the fallback path unconditionally.
|
||||
var TestDeleteatFallback bool
|
||||
|
||||
// deleteatFallback is a deleteat implementation that strives
|
||||
// for compatibility with older Windows versions and file systems
|
||||
// over performance.
|
||||
func deleteatFallback(h syscall.Handle) error {
|
||||
var data syscall.ByHandleFileInformation
|
||||
if err := syscall.GetFileInformationByHandle(h, &data); err == nil && data.FileAttributes&syscall.FILE_ATTRIBUTE_READONLY != 0 {
|
||||
// Remove read-only attribute. Reopen the file, as it was previously open without FILE_WRITE_ATTRIBUTES access
|
||||
// in order to maximize compatibility in the happy path.
|
||||
wh, err := ReOpenFile(h,
|
||||
FILE_WRITE_ATTRIBUTES,
|
||||
FILE_SHARE_READ|FILE_SHARE_WRITE|FILE_SHARE_DELETE,
|
||||
syscall.FILE_FLAG_OPEN_REPARSE_POINT|syscall.FILE_FLAG_BACKUP_SEMANTICS,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = SetFileInformationByHandle(
|
||||
wh,
|
||||
FileBasicInfo,
|
||||
unsafe.Pointer(&FILE_BASIC_INFO{
|
||||
FileAttributes: data.FileAttributes &^ FILE_ATTRIBUTE_READONLY,
|
||||
}),
|
||||
uint32(unsafe.Sizeof(FILE_BASIC_INFO{})),
|
||||
)
|
||||
syscall.CloseHandle(wh)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return SetFileInformationByHandle(
|
||||
h,
|
||||
&IO_STATUS_BLOCK{},
|
||||
unsafe.Pointer(&FILE_DISPOSITION_INFORMATION{
|
||||
FileDispositionInfo,
|
||||
unsafe.Pointer(&FILE_DISPOSITION_INFO{
|
||||
DeleteFile: true,
|
||||
}),
|
||||
uint32(unsafe.Sizeof(FILE_DISPOSITION_INFORMATION{})),
|
||||
FileDispositionInformation,
|
||||
uint32(unsafe.Sizeof(FILE_DISPOSITION_INFO{})),
|
||||
)
|
||||
if st, ok := err.(NTStatus); ok {
|
||||
return st.Errno()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func Renameat(olddirfd syscall.Handle, oldpath string, newdirfd syscall.Handle, newpath string) error {
|
||||
|
||||
@@ -19,6 +19,7 @@ const (
|
||||
FileBasicInfo = 0 // FILE_BASIC_INFO
|
||||
FileStandardInfo = 1 // FILE_STANDARD_INFO
|
||||
FileNameInfo = 2 // FILE_NAME_INFO
|
||||
FileDispositionInfo = 4 // FILE_DISPOSITION_INFO
|
||||
FileStreamInfo = 7 // FILE_STREAM_INFO
|
||||
FileCompressionInfo = 8 // FILE_COMPRESSION_INFO
|
||||
FileAttributeTagInfo = 9 // FILE_ATTRIBUTE_TAG_INFO
|
||||
|
||||
@@ -529,6 +529,8 @@ const (
|
||||
//sys GetOverlappedResult(handle syscall.Handle, overlapped *syscall.Overlapped, done *uint32, wait bool) (err error)
|
||||
//sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW
|
||||
|
||||
//sys ReOpenFile(filehandle syscall.Handle, desiredAccess uint32, shareMode uint32, flagAndAttributes uint32) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle]
|
||||
|
||||
// NTStatus corresponds with NTSTATUS, error values returned by ntdll.dll and
|
||||
// other native functions.
|
||||
type NTStatus uint32
|
||||
@@ -554,6 +556,9 @@ const (
|
||||
STATUS_NOT_A_DIRECTORY NTStatus = 0xC0000103
|
||||
STATUS_CANNOT_DELETE NTStatus = 0xC0000121
|
||||
STATUS_REPARSE_POINT_ENCOUNTERED NTStatus = 0xC000050B
|
||||
STATUS_NOT_SUPPORTED NTStatus = 0xC00000BB
|
||||
STATUS_INVALID_PARAMETER NTStatus = 0xC000000D
|
||||
STATUS_INVALID_INFO_CLASS NTStatus = 0xC0000003
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
@@ -199,6 +199,11 @@ const (
|
||||
FILE_OPEN_FOR_FREE_SPACE_QUERY = 0x00800000
|
||||
)
|
||||
|
||||
// https://learn.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_disposition_info
|
||||
type FILE_DISPOSITION_INFO struct {
|
||||
DeleteFile bool
|
||||
}
|
||||
|
||||
// https://learn.microsoft.com/en-us/windows-hardware/drivers/ddi/ntddk/ns-ntddk-_file_disposition_information
|
||||
type FILE_DISPOSITION_INFORMATION struct {
|
||||
DeleteFile bool
|
||||
|
||||
@@ -85,6 +85,7 @@ var (
|
||||
procModule32NextW = modkernel32.NewProc("Module32NextW")
|
||||
procMoveFileExW = modkernel32.NewProc("MoveFileExW")
|
||||
procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar")
|
||||
procReOpenFile = modkernel32.NewProc("ReOpenFile")
|
||||
procRtlLookupFunctionEntry = modkernel32.NewProc("RtlLookupFunctionEntry")
|
||||
procRtlVirtualUnwind = modkernel32.NewProc("RtlVirtualUnwind")
|
||||
procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle")
|
||||
@@ -431,6 +432,15 @@ func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32,
|
||||
return
|
||||
}
|
||||
|
||||
func ReOpenFile(filehandle syscall.Handle, desiredAccess uint32, shareMode uint32, flagAndAttributes uint32) (handle syscall.Handle, err error) {
|
||||
r0, _, e1 := syscall.Syscall6(procReOpenFile.Addr(), 4, uintptr(filehandle), uintptr(desiredAccess), uintptr(shareMode), uintptr(flagAndAttributes), 0, 0)
|
||||
handle = syscall.Handle(r0)
|
||||
if handle == syscall.InvalidHandle {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func RtlLookupFunctionEntry(pc uintptr, baseAddress *uintptr, table unsafe.Pointer) (ret *RUNTIME_FUNCTION) {
|
||||
r0, _, _ := syscall.Syscall(procRtlLookupFunctionEntry.Addr(), 3, uintptr(pc), uintptr(unsafe.Pointer(baseAddress)), uintptr(table))
|
||||
ret = (*RUNTIME_FUNCTION)(unsafe.Pointer(r0))
|
||||
|
||||
@@ -62,7 +62,9 @@ func isTokenChar(c byte) bool {
|
||||
1<<'^' |
|
||||
1<<'_' |
|
||||
1<<'`' |
|
||||
1<<'{' |
|
||||
1<<'|' |
|
||||
1<<'}' |
|
||||
1<<'~'
|
||||
return ((uint64(1)<<c)&(mask&(1<<64-1)) |
|
||||
(uint64(1)<<(c-64))&(mask>>64)) != 0
|
||||
|
||||
@@ -413,6 +413,9 @@ func init() {
|
||||
// Issue #48866: duplicate parameters containing equal values should be allowed
|
||||
{`text; charset=utf-8; charset=utf-8; format=fixed`, "text", m("charset", "utf-8", "format", "fixed")},
|
||||
{`text; charset=utf-8; format=flowed; charset=utf-8`, "text", m("charset", "utf-8", "format", "flowed")},
|
||||
|
||||
// Issue #76236: '{' and '}' are token chars.
|
||||
{"attachment; filename={file}.png", "attachment", m("filename", "{file}.png")},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user