Compare commits

..

4 Commits

Author SHA1 Message Date
Gopher Robot
fa72f3e034 [release-branch.go1.22] go1.22rc1
Change-Id: Ia3eeb58ffd7acc4dc519a304f3ef934ab9c82175
Reviewed-on: https://go-review.googlesource.com/c/go/+/551536
Reviewed-by: Carlos Amedee <carlos@golang.org>
Auto-Submit: Gopher Robot <gobot@golang.org>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Than McIntosh <thanm@google.com>
2023-12-19 20:59:26 +00:00
Than McIntosh
fb23428a85 [release-branch.go1.22] all: merge master (0324250) into release-branch.go1.22
Merge List:

+ 2023-12-19 03242506de doc: comment out remaining TODOs in Go 1.22 relnotes (for now)
+ 2023-12-19 9dd1cde9ac doc/go1.22,cmd/go: document that 'go mod init' no longer imports from other vendoring tools
+ 2023-12-19 22284c34f2 doc/go1.22: document removal of 'go get' support in GOPATH mode
+ 2023-12-19 339177aa31 doc: typo fix for net/http.ServeMux
+ 2023-12-19 52dbffeac8 cmd/go/internal/toolchain: revert "make a best effort to parse 'go run' and 'go install' flags"

Change-Id: I102e8267373364d0ad6170d36442d19048268765
2023-12-19 10:57:36 -05:00
Than McIntosh
f06eaf0c4f [release-branch.go1.22] all: merge master (1d4b0b6) into release-branch.go1.22
Merge List:

+ 2023-12-19 1d4b0b6236 doc/go1.22.html: release notes for slog, testing/slogtest and net/http.ServeMux
+ 2023-12-19 6fe0d3758b cmd/compile: remove interfacecycles debug flag
+ 2023-12-19 90daaa0576 doc/go1.22: announcing support address sanitizer on Loong64
+ 2023-12-18 5b84d50038 test: skip rangegen.go on 32-bit platforms
+ 2023-12-18 4106de901a crypto/tls: align FIPS-only mode with BoringSSL policy
+ 2023-12-18 7383b2a4db crypto/internal/boring: upgrade module to fips-20220613
+ 2023-12-18 c564d4ae08 Revert "cmd/cgo/internal/testsanitizers: fix msan test failing with clang >= 16"
+ 2023-12-18 7058f09a8b cmd: go get golang.org/x/tools@83bceaf2 and revendor
+ 2023-12-18 761e10be88 cmd/link/internal/loadpe: update comment about @feat.00 symbol handling
+ 2023-12-18 450f5d90c2 doc: add math/rand/v2 release notes
+ 2023-12-18 08bec0db39 builtin: mention PanicNilError in comments of recover
+ 2023-12-18 2acbdd086d cmd/cgo/internal/testsanitizers: fix msan test failing with clang >= 16
+ 2023-12-18 a7097243e4 internal/syscall/windows: fix the signature of SetFileInformationByHandle
+ 2023-12-18 8e3930f258 runtime: skip TestRuntimeLockMetricsAndProfile for flakiness
+ 2023-12-15 9b4b3e5acc runtime: properly model rwmutex in lock ranking
+ 2023-12-15 793097161b all: fix copyright headers
+ 2023-12-15 f8170cc017 cmd/asm: for arm, rewrite argument shifted right by 0 to left by 0.
+ 2023-12-15 3313bbb405 runtime: add race annotations in IncNonDefault
+ 2023-12-15 b60bf8f8e1 cmd/asm: fix encoding for arm right shift by constant 0
+ 2023-12-15 5e939b3a9c doc: add crypto/tls and crypto/x509 release notes

Change-Id: I2a80e9d39aa1fbb22b06ecfa16f725bacb78eb3f
2023-12-19 09:29:26 -05:00
Michael Pratt
796f59df92 [release-branch.go1.22] update codereview.cfg for release-branch.go1.22
Change-Id: I3f5cd8d62cfb1e62240e2b9f0b22022ee57262a7
Reviewed-on: https://go-review.googlesource.com/c/go/+/550255
Auto-Submit: Michael Pratt <mpratt@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Carlos Amedee <carlos@golang.org>
2023-12-15 18:08:51 +00:00
6900 changed files with 215497 additions and 928238 deletions


@@ -23,9 +23,9 @@ body:
required: true
- type: textarea
id: go-env
id: os-and-processor
attributes:
label: "Output of `go env` in your module/workspace:"
label: "What operating system and processor architecture are you using (`go env`)?"
placeholder: |
GO111MODULE=""
GOARCH="arm64"
@@ -78,17 +78,16 @@ body:
required: true
- type: textarea
id: actual-behavior
id: expected-behavior
attributes:
label: "What did you see happen?"
description: Command invocations and their associated output, functions with their arguments and return results, full stacktraces for panics (upload a file if it is very long), etc. Prefer copying text output over using screenshots.
label: "What did you expect to see?"
validations:
required: true
- type: textarea
id: expected-behavior
id: actual-behavior
attributes:
label: "What did you expect to see?"
description: Why is the current output incorrect, and any additional context we may need to understand the issue.
label: "What did you see instead?"
validations:
required: true


@@ -33,15 +33,15 @@ body:
description: "If possible, provide a recipe for reproducing the error. Starting with a Private/Incognito tab/window may help rule out problematic browser extensions."
validations:
required: true
- type: textarea
id: actual-behavior
attributes:
label: "What did you see happen?"
validations:
required: true
- type: textarea
id: expected-behavior
attributes:
label: "What did you expect to see?"
validations:
required: true
- type: textarea
id: actual-behavior
attributes:
label: "What did you see instead?"
validations:
required: true


@@ -10,33 +10,20 @@ body:
id: package-path
attributes:
label: "What is the path of the package that you would like to have removed?"
description: |
We can remove packages with a shared path prefix.
For example, a request for 'github.com/author' would remove all pkg.go.dev pages with that package path prefix.
description: "We can remove packages with a shared path prefix. For example, a request for 'github.com/author' would remove all pkg.go.dev pages with that package path prefix."
validations:
required: true
- type: textarea
id: package-owner
attributes:
label: "Are you the owner of this package?"
description: |
Only the package owners can request to have their packages removed from pkg.go.dev.
If the package path doesn't include your github username, please provide some other form of proof of ownership.
description: "Only the package owners can request to have their packages removed from pkg.go.dev."
validations:
required: true
- type: textarea
id: retraction-reason
attributes:
label: "What is the reason that you could not retract this package instead?"
description: |
Requesting we remove a module here only hides the generated documentation on pkg.go.dev.
It does not affect the behaviour of proxy.golang.org or the go command.
Instead we recommend using the retract directive which will be processed by all 3 of the above.
If you have deleted your repo, please recreate it and publish a retraction.
Retracting a module version involves adding a retract directive to your go.mod file and publishing a new version.
For example: https://github.com/jba/retract-demo/blob/main/go.mod#L5-L8.
See https://pkg.go.dev/about#removing-a-package for additional tips on retractions.
description: "If you would like to have your module removed from pkg.go.dev, we recommend that you retract them, so that they can be removed from the go command and proxy.golang.org as well. Retracting a module version involves adding a retract directive to your go.mod file and publishing a new version. For example: https://github.com/jba/retract-demo/blob/main/go.mod#L5-L8. See https://pkg.go.dev/about#removing-a-package for additional tips on retractions."
validations:
required: true
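
For context, the retract directive that this field refers to lives in the module's own go.mod file. A minimal sketch, with a hypothetical module path and versions:

    module example.com/author/mymodule

    go 1.22

    // v1.0.0 and v1.0.1 were published by mistake; use v1.0.2 or later.
    retract (
        v1.0.0
        v1.0.1
    )

Publishing a new version that contains the directive is what lets the go command, proxy.golang.org, and pkg.go.dev all pick up the retraction, which is why retracting is preferred over a removal request.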


@@ -6,7 +6,7 @@ body:
- type: markdown
attributes:
value: "Please answer these questions before submitting your issue. Thanks!"
- type: textarea
- type: input
id: gopls-version
attributes:
label: "gopls version"
@@ -25,13 +25,7 @@ body:
id: what-did-you-do
attributes:
label: "What did you do?"
description: "If possible, provide a recipe for reproducing the error. A complete runnable program is good. A link on [go.dev/play](https://go.dev/play) is better. A failing unit test is the best."
validations:
required: true
- type: textarea
id: actual-behavior
attributes:
label: "What did you see happen?"
description: "If possible, provide a recipe for reproducing the error. A complete runnable program is good. A link on go.dev/play is better. A failing unit test is the best."
validations:
required: true
- type: textarea
@@ -40,6 +34,12 @@ body:
label: "What did you expect to see?"
validations:
required: true
- type: textarea
id: actual-behavior
attributes:
label: "What did you see instead?"
validations:
required: true
- type: textarea
id: editor-and-settings
attributes:


@@ -6,7 +6,7 @@ body:
- type: markdown
attributes:
value: "Please answer these questions before submitting your issue. Thanks! To add a new vulnerability to the Go vulnerability database (https://vuln.go.dev), see https://go.dev/s/vulndb-report-new. To report an issue about a report, see https://go.dev/s/vulndb-report-feedback."
- type: textarea
- type: input
id: govulncheck-version
attributes:
label: govulncheck version
@@ -25,9 +25,9 @@ body:
validations:
required: true
- type: textarea
id: go-env
id: os-and-processor
attributes:
label: "Output of `go env` in your module/workspace:"
label: "What operating system and processor architecture are you using (`go env`)?"
render: shell
validations:
required: true
@@ -38,15 +38,15 @@ body:
description: "If possible, provide a recipe for reproducing the error. A complete runnable program is good. A link on [go.dev/play](https://go.dev/play) is best."
validations:
required: true
- type: textarea
id: actual-behavior
attributes:
label: "What did you see happen?"
validations:
required: true
- type: textarea
id: expected-behavior
attributes:
label: "What did you expect to see?"
validations:
required: true
- type: textarea
id: actual-behavior
attributes:
label: "What did you see instead?"
validations:
required: true


@@ -1,7 +1,7 @@
name: Language Change Proposals
description: Changes to the language
labels: ["Proposal", "LanguageChange", "LanguageChangeReview"]
title: "proposal: spec: proposal title"
labels: ["Proposal", "v2", "LanguageChange"]
title: "proposal: Go 2: proposal title"
body:


@@ -1,30 +1,68 @@
name: Go Telemetry Proposals
description: Changes to the telemetry upload configuration
description: New telemetry counter or update on an existing one
title: "x/telemetry/config: proposal title"
labels: ["Telemetry-Proposal"]
projects: ["golang/29"]
body:
- type: textarea
attributes:
label: Summary
description: >
What change are you proposing to the upload configuration, and why?
For new upload configuration, which new counters will be collected, what
do they measure, and why is it important to collect them?
Note that uploaded data must not carry sensitive user information.
See [go.dev/doc/telemetry#proposals](https://go.dev/doc/telemetry#proposals)
for more details on telemetry proposals.
label: Counter names
description: Names of counters to add or update.
validations:
required: true
- type: input
- type: textarea
attributes:
label: Proposed Config Change
description: >
A CL containing proposed changes to the
[config.txt](https://go.googlesource.com/telemetry/+/master/internal/chartconfig/config.txt)
chart configuration.
See the [chartconfig](https://pkg.go.dev/golang.org/x/telemetry/internal/chartconfig)
package for an explanation of the chart config format.
For an example change, see [CL 564619](https://go.dev/cl/564619).
label: Description
description: What do these counters measure?
validations:
required: true
- type: textarea
attributes:
label: Rationale
description: |
Why is the counter important?
For example, what new insights will it provide, and how will that information be used?
If this is about updating existing counters, why is the change necessary?
validations:
required: true
- type: textarea
attributes:
label: Do the counters carry sensitive user information?
validations:
required: true
- type: textarea
attributes:
label: How?
description: |
How do we plan to compute the info?
If available, include the code location or cl that uses the golang.org/x/telemetry/counter API.
validations:
required: true
- type: textarea
attributes:
label: Proposed Graph Config
description: |
Approved telemetry counters are maintained as [Go Telemetry Graph Config](https://golang.org/x/telemetry/internal/graphconfig) records.
Please draft the record entry for your proposal here.
If multiple records need to be included, separate them with `---` lines.
You can check the list of the approved counters and their current configuration in [config.txt](https://go.googlesource.com/telemetry/+/master/internal/configgen/config.txt).
render: Text
value: |
counter: gopls/bug
title: Gopls bug reports
description: Stacks of bugs encountered on the gopls server.
type: partition, histogram, stack # choose only one.
program: golang.org/x/tools/gopls
counter: gopls/bug
depth: 16 # only if type is stack.
version: v0.13.0 # the first binary version containing this counter.
validations:
required: true
- type: dropdown
attributes:
label: New or Update
description: Is this a new counter? See [config.txt](https://go.googlesource.com/telemetry/+/master/internal/configgen/config.txt) for the list of approved counters.
options:
- New
- Update
default: 0
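
The "How?" field above refers to the golang.org/x/telemetry/counter API. As a rough, hypothetical illustration (not part of this change), a program might record the gopls/bug counter named in the example config along these lines:

    package main

    import "golang.org/x/telemetry/counter"

    // bugReports counts occurrences of the condition being measured; the name
    // matches the example counter in the chart config above.
    var bugReports = counter.New("gopls/bug")

    func main() {
        // Open associates counters with the local telemetry counter file so
        // that increments are recorded.
        counter.Open()
        bugReports.Inc()
    }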

3 .gitignore vendored

@@ -30,7 +30,6 @@ _testmain.go
/misc/cgo/testso/main
/pkg/
/src/*.*/
/src/_artifacts/
/src/cmd/cgo/zdefaultcc.go
/src/cmd/dist/dist
/src/cmd/go/internal/cfg/zdefaultcc.go
@@ -38,7 +37,7 @@ _testmain.go
/src/go/build/zcgo.go
/src/go/doc/headscan
/src/internal/buildcfg/zbootstrap.go
/src/internal/runtime/sys/zversion.go
/src/runtime/internal/sys/zversion.go
/src/unicode/maketables
/src/time/tzdata/zzipdata.go
/test.out


@@ -1,4 +1,4 @@
Copyright 2009 The Go Authors.
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google LLC nor the names of its
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.


@@ -4,7 +4,7 @@ Go is an open source programming language that makes it easy to build simple,
reliable, and efficient software.
![Gopher image](https://golang.org/doc/gopher/fiveyears.jpg)
*Gopher image by [Renee French][rf], licensed under [Creative Commons 4.0 Attribution license][cc4-by].*
*Gopher image by [Renee French][rf], licensed under [Creative Commons 4.0 Attributions license][cc4-by].*
Our canonical Git repository is located at https://go.googlesource.com/go.
There is a mirror of the repository at https://github.com/golang/go.

2 VERSION Normal file

@@ -0,0 +1,2 @@
go1.22rc1
time 2023-12-19T16:29:36Z


@@ -21,6 +21,3 @@ warning output from the go api tool. Each file should be named
nnnnn.txt, after the issue number for the accepted proposal.
(The #nnnnn suffix must also appear at the end of each line in the file;
that will be preserved when next/*.txt is concatenated into go1.XX.txt.)
When you add a file to the api/next directory, you must add at least one file
under doc/next. See doc/README.md for details.


@@ -598,7 +598,3 @@ pkg syscall (freebsd-arm64-cgo), const SYS_MKNODAT = 498
pkg syscall (freebsd-arm64-cgo), const SYS_STAT = 188
pkg syscall (freebsd-arm64-cgo), const SYS_STAT ideal-int
pkg syscall (freebsd-arm64-cgo), const SYS_STATFS = 396
pkg syscall (openbsd-386), const ELAST = 91
pkg syscall (openbsd-386-cgo), const ELAST = 91
pkg syscall (openbsd-amd64), const ELAST = 91
pkg syscall (openbsd-amd64-cgo), const ELAST = 91


@@ -1,4 +1,13 @@
pkg archive/tar, method (*Writer) AddFS(fs.FS) error #58000
pkg archive/tar, type FileInfoNames interface { Gname, IsDir, ModTime, Mode, Name, Size, Sys, Uname } #50102
pkg archive/tar, type FileInfoNames interface, Gname(int) (string, error) #50102
pkg archive/tar, type FileInfoNames interface, IsDir() bool #50102
pkg archive/tar, type FileInfoNames interface, ModTime() time.Time #50102
pkg archive/tar, type FileInfoNames interface, Mode() fs.FileMode #50102
pkg archive/tar, type FileInfoNames interface, Name() string #50102
pkg archive/tar, type FileInfoNames interface, Size() int64 #50102
pkg archive/tar, type FileInfoNames interface, Sys() interface{} #50102
pkg archive/tar, type FileInfoNames interface, Uname(int) (string, error) #50102
pkg archive/zip, method (*Writer) AddFS(fs.FS) error #54898
pkg cmp, func Or[$0 comparable](...$0) $0 #60204
pkg crypto/x509, func OIDFromInts([]uint64) (OID, error) #60665


@@ -1,158 +0,0 @@
pkg archive/tar, type FileInfoNames interface { Gname, IsDir, ModTime, Mode, Name, Size, Sys, Uname } #50102
pkg archive/tar, type FileInfoNames interface, Gname() (string, error) #50102
pkg archive/tar, type FileInfoNames interface, IsDir() bool #50102
pkg archive/tar, type FileInfoNames interface, ModTime() time.Time #50102
pkg archive/tar, type FileInfoNames interface, Mode() fs.FileMode #50102
pkg archive/tar, type FileInfoNames interface, Name() string #50102
pkg archive/tar, type FileInfoNames interface, Size() int64 #50102
pkg archive/tar, type FileInfoNames interface, Sys() interface{} #50102
pkg archive/tar, type FileInfoNames interface, Uname() (string, error) #50102
pkg crypto/tls, const QUICResumeSession = 8 #63691
pkg crypto/tls, const QUICResumeSession QUICEventKind #63691
pkg crypto/tls, const QUICStoreSession = 9 #63691
pkg crypto/tls, const QUICStoreSession QUICEventKind #63691
pkg crypto/tls, method (*ECHRejectionError) Error() string #63369
pkg crypto/tls, method (*QUICConn) StoreSession(*SessionState) error #63691
pkg crypto/tls, type Config struct, EncryptedClientHelloConfigList []uint8 #63369
pkg crypto/tls, type Config struct, EncryptedClientHelloRejectionVerify func(ConnectionState) error #63369
pkg crypto/tls, type ConnectionState struct, ECHAccepted bool #63369
pkg crypto/tls, type ECHRejectionError struct #63369
pkg crypto/tls, type ECHRejectionError struct, RetryConfigList []uint8 #63369
pkg crypto/tls, type QUICConfig struct, EnableSessionEvents bool #63691
pkg crypto/tls, type QUICEvent struct, SessionState *SessionState #63691
pkg crypto/tls, type QUICSessionTicketOptions struct, Extra [][]uint8 #63691
pkg crypto/x509, func ParseOID(string) (OID, error) #66249
pkg crypto/x509, method (*OID) UnmarshalBinary([]uint8) error #66249
pkg crypto/x509, method (*OID) UnmarshalText([]uint8) error #66249
pkg crypto/x509, method (OID) MarshalBinary() ([]uint8, error) #66249
pkg crypto/x509, method (OID) MarshalText() ([]uint8, error) #66249
pkg debug/elf, const PT_OPENBSD_NOBTCFI = 1705237480 #66054
pkg debug/elf, const PT_OPENBSD_NOBTCFI ProgType #66054
pkg debug/elf, const STT_GNU_IFUNC = 10 #66836
pkg debug/elf, const STT_GNU_IFUNC SymType #66836
pkg debug/elf, const STT_RELC = 8 #66836
pkg debug/elf, const STT_RELC SymType #66836
pkg debug/elf, const STT_SRELC = 9 #66836
pkg debug/elf, const STT_SRELC SymType #66836
pkg encoding/binary, func Append([]uint8, ByteOrder, interface{}) ([]uint8, error) #60023
pkg encoding/binary, func Decode([]uint8, ByteOrder, interface{}) (int, error) #60023
pkg encoding/binary, func Encode([]uint8, ByteOrder, interface{}) (int, error) #60023
pkg go/ast, func Preorder(Node) iter.Seq[Node] #66339
pkg go/types, method (*Alias) Origin() *Alias #67143
pkg go/types, method (*Alias) Rhs() Type #66559
pkg go/types, method (*Alias) SetTypeParams([]*TypeParam) #67143
pkg go/types, method (*Alias) TypeArgs() *TypeList #67143
pkg go/types, method (*Alias) TypeParams() *TypeParamList #67143
pkg go/types, method (*Func) Signature() *Signature #65772
pkg iter, func Pull2[$0 interface{}, $1 interface{}](Seq2[$0, $1]) (func() ($0, $1, bool), func()) #61897
pkg iter, func Pull[$0 interface{}](Seq[$0]) (func() ($0, bool), func()) #61897
pkg iter, type Seq2[$0 interface{}, $1 interface{}] func(func($0, $1) bool) #61897
pkg iter, type Seq[$0 interface{}] func(func($0) bool) #61897
pkg maps, func All[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0) iter.Seq2[$1, $2] #61900
pkg maps, func Collect[$0 comparable, $1 interface{}](iter.Seq2[$0, $1]) map[$0]$1 #61900
pkg maps, func Insert[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0, iter.Seq2[$1, $2]) #61900
pkg maps, func Keys[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0) iter.Seq[$1] #61900
pkg maps, func Values[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0) iter.Seq[$2] #61900
pkg math/rand/v2, func Uint() uint #61716
pkg math/rand/v2, method (*ChaCha8) Read([]uint8) (int, error) #67059
pkg math/rand/v2, method (*Rand) Uint() uint #61716
pkg net, method (*DNSError) Unwrap() error #63116
pkg net, method (*TCPConn) SetKeepAliveConfig(KeepAliveConfig) error #62254
pkg net, type DNSError struct, UnwrapErr error #63116
pkg net, type Dialer struct, KeepAliveConfig KeepAliveConfig #62254
pkg net, type KeepAliveConfig struct #62254
pkg net, type KeepAliveConfig struct, Count int #62254
pkg net, type KeepAliveConfig struct, Enable bool #62254
pkg net, type KeepAliveConfig struct, Idle time.Duration #62254
pkg net, type KeepAliveConfig struct, Interval time.Duration #62254
pkg net, type ListenConfig struct, KeepAliveConfig KeepAliveConfig #62254
pkg net/http, func ParseCookie(string) ([]*Cookie, error) #66008
pkg net/http, func ParseSetCookie(string) (*Cookie, error) #66008
pkg net/http, method (*Request) CookiesNamed(string) []*Cookie #61472
pkg net/http, type Cookie struct, Partitioned bool #62490
pkg net/http, type Cookie struct, Quoted bool #46443
pkg net/http, type Request struct, Pattern string #66405
pkg net/http/httptest, func NewRequestWithContext(context.Context, string, string, io.Reader) *http.Request #59473
pkg os, func CopyFS(string, fs.FS) error #62484
pkg path/filepath, func Localize(string) (string, error) #57151
pkg reflect, func SliceAt(Type, unsafe.Pointer, int) Value #61308
pkg reflect, method (Value) Seq() iter.Seq[Value] #66056
pkg reflect, method (Value) Seq2() iter.Seq2[Value, Value] #66056
pkg reflect, type Type interface, CanSeq() bool #66056
pkg reflect, type Type interface, CanSeq2() bool #66056
pkg reflect, type Type interface, OverflowComplex(complex128) bool #60427
pkg reflect, type Type interface, OverflowFloat(float64) bool #60427
pkg reflect, type Type interface, OverflowInt(int64) bool #60427
pkg reflect, type Type interface, OverflowUint(uint64) bool #60427
pkg runtime/debug, func SetCrashOutput(*os.File, CrashOptions) error #42888
pkg runtime/debug, type CrashOptions struct #67182
pkg slices, func All[$0 interface{ ~[]$1 }, $1 interface{}]($0) iter.Seq2[int, $1] #61899
pkg slices, func AppendSeq[$0 interface{ ~[]$1 }, $1 interface{}]($0, iter.Seq[$1]) $0 #61899
pkg slices, func Backward[$0 interface{ ~[]$1 }, $1 interface{}]($0) iter.Seq2[int, $1] #61899
pkg slices, func Chunk[$0 interface{ ~[]$1 }, $1 interface{}]($0, int) iter.Seq[$0] #53987
pkg slices, func Collect[$0 interface{}](iter.Seq[$0]) []$0 #61899
pkg slices, func Repeat[$0 interface{ ~[]$1 }, $1 interface{}]($0, int) $0 #65238
pkg slices, func SortedFunc[$0 interface{}](iter.Seq[$0], func($0, $0) int) []$0 #61899
pkg slices, func SortedStableFunc[$0 interface{}](iter.Seq[$0], func($0, $0) int) []$0 #61899
pkg slices, func Sorted[$0 cmp.Ordered](iter.Seq[$0]) []$0 #61899
pkg slices, func Values[$0 interface{ ~[]$1 }, $1 interface{}]($0) iter.Seq[$1] #61899
pkg structs, type HostLayout struct #66408
pkg sync, method (*Map) Clear() #61696
pkg sync/atomic, func AndInt32(*int32, int32) int32 #61395
pkg sync/atomic, func AndInt64(*int64, int64) int64 #61395
pkg sync/atomic, func AndUint32(*uint32, uint32) uint32 #61395
pkg sync/atomic, func AndUint64(*uint64, uint64) uint64 #61395
pkg sync/atomic, func AndUintptr(*uintptr, uintptr) uintptr #61395
pkg sync/atomic, func OrInt32(*int32, int32) int32 #61395
pkg sync/atomic, func OrInt64(*int64, int64) int64 #61395
pkg sync/atomic, func OrUint32(*uint32, uint32) uint32 #61395
pkg sync/atomic, func OrUint64(*uint64, uint64) uint64 #61395
pkg sync/atomic, func OrUintptr(*uintptr, uintptr) uintptr #61395
pkg sync/atomic, method (*Int32) And(int32) int32 #61395
pkg sync/atomic, method (*Int32) Or(int32) int32 #61395
pkg sync/atomic, method (*Int64) And(int64) int64 #61395
pkg sync/atomic, method (*Int64) Or(int64) int64 #61395
pkg sync/atomic, method (*Uint32) And(uint32) uint32 #61395
pkg sync/atomic, method (*Uint32) Or(uint32) uint32 #61395
pkg sync/atomic, method (*Uint64) And(uint64) uint64 #61395
pkg sync/atomic, method (*Uint64) Or(uint64) uint64 #61395
pkg sync/atomic, method (*Uintptr) And(uintptr) uintptr #61395
pkg sync/atomic, method (*Uintptr) Or(uintptr) uintptr #61395
pkg syscall (openbsd-386), const EBADMSG = 92 #67998
pkg syscall (openbsd-386), const ELAST = 95 #67998
pkg syscall (openbsd-386), const ENOTRECOVERABLE = 93 #67998
pkg syscall (openbsd-386), const ENOTRECOVERABLE Errno #67998
pkg syscall (openbsd-386), const EOWNERDEAD = 94 #67998
pkg syscall (openbsd-386), const EOWNERDEAD Errno #67998
pkg syscall (openbsd-386), const EPROTO = 95 #67998
pkg syscall (openbsd-386-cgo), const EBADMSG = 92 #67998
pkg syscall (openbsd-386-cgo), const ELAST = 95 #67998
pkg syscall (openbsd-386-cgo), const ENOTRECOVERABLE = 93 #67998
pkg syscall (openbsd-386-cgo), const ENOTRECOVERABLE Errno #67998
pkg syscall (openbsd-386-cgo), const EOWNERDEAD = 94 #67998
pkg syscall (openbsd-386-cgo), const EOWNERDEAD Errno #67998
pkg syscall (openbsd-386-cgo), const EPROTO = 95 #67998
pkg syscall (openbsd-amd64), const EBADMSG = 92 #67998
pkg syscall (openbsd-amd64), const ELAST = 95 #67998
pkg syscall (openbsd-amd64), const ENOTRECOVERABLE = 93 #67998
pkg syscall (openbsd-amd64), const ENOTRECOVERABLE Errno #67998
pkg syscall (openbsd-amd64), const EOWNERDEAD = 94 #67998
pkg syscall (openbsd-amd64), const EOWNERDEAD Errno #67998
pkg syscall (openbsd-amd64), const EPROTO = 95 #67998
pkg syscall (openbsd-amd64-cgo), const EBADMSG = 92 #67998
pkg syscall (openbsd-amd64-cgo), const ELAST = 95 #67998
pkg syscall (openbsd-amd64-cgo), const ENOTRECOVERABLE = 93 #67998
pkg syscall (openbsd-amd64-cgo), const ENOTRECOVERABLE Errno #67998
pkg syscall (openbsd-amd64-cgo), const EOWNERDEAD = 94 #67998
pkg syscall (openbsd-amd64-cgo), const EOWNERDEAD Errno #67998
pkg syscall (openbsd-amd64-cgo), const EPROTO = 95 #67998
pkg syscall (windows-386), const WSAENOPROTOOPT = 10042 #62254
pkg syscall (windows-386), const WSAENOPROTOOPT Errno #62254
pkg syscall (windows-amd64), const WSAENOPROTOOPT = 10042 #62254
pkg syscall (windows-amd64), const WSAENOPROTOOPT Errno #62254
pkg syscall, const EBADMSG Errno #67998
pkg syscall, const EPROTO Errno #67998
pkg unicode/utf16, func RuneLen(int32) int #44940
pkg unique, func Make[$0 comparable]($0) Handle[$0] #62483
pkg unique, method (Handle[$0]) Value() $0 #62483
pkg unique, type Handle[$0 comparable] struct #62483


@@ -1,223 +0,0 @@
pkg bytes, func FieldsFuncSeq([]uint8, func(int32) bool) iter.Seq[[]uint8] #61901
pkg bytes, func FieldsSeq([]uint8) iter.Seq[[]uint8] #61901
pkg bytes, func Lines([]uint8) iter.Seq[[]uint8] #61901
pkg bytes, func SplitAfterSeq([]uint8, []uint8) iter.Seq[[]uint8] #61901
pkg bytes, func SplitSeq([]uint8, []uint8) iter.Seq[[]uint8] #61901
pkg crypto/cipher, func NewCFBDecrypter //deprecated #69445
pkg crypto/cipher, func NewCFBEncrypter //deprecated #69445
pkg crypto/cipher, func NewGCMWithRandomNonce(Block) (AEAD, error) #69981
pkg crypto/cipher, func NewOFB //deprecated #69445
pkg crypto/fips140, func Enabled() bool #70123
pkg crypto/hkdf, func Expand[$0 hash.Hash](func() $0, []uint8, string, int) ([]uint8, error) #61477
pkg crypto/hkdf, func Extract[$0 hash.Hash](func() $0, []uint8, []uint8) ([]uint8, error) #61477
pkg crypto/hkdf, func Key[$0 hash.Hash](func() $0, []uint8, []uint8, string, int) ([]uint8, error) #61477
pkg crypto/mlkem, const CiphertextSize1024 = 1568 #70122
pkg crypto/mlkem, const CiphertextSize1024 ideal-int #70122
pkg crypto/mlkem, const CiphertextSize768 = 1088 #70122
pkg crypto/mlkem, const CiphertextSize768 ideal-int #70122
pkg crypto/mlkem, const EncapsulationKeySize1024 = 1568 #70122
pkg crypto/mlkem, const EncapsulationKeySize1024 ideal-int #70122
pkg crypto/mlkem, const EncapsulationKeySize768 = 1184 #70122
pkg crypto/mlkem, const EncapsulationKeySize768 ideal-int #70122
pkg crypto/mlkem, const SeedSize = 64 #70122
pkg crypto/mlkem, const SeedSize ideal-int #70122
pkg crypto/mlkem, const SharedKeySize = 32 #70122
pkg crypto/mlkem, const SharedKeySize ideal-int #70122
pkg crypto/mlkem, func GenerateKey1024() (*DecapsulationKey1024, error) #70122
pkg crypto/mlkem, func GenerateKey768() (*DecapsulationKey768, error) #70122
pkg crypto/mlkem, func NewDecapsulationKey1024([]uint8) (*DecapsulationKey1024, error) #70122
pkg crypto/mlkem, func NewDecapsulationKey768([]uint8) (*DecapsulationKey768, error) #70122
pkg crypto/mlkem, func NewEncapsulationKey1024([]uint8) (*EncapsulationKey1024, error) #70122
pkg crypto/mlkem, func NewEncapsulationKey768([]uint8) (*EncapsulationKey768, error) #70122
pkg crypto/mlkem, method (*DecapsulationKey1024) Bytes() []uint8 #70122
pkg crypto/mlkem, method (*DecapsulationKey1024) Decapsulate([]uint8) ([]uint8, error) #70122
pkg crypto/mlkem, method (*DecapsulationKey1024) EncapsulationKey() *EncapsulationKey1024 #70122
pkg crypto/mlkem, method (*DecapsulationKey768) Bytes() []uint8 #70122
pkg crypto/mlkem, method (*DecapsulationKey768) Decapsulate([]uint8) ([]uint8, error) #70122
pkg crypto/mlkem, method (*DecapsulationKey768) EncapsulationKey() *EncapsulationKey768 #70122
pkg crypto/mlkem, method (*EncapsulationKey1024) Bytes() []uint8 #70122
pkg crypto/mlkem, method (*EncapsulationKey1024) Encapsulate() ([]uint8, []uint8) #70122
pkg crypto/mlkem, method (*EncapsulationKey768) Bytes() []uint8 #70122
pkg crypto/mlkem, method (*EncapsulationKey768) Encapsulate() ([]uint8, []uint8) #70122
pkg crypto/mlkem, type DecapsulationKey1024 struct #70122
pkg crypto/mlkem, type DecapsulationKey768 struct #70122
pkg crypto/mlkem, type EncapsulationKey1024 struct #70122
pkg crypto/mlkem, type EncapsulationKey768 struct #70122
pkg crypto/pbkdf2, func Key[$0 hash.Hash](func() $0, string, []uint8, int, int) ([]uint8, error) #69488
pkg crypto/rand, func Text() string #67057
pkg crypto/sha3, func New224() *SHA3 #69982
pkg crypto/sha3, func New256() *SHA3 #69982
pkg crypto/sha3, func New384() *SHA3 #69982
pkg crypto/sha3, func New512() *SHA3 #69982
pkg crypto/sha3, func NewCSHAKE128([]uint8, []uint8) *SHAKE #69982
pkg crypto/sha3, func NewCSHAKE256([]uint8, []uint8) *SHAKE #69982
pkg crypto/sha3, func NewSHAKE128() *SHAKE #69982
pkg crypto/sha3, func NewSHAKE256() *SHAKE #69982
pkg crypto/sha3, func Sum224([]uint8) [28]uint8 #69982
pkg crypto/sha3, func Sum256([]uint8) [32]uint8 #69982
pkg crypto/sha3, func Sum384([]uint8) [48]uint8 #69982
pkg crypto/sha3, func Sum512([]uint8) [64]uint8 #69982
pkg crypto/sha3, func SumSHAKE128([]uint8, int) []uint8 #69982
pkg crypto/sha3, func SumSHAKE256([]uint8, int) []uint8 #69982
pkg crypto/sha3, method (*SHA3) AppendBinary([]uint8) ([]uint8, error) #69982
pkg crypto/sha3, method (*SHA3) BlockSize() int #69982
pkg crypto/sha3, method (*SHA3) MarshalBinary() ([]uint8, error) #69982
pkg crypto/sha3, method (*SHA3) Reset() #69982
pkg crypto/sha3, method (*SHA3) Size() int #69982
pkg crypto/sha3, method (*SHA3) Sum([]uint8) []uint8 #69982
pkg crypto/sha3, method (*SHA3) UnmarshalBinary([]uint8) error #69982
pkg crypto/sha3, method (*SHA3) Write([]uint8) (int, error) #69982
pkg crypto/sha3, method (*SHAKE) AppendBinary([]uint8) ([]uint8, error) #69982
pkg crypto/sha3, method (*SHAKE) BlockSize() int #69982
pkg crypto/sha3, method (*SHAKE) MarshalBinary() ([]uint8, error) #69982
pkg crypto/sha3, method (*SHAKE) Read([]uint8) (int, error) #69982
pkg crypto/sha3, method (*SHAKE) Reset() #69982
pkg crypto/sha3, method (*SHAKE) UnmarshalBinary([]uint8) error #69982
pkg crypto/sha3, method (*SHAKE) Write([]uint8) (int, error) #69982
pkg crypto/sha3, type SHA3 struct #69982
pkg crypto/sha3, type SHAKE struct #69982
pkg crypto/subtle, func WithDataIndependentTiming(func()) #66450
pkg crypto/tls, const X25519MLKEM768 = 4588 #69985
pkg crypto/tls, const X25519MLKEM768 CurveID #69985
pkg crypto/tls, type ClientHelloInfo struct, Extensions []uint16 #32936
pkg crypto/tls, type Config struct, EncryptedClientHelloKeys []EncryptedClientHelloKey #68500
pkg crypto/tls, type EncryptedClientHelloKey struct #68500
pkg crypto/tls, type EncryptedClientHelloKey struct, Config []uint8 #68500
pkg crypto/tls, type EncryptedClientHelloKey struct, PrivateKey []uint8 #68500
pkg crypto/tls, type EncryptedClientHelloKey struct, SendAsRetry bool #68500
pkg crypto/x509, const NoValidChains = 10 #68484
pkg crypto/x509, const NoValidChains InvalidReason #68484
pkg crypto/x509, method (OID) AppendBinary([]uint8) ([]uint8, error) #62384
pkg crypto/x509, method (OID) AppendText([]uint8) ([]uint8, error) #62384
pkg crypto/x509, type Certificate struct, InhibitAnyPolicy int #68484
pkg crypto/x509, type Certificate struct, InhibitAnyPolicyZero bool #68484
pkg crypto/x509, type Certificate struct, InhibitPolicyMapping int #68484
pkg crypto/x509, type Certificate struct, InhibitPolicyMappingZero bool #68484
pkg crypto/x509, type Certificate struct, PolicyMappings []PolicyMapping #68484
pkg crypto/x509, type Certificate struct, RequireExplicitPolicy int #68484
pkg crypto/x509, type Certificate struct, RequireExplicitPolicyZero bool #68484
pkg crypto/x509, type PolicyMapping struct #68484
pkg crypto/x509, type PolicyMapping struct, IssuerDomainPolicy OID #68484
pkg crypto/x509, type PolicyMapping struct, SubjectDomainPolicy OID #68484
pkg crypto/x509, type VerifyOptions struct, CertificatePolicies []OID #68484
pkg debug/elf, const VER_FLG_BASE = 1 #63952
pkg debug/elf, const VER_FLG_BASE DynamicVersionFlag #63952
pkg debug/elf, const VER_FLG_INFO = 4 #63952
pkg debug/elf, const VER_FLG_INFO DynamicVersionFlag #63952
pkg debug/elf, const VER_FLG_WEAK = 2 #63952
pkg debug/elf, const VER_FLG_WEAK DynamicVersionFlag #63952
pkg debug/elf, method (*File) DynamicVersionNeeds() ([]DynamicVersionNeed, error) #63952
pkg debug/elf, method (*File) DynamicVersions() ([]DynamicVersion, error) #63952
pkg debug/elf, type DynamicVersion struct #63952
pkg debug/elf, type DynamicVersion struct, Deps []string #63952
pkg debug/elf, type DynamicVersion struct, Flags DynamicVersionFlag #63952
pkg debug/elf, type DynamicVersion struct, Name string #63952
pkg debug/elf, type DynamicVersion struct, Index uint16 #63952
pkg debug/elf, type DynamicVersionDep struct #63952
pkg debug/elf, type DynamicVersionDep struct, Dep string #63952
pkg debug/elf, type DynamicVersionDep struct, Flags DynamicVersionFlag #63952
pkg debug/elf, type DynamicVersionDep struct, Index uint16 #63952
pkg debug/elf, type DynamicVersionFlag uint16 #63952
pkg debug/elf, type DynamicVersionNeed struct #63952
pkg debug/elf, type DynamicVersionNeed struct, Name string #63952
pkg debug/elf, type DynamicVersionNeed struct, Needs []DynamicVersionDep #63952
pkg debug/elf, type Symbol struct, HasVersion bool #63952
pkg debug/elf, type Symbol struct, VersionIndex VersionIndex #63952
pkg debug/elf, method (VersionIndex) Index() uint16 #63952
pkg debug/elf, method (VersionIndex) IsHidden() bool #63952
pkg debug/elf, type VersionIndex uint16 #63952
pkg encoding, type BinaryAppender interface { AppendBinary } #62384
pkg encoding, type BinaryAppender interface, AppendBinary([]uint8) ([]uint8, error) #62384
pkg encoding, type TextAppender interface { AppendText } #62384
pkg encoding, type TextAppender interface, AppendText([]uint8) ([]uint8, error) #62384
pkg go/types, method (*Interface) EmbeddedTypes() iter.Seq[Type] #66626
pkg go/types, method (*Interface) ExplicitMethods() iter.Seq[*Func] #66626
pkg go/types, method (*Interface) Methods() iter.Seq[*Func] #66626
pkg go/types, method (*MethodSet) Methods() iter.Seq[*Selection] #66626
pkg go/types, method (*Named) Methods() iter.Seq[*Func] #66626
pkg go/types, method (*Scope) Children() iter.Seq[*Scope] #66626
pkg go/types, method (*Struct) Fields() iter.Seq[*Var] #66626
pkg go/types, method (*Tuple) Variables() iter.Seq[*Var] #66626
pkg go/types, method (*TypeList) Types() iter.Seq[Type] #66626
pkg go/types, method (*TypeParamList) TypeParams() iter.Seq[*TypeParam] #66626
pkg go/types, method (*Union) Terms() iter.Seq[*Term] #66626
pkg hash/maphash, func Comparable[$0 comparable](Seed, $0) uint64 #54670
pkg hash/maphash, func WriteComparable[$0 comparable](*Hash, $0) #54670
pkg log/slog, method (*LevelVar) AppendText([]uint8) ([]uint8, error) #62384
pkg log/slog, method (Level) AppendText([]uint8) ([]uint8, error) #62384
pkg log/slog, var DiscardHandler Handler #62005
pkg math/big, method (*Float) AppendText([]uint8) ([]uint8, error) #62384
pkg math/big, method (*Int) AppendText([]uint8) ([]uint8, error) #62384
pkg math/big, method (*Rat) AppendText([]uint8) ([]uint8, error) #62384
pkg math/rand/v2, method (*ChaCha8) AppendBinary([]uint8) ([]uint8, error) #62384
pkg math/rand/v2, method (*PCG) AppendBinary([]uint8) ([]uint8, error) #62384
pkg net, method (IP) AppendText([]uint8) ([]uint8, error) #62384
pkg net/http, method (*Protocols) SetHTTP1(bool) #67814
pkg net/http, method (*Protocols) SetHTTP2(bool) #67814
pkg net/http, method (*Protocols) SetUnencryptedHTTP2(bool) #67816
pkg net/http, method (Protocols) HTTP1() bool #67814
pkg net/http, method (Protocols) HTTP2() bool #67814
pkg net/http, method (Protocols) String() string #67814
pkg net/http, method (Protocols) UnencryptedHTTP2() bool #67816
pkg net/http, type HTTP2Config struct #67813
pkg net/http, type HTTP2Config struct, CountError func(string) #67813
pkg net/http, type HTTP2Config struct, MaxConcurrentStreams int #67813
pkg net/http, type HTTP2Config struct, MaxDecoderHeaderTableSize int #67813
pkg net/http, type HTTP2Config struct, MaxEncoderHeaderTableSize int #67813
pkg net/http, type HTTP2Config struct, MaxReadFrameSize int #67813
pkg net/http, type HTTP2Config struct, MaxReceiveBufferPerConnection int #67813
pkg net/http, type HTTP2Config struct, MaxReceiveBufferPerStream int #67813
pkg net/http, type HTTP2Config struct, PermitProhibitedCipherSuites bool #67813
pkg net/http, type HTTP2Config struct, PingTimeout time.Duration #67813
pkg net/http, type HTTP2Config struct, SendPingTimeout time.Duration #67813
pkg net/http, type HTTP2Config struct, WriteByteTimeout time.Duration #67813
pkg net/http, type Protocols struct #67814
pkg net/http, type Server struct, HTTP2 *HTTP2Config #67813
pkg net/http, type Server struct, Protocols *Protocols #67814
pkg net/http, type Transport struct, HTTP2 *HTTP2Config #67813
pkg net/http, type Transport struct, Protocols *Protocols #67814
pkg net/netip, method (Addr) AppendBinary([]uint8) ([]uint8, error) #62384
pkg net/netip, method (Addr) AppendText([]uint8) ([]uint8, error) #62384
pkg net/netip, method (AddrPort) AppendBinary([]uint8) ([]uint8, error) #62384
pkg net/netip, method (AddrPort) AppendText([]uint8) ([]uint8, error) #62384
pkg net/netip, method (Prefix) AppendBinary([]uint8) ([]uint8, error) #62384
pkg net/netip, method (Prefix) AppendText([]uint8) ([]uint8, error) #62384
pkg net/url, method (*URL) AppendBinary([]uint8) ([]uint8, error) #62384
pkg os, func OpenInRoot(string, string) (*File, error) #67002
pkg os, func OpenRoot(string) (*Root, error) #67002
pkg os, method (*Root) Close() error #67002
pkg os, method (*Root) Create(string) (*File, error) #67002
pkg os, method (*Root) FS() fs.FS #67002
pkg os, method (*Root) Lstat(string) (fs.FileInfo, error) #67002
pkg os, method (*Root) Mkdir(string, fs.FileMode) error #67002
pkg os, method (*Root) Name() string #67002
pkg os, method (*Root) Open(string) (*File, error) #67002
pkg os, method (*Root) OpenFile(string, int, fs.FileMode) (*File, error) #67002
pkg os, method (*Root) OpenRoot(string) (*Root, error) #67002
pkg os, method (*Root) Remove(string) error #67002
pkg os, method (*Root) Stat(string) (fs.FileInfo, error) #67002
pkg os, type Root struct #67002
pkg regexp, method (*Regexp) AppendText([]uint8) ([]uint8, error) #62384
pkg runtime, func AddCleanup[$0 interface{}, $1 interface{}](*$0, func($1), $1) Cleanup #67535
pkg runtime, func GOROOT //deprecated #51473
pkg runtime, method (Cleanup) Stop() #67535
pkg runtime, type Cleanup struct #67535
pkg strings, func FieldsFuncSeq(string, func(int32) bool) iter.Seq[string] #61901
pkg strings, func FieldsSeq(string) iter.Seq[string] #61901
pkg strings, func Lines(string) iter.Seq[string] #61901
pkg strings, func SplitAfterSeq(string, string) iter.Seq[string] #61901
pkg strings, func SplitSeq(string, string) iter.Seq[string] #61901
pkg testing, method (*B) Chdir(string) #62516
pkg testing, method (*B) Context() context.Context #36532
pkg testing, method (*B) Loop() bool #61515
pkg testing, method (*F) Chdir(string) #62516
pkg testing, method (*F) Context() context.Context #36532
pkg testing, method (*T) Chdir(string) #62516
pkg testing, method (*T) Context() context.Context #36532
pkg testing, type TB interface, Chdir(string) #62516
pkg testing, type TB interface, Context() context.Context #36532
pkg time, method (Time) AppendBinary([]uint8) ([]uint8, error) #62384
pkg time, method (Time) AppendText([]uint8) ([]uint8, error) #62384
pkg weak, func Make[$0 interface{}](*$0) Pointer[$0] #67552
pkg weak, method (Pointer[$0]) Value() *$0 #67552
pkg weak, type Pointer[$0 interface{}] struct #67552


@@ -1,111 +0,0 @@
pkg crypto, func SignMessage(Signer, io.Reader, []uint8, SignerOpts) ([]uint8, error) #63405
pkg crypto, type MessageSigner interface { Public, Sign, SignMessage } #63405
pkg crypto, type MessageSigner interface, Public() PublicKey #63405
pkg crypto, type MessageSigner interface, Sign(io.Reader, []uint8, SignerOpts) ([]uint8, error) #63405
pkg crypto, type MessageSigner interface, SignMessage(io.Reader, []uint8, SignerOpts) ([]uint8, error) #63405
pkg crypto/ecdsa, func ParseRawPrivateKey(elliptic.Curve, []uint8) (*PrivateKey, error) #63963
pkg crypto/ecdsa, func ParseUncompressedPublicKey(elliptic.Curve, []uint8) (*PublicKey, error) #63963
pkg crypto/ecdsa, method (*PrivateKey) Bytes() ([]uint8, error) #63963
pkg crypto/ecdsa, method (*PublicKey) Bytes() ([]uint8, error) #63963
pkg crypto/sha3, method (*SHA3) Clone() (hash.Cloner, error) #69521
pkg crypto/tls, type Config struct, GetEncryptedClientHelloKeys func(*ClientHelloInfo) ([]EncryptedClientHelloKey, error) #71920
pkg crypto/tls, type ConnectionState struct, CurveID CurveID #67516
pkg debug/elf, const PT_RISCV_ATTRIBUTES = 1879048195 #72843
pkg debug/elf, const PT_RISCV_ATTRIBUTES ProgType #72843
pkg debug/elf, const SHT_RISCV_ATTRIBUTES = 1879048195 #72843
pkg debug/elf, const SHT_RISCV_ATTRIBUTES SectionType #72843
pkg go/ast, const FilterFuncDuplicates //deprecated #73088
pkg go/ast, const FilterImportDuplicates //deprecated #73088
pkg go/ast, const FilterUnassociatedComments //deprecated #73088
pkg go/ast, func FilterPackage //deprecated #73088
pkg go/ast, func MergePackageFiles //deprecated #73088
pkg go/ast, func PackageExports //deprecated #73088
pkg go/ast, func PreorderStack(Node, []Node, func(Node, []Node) bool) #73319
pkg go/ast, type MergeMode //deprecated #73088
pkg go/parser, func ParseDir //deprecated #71122
pkg go/token, method (*FileSet) AddExistingFiles(...*File) #73205
pkg go/types, const FieldVar = 6 #70250
pkg go/types, const FieldVar VarKind #70250
pkg go/types, const LocalVar = 2 #70250
pkg go/types, const LocalVar VarKind #70250
pkg go/types, const PackageVar = 1 #70250
pkg go/types, const PackageVar VarKind #70250
pkg go/types, const ParamVar = 4 #70250
pkg go/types, const ParamVar VarKind #70250
pkg go/types, const RecvVar = 3 #70250
pkg go/types, const RecvVar VarKind #70250
pkg go/types, const ResultVar = 5 #70250
pkg go/types, const ResultVar VarKind #70250
pkg go/types, func LookupSelection(Type, bool, *Package, string) (Selection, bool) #70737
pkg go/types, method (*Var) Kind() VarKind #70250
pkg go/types, method (*Var) SetKind(VarKind) #70250
pkg go/types, method (VarKind) String() string #70250
pkg go/types, type VarKind uint8 #70250
pkg hash, type Cloner interface { BlockSize, Clone, Reset, Size, Sum, Write } #69521
pkg hash, type Cloner interface, BlockSize() int #69521
pkg hash, type Cloner interface, Clone() (Cloner, error) #69521
pkg hash, type Cloner interface, Reset() #69521
pkg hash, type Cloner interface, Size() int #69521
pkg hash, type Cloner interface, Sum([]uint8) []uint8 #69521
pkg hash, type Cloner interface, Write([]uint8) (int, error) #69521
pkg hash, type XOF interface { BlockSize, Read, Reset, Write } #69518
pkg hash, type XOF interface, BlockSize() int #69518
pkg hash, type XOF interface, Read([]uint8) (int, error) #69518
pkg hash, type XOF interface, Reset() #69518
pkg hash, type XOF interface, Write([]uint8) (int, error) #69518
pkg hash/maphash, method (*Hash) Clone() (hash.Cloner, error) #69521
pkg io/fs, func Lstat(FS, string) (FileInfo, error) #49580
pkg io/fs, func ReadLink(FS, string) (string, error) #49580
pkg io/fs, type ReadLinkFS interface { Lstat, Open, ReadLink } #49580
pkg io/fs, type ReadLinkFS interface, Lstat(string) (FileInfo, error) #49580
pkg io/fs, type ReadLinkFS interface, Open(string) (File, error) #49580
pkg io/fs, type ReadLinkFS interface, ReadLink(string) (string, error) #49580
pkg log/slog, func GroupAttrs(string, ...Attr) Attr #66365
pkg log/slog, method (Record) Source() *Source #70280
pkg mime/multipart, func FileContentDisposition(string, string) string #46771
pkg net/http, func NewCrossOriginProtection() *CrossOriginProtection #73626
pkg net/http, method (*CrossOriginProtection) AddInsecureBypassPattern(string) #73626
pkg net/http, method (*CrossOriginProtection) AddTrustedOrigin(string) error #73626
pkg net/http, method (*CrossOriginProtection) Check(*Request) error #73626
pkg net/http, method (*CrossOriginProtection) Handler(Handler) Handler #73626
pkg net/http, method (*CrossOriginProtection) SetDenyHandler(Handler) #73626
pkg net/http, type CrossOriginProtection struct #73626
pkg os, method (*Root) Chmod(string, fs.FileMode) error #67002
pkg os, method (*Root) Chown(string, int, int) error #67002
pkg os, method (*Root) Chtimes(string, time.Time, time.Time) error #67002
pkg os, method (*Root) Lchown(string, int, int) error #67002
pkg os, method (*Root) Link(string, string) error #67002
pkg os, method (*Root) MkdirAll(string, fs.FileMode) error #67002
pkg os, method (*Root) ReadFile(string) ([]uint8, error) #73126
pkg os, method (*Root) Readlink(string) (string, error) #67002
pkg os, method (*Root) RemoveAll(string) error #67002
pkg os, method (*Root) Rename(string, string) error #67002
pkg os, method (*Root) Symlink(string, string) error #67002
pkg os, method (*Root) WriteFile(string, []uint8, fs.FileMode) error #73126
pkg reflect, func TypeAssert[$0 interface{}](Value) ($0, bool) #62121
pkg runtime, func SetDefaultGOMAXPROCS() #73193
pkg runtime/trace, func NewFlightRecorder(FlightRecorderConfig) *FlightRecorder #63185
pkg runtime/trace, method (*FlightRecorder) Enabled() bool #63185
pkg runtime/trace, method (*FlightRecorder) Start() error #63185
pkg runtime/trace, method (*FlightRecorder) Stop() #63185
pkg runtime/trace, method (*FlightRecorder) WriteTo(io.Writer) (int64, error) #63185
pkg runtime/trace, type FlightRecorder struct #63185
pkg runtime/trace, type FlightRecorderConfig struct #63185
pkg runtime/trace, type FlightRecorderConfig struct, MaxBytes uint64 #63185
pkg runtime/trace, type FlightRecorderConfig struct, MinAge time.Duration #63185
pkg sync, method (*WaitGroup) Go(func()) #63796
pkg testing, method (*B) Attr(string, string) #43936
pkg testing, method (*B) Output() io.Writer #59928
pkg testing, method (*F) Attr(string, string) #43936
pkg testing, method (*F) Output() io.Writer #59928
pkg testing, method (*T) Attr(string, string) #43936
pkg testing, method (*T) Output() io.Writer #59928
pkg testing, type TB interface, Attr(string, string) #43936
pkg testing, type TB interface, Output() io.Writer #59928
pkg testing/fstest, method (MapFS) Lstat(string) (fs.FileInfo, error) #49580
pkg testing/fstest, method (MapFS) ReadLink(string) (string, error) #49580
pkg testing/synctest, func Test(*testing.T, func(*testing.T)) #67434
pkg testing/synctest, func Wait() #67434
pkg unicode, var CategoryAliases map[string]string #70780
pkg unicode, var Cn *RangeTable #70780
pkg unicode, var LC *RangeTable #70780


@@ -1,180 +0,0 @@
pkg bytes, method (*Buffer) Peek(int) ([]uint8, error) #73794
pkg crypto, type Decapsulator interface { Decapsulate, Encapsulator } #75300
pkg crypto, type Decapsulator interface, Decapsulate([]uint8) ([]uint8, error) #75300
pkg crypto, type Decapsulator interface, Encapsulator() Encapsulator #75300
pkg crypto, type Encapsulator interface { Bytes, Encapsulate } #75300
pkg crypto, type Encapsulator interface, Bytes() []uint8 #75300
pkg crypto, type Encapsulator interface, Encapsulate() ([]uint8, []uint8) #75300
pkg crypto/ecdh, type KeyExchanger interface { Curve, ECDH, PublicKey } #75300
pkg crypto/ecdh, type KeyExchanger interface, Curve() Curve #75300
pkg crypto/ecdh, type KeyExchanger interface, ECDH(*PublicKey) ([]uint8, error) #75300
pkg crypto/ecdh, type KeyExchanger interface, PublicKey() *PublicKey #75300
pkg crypto/ecdsa, type PrivateKey struct, D //deprecated #63963
pkg crypto/ecdsa, type PublicKey struct, X //deprecated #63963
pkg crypto/ecdsa, type PublicKey struct, Y //deprecated #63963
pkg crypto/fips140, func Enforced() bool #74630
pkg crypto/fips140, func Version() string #75301
pkg crypto/fips140, func WithoutEnforcement(func()) #74630
pkg crypto/hpke, func AES128GCM() AEAD #75300
pkg crypto/hpke, func AES256GCM() AEAD #75300
pkg crypto/hpke, func ChaCha20Poly1305() AEAD #75300
pkg crypto/hpke, func DHKEM(ecdh.Curve) KEM #75300
pkg crypto/hpke, func ExportOnly() AEAD #75300
pkg crypto/hpke, func HKDFSHA256() KDF #75300
pkg crypto/hpke, func HKDFSHA384() KDF #75300
pkg crypto/hpke, func HKDFSHA512() KDF #75300
pkg crypto/hpke, func MLKEM1024() KEM #75300
pkg crypto/hpke, func MLKEM1024P384() KEM #75300
pkg crypto/hpke, func MLKEM768() KEM #75300
pkg crypto/hpke, func MLKEM768P256() KEM #75300
pkg crypto/hpke, func MLKEM768X25519() KEM #75300
pkg crypto/hpke, func NewAEAD(uint16) (AEAD, error) #75300
pkg crypto/hpke, func NewDHKEMPrivateKey(ecdh.KeyExchanger) (PrivateKey, error) #75300
pkg crypto/hpke, func NewDHKEMPublicKey(*ecdh.PublicKey) (PublicKey, error) #75300
pkg crypto/hpke, func NewHybridPrivateKey(crypto.Decapsulator, ecdh.KeyExchanger) (PrivateKey, error) #75300
pkg crypto/hpke, func NewHybridPublicKey(crypto.Encapsulator, *ecdh.PublicKey) (PublicKey, error) #75300
pkg crypto/hpke, func NewKDF(uint16) (KDF, error) #75300
pkg crypto/hpke, func NewKEM(uint16) (KEM, error) #75300
pkg crypto/hpke, func NewMLKEMPrivateKey(crypto.Decapsulator) (PrivateKey, error) #75300
pkg crypto/hpke, func NewMLKEMPublicKey(crypto.Encapsulator) (PublicKey, error) #75300
pkg crypto/hpke, func NewRecipient([]uint8, PrivateKey, KDF, AEAD, []uint8) (*Recipient, error) #75300
pkg crypto/hpke, func NewSender(PublicKey, KDF, AEAD, []uint8) ([]uint8, *Sender, error) #75300
pkg crypto/hpke, func Open(PrivateKey, KDF, AEAD, []uint8, []uint8) ([]uint8, error) #75300
pkg crypto/hpke, func SHAKE128() KDF #75300
pkg crypto/hpke, func SHAKE256() KDF #75300
pkg crypto/hpke, func Seal(PublicKey, KDF, AEAD, []uint8, []uint8) ([]uint8, error) #75300
pkg crypto/hpke, method (*Recipient) Export(string, int) ([]uint8, error) #75300
pkg crypto/hpke, method (*Recipient) Open([]uint8, []uint8) ([]uint8, error) #75300
pkg crypto/hpke, method (*Sender) Export(string, int) ([]uint8, error) #75300
pkg crypto/hpke, method (*Sender) Seal([]uint8, []uint8) ([]uint8, error) #75300
pkg crypto/hpke, type AEAD interface, ID() uint16 #75300
pkg crypto/hpke, type AEAD interface, unexported methods #75300
pkg crypto/hpke, type KDF interface, ID() uint16 #75300
pkg crypto/hpke, type KDF interface, unexported methods #75300
pkg crypto/hpke, type KEM interface, DeriveKeyPair([]uint8) (PrivateKey, error) #75300
pkg crypto/hpke, type KEM interface, GenerateKey() (PrivateKey, error) #75300
pkg crypto/hpke, type KEM interface, ID() uint16 #75300
pkg crypto/hpke, type KEM interface, NewPrivateKey([]uint8) (PrivateKey, error) #75300
pkg crypto/hpke, type KEM interface, NewPublicKey([]uint8) (PublicKey, error) #75300
pkg crypto/hpke, type KEM interface, unexported methods #75300
pkg crypto/hpke, type PrivateKey interface, Bytes() ([]uint8, error) #75300
pkg crypto/hpke, type PrivateKey interface, KEM() KEM #75300
pkg crypto/hpke, type PrivateKey interface, PublicKey() PublicKey #75300
pkg crypto/hpke, type PrivateKey interface, unexported methods #75300
pkg crypto/hpke, type PublicKey interface, Bytes() []uint8 #75300
pkg crypto/hpke, type PublicKey interface, KEM() KEM #75300
pkg crypto/hpke, type PublicKey interface, unexported methods #75300
pkg crypto/hpke, type Recipient struct #75300
pkg crypto/hpke, type Sender struct #75300
pkg crypto/mlkem, method (*DecapsulationKey1024) Encapsulator() crypto.Encapsulator #75300
pkg crypto/mlkem, method (*DecapsulationKey768) Encapsulator() crypto.Encapsulator #75300
pkg crypto/mlkem/mlkemtest, func Encapsulate1024(*mlkem.EncapsulationKey1024, []uint8) ([]uint8, []uint8, error) #73627
pkg crypto/mlkem/mlkemtest, func Encapsulate768(*mlkem.EncapsulationKey768, []uint8) ([]uint8, []uint8, error) #73627
pkg crypto/rsa, func DecryptPKCS1v15 //deprecated #75302
pkg crypto/rsa, func DecryptPKCS1v15SessionKey //deprecated #75302
pkg crypto/rsa, func EncryptOAEPWithOptions(io.Reader, *PublicKey, []uint8, *OAEPOptions) ([]uint8, error) #65716
pkg crypto/rsa, func EncryptPKCS1v15 //deprecated #75302
pkg crypto/rsa, type PKCS1v15DecryptOptions //deprecated #75302
pkg crypto/tls, const QUICErrorEvent = 10 #75108
pkg crypto/tls, const QUICErrorEvent QUICEventKind #75108
pkg crypto/tls, const SecP256r1MLKEM768 = 4587 #71206
pkg crypto/tls, const SecP256r1MLKEM768 CurveID #71206
pkg crypto/tls, const SecP384r1MLKEM1024 = 4589 #71206
pkg crypto/tls, const SecP384r1MLKEM1024 CurveID #71206
pkg crypto/tls, type ClientHelloInfo struct, HelloRetryRequest bool #74425
pkg crypto/tls, type ConnectionState struct, HelloRetryRequest bool #74425
pkg crypto/tls, type QUICEvent struct, Err error #75108
pkg crypto/x509, func OIDFromASN1OID(asn1.ObjectIdentifier) (OID, error) #75325
pkg crypto/x509, method (ExtKeyUsage) OID() OID #75325
pkg crypto/x509, method (ExtKeyUsage) String() string #56866
pkg crypto/x509, method (KeyUsage) String() string #56866
pkg debug/elf, const R_LARCH_CALL36 = 110 #75562
pkg debug/elf, const R_LARCH_CALL36 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC32 = 13 #75562
pkg debug/elf, const R_LARCH_TLS_DESC32 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC64 = 14 #75562
pkg debug/elf, const R_LARCH_TLS_DESC64 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC64_HI12 = 118 #75562
pkg debug/elf, const R_LARCH_TLS_DESC64_HI12 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC64_LO20 = 117 #75562
pkg debug/elf, const R_LARCH_TLS_DESC64_LO20 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC64_PC_HI12 = 114 #75562
pkg debug/elf, const R_LARCH_TLS_DESC64_PC_HI12 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC64_PC_LO20 = 113 #75562
pkg debug/elf, const R_LARCH_TLS_DESC64_PC_LO20 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC_CALL = 120 #75562
pkg debug/elf, const R_LARCH_TLS_DESC_CALL R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC_HI20 = 115 #75562
pkg debug/elf, const R_LARCH_TLS_DESC_HI20 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC_LD = 119 #75562
pkg debug/elf, const R_LARCH_TLS_DESC_LD R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC_LO12 = 116 #75562
pkg debug/elf, const R_LARCH_TLS_DESC_LO12 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC_PCREL20_S2 = 126 #75562
pkg debug/elf, const R_LARCH_TLS_DESC_PCREL20_S2 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC_PC_HI20 = 111 #75562
pkg debug/elf, const R_LARCH_TLS_DESC_PC_HI20 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_DESC_PC_LO12 = 112 #75562
pkg debug/elf, const R_LARCH_TLS_DESC_PC_LO12 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_GD_PCREL20_S2 = 125 #75562
pkg debug/elf, const R_LARCH_TLS_GD_PCREL20_S2 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_LD_PCREL20_S2 = 124 #75562
pkg debug/elf, const R_LARCH_TLS_LD_PCREL20_S2 R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_LE_ADD_R = 122 #75562
pkg debug/elf, const R_LARCH_TLS_LE_ADD_R R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_LE_HI20_R = 121 #75562
pkg debug/elf, const R_LARCH_TLS_LE_HI20_R R_LARCH #75562
pkg debug/elf, const R_LARCH_TLS_LE_LO12_R = 123 #75562
pkg debug/elf, const R_LARCH_TLS_LE_LO12_R R_LARCH #75562
pkg errors, func AsType[$0 error](error) ($0, bool) #51945
pkg go/ast, func ParseDirective(token.Pos, string) (Directive, bool) #68021
pkg go/ast, method (*Directive) End() token.Pos #68021
pkg go/ast, method (*Directive) ParseArgs() ([]DirectiveArg, error) #68021
pkg go/ast, method (*Directive) Pos() token.Pos #68021
pkg go/ast, type BasicLit struct, ValueEnd token.Pos #76031
pkg go/ast, type Directive struct #68021
pkg go/ast, type Directive struct, Args string #68021
pkg go/ast, type Directive struct, ArgsPos token.Pos #68021
pkg go/ast, type Directive struct, Name string #68021
pkg go/ast, type Directive struct, Slash token.Pos #68021
pkg go/ast, type Directive struct, Tool string #68021
pkg go/ast, type DirectiveArg struct #68021
pkg go/ast, type DirectiveArg struct, Arg string #68021
pkg go/ast, type DirectiveArg struct, Pos token.Pos #68021
pkg go/token, method (*File) End() Pos #75849
pkg log/slog, func NewMultiHandler(...Handler) *MultiHandler #65954
pkg log/slog, method (*MultiHandler) Enabled(context.Context, Level) bool #65954
pkg log/slog, method (*MultiHandler) Handle(context.Context, Record) error #65954
pkg log/slog, method (*MultiHandler) WithAttrs([]Attr) Handler #65954
pkg log/slog, method (*MultiHandler) WithGroup(string) Handler #65954
pkg log/slog, type MultiHandler struct #65954
pkg net, method (*Dialer) DialIP(context.Context, string, netip.Addr, netip.Addr) (*IPConn, error) #49097
pkg net, method (*Dialer) DialTCP(context.Context, string, netip.AddrPort, netip.AddrPort) (*TCPConn, error) #49097
pkg net, method (*Dialer) DialUDP(context.Context, string, netip.AddrPort, netip.AddrPort) (*UDPConn, error) #49097
pkg net, method (*Dialer) DialUnix(context.Context, string, *UnixAddr, *UnixAddr) (*UnixConn, error) #49097
pkg net/http, method (*ClientConn) Available() int #75772
pkg net/http, method (*ClientConn) Close() error #75772
pkg net/http, method (*ClientConn) Err() error #75772
pkg net/http, method (*ClientConn) InFlight() int #75772
pkg net/http, method (*ClientConn) Release() #75772
pkg net/http, method (*ClientConn) Reserve() error #75772
pkg net/http, method (*ClientConn) RoundTrip(*Request) (*Response, error) #75772
pkg net/http, method (*ClientConn) SetStateHook(func(*ClientConn)) #75772
pkg net/http, method (*Transport) NewClientConn(context.Context, string, string) (*ClientConn, error) #75772
pkg net/http, type ClientConn struct #75772
pkg net/http, type HTTP2Config struct, StrictMaxConcurrentRequests bool #67813
pkg net/http/httputil, type ReverseProxy struct, Director //deprecated #73161
pkg net/netip, method (Prefix) Compare(Prefix) int #61642
pkg os, method (*Process) WithHandle(func(uintptr)) error #70352
pkg os, var ErrNoHandle error #70352
pkg reflect, method (Value) Fields() iter.Seq2[StructField, Value] #66631
pkg reflect, method (Value) Methods() iter.Seq2[Method, Value] #66631
pkg reflect, type Type interface, Fields() iter.Seq[StructField] #66631
pkg reflect, type Type interface, Ins() iter.Seq[Type] #66631
pkg reflect, type Type interface, Methods() iter.Seq[Method] #66631
pkg reflect, type Type interface, Outs() iter.Seq[Type] #66631
pkg testing, method (*B) ArtifactDir() string #71287
pkg testing, method (*F) ArtifactDir() string #71287
pkg testing, method (*T) ArtifactDir() string #71287
pkg testing, type TB interface, ArtifactDir() string #71287
pkg testing/cryptotest, func SetGlobalRandom(*testing.T, uint64) #70942
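Of the API additions listed above, the new log/slog MultiHandler lends itself to a short illustration. The following is a hypothetical sketch that assumes only the `NewMultiHandler` signature shown above; the other calls are existing log/slog API.

```
package main

import (
	"log/slog"
	"os"
)

func main() {
	// Fan every record out to both a human-readable text handler and a
	// machine-readable JSON handler.
	h := slog.NewMultiHandler(
		slog.NewTextHandler(os.Stderr, nil),
		slog.NewJSONHandler(os.Stdout, nil),
	)
	slog.New(h).Info("server started", "port", 8080)
}
```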

View File

@@ -1,2 +1,2 @@
branch: dev.simd
branch: release-branch.go1.22
parent-branch: master

View File

@@ -1,75 +0,0 @@
# Release Notes
The `initial` and `next` subdirectories of this directory are for release notes.
## For developers
Release notes should be added to `next` by editing existing files or creating
new files. **Do not add RELNOTE=yes comments in CLs.** Instead, add a file to
the CL (or ask the author to do so).
At the end of the development cycle, the files will be merged by being
concatenated in sorted order by pathname. Files in the directory matching the
glob "*stdlib/*minor" are treated specially. They should be in subdirectories
corresponding to standard library package paths, and headings for those package
paths will be generated automatically.
Files in this repo's `api/next` directory must have corresponding files in
`doc/next/*stdlib/*minor`.
The files should be in the subdirectory for the package with the new
API, and should be named after the issue number of the API proposal.
For example, if the directory `6-stdlib/99-minor` is present,
then an `api/next` file with the line
pkg net/http, function F #12345
should have a corresponding file named `doc/next/6-stdlib/99-minor/net/http/12345.md`.
At a minimum, that file should contain either a full sentence or a TODO,
ideally referring to a person with the responsibility to complete the note.
If your CL addresses an accepted proposal, mention the proposal issue number in
your release note in the form `/issue/NUMBER`. A link to the issue in the text
will have this form (see below). If you don't want to mention the issue in the
text, add it as a comment:
```
<!-- go.dev/issue/12345 -->
```
If an accepted proposal is mentioned in a CL but not in the release notes, it will be
flagged as a TODO by the automated tooling. That is true even for proposals that add API.
Use the following forms in your markdown:
[http.Request] # symbol documentation; auto-linked as in Go doc strings
[Request] # short form, for symbols in the package being documented
[net/http] # package link
[#12345](/issue/12345) # GitHub issues
[CL 6789](/cl/6789) # Gerrit changelists
To preview `next` content in merged form using a local instance of the website, run:
```
go run golang.org/x/website/cmd/golangorg@latest -goroot=..
```
Then open http://localhost:6060/doc/next. Refresh the page to see your latest edits.
## For the release team
The `relnote` tool, at `golang.org/x/build/cmd/relnote`, operates on the files
in `doc/next`.
As a release cycle nears completion, run `relnote todo` to get a list of
unfinished release note work.
To prepare the release notes for a release, run `relnote generate`.
That will merge the `.md` files in `next` into a single file.
Atomically (as close to it as possible) add that file to the `_content/doc` directory
of the website repository and remove the `doc/next` directory in this repository.
To begin the next release development cycle, populate the contents of `next`
with those of `initial`. From the repo root:
> cd doc
> cp -R initial/ next
Then edit `next/1-intro.md` to refer to the next version.

View File

@@ -1039,12 +1039,6 @@ The value of <code>GOMIPS64</code> environment variable (<code>hardfloat</code>
<code>GOMIPS64_hardfloat</code> or <code>GOMIPS64_softfloat</code>.
</p>
<h3 id="riscv64">RISCV64</h3>
<p>
Reference: <a href="/pkg/cmd/internal/obj/riscv">Go RISCV64 Assembly Instructions Reference Manual</a>
</p>
<h3 id="unsupported_opcodes">Unsupported opcodes</h3>
<p>

6864
doc/go1.17_spec.html Normal file

File diff suppressed because it is too large

1007
doc/go1.22.html Normal file

File diff suppressed because it is too large

View File

@@ -82,7 +82,7 @@ while still insisting that races are errors and that tools can diagnose and repo
<p>
The following formal definition of Go's memory model closely follows
the approach presented by Hans-J. Boehm and Sarita V. Adve in
<a href="https://dl.acm.org/doi/10.1145/1375581.1375591">Foundations of the C++ Concurrency Memory Model</a>”,
<a href="https://www.hpl.hp.com/techreports/2008/HPL-2008-56.pdf">Foundations of the C++ Concurrency Memory Model</a>”,
published in PLDI 2008.
The definition of data-race-free programs and the guarantee of sequential consistency
for race-free programs are equivalent to the ones in that work.
@@ -98,12 +98,12 @@ which in turn are made up of memory operations.
A <i>memory operation</i> is modeled by four details:
</p>
<ul>
<li>its kind, indicating whether it is an ordinary data read, an ordinary data write,
or a <i>synchronizing operation</i> such as an atomic data access,
a mutex operation, or a channel operation,</li>
<li>its location in the program,</li>
<li>the memory location or variable being accessed, and</li>
<li>the values read or written by the operation.</li>
<li>its kind, indicating whether it is an ordinary data read, an ordinary data write,
or a <i>synchronizing operation</i> such as an atomic data access,
a mutex operation, or a channel operation,
<li>its location in the program,
<li>the memory location or variable being accessed, and
<li>the values read or written by the operation.
</ul>
<p>
Some memory operations are <i>read-like</i>, including read, atomic read, mutex lock, and channel receive.
@@ -162,8 +162,8 @@ where visible means that both of the following hold:
</p>
<ol>
<li><i>w</i> happens before <i>r</i>.</li>
<li><i>w</i> does not happen before any other write <i>w'</i> (to <i>x</i>) that happens before <i>r</i>.</li>
<li><i>w</i> happens before <i>r</i>.
<li><i>w</i> does not happen before any other write <i>w'</i> (to <i>x</i>) that happens before <i>r</i>.
</ol>
<p>
@@ -231,7 +231,7 @@ do exactly this.
<p>
A read of an array, struct, or complex number
may be implemented as a read of each individual sub-value
may by implemented as a read of each individual sub-value
(array element, struct field, or real/imaginary component),
in any order.
Similarly, a write of an array, struct, or complex number
@@ -453,7 +453,7 @@ crash, or do something else.)
</p>
<p class="rule">
The <i>k</i>th receive from a channel with capacity <i>C</i> is synchronized before the completion of the <i>k</i>+<i>C</i>th send on that channel.
The <i>k</i>th receive on a channel with capacity <i>C</i> is synchronized before the completion of the <i>k</i>+<i>C</i>th send from that channel completes.
</p>
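This buffered-channel guarantee is what makes the familiar counting-semaphore pattern sound. The sketch below is illustrative only (it is not part of the change shown here): with a capacity-3 channel, the k'th send can complete only after the (k−3)'th receive, so at most three units of work ever hold a slot.

```
package main

import "fmt"

// limit allows at most 3 units of work to run at once.
var limit = make(chan int, 3)

func main() {
	work := []func(){
		func() { fmt.Println("job 1") },
		func() { fmt.Println("job 2") },
		func() { fmt.Println("job 3") },
		func() { fmt.Println("job 4") },
	}
	done := make(chan bool)
	for _, w := range work {
		go func(w func()) {
			limit <- 1 // acquire a slot; blocks until an earlier receive completes
			w()
			<-limit // release the slot
			done <- true
		}(w)
	}
	for range work {
		<-done
	}
}
```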
<p>

File diff suppressed because it is too large

View File

@@ -34,7 +34,6 @@ For example, if a Go program is running in an environment that contains
then that Go program will disable the use of HTTP/2 by default in both
the HTTP client and the HTTP server.
Unrecognized settings in the `GODEBUG` environment variable are ignored.
It is also possible to set the default `GODEBUG` for a given program
(discussed below).
@@ -89,40 +88,14 @@ Because this method of setting GODEBUG defaults was introduced only in Go 1.21,
programs listing versions of Go earlier than Go 1.20 are configured to match Go 1.20,
not the older version.
To override these defaults, starting in Go 1.23, the work module's `go.mod`
or the workspace's `go.work` can list one or more `godebug` lines:
godebug (
default=go1.21
panicnil=1
asynctimerchan=0
)
The special key `default` indicates a Go version to take unspecified
settings from. This allows setting the GODEBUG defaults separately
from the Go language version in the module.
In this example, the program is asking for Go 1.21 semantics and
then asking for the old pre-Go 1.21 `panic(nil)` behavior and the
new Go 1.23 `asynctimerchan=0` behavior.
Only the work module's `go.mod` is consulted for `godebug` directives.
Any directives in required dependency modules are ignored.
It is an error to list a `godebug` with an unrecognized setting.
(Toolchains older than Go 1.23 reject all `godebug` lines, since they do not
understand `godebug` at all.) When a workspace is in use, `godebug`
directives in `go.mod` files are ignored, and `go.work` will be consulted
for `godebug` directives instead.
The defaults from the `go` and `godebug` lines apply to all main
packages that are built. For more fine-grained control,
starting in Go 1.21, a main package's source files
To override these defaults, a main package's source files
can include one or more `//go:debug` directives at the top of the file
(preceding the `package` statement).
The `godebug` lines in the previous example would be written:
Continuing the `panicnil` example, if the module or workspace is updated
to say `go` `1.21`, the program can opt back into the old `panic(nil)`
behavior by including this directive:
//go:debug default=go1.21
//go:debug panicnil=1
//go:debug asynctimerchan=0
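For illustration, a minimal hypothetical main-package file using one of these directives could look like the following; as noted above, the directive must precede the `package` clause.

```
//go:debug panicnil=1

package main

import "fmt"

func main() {
	defer func() {
		// With panicnil=1 the recovered value is nil (the pre-Go 1.21
		// behavior); without the directive it is a *runtime.PanicNilError.
		fmt.Println(recover())
	}()
	panic(nil)
}
```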
Starting in Go 1.21, the Go toolchain treats a `//go:debug` directive
with an unrecognized GODEBUG setting as an invalid program.
@@ -153,215 +126,6 @@ for example,
see the [runtime documentation](/pkg/runtime#hdr-Environment_Variables)
and the [go command documentation](/cmd/go#hdr-Build_and_test_caching).
### Go 1.26
Go 1.26 added a new `httpcookiemaxnum` setting that controls the maximum number
of cookies that net/http will accept when parsing HTTP headers. If the number of
cookies in a header exceeds the number set in `httpcookiemaxnum`, cookie parsing
will fail early. The default value is `httpcookiemaxnum=3000`. Setting
`httpcookiemaxnum=0` will allow the cookie parsing to accept an indefinite
number of cookies. To avoid denial of service attacks, this setting and its default
were backported to Go 1.25.2 and Go 1.24.8.
Go 1.26 added a new `urlstrictcolons` setting that controls whether `net/url.Parse`
allows malformed hostnames containing colons outside of a bracketed IPv6 address.
The default `urlstrictcolons=1` rejects URLs such as `http://localhost:1:2` or `http://::1/`.
Colons are permitted as part of a bracketed IPv6 address, such as `http://[::1]/`.
Go 1.26 enabled two additional post-quantum key exchange mechanisms:
SecP256r1MLKEM768 and SecP384r1MLKEM1024. The default can be reverted using the
[`tlssecpmlkem` setting](/pkg/crypto/tls/#Config.CurvePreferences).
Go 1.26 added a new `tracebacklabels` setting that controls the inclusion of
goroutine labels set through the `runtime/pprof` package. Setting `tracebacklabels=1`
includes these key/value pairs in the goroutine status header of runtime
tracebacks and debug=2 runtime/pprof stack dumps. This format may change in the future.
(see go.dev/issue/76349)
Go 1.26 added a new `cryptocustomrand` setting that controls whether most crypto/...
APIs ignore the random `io.Reader` parameter. For Go 1.26, it defaults
to `cryptocustomrand=0`, ignoring the random parameters. Using `cryptocustomrand=1`
reverts to the pre-Go 1.26 behavior.
### Go 1.25
Go 1.25 added a new `decoratemappings` setting that controls whether the Go
runtime annotates OS anonymous memory mappings with context about their
purpose. These annotations appear in /proc/self/maps and /proc/self/smaps as
"[anon: Go: ...]". This setting is only used on Linux. For Go 1.25, it defaults
to `decoratemappings=1`, enabling annotations. Using `decoratemappings=0`
reverts to the pre-Go 1.25 behavior. This setting is fixed at program startup
time, and can't be modified by changing the `GODEBUG` environment variable
after the program starts.
Go 1.25 added a new `embedfollowsymlinks` setting that controls whether the
Go command will follow symlinks to regular files when embedding files.
The default value `embedfollowsymlinks=0` does not allow following
symlinks. `embedfollowsymlinks=1` will allow following symlinks.
Go 1.25 added a new `containermaxprocs` setting that controls whether the Go
runtime will consider cgroup CPU limits when setting the default GOMAXPROCS.
The default value `containermaxprocs=1` will use cgroup limits in addition to
the total logical CPU count and CPU affinity. `containermaxprocs=0` will
disable consideration of cgroup limits. This setting only affects Linux.
Go 1.25 added a new `updatemaxprocs` setting that controls whether the Go
runtime will periodically update GOMAXPROCS for new CPU affinity or cgroup
limits. The default value `updatemaxprocs=1` will enable periodic updates.
`updatemaxprocs=0` will disable periodic updates.
Go 1.25 disabled SHA-1 signature algorithms in TLS 1.2 according to RFC 9155.
The default can be reverted using the `tlssha1=1` setting.
Go 1.25 switched to SHA-256 to fill in missing SubjectKeyId in
crypto/x509.CreateCertificate. The setting `x509sha256skid=0` reverts to SHA-1.
Go 1.25 corrected the semantics of contention reports for runtime-internal locks,
and so removed the [`runtimecontentionstacks` setting](/pkg/runtime#hdr-Environment_Variables).
Go 1.25 (starting with Go 1.25 RC 2) disabled build information stamping when
multiple VCS are detected due to concerns around VCS injection attacks. This
behavior and setting was backported to Go 1.24.5 and Go 1.23.11. This behavior
can be re-enabled with the setting `allowmultiplevcs=1`.
### Go 1.24
Go 1.24 added a new `fips140` setting that controls whether the Go
Cryptographic Module operates in FIPS 140-3 mode.
The possible values are:
- "off": no special support for FIPS 140-3 mode. This is the default.
- "on": the Go Cryptographic Module operates in FIPS 140-3 mode.
- "only": like "on", but cryptographic algorithms not approved by
FIPS 140-3 return an error or panic.
For more information, see [FIPS 140-3 Compliance](/doc/security/fips140).
This setting is fixed at program startup time, and can't be modified
by changing the `GODEBUG` environment variable after the program starts.
Go 1.24 changed the global [`math/rand.Seed`](/pkg/math/rand/#Seed) to be a
no-op. This behavior is controlled by the `randseednop` setting.
For Go 1.24 it defaults to `randseednop=1`.
Using `randseednop=0` reverts to the pre-Go 1.24 behavior.
Go 1.24 added new values for the `multipathtcp` setting.
The possible values for `multipathtcp` are now:
- "0": disable MPTCP on dialers and listeners by default
- "1": enable MPTCP on dialers and listeners by default
- "2": enable MPTCP on listeners only by default
- "3": enable MPTCP on dialers only by default
For Go 1.24, it now defaults to multipathtcp="2", thus
enabled by default on listeners. Using multipathtcp="0" reverts to the
pre-Go 1.24 behavior.
Go 1.24 changed the behavior of `go test -json` to emit build errors as JSON
instead of text.
These new JSON events are distinguished by new `Action` values,
but can still cause problems with CI systems that aren't robust to these events.
This behavior can be controlled with the `gotestjsonbuildtext` setting.
Using `gotestjsonbuildtext=1` restores the 1.23 behavior.
This setting will be removed in a future release, Go 1.28 at the earliest.
Go 1.24 changed [`crypto/rsa`](/pkg/crypto/rsa) to require RSA keys to be at
least 1024 bits. This behavior can be controlled with the `rsa1024min` setting.
Using `rsa1024min=0` restores the Go 1.23 behavior.
Go 1.24 introduced a mechanism for enabling platform specific Data Independent
Timing (DIT) modes in the [`crypto/subtle`](/pkg/crypto/subtle) package. This
mode can be enabled for an entire program with the `dataindependenttiming` setting.
For Go 1.24 it defaults to `dataindependenttiming=0`. There is no change in default
behavior from Go 1.23 when `dataindependenttiming` is unset.
Using `dataindependenttiming=1` enables the DIT mode for the entire Go program.
When enabled, DIT will be enabled when calling into C from Go. When enabled,
calling into Go code from C will enable DIT, and disable it before returning to
C if it was not enabled when Go code was entered.
This currently only affects arm64 programs. For all other platforms it is a no-op.
Go 1.24 removed the `x509sha1` setting. `crypto/x509` no longer supports verifying
signatures on certificates that use SHA-1 based signature algorithms.
Go 1.24 changed the default value of the [`x509usepolicies`
setting](/pkg/crypto/x509/#CreateCertificate) from `0` to `1`. When marshalling
certificates, policies are now taken from the
[`Certificate.Policies`](/pkg/crypto/x509/#Certificate.Policies) field rather
than the
[`Certificate.PolicyIdentifiers`](/pkg/crypto/x509/#Certificate.PolicyIdentifiers)
field by default.
Go 1.24 enabled the post-quantum key exchange mechanism
X25519MLKEM768 by default. The default can be reverted using the
[`tlsmlkem` setting](/pkg/crypto/tls/#Config.CurvePreferences).
This can be useful when dealing with buggy TLS servers that do not handle large records correctly,
causing a timeout during the handshake (see [TLS post-quantum TL;DR fail](https://tldr.fail/)).
Go 1.24 also removed X25519Kyber768Draft00 and the Go 1.23 `tlskyber` setting.
Go 1.24 made [`ParsePKCS1PrivateKey`](/pkg/crypto/x509/#ParsePKCS1PrivateKey)
use and validate the CRT parameters in the encoded private key. This behavior
can be controlled with the `x509rsacrt` setting. Using `x509rsacrt=0` restores
the Go 1.23 behavior.
### Go 1.23
Go 1.23 changed the channels created by package time to be unbuffered
(synchronous), which makes correct use of the [`Timer.Stop`](/pkg/time/#Timer.Stop)
and [`Timer.Reset`](/pkg/time/#Timer.Reset) method results much easier.
The [`asynctimerchan` setting](/pkg/time/#NewTimer) disables this change.
There are no runtime metrics for this change.
This setting will be removed in Go 1.27.
Go 1.23 changed the mode bits reported by [`os.Lstat`](/pkg/os#Lstat) and [`os.Stat`](/pkg/os#Stat)
for reparse points, which can be controlled with the `winsymlink` setting.
As of Go 1.23 (`winsymlink=1`), mount points no longer have [`os.ModeSymlink`](/pkg/os#ModeSymlink)
set, and reparse points that are not symlinks, Unix sockets, or dedup files now
always have [`os.ModeIrregular`](/pkg/os#ModeIrregular) set. As a result of these changes,
[`filepath.EvalSymlinks`](/pkg/path/filepath#EvalSymlinks) no longer evaluates
mount points, which was a source of many inconsistencies and bugs.
At previous versions (`winsymlink=0`), mount points are treated as symlinks,
and other reparse points with non-default [`os.ModeType`](/pkg/os#ModeType) bits
(such as [`os.ModeDir`](/pkg/os#ModeDir)) do not have the `ModeIrregular` bit set.
Go 1.23 changed [`os.Readlink`](/pkg/os#Readlink) and [`filepath.EvalSymlinks`](/pkg/path/filepath#EvalSymlinks)
to avoid trying to normalize volumes to drive letters, which was not always even possible.
This behavior is controlled by the `winreadlinkvolume` setting.
For Go 1.23, it defaults to `winreadlinkvolume=1`.
Previous versions default to `winreadlinkvolume=0`.
Go 1.23 enabled the experimental post-quantum key exchange mechanism
X25519Kyber768Draft00 by default. The default can be reverted using the
[`tlskyber` setting](/pkg/crypto/tls/#Config.CurvePreferences).
This can be useful when dealing with buggy TLS servers that do not handle large records correctly,
causing a timeout during the handshake (see [TLS post-quantum TL;DR fail](https://tldr.fail/)).
Go 1.23 changed the behavior of
[crypto/x509.ParseCertificate](/pkg/crypto/x509/#ParseCertificate) to reject
serial numbers that are negative. This change can be reverted with
the [`x509negativeserial` setting](/pkg/crypto/x509/#ParseCertificate).
Go 1.23 re-enabled support in html/template for ECMAScript 6 template literals by default.
The [`jstmpllitinterp` setting](/pkg/html/template#hdr-Security_Model) no longer has
any effect.
Go 1.23 changed the default TLS cipher suites used by clients and servers when
not explicitly configured, removing 3DES cipher suites. The default can be reverted
using the [`tls3des` setting](/pkg/crypto/tls/#Config.CipherSuites).
This setting will be removed in Go 1.27.
Go 1.23 changed the behavior of [`tls.X509KeyPair`](/pkg/crypto/tls#X509KeyPair)
and [`tls.LoadX509KeyPair`](/pkg/crypto/tls#LoadX509KeyPair) to populate the
Leaf field of the returned [`tls.Certificate`](/pkg/crypto/tls#Certificate).
This behavior is controlled by the `x509keypairleaf` setting. For Go 1.23, it
defaults to `x509keypairleaf=1`. Previous versions default to
`x509keypairleaf=0`.
This setting will be removed in Go 1.27.
Go 1.23 changed
[`net/http.ServeContent`](/pkg/net/http#ServeContent),
[`net/http.ServeFile`](/pkg/net/http#ServeFile), and
[`net/http.ServeFS`](/pkg/net/http#ServeFS) to
remove Cache-Control, Content-Encoding, Etag, and Last-Modified headers
when serving an error. This behavior is controlled by
the [`httpservecontentkeepheaders` setting](/pkg/net/http#ServeContent).
Using `httpservecontentkeepheaders=1` restores the pre-Go 1.23 behavior.
### Go 1.22
Go 1.22 adds a configurable limit to control the maximum acceptable RSA key size
@@ -384,32 +148,29 @@ for the explicit representation of [type aliases](/ref/spec#Type_declarations).
Whether the type checker produces `Alias` types or not is controlled by the
[`gotypesalias` setting](/pkg/go/types#Alias).
For Go 1.22 it defaults to `gotypesalias=0`.
For Go 1.23, `gotypesalias=1` will become the default.
This setting will be removed in Go 1.27.
For Go 1.23, `gotypealias=1` will become the default.
This setting will be removed in a future release, Go 1.24 at the earliest.
Go 1.22 changed the default minimum TLS version supported by both servers
and clients to TLS 1.2. The default can be reverted to TLS 1.0 using the
[`tls10server` setting](/pkg/crypto/tls/#Config).
This setting will be removed in Go 1.27.
Go 1.22 changed the default TLS cipher suites used by clients and servers when
not explicitly configured, removing the cipher suites which used RSA based key
exchange. The default can be reverted using the [`tlsrsakex` setting](/pkg/crypto/tls/#Config).
This setting will be removed in Go 1.27.
exchange. The default can be revert using the [`tlsrsakex` setting](/pkg/crypto/tls/#Config).
Go 1.22 disabled
[`ConnectionState.ExportKeyingMaterial`](/pkg/crypto/tls/#ConnectionState.ExportKeyingMaterial)
when the connection supports neither TLS 1.3 nor Extended Master Secret
(implemented in Go 1.21). It can be reenabled with the [`tlsunsafeekm`
setting](/pkg/crypto/tls/#ConnectionState.ExportKeyingMaterial).
This setting will be removed in Go 1.27.
Go 1.22 changed how the runtime interacts with transparent huge pages on Linux.
In particular, a common default Linux kernel configuration can result in
significant memory overheads, and Go 1.22 no longer works around this default.
To work around this issue without adjusting kernel settings, transparent huge
pages can be disabled for Go memory with the
[`disablethp` setting](/pkg/runtime#hdr-Environment_Variables).
[`disablethp` setting](/pkg/runtime#hdr-Environment_Variable).
This behavior was backported to Go 1.21.1, but the setting is only available
starting with Go 1.21.6.
This setting may be removed in a future release, and users impacted by this issue
@@ -421,7 +182,7 @@ Go 1.22 added contention on runtime-internal locks to the [`mutex`
profile](/pkg/runtime/pprof#Profile). Contention on these locks is always
reported at `runtime._LostContendedRuntimeLock`. Complete stack traces of
runtime locks can be enabled with the [`runtimecontentionstacks`
setting](/pkg/runtime#hdr-Environment_Variables). These stack traces have
setting](/pkg/runtime#hdr-Environment_Variable). These stack traces have
non-standard semantics, see setting documentation for details.
Go 1.22 added a new [`crypto/x509.Certificate`](/pkg/crypto/x509/#Certificate)
@@ -430,7 +191,7 @@ certificate policy OIDs with components larger than 31 bits. By default this
field is only used during parsing, when it is populated with policy OIDs, but
not used during marshaling. It can be used to marshal these larger OIDs, instead
of the existing PolicyIdentifiers field, by using the
[`x509usepolicies` setting](/pkg/crypto/x509/#CreateCertificate).
[`x509usepolicies` setting.](/pkg/crypto/x509/#CreateCertificate).
### Go 1.21
@@ -487,18 +248,11 @@ Go 1.19 made it an error for path lookups to resolve to binaries in the current
controlled by the [`execerrdot` setting](/pkg/os/exec#hdr-Executables_in_the_current_directory).
There is no plan to remove this setting.
Go 1.19 started sending EDNS0 additional headers on DNS requests.
This can reportedly break the DNS server provided on some routers,
such as CenturyLink Zyxel C3000Z.
This can be changed by the [`netedns0` setting](/pkg/net#hdr-Name_Resolution).
This setting is available in Go 1.21.12, Go 1.22.5, Go 1.23, and later.
There is no plan to remove this setting.
### Go 1.18
Go 1.18 removed support for SHA1 in most X.509 certificates,
controlled by the [`x509sha1` setting](/pkg/crypto/x509#InsecureAlgorithmError).
This setting was removed in Go 1.24.
This setting will be removed in a future release, Go 1.22 at the earliest.
### Go 1.10

View File

@@ -1,8 +0,0 @@
<style>
main ul li { margin: 0.5em 0; }
</style>
## DRAFT RELEASE NOTES — Introduction to Go 1.N {#introduction}
**Go 1.N is not yet released. These are work-in-progress release notes.
Go 1.N is expected to be released in {Month} {Year}.**

View File

@@ -1,3 +0,0 @@
## Changes to the language {#language}

View File

@@ -1,6 +0,0 @@
## Tools {#tools}
### Go command {#go-command}
### Cgo {#cgo}

View File

@@ -1 +0,0 @@
## Runtime {#runtime}

View File

@@ -1,7 +0,0 @@
## Compiler {#compiler}
## Assembler {#assembler}
## Linker {#linker}

View File

@@ -1,2 +0,0 @@
## Standard library {#library}

View File

@@ -1 +0,0 @@
### Minor changes to the library {#minor_library_changes}

View File

@@ -1 +0,0 @@
API changes and other small changes to the standard library go here.

View File

@@ -1,2 +0,0 @@
## Ports {#ports}

View File

@@ -1,46 +0,0 @@
# Copyright 2024 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# Rules for building and testing new FIPS snapshots.
# For example:
#
# make v1.2.3.zip
# make v1.2.3.test
#
# and then if changes are needed, check them into master
# and run 'make v1.2.3.rm' and repeat.
#
# Note that once published a snapshot zip file should never
# be modified. We record the sha256 hashes of the zip files
# in fips140.sum, and the cmd/go/internal/fips140 test checks
# that the zips match.
#
# When the zip file is finalized, run 'make updatesum' to update
# fips140.sum.
default:
@echo nothing to make
# make v1.2.3.zip builds a v1.2.3.zip file
# from the current origin/master.
# copy and edit the 'go run' command by hand to use a different branch.
v%.zip:
git fetch origin master
go run ../../src/cmd/go/internal/fips140/mkzip.go v$*
# normally mkzip refuses to overwrite an existing zip file.
# make v1.2.3.rm removes the zip file and unpacked
# copy from the module cache.
v%.rm:
rm -f v$*.zip
chmod -R u+w $$(go env GOMODCACHE)/golang.org/fips140@v$* 2>/dev/null || true
rm -rf $$(go env GOMODCACHE)/golang.org/fips140@v$*
# make v1.2.3.test runs the crypto tests using that snapshot.
v%.test:
GOFIPS140=v$* go test -short crypto...
# make updatesum updates the fips140.sum file.
updatesum:
go test cmd/go/internal/fips140 -update

View File

@@ -1,9 +0,0 @@
This directory holds snapshots of the crypto/internal/fips140 tree
that are being validated and certified for FIPS-140 use.
The file x.txt (for example, inprocess.txt, certified.txt)
defines the meaning of the FIPS version alias x, listing
the exact version to use.
The zip files are created by cmd/go/internal/fips140/mkzip.go.
The fips140.sum file lists checksums for the zip files.
See the Makefile for recipes.

View File

@@ -1,13 +0,0 @@
# SHA256 checksums of snapshot zip files in this directory.
# These checksums are included in the FIPS security policy
# (validation instructions sent to the lab) and MUST NOT CHANGE.
# That is, the zip files themselves must not change.
#
# It is okay to add new zip files to the list, and it is okay to
# remove zip files from the list when they are removed from
# this directory. To update this file:
#
# go test cmd/go/internal/fips140 -update
#
v1.0.0-c2097c7c.zip daf3614e0406f67ae6323c902db3f953a1effb199142362a039e7526dfb9368b
v1.1.0-rc1.zip ea94f8c3885294c9efe1bd8f9b6e86daeb25b6aff2aeb20707cd9a5101f6f54e

View File

@@ -1 +0,0 @@
v1.0.0-c2097c7c

Binary file not shown.

View File

@@ -1 +0,0 @@
v1.0.0-c2097c7c

Binary file not shown.

View File

@@ -1,64 +0,0 @@
# Copyright 2025 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# Mercurial extension to add a 'goreposum' command that
# computes a hash of a remote repo's tag state.
# Tag definitions can come from the .hgtags file stored in
# any head of any branch, and the server protocol does not
# expose the tags directly. However, the protocol does expose
# the hashes of all the branch heads, so we can use a hash of
# all those branch names and heads as a conservative snapshot
# of the entire remote repo state, and use that as the tag sum.
# Any change on the server then invalidates the tag sum,
# even if it didn't have anything to do with tags, but at least
# we will avoid re-cloning a server when there have been no
# changes at all.
#
# Separately, this extension also adds a 'golookup' command that
# returns the hash of a specific reference, like 'default' or a tag.
# And golookup of a hash confirms that it still exists on the server.
# We can use that to revalidate that specific versions still exist and
# have the same meaning they did the last time we checked.
#
# Usage:
#
# hg --config "extensions.goreposum=$GOROOT/lib/hg/goreposum.py" goreposum REPOURL
import base64, hashlib, sys
from mercurial import registrar, ui, hg, node
from mercurial.i18n import _
cmdtable = {}
command = registrar.command(cmdtable)
@command(b'goreposum', [], _('url'), norepo=True)
def goreposum(ui, url):
"""
goreposum computes a checksum of all the named state in the remote repo.
It hashes together all the branch names and hashes
and then all the bookmark names and hashes.
Tags are stored in .hgtags files in any of the branches,
so the branch metadata includes the tags as well.
"""
h = hashlib.sha256()
peer = hg.peer(ui, {}, url)
for name, revs in peer.branchmap().items():
h.update(name)
for r in revs:
h.update(b' ')
h.update(r)
h.update(b'\n')
if (b'bookmarks' in peer.listkeys(b'namespaces')):
for name, rev in peer.listkeys(b'bookmarks').items():
h.update(name)
h.update(b'=')
h.update(rev)
h.update(b'\n')
print('r1:'+base64.standard_b64encode(h.digest()).decode('utf-8'))
@command(b'golookup', [], _('url rev'), norepo=True)
def golookup(ui, url, rev):
"""
golookup looks up a single identifier in the repo,
printing its hash.
"""
print(node.hex(hg.peer(ui, {}, url).lookup(rev)).decode('utf-8'))

View File

@@ -31,7 +31,7 @@ import (
)
func usage() {
fmt.Fprintf(os.Stderr, "usage: go run mkzip.go zoneinfo.zip\n")
fmt.Fprintf(os.Stderr, "usage: go run mkzip.go ../../zoneinfo.zip\n")
os.Exit(2)
}

View File

@@ -24,8 +24,8 @@
# in the CL match the update.bash in the CL.
# Versions to use.
CODE=2025c
DATA=2025c
CODE=2023c
DATA=2023c
set -e
@@ -40,12 +40,7 @@ curl -sS -L -O https://www.iana.org/time-zones/repository/releases/tzdata$DATA.t
tar xzf tzcode$CODE.tar.gz
tar xzf tzdata$DATA.tar.gz
# The PACKRATLIST and PACKRATDATA options are copied from Ubuntu:
# https://git.launchpad.net/ubuntu/+source/tzdata/tree/debian/rules?h=debian/sid
#
# You can see the description of these make variables in the tzdata Makefile:
# https://github.com/eggert/tz/blob/main/Makefile
if ! make CFLAGS=-DSTD_INSPIRED AWK=awk TZDIR=zoneinfo PACKRATDATA=backzone PACKRATLIST=zone.tab posix_only >make.out 2>&1; then
if ! make CFLAGS=-DSTD_INSPIRED AWK=awk TZDIR=zoneinfo posix_only >make.out 2>&1; then
cat make.out
exit 2
fi

Binary file not shown.

View File

@@ -1,23 +0,0 @@
#!/usr/bin/env bash
# Copyright 2023 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
case "$GOWASIRUNTIME" in
"wasmedge")
exec wasmedge --dir=/ --env PWD="$PWD" --env PATH="$PATH" ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}"
;;
"wasmer")
exec wasmer run --dir=/ --env PWD="$PWD" --env PATH="$PATH" ${GOWASIRUNTIMEARGS:-} "$1" -- "${@:2}"
;;
"wazero")
exec wazero run -mount /:/ -env-inherit -cachedir "${TMPDIR:-/tmp}"/wazero ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}"
;;
"wasmtime" | "")
exec wasmtime run --dir=/ --env PWD="$PWD" --env PATH="$PATH" -W max-wasm-stack=8388608 ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}"
;;
*)
echo "Unknown Go WASI runtime specified: $GOWASIRUNTIME"
exit 1
;;
esac

View File

@@ -1,22 +1,19 @@
#!/bin/sh
# This script configures clang to target the iOS simulator. If you'd like to
# build for real iOS devices, change SDK to "iphoneos" and PLATFORM to "ios".
# This uses the latest available iOS SDK, which is recommended. To select a
# specific SDK, run 'xcodebuild -showsdks' to see the available SDKs and replace
# iphonesimulator with one of them.
SDK=iphonesimulator
PLATFORM=ios-simulator
# This uses the latest available iOS SDK, which is recommended.
# To select a specific SDK, run 'xcodebuild -showsdks'
# to see the available SDKs and replace iphoneos with one of them.
if [ "$GOARCH" == "arm64" ]; then
SDK=iphoneos
PLATFORM=ios
CLANGARCH="arm64"
else
SDK=iphonesimulator
PLATFORM=ios-simulator
CLANGARCH="x86_64"
fi
SDK_PATH=`xcrun --sdk $SDK --show-sdk-path`
export IPHONEOS_DEPLOYMENT_TARGET=5.1
# cmd/cgo doesn't support llvm-gcc-4.2, so we have to use clang.
CLANG=`xcrun --sdk $SDK --find clang`

View File

@@ -1,21 +1,44 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This program can be used as go_ios_$GOARCH_exec by the Go tool. It executes
// binaries on the iOS Simulator using the XCode toolchain.
// This program can be used as go_ios_$GOARCH_exec by the Go tool.
// It executes binaries on an iOS device using the XCode toolchain
// and the ios-deploy program: https://github.com/phonegap/ios-deploy
//
// This script supports an extra flag, -lldb, that pauses execution
// just before the main program begins and allows the user to control
// the remote lldb session. This flag is appended to the end of the
// script's arguments and is not passed through to the underlying
// binary.
//
// This script requires that three environment variables be set:
//
// GOIOS_DEV_ID: The codesigning developer id or certificate identifier
// GOIOS_APP_ID: The provisioning app id prefix. Must support wildcard app ids.
// GOIOS_TEAM_ID: The team id that owns the app id prefix.
//
// $GOROOT/misc/ios contains a script, detect.go, that attempts to autodetect these.
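//
// A hypothetical invocation, assuming this program is installed on PATH as
// go_ios_arm64_exec and the three variables above are set:
//
//	GOOS=ios GOARCH=arm64 go test some/pkg
//
// The go tool then runs the compiled test binary through this program.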
package main
import (
"bytes"
"encoding/xml"
"errors"
"fmt"
"go/build"
"io"
"log"
"net"
"os"
"os/exec"
"os/signal"
"path/filepath"
"runtime"
"strconv"
"strings"
"syscall"
"time"
)
const debug = false
@@ -87,8 +110,18 @@ func runMain() (int, error) {
return 1, err
}
err = runOnSimulator(appdir)
if goarch := os.Getenv("GOARCH"); goarch == "arm64" {
err = runOnDevice(appdir)
} else {
err = runOnSimulator(appdir)
}
if err != nil {
// If the lldb driver completed with an exit code, use that.
if err, ok := err.(*exec.ExitError); ok {
if ws, ok := err.Sys().(interface{ ExitStatus() int }); ok {
return ws.ExitStatus(), nil
}
}
return 1, err
}
return 0, nil
@@ -102,6 +135,61 @@ func runOnSimulator(appdir string) error {
return runSimulator(appdir, bundleID, os.Args[2:])
}
func runOnDevice(appdir string) error {
// e.g. B393DDEB490947F5A463FD074299B6C0AXXXXXXX
devID = getenv("GOIOS_DEV_ID")
// e.g. Z8B3JBXXXX.org.golang.sample, Z8B3JBXXXX prefix is available at
// https://developer.apple.com/membercenter/index.action#accountSummary as Team ID.
appID = getenv("GOIOS_APP_ID")
// e.g. Z8B3JBXXXX, available at
// https://developer.apple.com/membercenter/index.action#accountSummary as Team ID.
teamID = getenv("GOIOS_TEAM_ID")
// Device IDs as listed with ios-deploy -c.
deviceID = os.Getenv("GOIOS_DEVICE_ID")
if _, id, ok := strings.Cut(appID, "."); ok {
bundleID = id
}
if err := signApp(appdir); err != nil {
return err
}
if err := uninstallDevice(bundleID); err != nil {
return err
}
if err := installDevice(appdir); err != nil {
return err
}
if err := mountDevImage(); err != nil {
return err
}
// Kill any hanging debug bridges that might take up port 3222.
exec.Command("killall", "idevicedebugserverproxy").Run()
closer, err := startDebugBridge()
if err != nil {
return err
}
defer closer()
return runDevice(appdir, bundleID, os.Args[2:])
}
func getenv(envvar string) string {
s := os.Getenv(envvar)
if s == "" {
log.Fatalf("%s not set\nrun $GOROOT/misc/ios/detect.go to attempt to autodetect", envvar)
}
return s
}
func assembleApp(appdir, bin string) error {
if err := os.MkdirAll(appdir, 0755); err != nil {
return err
@@ -129,6 +217,236 @@ func assembleApp(appdir, bin string) error {
return nil
}
func signApp(appdir string) error {
entitlementsPath := filepath.Join(tmpdir, "Entitlements.plist")
cmd := exec.Command(
"codesign",
"-f",
"-s", devID,
"--entitlements", entitlementsPath,
appdir,
)
if debug {
log.Println(strings.Join(cmd.Args, " "))
}
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
return fmt.Errorf("codesign: %v", err)
}
return nil
}
// mountDevImage ensures a developer image is mounted on the device.
// The image contains the device lldb server for idevicedebugserverproxy
// to connect to.
func mountDevImage() error {
// Check for existing mount.
cmd := idevCmd(exec.Command("ideviceimagemounter", "-l", "-x"))
out, err := cmd.CombinedOutput()
if err != nil {
os.Stderr.Write(out)
return fmt.Errorf("ideviceimagemounter: %v", err)
}
var info struct {
Dict struct {
Data []byte `xml:",innerxml"`
} `xml:"dict"`
}
if err := xml.Unmarshal(out, &info); err != nil {
return fmt.Errorf("mountDevImage: failed to decode mount information: %v", err)
}
dict, err := parsePlistDict(info.Dict.Data)
if err != nil {
return fmt.Errorf("mountDevImage: failed to parse mount information: %v", err)
}
if dict["ImagePresent"] == "true" && dict["Status"] == "Complete" {
return nil
}
// Some devices only give us an ImageSignature key.
if _, exists := dict["ImageSignature"]; exists {
return nil
}
// No image is mounted. Find a suitable image.
imgPath, err := findDevImage()
if err != nil {
return err
}
sigPath := imgPath + ".signature"
cmd = idevCmd(exec.Command("ideviceimagemounter", imgPath, sigPath))
if out, err := cmd.CombinedOutput(); err != nil {
os.Stderr.Write(out)
return fmt.Errorf("ideviceimagemounter: %v", err)
}
return nil
}
// findDevImage uses the device iOS version and build to locate a suitable
// developer image.
func findDevImage() (string, error) {
cmd := idevCmd(exec.Command("ideviceinfo"))
out, err := cmd.Output()
if err != nil {
return "", fmt.Errorf("ideviceinfo: %v", err)
}
var iosVer, buildVer string
lines := bytes.Split(out, []byte("\n"))
for _, line := range lines {
key, val, ok := strings.Cut(string(line), ": ")
if !ok {
continue
}
switch key {
case "ProductVersion":
iosVer = val
case "BuildVersion":
buildVer = val
}
}
if iosVer == "" || buildVer == "" {
return "", errors.New("failed to parse ideviceinfo output")
}
verSplit := strings.Split(iosVer, ".")
if len(verSplit) > 2 {
// Developer images are specific to major.minor ios version.
// Cut off the patch version.
iosVer = strings.Join(verSplit[:2], ".")
}
sdkBase := "/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/DeviceSupport"
patterns := []string{fmt.Sprintf("%s (%s)", iosVer, buildVer), fmt.Sprintf("%s (*)", iosVer), fmt.Sprintf("%s*", iosVer)}
for _, pattern := range patterns {
matches, err := filepath.Glob(filepath.Join(sdkBase, pattern, "DeveloperDiskImage.dmg"))
if err != nil {
return "", fmt.Errorf("findDevImage: %v", err)
}
if len(matches) > 0 {
return matches[0], nil
}
}
return "", fmt.Errorf("failed to find matching developer image for iOS version %s build %s", iosVer, buildVer)
}
// startDebugBridge ensures that the idevicedebugserverproxy runs on
// port 3222.
func startDebugBridge() (func(), error) {
errChan := make(chan error, 1)
cmd := idevCmd(exec.Command("idevicedebugserverproxy", "3222"))
var stderr bytes.Buffer
cmd.Stderr = &stderr
if err := cmd.Start(); err != nil {
return nil, fmt.Errorf("idevicedebugserverproxy: %v", err)
}
go func() {
if err := cmd.Wait(); err != nil {
if _, ok := err.(*exec.ExitError); ok {
errChan <- fmt.Errorf("idevicedebugserverproxy: %s", stderr.Bytes())
} else {
errChan <- fmt.Errorf("idevicedebugserverproxy: %v", err)
}
}
errChan <- nil
}()
closer := func() {
cmd.Process.Kill()
<-errChan
}
// Dial localhost:3222 to ensure the proxy is ready.
delay := time.Second / 4
for attempt := 0; attempt < 5; attempt++ {
conn, err := net.DialTimeout("tcp", "localhost:3222", 5*time.Second)
if err == nil {
conn.Close()
return closer, nil
}
select {
case <-time.After(delay):
delay *= 2
case err := <-errChan:
return nil, err
}
}
closer()
return nil, errors.New("failed to set up idevicedebugserverproxy")
}
// findDeviceAppPath returns the device path to the app with the
// given bundle ID. It parses the output of ideviceinstaller -l -o xml,
// looking for the bundle ID and the corresponding Path value.
func findDeviceAppPath(bundleID string) (string, error) {
cmd := idevCmd(exec.Command("ideviceinstaller", "-l", "-o", "xml"))
out, err := cmd.CombinedOutput()
if err != nil {
os.Stderr.Write(out)
return "", fmt.Errorf("ideviceinstaller: -l -o xml %v", err)
}
var list struct {
Apps []struct {
Data []byte `xml:",innerxml"`
} `xml:"array>dict"`
}
if err := xml.Unmarshal(out, &list); err != nil {
return "", fmt.Errorf("failed to parse ideviceinstaller output: %v", err)
}
for _, app := range list.Apps {
values, err := parsePlistDict(app.Data)
if err != nil {
return "", fmt.Errorf("findDeviceAppPath: failed to parse app dict: %v", err)
}
if values["CFBundleIdentifier"] == bundleID {
if path, ok := values["Path"]; ok {
return path, nil
}
}
}
return "", fmt.Errorf("failed to find device path for bundle: %s", bundleID)
}
// Parse an xml encoded plist. Plist values are mapped to string.
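// For example (hypothetical input), a dict body containing
//
//	<key>CFBundleIdentifier</key><string>org.golang.sample</string>
//	<key>Path</key><string>/var/containers/gotest.app</string>
//
// is returned as a map holding those two keys with their string values; the
// elements <true/> and <false/> are mapped to the strings "true" and "false".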
func parsePlistDict(dict []byte) (map[string]string, error) {
d := xml.NewDecoder(bytes.NewReader(dict))
values := make(map[string]string)
var key string
var hasKey bool
for {
tok, err := d.Token()
if err == io.EOF {
break
}
if err != nil {
return nil, err
}
if tok, ok := tok.(xml.StartElement); ok {
if tok.Name.Local == "key" {
if err := d.DecodeElement(&key, &tok); err != nil {
return nil, err
}
hasKey = true
} else if hasKey {
var val string
var err error
switch n := tok.Name.Local; n {
case "true", "false":
// Bools are represented as <true/> and <false/>.
val = n
err = d.Skip()
default:
err = d.DecodeElement(&val, &tok)
}
if err != nil {
return nil, err
}
values[key] = val
hasKey = false
} else {
if err := d.Skip(); err != nil {
return nil, err
}
}
}
}
return values, nil
}
func installSimulator(appdir string) error {
cmd := exec.Command(
"xcrun", "simctl", "install",
@@ -142,20 +460,138 @@ func installSimulator(appdir string) error {
return nil
}
func runSimulator(appdir, bundleID string, args []string) error {
xcrunArgs := []string{"simctl", "spawn",
"booted",
appdir + "/gotest",
func uninstallDevice(bundleID string) error {
cmd := idevCmd(exec.Command(
"ideviceinstaller",
"-U", bundleID,
))
if out, err := cmd.CombinedOutput(); err != nil {
os.Stderr.Write(out)
return fmt.Errorf("ideviceinstaller -U %q: %s", bundleID, err)
}
xcrunArgs = append(xcrunArgs, args...)
cmd := exec.Command("xcrun", xcrunArgs...)
cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
err := cmd.Run()
return nil
}
func installDevice(appdir string) error {
attempt := 0
for {
cmd := idevCmd(exec.Command(
"ideviceinstaller",
"-i", appdir,
))
if out, err := cmd.CombinedOutput(); err != nil {
// Sometimes, installing the app fails for some reason.
// Give the device a few seconds and try again.
if attempt < 5 {
time.Sleep(5 * time.Second)
attempt++
continue
}
os.Stderr.Write(out)
return fmt.Errorf("ideviceinstaller -i %q: %v (%d attempts)", appdir, err, attempt)
}
return nil
}
}
func idevCmd(cmd *exec.Cmd) *exec.Cmd {
if deviceID != "" {
// Inject -u device_id after the executable, but before the arguments.
args := []string{cmd.Args[0], "-u", deviceID}
cmd.Args = append(args, cmd.Args[1:]...)
}
return cmd
}
func runSimulator(appdir, bundleID string, args []string) error {
cmd := exec.Command(
"xcrun", "simctl", "launch",
"--wait-for-debugger",
"booted",
bundleID,
)
out, err := cmd.CombinedOutput()
if err != nil {
os.Stderr.Write(out)
return fmt.Errorf("xcrun simctl launch booted %q: %v", bundleID, err)
}
var processID int
var ignore string
if _, err := fmt.Sscanf(string(out), "%s %d", &ignore, &processID); err != nil {
return fmt.Errorf("runSimulator: couldn't find processID from `simctl launch`: %v (%q)", err, out)
}
_, err = runLLDB("ios-simulator", appdir, strconv.Itoa(processID), args)
return err
}
return nil
func runDevice(appdir, bundleID string, args []string) error {
attempt := 0
for {
// The device app path reported by the device might be stale, so retry
// the lookup of the device path along with the lldb launching below.
deviceapp, err := findDeviceAppPath(bundleID)
if err != nil {
// The device app path might not yet exist for a newly installed app.
if attempt == 5 {
return err
}
attempt++
time.Sleep(5 * time.Second)
continue
}
out, err := runLLDB("remote-ios", appdir, deviceapp, args)
// If the program was not started it can be retried without papering over
// real test failures.
started := bytes.HasPrefix(out, []byte("lldb: running program"))
if started || err == nil || attempt == 5 {
return err
}
// Sometimes, the app was not yet ready to launch or the device path was
// stale. Retry.
attempt++
time.Sleep(5 * time.Second)
}
}
func runLLDB(target, appdir, deviceapp string, args []string) ([]byte, error) {
var env []string
for _, e := range os.Environ() {
// Don't override TMPDIR, HOME, GOCACHE on the device.
if strings.HasPrefix(e, "TMPDIR=") || strings.HasPrefix(e, "HOME=") || strings.HasPrefix(e, "GOCACHE=") {
continue
}
env = append(env, e)
}
lldb := exec.Command(
"python",
"-", // Read script from stdin.
target,
appdir,
deviceapp,
)
lldb.Args = append(lldb.Args, args...)
lldb.Env = env
lldb.Stdin = strings.NewReader(lldbDriver)
lldb.Stdout = os.Stdout
var out bytes.Buffer
lldb.Stderr = io.MultiWriter(&out, os.Stderr)
err := lldb.Start()
if err == nil {
// Forward SIGQUIT to the lldb driver which in turn will forward
// to the running program.
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGQUIT)
proc := lldb.Process
go func() {
for sig := range sigs {
proc.Signal(sig)
}
}()
err = lldb.Wait()
signal.Stop(sigs)
close(sigs)
}
return out.Bytes(), err
}
func copyLocalDir(dst, src string) error {
@@ -212,7 +648,7 @@ func copyLocalData(dstbase string) (pkgpath string, err error) {
// Copy all immediate files and testdata directories between
// the package being tested and the source root.
pkgpath = ""
for element := range strings.SplitSeq(finalPkgpath, string(filepath.Separator)) {
for _, element := range strings.Split(finalPkgpath, string(filepath.Separator)) {
if debug {
log.Printf("copying %s", pkgpath)
}
@@ -364,3 +800,112 @@ const resourceRules = `<?xml version="1.0" encoding="UTF-8"?>
</dict>
</plist>
`
const lldbDriver = `
import sys
import os
import signal
platform, exe, device_exe_or_pid, args = sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4:]
env = []
for k, v in os.environ.items():
env.append(k + "=" + v)
sys.path.append('/Applications/Xcode.app/Contents/SharedFrameworks/LLDB.framework/Resources/Python')
import lldb
debugger = lldb.SBDebugger.Create()
debugger.SetAsync(True)
debugger.SkipLLDBInitFiles(True)
err = lldb.SBError()
target = debugger.CreateTarget(exe, None, platform, True, err)
if not target.IsValid() or not err.Success():
sys.stderr.write("lldb: failed to setup up target: %s\n" % (err))
sys.exit(1)
listener = debugger.GetListener()
if platform == 'remote-ios':
target.modules[0].SetPlatformFileSpec(lldb.SBFileSpec(device_exe_or_pid))
process = target.ConnectRemote(listener, 'connect://localhost:3222', None, err)
else:
process = target.AttachToProcessWithID(listener, int(device_exe_or_pid), err)
if not err.Success():
sys.stderr.write("lldb: failed to connect to remote target %s: %s\n" % (device_exe_or_pid, err))
sys.exit(1)
# Don't stop on signals.
sigs = process.GetUnixSignals()
for i in range(0, sigs.GetNumSignals()):
sig = sigs.GetSignalAtIndex(i)
sigs.SetShouldStop(sig, False)
sigs.SetShouldNotify(sig, False)
event = lldb.SBEvent()
running = False
prev_handler = None
def signal_handler(signal, frame):
process.Signal(signal)
def run_program():
# Forward SIGQUIT to the program.
prev_handler = signal.signal(signal.SIGQUIT, signal_handler)
# Tell the Go driver that the program is running and should not be retried.
sys.stderr.write("lldb: running program\n")
running = True
# Process is stopped at attach/launch. Let it run.
process.Continue()
if platform != 'remote-ios':
# For the local emulator the program is ready to run.
# For remote device runs, we need to wait for eStateConnected,
# below.
run_program()
while True:
if not listener.WaitForEvent(1, event):
continue
if not lldb.SBProcess.EventIsProcessEvent(event):
continue
if running:
# Pass through stdout and stderr.
while True:
out = process.GetSTDOUT(8192)
if not out:
break
sys.stdout.write(out)
while True:
out = process.GetSTDERR(8192)
if not out:
break
sys.stderr.write(out)
state = process.GetStateFromEvent(event)
if state in [lldb.eStateCrashed, lldb.eStateDetached, lldb.eStateUnloaded, lldb.eStateExited]:
if running:
signal.signal(signal.SIGQUIT, prev_handler)
break
elif state == lldb.eStateConnected:
if platform == 'remote-ios':
process.RemoteLaunch(args, env, None, None, None, None, 0, False, err)
if not err.Success():
sys.stderr.write("lldb: failed to launch remote process: %s\n" % (err))
process.Kill()
debugger.Terminate()
sys.exit(1)
run_program()
exitStatus = process.GetExitStatus()
exitDesc = process.GetExitDescription()
process.Kill()
debugger.Terminate()
if exitStatus == 0 and exitDesc is not None:
# Ensure tests fail when killed by a signal.
exitStatus = 123
sys.exit(exitStatus)
`

191
misc/linkcheck/linkcheck.go Normal file
View File

@@ -0,0 +1,191 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The linkcheck command finds missing links in the godoc website.
// It crawls a URL recursively and notes URLs and URL fragments
// that it's seen and prints a report of missing links at the end.
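//
// A typical invocation (the -root and -verbose flags are defined below):
//
//	go run linkcheck.go -root=http://localhost:6060 -verbose
//
// The command prints a report and exits with a non-zero status when missing
// links or fragments are found.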
package main
import (
"errors"
"flag"
"fmt"
"io"
"log"
"net/http"
"os"
"regexp"
"strings"
"sync"
)
var (
root = flag.String("root", "http://localhost:6060", "Root to crawl")
verbose = flag.Bool("verbose", false, "verbose")
)
var wg sync.WaitGroup // outstanding fetches
var urlq = make(chan string) // URLs to crawl
// urlFrag is a URL and its optional #fragment (without the #)
type urlFrag struct {
url, frag string
}
var (
mu sync.Mutex
crawled = make(map[string]bool) // URL without fragment -> true
neededFrags = make(map[urlFrag][]string) // URL#frag -> who needs it
)
var aRx = regexp.MustCompile(`<a href=['"]?(/[^\s'">]+)`)
// Owned by crawlLoop goroutine:
var (
linkSources = make(map[string][]string) // url no fragment -> sources
fragExists = make(map[urlFrag]bool)
problems []string
)
func localLinks(body string) (links []string) {
seen := map[string]bool{}
mv := aRx.FindAllStringSubmatch(body, -1)
for _, m := range mv {
ref := m[1]
if strings.HasPrefix(ref, "/src/") {
continue
}
if !seen[ref] {
seen[ref] = true
links = append(links, m[1])
}
}
return
}
var idRx = regexp.MustCompile(`\bid=['"]?([^\s'">]+)`)
func pageIDs(body string) (ids []string) {
mv := idRx.FindAllStringSubmatch(body, -1)
for _, m := range mv {
ids = append(ids, m[1])
}
return
}
// url may contain a #fragment, and the fragment is then noted as needing to exist.
func crawl(url string, sourceURL string) {
if strings.Contains(url, "/devel/release") {
return
}
mu.Lock()
defer mu.Unlock()
if u, frag, ok := strings.Cut(url, "#"); ok {
url = u
if frag != "" {
uf := urlFrag{url, frag}
neededFrags[uf] = append(neededFrags[uf], sourceURL)
}
}
if crawled[url] {
return
}
crawled[url] = true
wg.Add(1)
go func() {
urlq <- url
}()
}
func addProblem(url, errmsg string) {
msg := fmt.Sprintf("Error on %s: %s (from %s)", url, errmsg, linkSources[url])
if *verbose {
log.Print(msg)
}
problems = append(problems, msg)
}
func crawlLoop() {
for url := range urlq {
if err := doCrawl(url); err != nil {
addProblem(url, err.Error())
}
}
}
func doCrawl(url string) error {
defer wg.Done()
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return err
}
res, err := http.DefaultTransport.RoundTrip(req)
if err != nil {
return err
}
// Handle redirects.
if res.StatusCode/100 == 3 {
newURL, err := res.Location()
if err != nil {
return fmt.Errorf("resolving redirect: %v", err)
}
if !strings.HasPrefix(newURL.String(), *root) {
// Skip off-site redirects.
return nil
}
crawl(newURL.String(), url)
return nil
}
if res.StatusCode != 200 {
return errors.New(res.Status)
}
slurp, err := io.ReadAll(res.Body)
res.Body.Close()
if err != nil {
log.Fatalf("Error reading %s body: %v", url, err)
}
if *verbose {
log.Printf("Len of %s: %d", url, len(slurp))
}
body := string(slurp)
for _, ref := range localLinks(body) {
if *verbose {
log.Printf(" links to %s", ref)
}
dest := *root + ref
linkSources[dest] = append(linkSources[dest], url)
crawl(dest, url)
}
for _, id := range pageIDs(body) {
if *verbose {
log.Printf(" url %s has #%s", url, id)
}
fragExists[urlFrag{url, id}] = true
}
return nil
}
func main() {
flag.Parse()
go crawlLoop()
crawl(*root, "")
wg.Wait()
close(urlq)
for uf, needers := range neededFrags {
if !fragExists[uf] {
problems = append(problems, fmt.Sprintf("Missing fragment for %+v from %v", uf, needers))
}
}
for _, s := range problems {
fmt.Println(s)
}
if len(problems) > 0 {
os.Exit(1)
}
}
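A brief usage note, not part of the file above: linkcheck is meant to be pointed at a locally running godoc instance. An invocation along the lines of `go run misc/linkcheck/linkcheck.go -root=http://localhost:6060 -verbose` (both flags come from the flag definitions above, and -root already defaults to that address) would crawl the local site, report missing links and fragments, and exit non-zero if any problems were found.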

misc/wasm/go_wasip1_wasm_exec Executable file

@@ -0,0 +1,31 @@
#!/usr/bin/env bash
# Copyright 2023 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
case "$GOWASIRUNTIME" in
"wasmedge")
exec wasmedge --dir=/ --env PWD="$PWD" --env PATH="$PATH" ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}"
;;
"wasmer")
exec wasmer run --dir=/ --env PWD="$PWD" --env PATH="$PATH" ${GOWASIRUNTIMEARGS:-} "$1" -- "${@:2}"
;;
"wazero")
exec wazero run -mount /:/ -env-inherit -cachedir "${TMPDIR:-/tmp}"/wazero ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}"
;;
"wasmtime" | "")
# Match the major version in "wasmtime-cli 14.0.0". For versions before 14
# we need to use the old CLI. This requires Bash v3.0 and above.
# TODO(johanbrandhorst): Remove this condition once 1.22 is released.
# From 1.23 onwards we'll only support the new wasmtime CLI.
if [[ "$(wasmtime --version)" =~ wasmtime-cli[[:space:]]([0-9]+)\.[0-9]+\.[0-9]+ && "${BASH_REMATCH[1]}" -lt 14 ]]; then
exec wasmtime run --dir=/ --env PWD="$PWD" --env PATH="$PATH" --max-wasm-stack 1048576 ${GOWASIRUNTIMEARGS:-} "$1" -- "${@:2}"
else
exec wasmtime run --dir=/ --env PWD="$PWD" --env PATH="$PATH" -W max-wasm-stack=1048576 ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}"
fi
;;
*)
echo "Unknown Go WASI runtime specified: $GOWASIRUNTIME"
exit 1
;;
esac
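For orientation, and not part of the script itself: go_wasip1_wasm_exec is the exec wrapper the go command looks up (as go_${GOOS}_${GOARCH}_exec on PATH) when it needs to run wasip1/wasm binaries. A hypothetical invocation such as GOOS=wasip1 GOARCH=wasm GOWASIRUNTIME=wazero go test ./... would route execution through the wazero branch above, with GOWASIRUNTIMEARGS available for passing extra flags to the chosen runtime.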


@@ -17,7 +17,7 @@ license that can be found in the LICENSE file.
<script src="https://cdn.jsdelivr.net/npm/text-encoding@0.7.0/lib/encoding.min.js"></script>
(see https://caniuse.com/#feat=textencoder)
-->
<script src="../../lib/wasm/wasm_exec.js"></script>
<script src="wasm_exec.js"></script>
<script>
if (!WebAssembly.instantiateStreaming) { // polyfill
WebAssembly.instantiateStreaming = async (resp, importObject) => {


@@ -14,7 +14,7 @@
if (!globalThis.fs) {
let outputBuf = "";
globalThis.fs = {
constants: { O_WRONLY: -1, O_RDWR: -1, O_CREAT: -1, O_TRUNC: -1, O_APPEND: -1, O_EXCL: -1, O_DIRECTORY: -1 }, // unused
constants: { O_WRONLY: -1, O_RDWR: -1, O_CREAT: -1, O_TRUNC: -1, O_APPEND: -1, O_EXCL: -1 }, // unused
writeSync(fd, buf) {
outputBuf += decoder.decode(buf);
const nl = outputBuf.lastIndexOf("\n");
@@ -73,14 +73,6 @@
}
}
if (!globalThis.path) {
globalThis.path = {
resolve(...pathSegments) {
return pathSegments.join("/");
}
}
}
if (!globalThis.crypto) {
throw new Error("globalThis.crypto is not available, polyfill required (crypto.getRandomValues only)");
}
@@ -216,16 +208,10 @@
return decoder.decode(new DataView(this._inst.exports.mem.buffer, saddr, len));
}
const testCallExport = (a, b) => {
this._inst.exports.testExport0();
return this._inst.exports.testExport(a, b);
}
const timeOrigin = Date.now() - performance.now();
this.importObject = {
_gotest: {
add: (a, b) => a + b,
callExport: testCallExport,
},
gojs: {
// Go's SP does not change as long as no Go code is running. Some operations (e.g. calls, getters and setters)


@@ -11,7 +11,6 @@ if (process.argv.length < 3) {
globalThis.require = require;
globalThis.fs = require("fs");
globalThis.path = require("path");
globalThis.TextEncoder = require("util").TextEncoder;
globalThis.TextDecoder = require("util").TextDecoder;


@@ -31,17 +31,13 @@ Maintaining vendor directories
Before updating vendor directories, ensure that module mode is enabled.
Make sure that GO111MODULE is not set in the environment, or that it is
set to 'on' or 'auto', and if you use a go.work file, set GOWORK=off.
Also, ensure that 'go env GOROOT' shows the root of this Go source
tree. Otherwise, the results are undefined. It's recommended to build
Go from source and use that 'go' binary to update its source tree.
set to 'on' or 'auto'.
Requirements may be added, updated, and removed with 'go get'.
The vendor directory may be updated with 'go mod vendor'.
A typical sequence might be:
cd src # or src/cmd
cd src
go get golang.org/x/net@master
go mod tidy
go mod vendor


@@ -10,4 +10,4 @@ if [ ! -f make.bash ]; then
fi
. ./make.bash "$@" --no-banner
bash run.bash --no-rebuild
../bin/go tool dist banner # print build info
$GOTOOLDIR/dist banner # print build info


@@ -6,11 +6,17 @@
setlocal
if not exist make.bat (
echo all.bat must be run from go\src
exit /b 1
)
if exist make.bat goto ok
echo all.bat must be run from go\src
:: cannot exit: would kill parent command interpreter
goto end
:ok
call .\make.bat --no-banner || exit /b 1
call .\run.bat --no-rebuild || exit /b 1
..\bin\go tool dist banner
call .\make.bat --no-banner --no-local
if %GOBUILDFAIL%==1 goto end
call .\run.bat --no-rebuild --no-local
if %GOBUILDFAIL%==1 goto end
"%GOTOOLDIR%/dist" banner
:end
if x%GOBUILDEXIT%==x1 exit %GOBUILDFAIL%


@@ -13,4 +13,4 @@ if(! test -f make.rc){
. ./make.rc --no-banner $*
bind -b $GOROOT/bin /bin
./run.rc --no-rebuild
../bin/go tool dist banner # print build info
$GOTOOLDIR/dist banner # print build info


@@ -15,7 +15,6 @@ import (
"fmt"
"internal/godebug"
"io/fs"
"maps"
"math"
"path"
"reflect"
@@ -39,7 +38,6 @@ var (
errMissData = errors.New("archive/tar: sparse file references non-existent data")
errUnrefData = errors.New("archive/tar: sparse file contains unreferenced data")
errWriteHole = errors.New("archive/tar: write non-NUL byte in sparse hole")
errSparseTooLong = errors.New("archive/tar: sparse map too long")
)
type headerError []string
@@ -614,7 +612,9 @@ func (fi headerFileInfo) String() string {
}
// sysStat, if non-nil, populates h from system-dependent fields of fi.
var sysStat func(fi fs.FileInfo, h *Header, doNameLookups bool) error
var sysStat func(fi fs.FileInfo, h *Header) error
var loadUidAndGid func(fi fs.FileInfo, uid, gid *int)
const (
// Mode constants from the USTAR spec:
@@ -643,8 +643,8 @@ const (
// to provide the full path name of the file.
//
// If fi implements [FileInfoNames]
// Header.Gname and Header.Uname
// are provided by the methods of the interface.
// the Gname and Uname of the header are
// provided by the methods of the interface.
func FileInfoHeader(fi fs.FileInfo, link string) (*Header, error) {
if fi == nil {
return nil, errors.New("archive/tar: FileInfo is nil")
@@ -698,43 +698,55 @@ func FileInfoHeader(fi fs.FileInfo, link string) (*Header, error) {
h.Gname = sys.Gname
h.AccessTime = sys.AccessTime
h.ChangeTime = sys.ChangeTime
h.Xattrs = maps.Clone(sys.Xattrs)
if sys.Xattrs != nil {
h.Xattrs = make(map[string]string)
for k, v := range sys.Xattrs {
h.Xattrs[k] = v
}
}
if sys.Typeflag == TypeLink {
// hard link
h.Typeflag = TypeLink
h.Size = 0
h.Linkname = sys.Linkname
}
h.PAXRecords = maps.Clone(sys.PAXRecords)
if sys.PAXRecords != nil {
h.PAXRecords = make(map[string]string)
for k, v := range sys.PAXRecords {
h.PAXRecords[k] = v
}
}
}
var doNameLookups = true
if iface, ok := fi.(FileInfoNames); ok {
doNameLookups = false
var err error
h.Gname, err = iface.Gname()
if loadUidAndGid != nil {
loadUidAndGid(fi, &h.Uid, &h.Gid)
}
h.Gname, err = iface.Gname(h.Gid)
if err != nil {
return nil, err
}
h.Uname, err = iface.Uname()
h.Uname, err = iface.Uname(h.Uid)
if err != nil {
return nil, err
}
return h, nil
}
if sysStat != nil {
return h, sysStat(fi, h, doNameLookups)
return h, sysStat(fi, h)
}
return h, nil
}
// FileInfoNames extends [fs.FileInfo].
// FileInfoNames extends [FileInfo] to translate UID/GID to names.
// Passing an instance of this to [FileInfoHeader] permits the caller
// to avoid a system-dependent name lookup by specifying the Uname and Gname directly.
// to control UID/GID resolution.
type FileInfoNames interface {
fs.FileInfo
// Uname should give a user name.
Uname() (string, error)
// Gname should give a group name.
Gname() (string, error)
// Uname should translate a UID into a user name.
Uname(uid int) (string, error)
// Gname should translate a GID into a group name.
Gname(gid int) (string, error)
}
// isHeaderOnlyType checks if the given type flag is of the type that has no


@@ -531,17 +531,12 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) {
cntNewline int64
buf bytes.Buffer
blk block
totalSize int
)
// feedTokens copies data in blocks from r into buf until there are
// at least cnt newlines in buf. It will not read more blocks than needed.
feedTokens := func(n int64) error {
for cntNewline < n {
totalSize += len(blk)
if totalSize > maxSpecialFileSize {
return errSparseTooLong
}
if _, err := mustReadFull(r, blk[:]); err != nil {
return err
}
@@ -574,8 +569,8 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) {
}
// Parse for all member entries.
// numEntries is trusted after this since feedTokens limits the number of
// tokens based on maxSpecialFileSize.
// numEntries is trusted after this since a potential attacker must have
// committed resources proportional to what this library used.
if err := feedTokens(2 * numEntries); err != nil {
return nil, err
}
@@ -816,7 +811,9 @@ func (sr sparseFileReader) physicalRemaining() int64 {
type zeroReader struct{}
func (zeroReader) Read(b []byte) (int, error) {
clear(b)
for i := range b {
b[i] = 0
}
return len(b), nil
}


@@ -7,17 +7,14 @@ package tar
import (
"bytes"
"compress/bzip2"
"crypto/md5"
"errors"
"fmt"
"hash/crc32"
"internal/obscuretestdata"
"io"
"maps"
"math"
"os"
"path"
"reflect"
"slices"
"strconv"
"strings"
"testing"
@@ -26,11 +23,10 @@ import (
func TestReader(t *testing.T) {
vectors := []struct {
file string // Test input file
obscured bool // Obscured with obscuretestdata package
headers []*Header // Expected output headers
chksums []string // CRC32 checksum of files, leave as nil if not checked
err error // Expected error to occur
file string // Test input file
headers []*Header // Expected output headers
chksums []string // MD5 checksum of files, leave as nil if not checked
err error // Expected error to occur
}{{
file: "testdata/gnu.tar",
headers: []*Header{{
@@ -57,8 +53,8 @@ func TestReader(t *testing.T) {
Format: FormatGNU,
}},
chksums: []string{
"6cbd88fc",
"ddac04b3",
"e38b27eaccb4391bdec553a7f3ae6b2f",
"c65bd2e50a56a2138bf1716f2fd56fe9",
},
}, {
file: "testdata/sparse-formats.tar",
@@ -151,11 +147,11 @@ func TestReader(t *testing.T) {
Format: FormatGNU,
}},
chksums: []string{
"5375e1d2",
"5375e1d2",
"5375e1d2",
"5375e1d2",
"8eb179ba",
"6f53234398c2449fe67c1812d993012f",
"6f53234398c2449fe67c1812d993012f",
"6f53234398c2449fe67c1812d993012f",
"6f53234398c2449fe67c1812d993012f",
"b0061974914468de549a2af8ced10316",
},
}, {
file: "testdata/star.tar",
@@ -272,7 +268,7 @@ func TestReader(t *testing.T) {
Format: FormatPAX,
}},
chksums: []string{
"5fd7e86a",
"0afb597b283fe61b5d4879669a350556",
},
}, {
file: "testdata/pax-records.tar",
@@ -525,9 +521,8 @@ func TestReader(t *testing.T) {
file: "testdata/pax-nul-path.tar",
err: ErrHeader,
}, {
file: "testdata/neg-size.tar.base64",
obscured: true,
err: ErrHeader,
file: "testdata/neg-size.tar",
err: ErrHeader,
}, {
file: "testdata/issue10968.tar",
err: ErrHeader,
@@ -624,32 +619,18 @@ func TestReader(t *testing.T) {
},
Format: FormatPAX,
}},
}, {
// Small compressed file that uncompresses to
// a file with a very large GNU 1.0 sparse map.
file: "testdata/gnu-sparse-many-zeros.tar.bz2",
err: errSparseTooLong,
}}
for _, v := range vectors {
t.Run(strings.TrimSuffix(path.Base(v.file), ".base64"), func(t *testing.T) {
path := v.file
if v.obscured {
tf, err := obscuretestdata.DecodeToTempFile(path)
if err != nil {
t.Fatalf("obscuredtestdata.DecodeToTempFile(%s): %v", path, err)
}
path = tf
}
f, err := os.Open(path)
t.Run(path.Base(v.file), func(t *testing.T) {
f, err := os.Open(v.file)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
defer f.Close()
var fr io.Reader = f
if strings.HasSuffix(v.file, ".bz2") || strings.HasSuffix(v.file, ".bz2.base64") {
if strings.HasSuffix(v.file, ".bz2") {
fr = bzip2.NewReader(fr)
}
@@ -674,7 +655,7 @@ func TestReader(t *testing.T) {
if v.chksums == nil {
continue
}
h := crc32.NewIEEE()
h := md5.New()
_, err = io.CopyBuffer(h, tr, rdbuf) // Effectively an incremental read
if err != nil {
break
@@ -787,7 +768,7 @@ type readBadSeeker struct{ io.ReadSeeker }
func (rbs *readBadSeeker) Seek(int64, int) (int64, error) { return 0, fmt.Errorf("illegal seek") }
// TestReadTruncation tests the ending condition on various truncated files and
// TestReadTruncation test the ending condition on various truncated files and
// that truncated files are still detected even if the underlying io.Reader
// satisfies io.Seeker.
func TestReadTruncation(t *testing.T) {
@@ -1036,7 +1017,7 @@ func TestParsePAX(t *testing.T) {
for i, v := range vectors {
r := strings.NewReader(v.in)
got, err := parsePAX(r)
if !maps.Equal(got, v.want) && !(len(got) == 0 && len(v.want) == 0) {
if !reflect.DeepEqual(got, v.want) && !(len(got) == 0 && len(v.want) == 0) {
t.Errorf("test %d, parsePAX():\ngot %v\nwant %v", i, got, v.want)
}
if ok := err == nil; ok != v.ok {
@@ -1153,7 +1134,7 @@ func TestReadOldGNUSparseMap(t *testing.T) {
v.input = v.input[copy(blk[:], v.input):]
tr := Reader{r: bytes.NewReader(v.input)}
got, err := tr.readOldGNUSparseMap(&hdr, &blk)
if !slices.Equal(got, v.wantMap) {
if !equalSparseEntries(got, v.wantMap) {
t.Errorf("test %d, readOldGNUSparseMap(): got %v, want %v", i, got, v.wantMap)
}
if err != v.wantErr {
@@ -1344,7 +1325,7 @@ func TestReadGNUSparsePAXHeaders(t *testing.T) {
r := strings.NewReader(v.inputData + "#") // Add canary byte
tr := Reader{curr: &regFileReader{r, int64(r.Len())}}
got, err := tr.readGNUSparsePAXHeaders(&hdr)
if !slices.Equal(got, v.wantMap) {
if !equalSparseEntries(got, v.wantMap) {
t.Errorf("test %d, readGNUSparsePAXHeaders(): got %v, want %v", i, got, v.wantMap)
}
if err != v.wantErr {


@@ -17,36 +17,37 @@ import (
func init() {
sysStat = statUnix
loadUidAndGid = loadUidAndGidFunc
}
// userMap and groupMap cache UID and GID lookups for performance reasons.
// userMap and groupMap caches UID and GID lookups for performance reasons.
// The downside is that renaming uname or gname by the OS never takes effect.
var userMap, groupMap sync.Map // map[int]string
func statUnix(fi fs.FileInfo, h *Header, doNameLookups bool) error {
func statUnix(fi fs.FileInfo, h *Header) error {
sys, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return nil
}
h.Uid = int(sys.Uid)
h.Gid = int(sys.Gid)
if doNameLookups {
// Best effort at populating Uname and Gname.
// The os/user functions may fail for any number of reasons
// (not implemented on that platform, cgo not enabled, etc).
if u, ok := userMap.Load(h.Uid); ok {
h.Uname = u.(string)
} else if u, err := user.LookupId(strconv.Itoa(h.Uid)); err == nil {
h.Uname = u.Username
userMap.Store(h.Uid, h.Uname)
}
if g, ok := groupMap.Load(h.Gid); ok {
h.Gname = g.(string)
} else if g, err := user.LookupGroupId(strconv.Itoa(h.Gid)); err == nil {
h.Gname = g.Name
groupMap.Store(h.Gid, h.Gname)
}
// Best effort at populating Uname and Gname.
// The os/user functions may fail for any number of reasons
// (not implemented on that platform, cgo not enabled, etc).
if u, ok := userMap.Load(h.Uid); ok {
h.Uname = u.(string)
} else if u, err := user.LookupId(strconv.Itoa(h.Uid)); err == nil {
h.Uname = u.Username
userMap.Store(h.Uid, h.Uname)
}
if g, ok := groupMap.Load(h.Gid); ok {
h.Gname = g.(string)
} else if g, err := user.LookupGroupId(strconv.Itoa(h.Gid)); err == nil {
h.Gname = g.Name
groupMap.Store(h.Gid, h.Gname)
}
h.AccessTime = statAtime(sys)
h.ChangeTime = statCtime(sys)
@@ -99,3 +100,12 @@ func statUnix(fi fs.FileInfo, h *Header, doNameLookups bool) error {
}
return nil
}
func loadUidAndGidFunc(fi fs.FileInfo, uid, gid *int) {
sys, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return
}
*uid = int(sys.Uid)
*gid = int(sys.Gid)
}


@@ -213,17 +213,15 @@ func parsePAXTime(s string) (time.Time, error) {
}
// Parse the nanoseconds.
// Initialize an array with '0's to handle right padding automatically.
nanoDigits := [maxNanoSecondDigits]byte{'0', '0', '0', '0', '0', '0', '0', '0', '0'}
for i := range len(sn) {
switch c := sn[i]; {
case c < '0' || c > '9':
return time.Time{}, ErrHeader
case i < len(nanoDigits):
nanoDigits[i] = c
}
if strings.Trim(sn, "0123456789") != "" {
return time.Time{}, ErrHeader
}
nsecs, _ := strconv.ParseInt(string(nanoDigits[:]), 10, 64) // Must succeed after validation
if len(sn) < maxNanoSecondDigits {
sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad
} else {
sn = sn[:maxNanoSecondDigits] // Right truncate
}
nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed
if len(ss) > 0 && ss[0] == '-' {
return time.Unix(secs, -1*nsecs), nil // Negative correction
}
@@ -312,7 +310,7 @@ func formatPAXRecord(k, v string) (string, error) {
// "%d %s=%s\n" % (size, key, value)
//
// Keys and values should be UTF-8, but the number of bad writers out there
// forces us to be more liberal.
// forces us to be a more liberal.
// Thus, we only reject all keys with NUL, and only reject NULs in values
// for the PAX version of the USTAR string fields.
// The key must not contain an '=' character.
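A worked example of that record layout, added here for clarity: the key "path" with the value "a/b" is encoded as "12 path=a/b\n", where the leading 12 is the length in bytes of the entire record, counting the length digits themselves, the separating space, and the trailing newline.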


@@ -439,66 +439,3 @@ func TestFormatPAXRecord(t *testing.T) {
}
}
}
func BenchmarkParsePAXTime(b *testing.B) {
tests := []struct {
name string
in string
want time.Time
ok bool
}{
{
name: "NoNanos",
in: "123456",
want: time.Unix(123456, 0),
ok: true,
},
{
name: "ExactNanos",
in: "1.123456789",
want: time.Unix(1, 123456789),
ok: true,
},
{
name: "WithNanoPadding",
in: "1.123",
want: time.Unix(1, 123000000),
ok: true,
},
{
name: "WithNanoTruncate",
in: "1.123456789123",
want: time.Unix(1, 123456789),
ok: true,
},
{
name: "TrailingError",
in: "1.123abc",
want: time.Time{},
ok: false,
},
{
name: "LeadingError",
in: "1.abc123",
want: time.Time{},
ok: false,
},
}
for _, tt := range tests {
b.Run(tt.name, func(b *testing.B) {
b.ReportAllocs()
for b.Loop() {
ts, err := parsePAXTime(tt.in)
if (err == nil) != tt.ok {
if err != nil {
b.Fatal(err)
}
b.Fatal("expected error")
}
if !ts.Equal(tt.want) {
b.Fatalf("time mismatch: got %v, want %v", ts, tt.want)
}
}
})
}
}


@@ -11,13 +11,11 @@ import (
"internal/testenv"
"io"
"io/fs"
"maps"
"math"
"os"
"path"
"path/filepath"
"reflect"
"slices"
"strings"
"testing"
"time"
@@ -100,6 +98,10 @@ func (f *testFile) Seek(pos int64, whence int) (int64, error) {
return f.pos, nil
}
func equalSparseEntries(x, y []sparseEntry) bool {
return (len(x) == 0 && len(y) == 0) || reflect.DeepEqual(x, y)
}
func TestSparseEntries(t *testing.T) {
vectors := []struct {
in []sparseEntry
@@ -196,11 +198,11 @@ func TestSparseEntries(t *testing.T) {
continue
}
gotAligned := alignSparseEntries(append([]sparseEntry{}, v.in...), v.size)
if !slices.Equal(gotAligned, v.wantAligned) {
if !equalSparseEntries(gotAligned, v.wantAligned) {
t.Errorf("test %d, alignSparseEntries():\ngot %v\nwant %v", i, gotAligned, v.wantAligned)
}
gotInverted := invertSparseEntries(append([]sparseEntry{}, v.in...), v.size)
if !slices.Equal(gotInverted, v.wantInverted) {
if !equalSparseEntries(gotInverted, v.wantInverted) {
t.Errorf("test %d, inverseSparseEntries():\ngot %v\nwant %v", i, gotInverted, v.wantInverted)
}
}
@@ -742,7 +744,7 @@ func TestHeaderAllowedFormats(t *testing.T) {
if formats != v.formats {
t.Errorf("test %d, allowedFormats(): got %v, want %v", i, formats, v.formats)
}
if formats&FormatPAX > 0 && !maps.Equal(paxHdrs, v.paxHdrs) && !(len(paxHdrs) == 0 && len(v.paxHdrs) == 0) {
if formats&FormatPAX > 0 && !reflect.DeepEqual(paxHdrs, v.paxHdrs) && !(len(paxHdrs) == 0 && len(v.paxHdrs) == 0) {
t.Errorf("test %d, allowedFormats():\ngot %v\nwant %s", i, paxHdrs, v.paxHdrs)
}
if (formats != FormatUnknown) && (err != nil) {
@@ -847,7 +849,10 @@ func Benchmark(b *testing.B) {
}
var _ fileInfoNames = fileInfoNames{}
const (
testUid = 10
testGid = 20
)
type fileInfoNames struct{}
@@ -875,24 +880,39 @@ func (f *fileInfoNames) Sys() any {
return nil
}
func (f *fileInfoNames) Uname() (string, error) {
return "Uname", nil
func (f *fileInfoNames) Uname(uid int) (string, error) {
if uid == testUid {
return "Uname", nil
}
return "", nil
}
func (f *fileInfoNames) Gname() (string, error) {
return "Gname", nil
func (f *fileInfoNames) Gname(gid int) (string, error) {
if gid == testGid {
return "Gname", nil
}
return "", nil
}
func TestFileInfoHeaderUseFileInfoNames(t *testing.T) {
origLoadUidAndGid := loadUidAndGid
defer func() {
loadUidAndGid = origLoadUidAndGid
}()
loadUidAndGid = func(fi fs.FileInfo, uid, gid *int) {
*uid = testUid
*gid = testGid
}
info := &fileInfoNames{}
header, err := FileInfoHeader(info, "")
if err != nil {
t.Fatal(err)
}
if header.Uname != "Uname" {
t.Fatalf("header.Uname: got %s, want %s", header.Uname, "Uname")
t.Fatalf("header.Uname: got %v, want %v", header.Uname, "Uname")
}
if header.Gname != "Gname" {
t.Fatalf("header.Gname: got %s, want %s", header.Gname, "Gname")
t.Fatalf("header.Gname: got %v, want %v", header.Gname, "Gname")
}
}

Binary file not shown.


@@ -1,90 +0,0 @@
Z251LXNwYXJzZQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAwMDAwMDAAMDAwMDAw
MAAwMDAwMDAwADAwMDAwMDA2MDAwADAwMDAwMDAwMDAwADAyMjIyNwAgUwAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB1c3RhciAgAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwMDAwMDAwADAwMDAw
MDAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAlQL4gAw
MDAwMDAwMTAwMACAAAAAAAAABKgXxgAwMDAwMDAwMTAwMACAAAAAAAAABvwjqgAwMDAwMDAwMTAw
MACAAAAAAAAACVAvjgAwMDAwMDAwMTAwMAABgAAAAAAAAA34R1gAAAAAAAAAAAAAAAAAAAAAAACA
AAAAAAAAC6Q7cgAwMDAwMDAwMTAwMACAAAAAAAAADfhHVgAwMDAwMDAwMTAwMAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1
Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5AAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAADAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2
Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAMDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3
ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OQAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4
OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5AAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAADAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5
MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAMDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkw
MTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OQAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=

BIN
src/archive/tar/testdata/neg-size.tar vendored Normal file

Binary file not shown.


@@ -1,9 +0,0 @@
EwMwMBMDLTgyMTk1MDI5NnQTE4NzfINzEzAwcXfhZrsDMDAwAAAAEDAxMRNz9DEwMTAwdBMTg3N8
g3NzADATc3yDc3P0eFMTc/QxMDEwMHQTE4NzfINzc/QwE3N8g3Nz9HFTMNR0MBMwMHEw9DAAAAAQ
MDGAABAwMTETc/QxMDH0MHQTMDBx1OFmuwMAAAD/gICAADExNTYyMQAgMBP///+AMTAwdHhTMDB0
MBMwMHF3MDEwMTAwdBMTg3N8g3Nz9HhTMDB0MBMwMHF34Wa7AzAwMAAAABAwMTETc/QxMDEwMHQT
E4NzfINzc/QwE3N8g3Nz9HhTE3P0MTAxMDB0ExODc3yDc3MwMBNzfINzczB4UzAwdDATMDBxMDAA
gAAAEDAxc/QxMDEwMHQTAAAAIOFmuwMwNAAAABAwMTET////gDEwMHR4UzAwdDATMDBxd+FmuwMw
MDAAAAAQMDExE3ODc3P0eFMTc/QxMDEwMHQTE4NzfINzc/QzMTEwMzM2MjQ4NDYxMjgzODBzfINz
c/R4UzAwdDATMDBxMDAwAAAAEDAxAAAQMDExE3P0MTAxMDB0EzAwcdThZrsDMDQAAAAQg3N8g3Nz
9DATc3yDc3P0eFMwMHQwEzAwcTAwMAAAABAwMQAAEDAxMRNz9DEwMTAwdBMwMHgw4Wa7AwAAEDA=

Binary file not shown.


@@ -1,108 +0,0 @@
UGF4SGVhZGVycy4wL3BheC1zcGFyc2UAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAwMDAwMDAAMDAwMDAw
MAAwMDAwMDAwADAwMDAwMDAwMTcyADAwMDAwMDAwMDAwADAxMjIyNwAgeAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB1c3RhcgAwMAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAy
MiBHTlUuc3BhcnNlLm1ham9yPTEKMjIgR05VLnNwYXJzZS5taW5vcj0wCjMwIEdOVS5zcGFyc2Uu
bmFtZT1wYXgtc3BhcnNlCjM1IEdOVS5zcGFyc2UucmVhbHNpemU9NjAwMDAwMDAwMDAKMTMgc2l6
ZT0zNTg0CgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEdO
VVNwYXJzZUZpbGUuMC9wYXgtc3BhcnNlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwMDAwMDAwADAwMDAwMDAA
MDAwMDAwMAAwMDAwMDAwNzAwMAAwMDAwMDAwMDAwMAAwMTM3MzcAIDAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAdXN0YXIAMDAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMDAwMDAwMAAwMDAwMDAw
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAANgo5
OTk5OTk5NDg4CjUxMgoxOTk5OTk5OTQ4OAo1MTIKMjk5OTk5OTk0ODgKNTEyCjM5OTk5OTk5NDg4
CjUxMgo0OTk5OTk5OTQ4OAo1MTIKNTk5OTk5OTk0ODgKNTEyCgAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAMDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3
ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OQAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4
OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5AAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAADAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5
MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAMDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkw
MTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OQAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAx
MjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5AAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAADAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEy
MzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA

Binary file not shown.


@@ -1,27 +0,0 @@
bG9uZ25hbWUvbG9uZ25hbWUvbG9uZ25hbWUvbG9uZ25hbWUvbG9uZ25hbWUvbG9uZ25hbWUvbG9u
Z25hbWUvbG9uZ25hbWUvbG9uZ25hbWUvbG9uZ25hbWUvbG9uZ25hbWUvbDAwMDAwMDAAMDAwMDAw
MAAwMDAwMDAwADAwMDAwMDAwMjU2ADAwMDAwMDAwMDAwADAzMTQyMAAgeAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB1c3RhcgAwMAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAx
NTQgcGF0aD1sb25nbmFtZS9sb25nbmFtZS9sb25nbmFtZS9sb25nbmFtZS9sb25nbmFtZS9sb25n
bmFtZS9sb25nbmFtZS9sb25nbmFtZS9sb25nbmFtZS9sb25nbmFtZS9sb25nbmFtZS9sb25nbmFt
ZS9sb25nbmFtZS9sb25nbmFtZS9sb25nbmFtZS8xNmdpZy50eHQKMjAgc2l6ZT0xNzE3OTg2OTE4
NAoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGxv
bmduYW1lL2xvbmduYW1lL2xvbmduYW1lL2xvbmduYW1lL2xvbmduYW1lL2xvbmduYW1lL2xvbmdu
YW1lL2xvbmduYW1lL2xvbmduYW1lL2xvbmduYW1lL2xvbmduYW1lL2wwMDAwNjQ0ADAwMDE3NTAA
MDAwMTc1MAAwMDAwMDAwMDAwMAAxMjMzMjc3MDUwNwAwMzY0NjIAIDAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAdXN0YXIAMDBndWlsbGF1bWUAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAGd1aWxsYXVtZQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMDAwMDAwMAAwMDAwMDAw
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA


@@ -9,9 +9,8 @@ import (
"fmt"
"io"
"io/fs"
"maps"
"path"
"slices"
"sort"
"strings"
"time"
)
@@ -170,10 +169,16 @@ func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error {
// Write PAX records to the output.
isGlobal := hdr.Typeflag == TypeXGlobalHeader
if len(paxHdrs) > 0 || isGlobal {
// Sort keys for deterministic ordering.
var keys []string
for k := range paxHdrs {
keys = append(keys, k)
}
sort.Strings(keys)
// Write each record to a buffer.
var buf strings.Builder
// Sort keys for deterministic ordering.
for _, k := range slices.Sorted(maps.Keys(paxHdrs)) {
for _, k := range keys {
rec, err := formatPAXRecord(k, paxHdrs[k])
if err != nil {
return err
@@ -408,37 +413,25 @@ func (tw *Writer) AddFS(fsys fs.FS) error {
if err != nil {
return err
}
if name == "." {
if d.IsDir() {
return nil
}
info, err := d.Info()
if err != nil {
return err
}
linkTarget := ""
if typ := d.Type(); typ == fs.ModeSymlink {
var err error
linkTarget, err = fs.ReadLink(fsys, name)
if err != nil {
return err
}
} else if !typ.IsRegular() && typ != fs.ModeDir {
// TODO(#49580): Handle symlinks when fs.ReadLinkFS is available.
if !info.Mode().IsRegular() {
return errors.New("tar: cannot add non-regular file")
}
h, err := FileInfoHeader(info, linkTarget)
h, err := FileInfoHeader(info, "")
if err != nil {
return err
}
h.Name = name
if d.IsDir() {
h.Name += "/"
}
if err := tw.WriteHeader(h); err != nil {
return err
}
if !d.Type().IsRegular() {
return nil
}
f, err := fsys.Open(name)
if err != nil {
return err
@@ -675,7 +668,6 @@ func (sw *sparseFileWriter) ReadFrom(r io.Reader) (n int64, err error) {
func (sw sparseFileWriter) logicalRemaining() int64 {
return sw.sp[len(sw.sp)-1].endOffset() - sw.pos
}
func (sw sparseFileWriter) physicalRemaining() int64 {
return sw.fw.physicalRemaining()
}
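To make the AddFS hunks above easier to follow, here is a minimal sketch, not taken from the patch, of how (*tar.Writer).AddFS is typically driven; the fstest.MapFS contents are illustrative only.

package main

import (
	"archive/tar"
	"bytes"
	"log"
	"testing/fstest"
)

func main() {
	// An in-memory filesystem standing in for whatever fs.FS the caller has.
	fsys := fstest.MapFS{
		"hello.txt":     {Data: []byte("hello")},
		"sub/world.txt": {Data: []byte("world")},
	}

	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)

	// AddFS walks fsys and writes a header plus file contents for each entry.
	if err := tw.AddFS(fsys); err != nil {
		log.Fatal(err)
	}
	if err := tw.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("archive is %d bytes", buf.Len())
}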


@@ -8,13 +8,11 @@ import (
"bytes"
"encoding/hex"
"errors"
"internal/obscuretestdata"
"io"
"io/fs"
"maps"
"os"
"path"
"slices"
"reflect"
"sort"
"strings"
"testing"
@@ -75,9 +73,8 @@ func TestWriter(t *testing.T) {
)
vectors := []struct {
file string // Optional filename of expected output
obscured bool // Whether file is obscured
tests []testFnc
file string // Optional filename of expected output
tests []testFnc
}{{
// The writer test file was produced with this command:
// tar (GNU tar) 1.26
@@ -153,8 +150,7 @@ func TestWriter(t *testing.T) {
// bsdtar -xvf writer-big-long.tar
//
// This file is in PAX format.
file: "testdata/writer-big-long.tar.base64",
obscured: true,
file: "testdata/writer-big-long.tar",
tests: []testFnc{
testHeader{Header{
Typeflag: TypeReg,
@@ -396,8 +392,7 @@ func TestWriter(t *testing.T) {
testClose{},
},
}, {
file: "testdata/gnu-sparse-big.tar.base64",
obscured: true,
file: "testdata/gnu-sparse-big.tar",
tests: []testFnc{
testHeader{Header{
Typeflag: TypeGNUSparse,
@@ -429,8 +424,7 @@ func TestWriter(t *testing.T) {
testClose{nil},
},
}, {
file: "testdata/pax-sparse-big.tar.base64",
obscured: true,
file: "testdata/pax-sparse-big.tar",
tests: []testFnc{
testHeader{Header{
Typeflag: TypeReg,
@@ -488,7 +482,7 @@ func TestWriter(t *testing.T) {
return x == y
}
for _, v := range vectors {
t.Run(strings.TrimSuffix(path.Base(v.file), ".base64"), func(t *testing.T) {
t.Run(path.Base(v.file), func(t *testing.T) {
const maxSize = 10 << 10 // 10KiB
buf := new(bytes.Buffer)
tw := NewWriter(iotest.TruncateWriter(buf, maxSize))
@@ -527,16 +521,7 @@ func TestWriter(t *testing.T) {
}
if v.file != "" {
path := v.file
if v.obscured {
tf, err := obscuretestdata.DecodeToTempFile(path)
if err != nil {
t.Fatalf("obscuretestdata.DecodeToTempFile(%s): %v", path, err)
}
path = tf
}
want, err := os.ReadFile(path)
want, err := os.ReadFile(v.file)
if err != nil {
t.Fatalf("ReadFile() = %v, want nil", err)
}
@@ -596,10 +581,10 @@ func TestPaxSymlink(t *testing.T) {
t.Fatal(err)
}
hdr, err := FileInfoHeader(fileinfo, "")
hdr.Typeflag = TypeSymlink
if err != nil {
t.Fatalf("os.Stat:1 %v", err)
}
hdr.Typeflag = TypeSymlink
// Force a PAX long linkname to be written
longLinkname := strings.Repeat("1234567890/1234567890", 10)
hdr.Linkname = longLinkname
@@ -717,7 +702,7 @@ func TestPaxXattrs(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if !maps.Equal(hdr.Xattrs, xattrs) {
if !reflect.DeepEqual(hdr.Xattrs, xattrs) {
t.Fatalf("xattrs did not survive round trip: got %+v, want %+v",
hdr.Xattrs, xattrs)
}
@@ -764,7 +749,7 @@ func TestPaxHeadersSorted(t *testing.T) {
bytes.Index(buf.Bytes(), []byte("foo=foo")),
bytes.Index(buf.Bytes(), []byte("qux=qux")),
}
if !slices.IsSorted(indices) {
if !sort.IntsAreSorted(indices) {
t.Fatal("PAX headers are not sorted")
}
}
@@ -776,10 +761,10 @@ func TestUSTARLongName(t *testing.T) {
t.Fatal(err)
}
hdr, err := FileInfoHeader(fileinfo, "")
hdr.Typeflag = TypeDir
if err != nil {
t.Fatalf("os.Stat:1 %v", err)
}
hdr.Typeflag = TypeDir
// Force a PAX long name to be written. The name was taken from a practical example
// that fails and replaced ever char through numbers to anonymize the sample.
longName := "/0000_0000000/00000-000000000/0000_0000000/00000-0000000000000/0000_0000000/00000-0000000-00000000/0000_0000000/00000000/0000_0000000/000/0000_0000000/00000000v00/0000_0000000/000000/0000_0000000/0000000/0000_0000000/00000y-00/0000/0000/00000000/0x000000/"
@@ -1353,41 +1338,29 @@ func TestFileWriter(t *testing.T) {
func TestWriterAddFS(t *testing.T) {
fsys := fstest.MapFS{
"emptyfolder": {Mode: 0o755 | os.ModeDir},
"file.go": {Data: []byte("hello")},
"subfolder/another.go": {Data: []byte("world")},
"symlink.go": {Mode: 0o777 | os.ModeSymlink, Data: []byte("file.go")},
// Notably missing here is the "subfolder" directory. This makes sure even
// if we don't have a subfolder directory listed.
}
var buf bytes.Buffer
tw := NewWriter(&buf)
if err := tw.AddFS(fsys); err != nil {
t.Fatal(err)
}
if err := tw.Close(); err != nil {
t.Fatal(err)
}
// Add subfolder into fsys to match what we'll read from the tar.
fsys["subfolder"] = &fstest.MapFile{Mode: 0o555 | os.ModeDir}
// Test that we can get the files back from the archive
tr := NewReader(&buf)
names := make([]string, 0, len(fsys))
for name := range fsys {
names = append(names, name)
entries, err := fsys.ReadDir(".")
if err != nil {
t.Fatal(err)
}
sort.Strings(names)
entriesLeft := len(fsys)
for _, name := range names {
entriesLeft--
entryInfo, err := fsys.Lstat(name)
if err != nil {
t.Fatalf("getting entry info error: %v", err)
var curfname string
for _, entry := range entries {
curfname = entry.Name()
if entry.IsDir() {
curfname += "/"
continue
}
hdr, err := tr.Next()
if err == io.EOF {
@@ -1397,42 +1370,22 @@ func TestWriterAddFS(t *testing.T) {
t.Fatal(err)
}
tmpName := name
if entryInfo.IsDir() {
tmpName += "/"
}
if hdr.Name != tmpName {
t.Errorf("test fs has filename %v; archive header has %v",
name, hdr.Name)
data, err := io.ReadAll(tr)
if err != nil {
t.Fatal(err)
}
if entryInfo.Mode() != hdr.FileInfo().Mode() {
t.Errorf("%s: test fs has mode %v; archive header has %v",
name, entryInfo.Mode(), hdr.FileInfo().Mode())
if hdr.Name != curfname {
t.Fatalf("got filename %v, want %v",
curfname, hdr.Name)
}
switch entryInfo.Mode().Type() {
case fs.ModeDir:
// No additional checks necessary.
case fs.ModeSymlink:
origtarget := string(fsys[name].Data)
if hdr.Linkname != origtarget {
t.Fatalf("test fs has link content %s; archive header %v", origtarget, hdr.Linkname)
}
default:
data, err := io.ReadAll(tr)
if err != nil {
t.Fatal(err)
}
origdata := fsys[name].Data
if string(data) != string(origdata) {
t.Fatalf("test fs has file content %v; archive header has %v", origdata, data)
}
origdata := fsys[curfname].Data
if string(data) != string(origdata) {
t.Fatalf("got file content %v, want %v",
data, origdata)
}
}
if entriesLeft > 0 {
t.Fatalf("not all entries are in the archive")
}
}
func TestWriterAddFSNonRegularFiles(t *testing.T) {


@@ -8,7 +8,6 @@ import (
"bufio"
"encoding/binary"
"errors"
"fmt"
"hash"
"hash/crc32"
"internal/godebug"
@@ -17,7 +16,7 @@ import (
"os"
"path"
"path/filepath"
"slices"
"sort"
"strings"
"sync"
"time"
@@ -700,13 +699,9 @@ func findSignatureInBlock(b []byte) int {
if b[i] == 'P' && b[i+1] == 'K' && b[i+2] == 0x05 && b[i+3] == 0x06 {
// n is length of comment
n := int(b[i+directoryEndLen-2]) | int(b[i+directoryEndLen-1])<<8
if n+directoryEndLen+i > len(b) {
// Truncated comment.
// Some parsers (such as Info-ZIP) ignore the truncated comment
// rather than treating it as a hard error.
return -1
if n+directoryEndLen+i <= len(b) {
return i
}
return i
}
}
return -1
@@ -805,9 +800,6 @@ func toValidName(name string) string {
func (r *Reader) initFileList() {
r.fileListOnce.Do(func() {
// Preallocate the minimum size of the index.
// We may also synthesize additional directory entries.
r.fileList = make([]fileListEntry, 0, len(r.File))
// files and knownDirs map from a file/directory name
// to an index into the r.fileList entry that we are
// building. They are used to mark duplicate entries.
@@ -866,19 +858,14 @@ func (r *Reader) initFileList() {
}
}
slices.SortFunc(r.fileList, func(a, b fileListEntry) int {
return fileEntryCompare(a.name, b.name)
})
sort.Slice(r.fileList, func(i, j int) bool { return fileEntryLess(r.fileList[i].name, r.fileList[j].name) })
})
}
func fileEntryCompare(x, y string) int {
func fileEntryLess(x, y string) bool {
xdir, xelem, _ := split(x)
ydir, yelem, _ := split(y)
if xdir != ydir {
return strings.Compare(xdir, ydir)
}
return strings.Compare(xelem, yelem)
return xdir < ydir || xdir == ydir && xelem < yelem
}
// Open opens the named file in the ZIP archive,
@@ -906,8 +893,14 @@ func (r *Reader) Open(name string) (fs.File, error) {
}
func split(name string) (dir, elem string, isDir bool) {
name, isDir = strings.CutSuffix(name, "/")
i := strings.LastIndexByte(name, '/')
if len(name) > 0 && name[len(name)-1] == '/' {
isDir = true
name = name[:len(name)-1]
}
i := len(name) - 1
for i >= 0 && name[i] != '/' {
i--
}
if i < 0 {
return ".", name, isDir
}
@@ -923,12 +916,9 @@ func (r *Reader) openLookup(name string) *fileListEntry {
dir, elem, _ := split(name)
files := r.fileList
i, _ := slices.BinarySearchFunc(files, dir, func(a fileListEntry, dir string) (ret int) {
idir, ielem, _ := split(a.name)
if dir != idir {
return strings.Compare(idir, dir)
}
return strings.Compare(ielem, elem)
i := sort.Search(len(files), func(i int) bool {
idir, ielem, _ := split(files[i].name)
return idir > dir || idir == dir && ielem >= elem
})
if i < len(files) {
fname := files[i].name
@@ -941,21 +931,13 @@ func (r *Reader) openLookup(name string) *fileListEntry {
func (r *Reader) openReadDir(dir string) []fileListEntry {
files := r.fileList
i, _ := slices.BinarySearchFunc(files, dir, func(a fileListEntry, dir string) int {
idir, _, _ := split(a.name)
if dir != idir {
return strings.Compare(idir, dir)
}
// find the first entry with dir
return +1
i := sort.Search(len(files), func(i int) bool {
idir, _, _ := split(files[i].name)
return idir >= dir
})
j, _ := slices.BinarySearchFunc(files, dir, func(a fileListEntry, dir string) int {
jdir, _, _ := split(a.name)
if dir != jdir {
return strings.Compare(jdir, dir)
}
// find the last entry with dir
return -1
j := sort.Search(len(files), func(j int) bool {
jdir, _, _ := split(files[j].name)
return jdir > dir
})
return files[i:j]
}
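As background for the switch between sort.Search and slices.BinarySearchFunc shown in the hunks above, this snippet (not from the patch) illustrates the slices API: the comparator returns a negative, zero, or positive value, and the call reports the candidate index together with whether an exact match was found.

package main

import (
	"fmt"
	"slices"
	"strings"
)

func main() {
	// Sorted input, as required by any binary search.
	names := []string{"a/x", "b/y", "c/z"}

	// The comparator follows strings.Compare semantics: <0, 0, >0.
	i, found := slices.BinarySearchFunc(names, "b/y", func(elem, target string) int {
		return strings.Compare(elem, target)
	})
	fmt.Println(i, found) // prints: 1 true
}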
@@ -989,12 +971,6 @@ func (d *openDir) ReadDir(count int) ([]fs.DirEntry, error) {
s, err := d.files[d.offset+i].stat()
if err != nil {
return nil, err
} else if s.Name() == "." || !fs.ValidPath(s.Name()) {
return nil, &fs.PathError{
Op: "readdir",
Path: d.e.name,
Err: fmt.Errorf("invalid file name: %v", d.files[d.offset+i].name),
}
}
list[i] = s
}


@@ -8,14 +8,13 @@ import (
"bytes"
"encoding/binary"
"encoding/hex"
"errors"
"internal/obscuretestdata"
"io"
"io/fs"
"os"
"path/filepath"
"reflect"
"regexp"
"slices"
"strings"
"testing"
"testing/fstest"
@@ -571,14 +570,6 @@ var tests = []ZipTest{
},
},
},
// Issue 66869: Don't skip over an EOCDR with a truncated comment.
// The test file sneakily hides a second EOCDR before the first one;
// previously we would extract one file ("file") from this archive,
// while most other tools would reject the file or extract a different one ("FILE").
{
Name: "comment-truncated.zip",
Error: ErrFormat,
},
}
func TestReader(t *testing.T) {
@@ -913,7 +904,9 @@ func returnRecursiveZip() (r io.ReaderAt, size int64) {
// type zeros struct{}
//
// func (zeros) Read(b []byte) (int, error) {
// clear(b)
// for i := range b {
// b[i] = 0
// }
// return len(b), nil
// }
//
@@ -1213,6 +1206,7 @@ func TestFS(t *testing.T) {
[]string{"a/b/c"},
},
} {
test := test
t.Run(test.file, func(t *testing.T) {
t.Parallel()
z, err := OpenReader(test.file)
@@ -1246,6 +1240,7 @@ func TestFSWalk(t *testing.T) {
wantErr: true,
},
} {
test := test
t.Run(test.file, func(t *testing.T) {
t.Parallel()
z, err := OpenReader(test.file)
@@ -1273,56 +1268,13 @@ func TestFSWalk(t *testing.T) {
} else if !test.wantErr && sawErr {
t.Error("unexpected error")
}
if test.want != nil && !slices.Equal(files, test.want) {
if test.want != nil && !reflect.DeepEqual(files, test.want) {
t.Errorf("got %v want %v", files, test.want)
}
})
}
}
func TestFSWalkBadFile(t *testing.T) {
t.Parallel()
var buf bytes.Buffer
zw := NewWriter(&buf)
hdr := &FileHeader{Name: "."}
hdr.SetMode(fs.ModeDir | 0o755)
w, err := zw.CreateHeader(hdr)
if err != nil {
t.Fatalf("create zip header: %v", err)
}
_, err = w.Write([]byte("some data"))
if err != nil {
t.Fatalf("write zip contents: %v", err)
}
err = zw.Close()
if err != nil {
t.Fatalf("close zip writer: %v", err)
}
zr, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
if err != nil {
t.Fatalf("create zip reader: %v", err)
}
var count int
var errRepeat = errors.New("repeated call to path")
err = fs.WalkDir(zr, ".", func(p string, d fs.DirEntry, err error) error {
count++
if count > 2 { // once for directory read, once for the error
return errRepeat
}
return err
})
if err == nil {
t.Fatalf("expected error from invalid file name")
} else if errors.Is(err, errRepeat) {
t.Fatal(err)
}
}
func TestFSModTime(t *testing.T) {
t.Parallel()
z, err := OpenReader("testdata/subdir.zip")
@@ -1622,7 +1574,7 @@ func TestCVE202141772(t *testing.T) {
t.Errorf("Opening %q with fs.FS API succeeded", f.Name)
}
}
if !slices.Equal(names, entryNames) {
if !reflect.DeepEqual(names, entryNames) {
t.Errorf("Unexpected file entries: %q", names)
}
if _, err := r.Open(""); err == nil {
@@ -1735,7 +1687,7 @@ func TestInsecurePaths(t *testing.T) {
for _, f := range zr.File {
gotPaths = append(gotPaths, f.Name)
}
if !slices.Equal(gotPaths, []string{path}) {
if !reflect.DeepEqual(gotPaths, []string{path}) {
t.Errorf("NewReader for archive with file %q: got files %q", path, gotPaths)
continue
}
@@ -1760,7 +1712,7 @@ func TestDisableInsecurePathCheck(t *testing.T) {
for _, f := range zr.File {
gotPaths = append(gotPaths, f.Name)
}
if want := []string{name}; !slices.Equal(gotPaths, want) {
if want := []string{name}; !reflect.DeepEqual(gotPaths, want) {
t.Errorf("NewReader with zipinsecurepath=1: got files %q, want %q", gotPaths, want)
}
}


@@ -143,9 +143,9 @@ type FileHeader struct {
// Deprecated: Use CompressedSize64 instead.
CompressedSize uint32
// UncompressedSize is the uncompressed size of the file in bytes.
// UncompressedSize is the compressed size of the file in bytes.
// If either the uncompressed or compressed size of the file
// does not fit in 32 bits, UncompressedSize is set to ^uint32(0).
// does not fit in 32 bits, CompressedSize is set to ^uint32(0).
//
// Deprecated: Use UncompressedSize64 instead.
UncompressedSize uint32

Binary file not shown.


@@ -213,8 +213,7 @@ func (w *Writer) Close() error {
// The name must be a relative path: it must not start with a drive
// letter (e.g. C:) or leading slash, and only forward slashes are
// allowed. To create a directory instead of a file, add a trailing
// slash to the name. Duplicate names will not overwrite previous entries
// and are appended to the zip file.
// slash to the name.
// The file's contents must be written to the [io.Writer] before the next
// call to [Writer.Create], [Writer.CreateHeader], or [Writer.Close].
func (w *Writer) Create(name string) (io.Writer, error) {
@@ -434,10 +433,6 @@ func writeHeader(w io.Writer, h *header) error {
// [Writer.CreateHeader], [Writer.CreateRaw], or [Writer.Close].
//
// In contrast to [Writer.CreateHeader], the bytes passed to Writer are not compressed.
//
// CreateRaw's argument is stored in w. If the argument is a pointer to the embedded
// [FileHeader] in a [File] obtained from a [Reader] created from in-memory data,
// then w will refer to all of that memory.
func (w *Writer) CreateRaw(fh *FileHeader) (io.Writer, error) {
if err := w.prepare(fh); err != nil {
return nil, err
@@ -476,10 +471,7 @@ func (w *Writer) Copy(f *File) error {
if err != nil {
return err
}
// Copy the FileHeader so w doesn't store a pointer to the data
// of f's entire archive. See #65499.
fh := f.FileHeader
fw, err := w.CreateRaw(&fh)
fw, err := w.CreateRaw(&f.FileHeader)
if err != nil {
return err
}
@@ -505,14 +497,14 @@ func (w *Writer) AddFS(fsys fs.FS) error {
if err != nil {
return err
}
if name == "." {
if d.IsDir() {
return nil
}
info, err := d.Info()
if err != nil {
return err
}
if !d.IsDir() && !info.Mode().IsRegular() {
if !info.Mode().IsRegular() {
return errors.New("zip: cannot add non-regular file")
}
h, err := FileInfoHeader(info)
@@ -520,17 +512,11 @@ func (w *Writer) AddFS(fsys fs.FS) error {
return err
}
h.Name = name
if d.IsDir() {
h.Name += "/"
}
h.Method = Deflate
fw, err := w.CreateHeader(h)
if err != nil {
return err
}
if d.IsDir() {
return nil
}
f, err := fsys.Open(name)
if err != nil {
return err
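
As a reference point for the AddFS hunks above, which walk the provided file system and archive its regular files, here is a small usage sketch with testing/fstest.MapFS standing in for a real file system (file names and contents are made up):

    package main

    import (
        "archive/zip"
        "bytes"
        "log"
        "testing/fstest"
    )

    func main() {
        fsys := fstest.MapFS{
            "a.txt":     {Data: []byte("hello")},
            "dir/b.txt": {Data: []byte("world")},
        }

        var buf bytes.Buffer
        zw := zip.NewWriter(&buf)
        // AddFS walks fsys and adds its regular files to the archive.
        if err := zw.AddFS(fsys); err != nil {
            log.Fatal(err)
        }
        if err := zw.Close(); err != nil {
            log.Fatal(err)
        }
        log.Printf("wrote %d bytes", buf.Len())
    }
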
@@ -615,7 +601,7 @@ func (w *fileWriter) writeDataDescriptor() error {
}
// Write data descriptor. This is more complicated than one would
// think, see e.g. comments in zipfile.c:putextended() and
// https://bugs.openjdk.org/browse/JDK-7073588.
// http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=7073588.
// The approach here is to write 8 byte sizes if needed without
// adding a zip64 extra in the local header (too late anyway).
var buf []byte

View File

@@ -108,7 +108,7 @@ func TestWriter(t *testing.T) {
// TestWriterComment is test for EOCD comment read/write.
func TestWriterComment(t *testing.T) {
tests := []struct {
var tests = []struct {
comment string
ok bool
}{
@@ -158,7 +158,7 @@ func TestWriterComment(t *testing.T) {
}
func TestWriterUTF8(t *testing.T) {
utf8Tests := []struct {
var utf8Tests = []struct {
name string
comment string
nonUTF8 bool
@@ -619,32 +619,32 @@ func TestWriterAddFS(t *testing.T) {
buf := new(bytes.Buffer)
w := NewWriter(buf)
tests := []WriteTest{
{Name: "emptyfolder", Mode: 0o755 | os.ModeDir},
{Name: "file.go", Data: []byte("hello"), Mode: 0644},
{Name: "subfolder/another.go", Data: []byte("world"), Mode: 0644},
// Notably missing here is the "subfolder" directory. This makes sure even
// if we don't have a subfolder directory listed.
{
Name: "file.go",
Data: []byte("hello"),
Mode: 0644,
},
{
Name: "subfolder/another.go",
Data: []byte("world"),
Mode: 0644,
},
}
err := w.AddFS(writeTestsToFS(tests))
if err != nil {
t.Fatal(err)
}
if err := w.Close(); err != nil {
t.Fatal(err)
}
// Add subfolder into fsys to match what we'll read from the zip.
tests = append(tests[:2:2], WriteTest{Name: "subfolder", Mode: 0o555 | os.ModeDir}, tests[2])
// read it back
r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
if err != nil {
t.Fatal(err)
}
for i, wt := range tests {
if wt.Mode.IsDir() {
wt.Name += "/"
}
testReadFile(t, r.File[i], &wt)
}
}

View File

@@ -8,14 +8,13 @@ package zip
import (
"bytes"
"cmp"
"errors"
"fmt"
"hash"
"internal/testenv"
"io"
"runtime"
"slices"
"sort"
"strings"
"testing"
"time"
@@ -215,8 +214,9 @@ func (r *rleBuffer) ReadAt(p []byte, off int64) (n int, err error) {
if len(p) == 0 {
return
}
skipParts, _ := slices.BinarySearchFunc(r.buf, off, func(rb repeatedByte, off int64) int {
return cmp.Compare(rb.off+rb.n, off)
skipParts := sort.Search(len(r.buf), func(i int) bool {
part := &r.buf[i]
return part.off+part.n > off
})
parts := r.buf[skipParts:]
if len(parts) > 0 {
@@ -814,6 +814,8 @@ func TestSuffixSaver(t *testing.T) {
type zeros struct{}
func (zeros) Read(p []byte) (int, error) {
clear(p)
for i := range p {
p[i] = 0
}
return len(p), nil
}

View File

@@ -29,9 +29,6 @@ var (
// Buffered input.
// Reader implements buffering for an io.Reader object.
// A new Reader is created by calling [NewReader] or [NewReaderSize];
// alternatively the zero value of a Reader may be used after calling [Reset]
// on it.
type Reader struct {
buf []byte
rd io.Reader // reader provided by the client
@@ -133,10 +130,9 @@ func (b *Reader) readErr() error {
}
// Peek returns the next n bytes without advancing the reader. The bytes stop
// being valid at the next read call. If necessary, Peek will read more bytes
// into the buffer in order to make n bytes available. If Peek returns fewer
// than n bytes, it also returns an error explaining why the read is short.
// The error is [ErrBufferFull] if n is larger than b's buffer size.
// being valid at the next read call. If Peek returns fewer than n bytes, it
// also returns an error explaining why the read is short. The error is
// [ErrBufferFull] if n is larger than b's buffer size.
//
// Calling Peek prevents a [Reader.UnreadByte] or [Reader.UnreadRune] call from succeeding
// until the next read operation.
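
A short sketch of the Peek behaviour described above: the returned bytes do not advance the reader, and asking for more than the buffer holds reports ErrBufferFull. The 16-byte reader size is an arbitrary choice for the example.

    package main

    import (
        "bufio"
        "fmt"
        "strings"
    )

    func main() {
        r := bufio.NewReaderSize(strings.NewReader("golang"), 16)

        b, err := r.Peek(2)
        fmt.Printf("%q %v\n", b, err) // "go" <nil>

        // Asking for more than the buffer size reports ErrBufferFull.
        _, err = r.Peek(64)
        fmt.Println(err == bufio.ErrBufferFull) // true

        // The read position has not advanced.
        rest, _ := r.ReadString('\n')
        fmt.Println(rest) // golang
    }
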
@@ -311,7 +307,10 @@ func (b *Reader) ReadRune() (r rune, size int, err error) {
if b.r == b.w {
return 0, 0, b.readErr()
}
r, size = utf8.DecodeRune(b.buf[b.r:b.w])
r, size = rune(b.buf[b.r]), 1
if r >= utf8.RuneSelf {
r, size = utf8.DecodeRune(b.buf[b.r:b.w])
}
b.r += size
b.lastByte = int(b.buf[b.r-1])
b.lastRuneSize = size
@@ -516,11 +515,9 @@ func (b *Reader) WriteTo(w io.Writer) (n int64, err error) {
b.lastByte = -1
b.lastRuneSize = -1
if b.r < b.w {
n, err = b.writeBuf(w)
if err != nil {
return
}
n, err = b.writeBuf(w)
if err != nil {
return
}
if r, ok := b.rd.(io.WriterTo); ok {

View File

@@ -9,7 +9,6 @@ import (
"bytes"
"errors"
"fmt"
"internal/asan"
"io"
"math/rand"
"strconv"
@@ -586,9 +585,6 @@ func TestWriteInvalidRune(t *testing.T) {
}
func TestReadStringAllocs(t *testing.T) {
if asan.Enabled {
t.Skip("test allocates more with -asan; see #70079")
}
r := strings.NewReader(" foo foo 42 42 42 42 42 42 42 42 4.2 4.2 4.2 4.2\n")
buf := NewReader(r)
allocs := testing.AllocsPerRun(100, func() {
@@ -640,7 +636,7 @@ func TestWriter(t *testing.T) {
for l := 0; l < len(written); l++ {
if written[l] != data[l] {
t.Errorf("wrong bytes written")
t.Errorf("want=%q", data[:len(written)])
t.Errorf("want=%q", data[0:len(written)])
t.Errorf("have=%q", written)
}
}
@@ -939,6 +935,7 @@ func (t *testReader) Read(buf []byte) (n int, err error) {
}
func testReadLine(t *testing.T, input []byte) {
//for stride := 1; stride < len(input); stride++ {
for stride := 1; stride < 2; stride++ {
done := 0
reader := testReader{input, stride}
@@ -1149,7 +1146,7 @@ func (w errorWriterToTest) Write(p []byte) (int, error) {
var errorWriterToTests = []errorWriterToTest{
{1, 0, nil, io.ErrClosedPipe, io.ErrClosedPipe},
{0, 1, io.ErrClosedPipe, nil, io.ErrClosedPipe},
{0, 0, io.ErrUnexpectedEOF, io.ErrClosedPipe, io.ErrUnexpectedEOF},
{0, 0, io.ErrUnexpectedEOF, io.ErrClosedPipe, io.ErrClosedPipe},
{0, 1, io.EOF, nil, nil},
}

View File

@@ -33,33 +33,6 @@ func ExampleWriter_AvailableBuffer() {
// Output: 1 2 3 4
}
// ExampleWriter_ReadFrom demonstrates how to use the ReadFrom method of Writer.
func ExampleWriter_ReadFrom() {
var buf bytes.Buffer
writer := bufio.NewWriter(&buf)
data := "Hello, world!\nThis is a ReadFrom example."
reader := strings.NewReader(data)
n, err := writer.ReadFrom(reader)
if err != nil {
fmt.Println("ReadFrom Error:", err)
return
}
if err = writer.Flush(); err != nil {
fmt.Println("Flush Error:", err)
return
}
fmt.Println("Bytes written:", n)
fmt.Println("Buffer contents:", buf.String())
// Output:
// Bytes written: 41
// Buffer contents: Hello, world!
// This is a ReadFrom example.
}
// The simplest use of a Scanner, to read standard input as a set of lines.
func ExampleScanner_lines() {
scanner := bufio.NewScanner(os.Stdin)

View File

@@ -1,96 +0,0 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build unix
package bufio_test
import (
"bufio"
"io"
"net"
"path/filepath"
"strings"
"sync"
"testing"
)
// TestCopyUnixpacket tests that we can use bufio when copying
// across a unixpacket socket. This used to fail due to an unnecessary
// empty Write call that was interpreted as an EOF.
func TestCopyUnixpacket(t *testing.T) {
tmpDir := t.TempDir()
socket := filepath.Join(tmpDir, "unixsock")
// Start a unixpacket server.
addr := &net.UnixAddr{
Name: socket,
Net: "unixpacket",
}
server, err := net.ListenUnix("unixpacket", addr)
if err != nil {
t.Skipf("skipping test because opening a unixpacket socket failed: %v", err)
}
// Start a goroutine for the server to accept one connection
// and read all the data sent on the connection,
// reporting the number of bytes read on ch.
ch := make(chan int, 1)
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
tot := 0
defer func() {
ch <- tot
}()
serverConn, err := server.Accept()
if err != nil {
t.Error(err)
return
}
buf := make([]byte, 1024)
for {
n, err := serverConn.Read(buf)
tot += n
if err == io.EOF {
return
}
if err != nil {
t.Error(err)
return
}
}
}()
clientConn, err := net.DialUnix("unixpacket", nil, addr)
if err != nil {
// Leaves the server goroutine hanging. Oh well.
t.Fatal(err)
}
defer wg.Wait()
defer clientConn.Close()
const data = "data"
r := bufio.NewReader(strings.NewReader(data))
n, err := io.Copy(clientConn, r)
if err != nil {
t.Fatal(err)
}
if n != int64(len(data)) {
t.Errorf("io.Copy returned %d, want %d", n, len(data))
}
clientConn.Close()
tot := <-ch
if tot != len(data) {
t.Errorf("server read %d, want %d", tot, len(data))
}
}

View File

@@ -260,11 +260,8 @@ func (s *Scanner) setErr(err error) {
}
}
// Buffer controls memory allocation by the Scanner.
// It sets the initial buffer to use when scanning
// Buffer sets the initial buffer to use when scanning
// and the maximum size of buffer that may be allocated during scanning.
// The contents of the buffer are ignored.
//
// The maximum token size must be less than the larger of max and cap(buf).
// If max <= cap(buf), [Scanner.Scan] will use this buffer only and do no allocation.
//
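
A minimal sketch of the Buffer call described above, with an arbitrary 1 MiB cap; Buffer must be called before the first Scan:

    package main

    import (
        "bufio"
        "fmt"
        "strings"
    )

    func main() {
        sc := bufio.NewScanner(strings.NewReader("one line\nanother line\n"))

        // Small initial buffer, allowed to grow to 1 MiB; this also raises the
        // maximum token size from the default bufio.MaxScanTokenSize.
        sc.Buffer(make([]byte, 0, 64), 1<<20)

        for sc.Scan() {
            fmt.Println(sc.Text())
        }
        if err := sc.Err(); err != nil {
            fmt.Println("scan error:", err)
        }
    }
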

View File

@@ -41,7 +41,7 @@ GOROOT="$(cd .. && pwd)"
gettargets() {
../bin/go tool dist list | sed -e 's|/|-|' |
grep -E -v '^(android|ios)' # need C toolchain even for cross-compiling
egrep -v '^(android|ios)' # need C toolchain even for cross-compiling
echo linux-arm-arm5
}

View File

@@ -53,10 +53,10 @@ type int32 int32
// Range: -9223372036854775808 through 9223372036854775807.
type int64 int64
// float32 is the set of all IEEE 754 32-bit floating-point numbers.
// float32 is the set of all IEEE-754 32-bit floating-point numbers.
type float32 float32
// float64 is the set of all IEEE 754 64-bit floating-point numbers.
// float64 is the set of all IEEE-754 64-bit floating-point numbers.
type float64 float64
// complex64 is the set of all complex numbers with float32 real and
@@ -162,12 +162,12 @@ func delete(m map[Type]Type1, key Type)
// The len built-in function returns the length of v, according to its type:
//
// - Array: the number of elements in v.
// - Pointer to array: the number of elements in *v (even if v is nil).
// - Slice, or map: the number of elements in v; if v is nil, len(v) is zero.
// - String: the number of bytes in v.
// - Channel: the number of elements queued (unread) in the channel buffer;
// if v is nil, len(v) is zero.
// Array: the number of elements in v.
// Pointer to array: the number of elements in *v (even if v is nil).
// Slice, or map: the number of elements in v; if v is nil, len(v) is zero.
// String: the number of bytes in v.
// Channel: the number of elements queued (unread) in the channel buffer;
// if v is nil, len(v) is zero.
//
// For some arguments, such as a string literal or a simple array expression, the
// result can be a constant. See the Go language specification's "Length and
@@ -176,12 +176,12 @@ func len(v Type) int
// The cap built-in function returns the capacity of v, according to its type:
//
// - Array: the number of elements in v (same as len(v)).
// - Pointer to array: the number of elements in *v (same as len(v)).
// - Slice: the maximum length the slice can reach when resliced;
// if v is nil, cap(v) is zero.
// - Channel: the channel buffer capacity, in units of elements;
// if v is nil, cap(v) is zero.
// Array: the number of elements in v (same as len(v)).
// Pointer to array: the number of elements in *v (same as len(v)).
// Slice: the maximum length the slice can reach when resliced;
// if v is nil, cap(v) is zero.
// Channel: the channel buffer capacity, in units of elements;
// if v is nil, cap(v) is zero.
//
// For some arguments, such as a simple array expression, the result can be a
// constant. See the Go language specification's "Length and capacity" section for
@@ -194,18 +194,18 @@ func cap(v Type) int
// argument, not a pointer to it. The specification of the result depends on
// the type:
//
// - Slice: The size specifies the length. The capacity of the slice is
// equal to its length. A second integer argument may be provided to
// specify a different capacity; it must be no smaller than the
// length. For example, make([]int, 0, 10) allocates an underlying array
// of size 10 and returns a slice of length 0 and capacity 10 that is
// backed by this underlying array.
// - Map: An empty map is allocated with enough space to hold the
// specified number of elements. The size may be omitted, in which case
// a small starting size is allocated.
// - Channel: The channel's buffer is initialized with the specified
// buffer capacity. If zero, or the size is omitted, the channel is
// unbuffered.
// Slice: The size specifies the length. The capacity of the slice is
// equal to its length. A second integer argument may be provided to
// specify a different capacity; it must be no smaller than the
// length. For example, make([]int, 0, 10) allocates an underlying array
// of size 10 and returns a slice of length 0 and capacity 10 that is
// backed by this underlying array.
// Map: An empty map is allocated with enough space to hold the
// specified number of elements. The size may be omitted, in which case
// a small starting size is allocated.
// Channel: The channel's buffer is initialized with the specified
// buffer capacity. If zero, or the size is omitted, the channel is
// unbuffered.
func make(t Type, size ...IntegerType) Type
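
A small illustration of the make rules listed above for slices, maps, and channels (sizes chosen arbitrarily):

    package main

    import "fmt"

    func main() {
        s := make([]int, 0, 10)        // slice: length 0, capacity 10, one underlying array
        fmt.Println(len(s), cap(s))    // 0 10

        m := make(map[string]int, 100) // map: space hint for about 100 entries
        m["answer"] = 42

        ch := make(chan int, 4)        // channel: buffer capacity 4
        ch <- 1
        fmt.Println(len(ch), cap(ch))  // 1 4
    }
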
// The max built-in function returns the largest value of a fixed number of
@@ -247,7 +247,7 @@ func imag(c ComplexType) FloatType
// to the zero value of the respective element type. If the argument
// type is a type parameter, the type parameter's type set must
// contain only map or slice types, and clear performs the operation
// implied by the type argument. If t is nil, clear is a no-op.
// implied by the type argument.
func clear[T ~[]Type | ~map[Type]Type1](t T)
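
And a matching illustration of clear on a map and a slice:

    package main

    import "fmt"

    func main() {
        m := map[string]int{"a": 1, "b": 2}
        clear(m)            // deletes every entry
        fmt.Println(len(m)) // 0

        s := []int{1, 2, 3}
        clear(s)            // zeroes every element; the length is unchanged
        fmt.Println(s)      // [0 0 0]
    }
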
// The close built-in function closes a channel, which must be either

View File

@@ -21,12 +21,6 @@ type Buffer struct {
buf []byte // contents are the bytes buf[off : len(buf)]
off int // read at &buf[off], write at &buf[len(buf)]
lastRead readOp // last read operation, so that Unread* can work correctly.
// Copying and modifying a non-zero Buffer is prone to error,
// but we cannot employ the noCopy trick used by WaitGroup and Mutex,
// which causes vet's copylocks checker to report misuse, as vet
// cannot reliably distinguish the zero and non-zero cases.
// See #26462, #25907, #47276, #48398 for history.
}
// The readOp constants describe the last action performed on
@@ -68,7 +62,7 @@ func (b *Buffer) AvailableBuffer() []byte { return b.buf[len(b.buf):] }
// String returns the contents of the unread portion of the buffer
// as a string. If the [Buffer] is a nil pointer, it returns "<nil>".
//
// To build strings more efficiently, see the [strings.Builder] type.
// To build strings more efficiently, see the strings.Builder type.
func (b *Buffer) String() string {
if b == nil {
// Special case, useful in debugging.
@@ -77,18 +71,6 @@ func (b *Buffer) String() string {
return string(b.buf[b.off:])
}
// Peek returns the next n bytes without advancing the buffer.
// If Peek returns fewer than n bytes, it also returns [io.EOF].
// The slice is only valid until the next call to a read or write method.
// The slice aliases the buffer content at least until the next buffer modification,
// so immediate changes to the slice will affect the result of future reads.
func (b *Buffer) Peek(n int) ([]byte, error) {
if b.Len() < n {
return b.buf[b.off:], io.EOF
}
return b.buf[b.off : b.off+n], nil
}
// empty reports whether the unread portion of the buffer is empty.
func (b *Buffer) empty() bool { return len(b.buf) <= b.off }
@@ -211,9 +193,9 @@ func (b *Buffer) WriteString(s string) (n int, err error) {
return copy(b.buf[m:], s), nil
}
// MinRead is the minimum slice size passed to a [Buffer.Read] call by
// MinRead is the minimum slice size passed to a Read call by
// [Buffer.ReadFrom]. As long as the [Buffer] has at least MinRead bytes beyond
// what is required to hold the contents of r, [Buffer.ReadFrom] will not grow the
// what is required to hold the contents of r, ReadFrom will not grow the
// underlying buffer.
const MinRead = 512
@@ -265,13 +247,13 @@ func growSlice(b []byte, n int) []byte {
c = 2 * cap(b)
}
b2 := append([]byte(nil), make([]byte, c)...)
i := copy(b2, b)
return b2[:i]
copy(b2, b)
return b2[:len(b)]
}
// WriteTo writes data to w until the buffer is drained or an error occurs.
// The return value n is the number of bytes written; it always fits into an
// int, but it is int64 to match the [io.WriterTo] interface. Any error
// int, but it is int64 to match the io.WriterTo interface. Any error
// encountered during the write is also returned.
func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
b.lastRead = opInvalid
@@ -331,7 +313,7 @@ func (b *Buffer) WriteRune(r rune) (n int, err error) {
// Read reads the next len(p) bytes from the buffer or until the buffer
// is drained. The return value n is the number of bytes read. If the
// buffer has no data to return, err is [io.EOF] (unless len(p) is zero);
// buffer has no data to return, err is io.EOF (unless len(p) is zero);
// otherwise it is nil.
func (b *Buffer) Read(p []byte) (n int, err error) {
b.lastRead = opInvalid
@@ -370,7 +352,7 @@ func (b *Buffer) Next(n int) []byte {
}
// ReadByte reads and returns the next byte from the buffer.
// If no byte is available, it returns error [io.EOF].
// If no byte is available, it returns error io.EOF.
func (b *Buffer) ReadByte() (byte, error) {
if b.empty() {
// Buffer is empty, reset to recover space.
@@ -442,7 +424,7 @@ func (b *Buffer) UnreadByte() error {
// ReadBytes reads until the first occurrence of delim in the input,
// returning a slice containing the data up to and including the delimiter.
// If ReadBytes encounters an error before finding a delimiter,
// it returns the data read before the error and the error itself (often [io.EOF]).
// it returns the data read before the error and the error itself (often io.EOF).
// ReadBytes returns err != nil if and only if the returned data does not end in
// delim.
func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) {
@@ -470,7 +452,7 @@ func (b *Buffer) readSlice(delim byte) (line []byte, err error) {
// ReadString reads until the first occurrence of delim in the input,
// returning a string containing the data up to and including the delimiter.
// If ReadString encounters an error before finding a delimiter,
// it returns the data read before the error and the error itself (often [io.EOF]).
// it returns the data read before the error and the error itself (often io.EOF).
// ReadString returns err != nil if and only if the returned data does not end
// in delim.
func (b *Buffer) ReadString(delim byte) (line string, err error) {

View File

@@ -7,7 +7,6 @@ package bytes_test
import (
. "bytes"
"fmt"
"internal/testenv"
"io"
"math/rand"
"strconv"
@@ -95,22 +94,6 @@ func TestNewBuffer(t *testing.T) {
check(t, "NewBuffer", buf, testString)
}
var buf Buffer
// Calling NewBuffer and immediately shallow copying the Buffer struct
// should not result in any allocations.
// This can be used to reset the underlying []byte of an existing Buffer.
func TestNewBufferShallow(t *testing.T) {
testenv.SkipIfOptimizationOff(t)
n := testing.AllocsPerRun(1000, func() {
buf = *NewBuffer(testBytes)
})
if n > 0 {
t.Errorf("allocations occurred while shallow copying")
}
check(t, "NewBuffer", &buf, testString)
}
func TestNewBufferString(t *testing.T) {
buf := NewBufferString(testString)
check(t, "NewBufferString", buf, testString)
@@ -213,7 +196,7 @@ func TestLargeByteWrites(t *testing.T) {
func TestLargeStringReads(t *testing.T) {
var buf Buffer
for i := 3; i < 30; i += 3 {
s := fillString(t, "TestLargeReads (1)", &buf, "", 5, testString[:len(testString)/i])
s := fillString(t, "TestLargeReads (1)", &buf, "", 5, testString[0:len(testString)/i])
empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(testString)))
}
check(t, "TestLargeStringReads (3)", &buf, "")
@@ -222,7 +205,7 @@ func TestLargeStringReads(t *testing.T) {
func TestLargeByteReads(t *testing.T) {
var buf Buffer
for i := 3; i < 30; i += 3 {
s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[:len(testBytes)/i])
s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(testString)))
}
check(t, "TestLargeByteReads (3)", &buf, "")
@@ -274,7 +257,7 @@ func TestNil(t *testing.T) {
func TestReadFrom(t *testing.T) {
var buf Buffer
for i := 3; i < 30; i += 3 {
s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[:len(testBytes)/i])
s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
var b Buffer
b.ReadFrom(&buf)
empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(testString)))
@@ -337,7 +320,7 @@ func TestReadFromNegativeReader(t *testing.T) {
func TestWriteTo(t *testing.T) {
var buf Buffer
for i := 3; i < 30; i += 3 {
s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[:len(testBytes)/i])
s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
var b Buffer
buf.WriteTo(&b)
empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(testString)))
@@ -354,7 +337,7 @@ func TestWriteAppend(t *testing.T) {
got.Write(b)
}
if !Equal(got.Bytes(), want) {
t.Fatalf("Bytes() = %q, want %q", &got, want)
t.Fatalf("Bytes() = %q, want %q", got, want)
}
// With a sufficiently sized buffer, there should be no allocations.
@@ -531,40 +514,6 @@ func TestReadString(t *testing.T) {
}
}
var peekTests = []struct {
buffer string
skip int
n int
expected string
err error
}{
{"", 0, 0, "", nil},
{"aaa", 0, 3, "aaa", nil},
{"foobar", 0, 2, "fo", nil},
{"a", 0, 2, "a", io.EOF},
{"helloworld", 4, 3, "owo", nil},
{"helloworld", 5, 5, "world", nil},
{"helloworld", 5, 6, "world", io.EOF},
{"helloworld", 10, 1, "", io.EOF},
}
func TestPeek(t *testing.T) {
for _, test := range peekTests {
buf := NewBufferString(test.buffer)
buf.Next(test.skip)
bytes, err := buf.Peek(test.n)
if string(bytes) != test.expected {
t.Errorf("expected %q, got %q", test.expected, bytes)
}
if err != test.err {
t.Errorf("expected error %v, got %v", test.err, err)
}
if buf.Len() != len(test.buffer)-test.skip {
t.Errorf("bad length after peek: %d, want %d", buf.Len(), len(test.buffer)-test.skip)
}
}
}
func BenchmarkReadString(b *testing.B) {
const n = 32 << 10

View File

@@ -8,10 +8,8 @@ package bytes
import (
"internal/bytealg"
"math/bits"
"unicode"
"unicode/utf8"
_ "unsafe" // for linkname
)
// Equal reports whether a and b
@@ -134,10 +132,9 @@ func LastIndexByte(s []byte, c byte) int {
// IndexRune interprets s as a sequence of UTF-8-encoded code points.
// It returns the byte index of the first occurrence in s of the given rune.
// It returns -1 if rune is not present in s.
// If r is [utf8.RuneError], it returns the first instance of any
// If r is utf8.RuneError, it returns the first instance of any
// invalid UTF-8 byte sequence.
func IndexRune(s []byte, r rune) int {
const haveFastIndex = bytealg.MaxBruteForce > 0
switch {
case 0 <= r && r < utf8.RuneSelf:
return IndexByte(s, byte(r))
@@ -153,64 +150,9 @@ func IndexRune(s []byte, r rune) int {
case !utf8.ValidRune(r):
return -1
default:
// Search for rune r using the last byte of its UTF-8 encoded form.
// The distribution of the last byte is more uniform compared to the
// first byte which has a 78% chance of being [240, 243, 244].
var b [utf8.UTFMax]byte
n := utf8.EncodeRune(b[:], r)
last := n - 1
i := last
fails := 0
for i < len(s) {
if s[i] != b[last] {
o := IndexByte(s[i+1:], b[last])
if o < 0 {
return -1
}
i += o + 1
}
// Step backwards comparing bytes.
for j := 1; j < n; j++ {
if s[i-j] != b[last-j] {
goto next
}
}
return i - last
next:
fails++
i++
if (haveFastIndex && fails > bytealg.Cutover(i)) && i < len(s) ||
(!haveFastIndex && fails >= 4+i>>4 && i < len(s)) {
goto fallback
}
}
return -1
fallback:
// Switch to bytealg.Index, if available, or a brute force search when
// IndexByte returns too many false positives.
if haveFastIndex {
if j := bytealg.Index(s[i-last:], b[:n]); j >= 0 {
return i + j - last
}
} else {
// If bytealg.Index is not available a brute force search is
// ~1.5-3x faster than Rabin-Karp since n is small.
c0 := b[last]
c1 := b[last-1] // There are at least 2 chars to match
loop:
for ; i < len(s); i++ {
if s[i] == c0 && s[i-1] == c1 {
for k := 2; k < n; k++ {
if s[i-k] != b[last-k] {
continue loop
}
}
return i - last
}
}
}
return -1
return Index(s, b[:n])
}
}
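
Either form of IndexRune above gives the same observable behaviour for callers; a brief usage sketch:

    package main

    import (
        "bytes"
        "fmt"
        "unicode/utf8"
    )

    func main() {
        b := []byte("héllo, 世界")
        fmt.Println(bytes.IndexRune(b, '世')) // 8 (byte index, not rune index)
        fmt.Println(bytes.IndexRune(b, 'x')) // -1

        // utf8.RuneError matches the first invalid UTF-8 byte sequence.
        fmt.Println(bytes.IndexRune([]byte("ok\xffzz"), utf8.RuneError)) // 2
    }
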
@@ -412,20 +354,22 @@ func genSplit(s, sep []byte, sepSave, n int) [][]byte {
// the subslices between those separators.
// If sep is empty, SplitN splits after each UTF-8 sequence.
// The count determines the number of subslices to return:
// - n > 0: at most n subslices; the last subslice will be the unsplit remainder;
// - n == 0: the result is nil (zero subslices);
// - n < 0: all subslices.
//
// To split around the first instance of a separator, see [Cut].
// n > 0: at most n subslices; the last subslice will be the unsplit remainder.
// n == 0: the result is nil (zero subslices)
// n < 0: all subslices
//
// To split around the first instance of a separator, see Cut.
func SplitN(s, sep []byte, n int) [][]byte { return genSplit(s, sep, 0, n) }
// SplitAfterN slices s into subslices after each instance of sep and
// returns a slice of those subslices.
// If sep is empty, SplitAfterN splits after each UTF-8 sequence.
// The count determines the number of subslices to return:
// - n > 0: at most n subslices; the last subslice will be the unsplit remainder;
// - n == 0: the result is nil (zero subslices);
// - n < 0: all subslices.
//
// n > 0: at most n subslices; the last subslice will be the unsplit remainder.
// n == 0: the result is nil (zero subslices)
// n < 0: all subslices
func SplitAfterN(s, sep []byte, n int) [][]byte {
return genSplit(s, sep, len(sep), n)
}
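
A compact illustration of the SplitN and SplitAfterN count semantics documented above:

    package main

    import (
        "bytes"
        "fmt"
    )

    func main() {
        b := []byte("a,b,c,d")
        sep := []byte(",")

        fmt.Printf("%q\n", bytes.SplitN(b, sep, 2))  // ["a" "b,c,d"]
        fmt.Printf("%q\n", bytes.SplitN(b, sep, -1)) // ["a" "b" "c" "d"]
        fmt.Printf("%q\n", bytes.SplitN(b, sep, 0))  // []

        fmt.Printf("%q\n", bytes.SplitAfterN(b, sep, 2)) // ["a," "b,c,d"]
    }
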
@@ -435,7 +379,7 @@ func SplitAfterN(s, sep []byte, n int) [][]byte {
// If sep is empty, Split splits after each UTF-8 sequence.
// It is equivalent to SplitN with a count of -1.
//
// To split around the first instance of a separator, see [Cut].
// To split around the first instance of a separator, see Cut.
func Split(s, sep []byte) [][]byte { return genSplit(s, sep, 0, -1) }
// SplitAfter slices s into all subslices after each instance of sep and
@@ -450,10 +394,8 @@ var asciiSpace = [256]uint8{'\t': 1, '\n': 1, '\v': 1, '\f': 1, '\r': 1, ' ': 1}
// Fields interprets s as a sequence of UTF-8-encoded code points.
// It splits the slice s around each instance of one or more consecutive white space
// characters, as defined by [unicode.IsSpace], returning a slice of subslices of s or an
// empty slice if s contains only white space. Every element of the returned slice is
// non-empty. Unlike [Split], leading and trailing runs of white space characters
// are discarded.
// characters, as defined by unicode.IsSpace, returning a slice of subslices of s or an
// empty slice if s contains only white space.
func Fields(s []byte) [][]byte {
// First count the fields.
// This is an exact count if s is ASCII, otherwise it is an approximation.
@@ -507,9 +449,7 @@ func Fields(s []byte) [][]byte {
// FieldsFunc interprets s as a sequence of UTF-8-encoded code points.
// It splits the slice s at each run of code points c satisfying f(c) and
// returns a slice of subslices of s. If all code points in s satisfy f(c), or
// len(s) == 0, an empty slice is returned. Every element of the returned slice is
// non-empty. Unlike [Split], leading and trailing runs of code points
// satisfying f(c) are discarded.
// len(s) == 0, an empty slice is returned.
//
// FieldsFunc makes no guarantees about the order in which it calls f(c)
// and assumes that f always returns the same value for a given c.
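
A short sketch of Fields and FieldsFunc as documented above (the digit-splitting input is an arbitrary example):

    package main

    import (
        "bytes"
        "fmt"
        "unicode"
    )

    func main() {
        fmt.Printf("%q\n", bytes.Fields([]byte("  foo bar  baz   ")))
        // ["foo" "bar" "baz"]

        fmt.Printf("%q\n", bytes.FieldsFunc([]byte("abc123def456"), unicode.IsDigit))
        // ["abc" "def"]
    }
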
@@ -528,7 +468,11 @@ func FieldsFunc(s []byte, f func(rune) bool) [][]byte {
// more efficient, possibly due to cache effects.
start := -1 // valid span start if >= 0
for i := 0; i < len(s); {
r, size := utf8.DecodeRune(s[i:])
size := 1
r := rune(s[i])
if r >= utf8.RuneSelf {
r, size = utf8.DecodeRune(s[i:])
}
if f(r) {
if start >= 0 {
spans = append(spans, span{start, i})
@@ -581,7 +525,7 @@ func Join(s [][]byte, sep []byte) []byte {
n += len(v)
}
b := bytealg.MakeNoZero(n)[:n:n]
b := bytealg.MakeNoZero(n)
bp := copy(b, s[0])
for _, v := range s[1:] {
bp += copy(b[bp:], sep)
@@ -592,7 +536,7 @@ func Join(s [][]byte, sep []byte) []byte {
// HasPrefix reports whether the byte slice s begins with prefix.
func HasPrefix(s, prefix []byte) bool {
return len(s) >= len(prefix) && Equal(s[:len(prefix)], prefix)
return len(s) >= len(prefix) && Equal(s[0:len(prefix)], prefix)
}
// HasSuffix reports whether the byte slice s ends with suffix.
@@ -610,7 +554,11 @@ func Map(mapping func(r rune) rune, s []byte) []byte {
// fine. It could also shrink but that falls out naturally.
b := make([]byte, 0, len(s))
for i := 0; i < len(s); {
r, wid := utf8.DecodeRune(s[i:])
wid := 1
r := rune(s[i])
if r >= utf8.RuneSelf {
r, wid = utf8.DecodeRune(s[i:])
}
r = mapping(r)
if r >= 0 {
b = utf8.AppendRune(b, r)
@@ -620,18 +568,6 @@ func Map(mapping func(r rune) rune, s []byte) []byte {
return b
}
// Despite being an exported symbol,
// Repeat is linknamed by widely used packages.
// Notable members of the hall of shame include:
// - gitee.com/quant1x/num
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
// Note that this comment is not part of the doc comment.
//
//go:linkname Repeat
// Repeat returns a new byte slice consisting of count copies of b.
//
// It panics if count is negative or if the result of (len(b) * count)
@@ -647,11 +583,10 @@ func Repeat(b []byte, count int) []byte {
if count < 0 {
panic("bytes: negative Repeat count")
}
hi, lo := bits.Mul(uint(len(b)), uint(count))
if hi > 0 || lo > uint(maxInt) {
if len(b) >= maxInt/count {
panic("bytes: Repeat output length overflow")
}
n := int(lo) // lo = len(b) * count
n := len(b) * count
if len(b) == 0 {
return []byte{}
@@ -675,10 +610,13 @@ func Repeat(b []byte, count int) []byte {
chunkMax = len(b)
}
}
nb := bytealg.MakeNoZero(n)[:n:n]
nb := bytealg.MakeNoZero(n)
bp := copy(nb, b)
for bp < n {
chunk := min(bp, chunkMax)
chunk := bp
if chunk > chunkMax {
chunk = chunkMax
}
bp += copy(nb[bp:], nb[:chunk])
}
return nb
@@ -702,7 +640,7 @@ func ToUpper(s []byte) []byte {
// Just return a copy.
return append([]byte(""), s...)
}
b := bytealg.MakeNoZero(len(s))[:len(s):len(s)]
b := bytealg.MakeNoZero(len(s))
for i := 0; i < len(s); i++ {
c := s[i]
if 'a' <= c && c <= 'z' {
@@ -732,7 +670,7 @@ func ToLower(s []byte) []byte {
if !hasUpper {
return append([]byte(""), s...)
}
b := bytealg.MakeNoZero(len(s))[:len(s):len(s)]
b := bytealg.MakeNoZero(len(s))
for i := 0; i < len(s); i++ {
c := s[i]
if 'A' <= c && c <= 'Z' {
@@ -909,7 +847,11 @@ func LastIndexFunc(s []byte, f func(r rune) bool) int {
func indexFunc(s []byte, f func(r rune) bool, truth bool) int {
start := 0
for start < len(s) {
r, wid := utf8.DecodeRune(s[start:])
wid := 1
r := rune(s[start])
if r >= utf8.RuneSelf {
r, wid = utf8.DecodeRune(s[start:])
}
if f(r) == truth {
return start
}
@@ -1040,7 +982,10 @@ func trimLeftASCII(s []byte, as *asciiSet) []byte {
func trimLeftUnicode(s []byte, cutset string) []byte {
for len(s) > 0 {
r, n := utf8.DecodeRune(s)
r, n := rune(s[0]), 1
if r >= utf8.RuneSelf {
r, n = utf8.DecodeRune(s)
}
if !containsRune(cutset, r) {
break
}
@@ -1102,34 +1047,41 @@ func trimRightUnicode(s []byte, cutset string) []byte {
// TrimSpace returns a subslice of s by slicing off all leading and
// trailing white space, as defined by Unicode.
func TrimSpace(s []byte) []byte {
// Fast path for ASCII: look for the first ASCII non-space byte.
for lo, c := range s {
// Fast path for ASCII: look for the first ASCII non-space byte
start := 0
for ; start < len(s); start++ {
c := s[start]
if c >= utf8.RuneSelf {
// If we run into a non-ASCII byte, fall back to the
// slower unicode-aware method on the remaining bytes.
return TrimFunc(s[lo:], unicode.IsSpace)
// slower unicode-aware method on the remaining bytes
return TrimFunc(s[start:], unicode.IsSpace)
}
if asciiSpace[c] != 0 {
continue
}
s = s[lo:]
// Now look for the first ASCII non-space byte from the end.
for hi := len(s) - 1; hi >= 0; hi-- {
c := s[hi]
if c >= utf8.RuneSelf {
return TrimFunc(s[:hi+1], unicode.IsSpace)
}
if asciiSpace[c] == 0 {
// At this point, s[:hi+1] starts and ends with ASCII
// non-space bytes, so we're done. Non-ASCII cases have
// already been handled above.
return s[:hi+1]
}
if asciiSpace[c] == 0 {
break
}
}
// Special case to preserve previous TrimLeftFunc behavior,
// returning nil instead of empty slice if all spaces.
return nil
// Now look for the first ASCII non-space byte from the end
stop := len(s)
for ; stop > start; stop-- {
c := s[stop-1]
if c >= utf8.RuneSelf {
return TrimFunc(s[start:stop], unicode.IsSpace)
}
if asciiSpace[c] == 0 {
break
}
}
// At this point s[start:stop] starts and ends with an ASCII
// non-space bytes, so we're done. Non-ASCII cases have already
// been handled above.
if start == stop {
// Special case to preserve previous TrimLeftFunc behavior,
// returning nil instead of empty slice if all spaces.
return nil
}
return s[start:stop]
}
// Runes interprets s as a sequence of UTF-8-encoded code points.
@@ -1170,22 +1122,19 @@ func Replace(s, old, new []byte, n int) []byte {
t := make([]byte, len(s)+n*(len(new)-len(old)))
w := 0
start := 0
if len(old) > 0 {
for range n {
j := start + Index(s[start:], old)
w += copy(t[w:], s[start:j])
w += copy(t[w:], new)
start = j + len(old)
for i := 0; i < n; i++ {
j := start
if len(old) == 0 {
if i > 0 {
_, wid := utf8.DecodeRune(s[start:])
j += wid
}
} else {
j += Index(s[start:], old)
}
} else { // len(old) == 0
w += copy(t[w:], s[start:j])
w += copy(t[w:], new)
for range n - 1 {
_, wid := utf8.DecodeRune(s[start:])
j := start + wid
w += copy(t[w:], s[start:j])
w += copy(t[w:], new)
start = j
}
start = j + len(old)
}
w += copy(t[w:], s[start:])
return t[0:w]
@@ -1206,7 +1155,7 @@ func ReplaceAll(s, old, new []byte) []byte {
func EqualFold(s, t []byte) bool {
// ASCII fast path
i := 0
for n := min(len(s), len(t)); i < n; i++ {
for ; i < len(s) && i < len(t); i++ {
sr := s[i]
tr := t[i]
if sr|tr >= utf8.RuneSelf {
@@ -1236,10 +1185,19 @@ hasUnicode:
t = t[i:]
for len(s) != 0 && len(t) != 0 {
// Extract first rune from each.
sr, size := utf8.DecodeRune(s)
s = s[size:]
tr, size := utf8.DecodeRune(t)
t = t[size:]
var sr, tr rune
if s[0] < utf8.RuneSelf {
sr, s = rune(s[0]), s[1:]
} else {
r, size := utf8.DecodeRune(s)
sr, s = r, s[size:]
}
if t[0] < utf8.RuneSelf {
tr, t = rune(t[0]), t[1:]
} else {
r, size := utf8.DecodeRune(t)
tr, t = r, t[size:]
}
// If they match, keep going; if not, return false.

View File

@@ -1,21 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build js && wasm
package bytes_test
import (
"bytes"
"testing"
)
func TestIssue65571(t *testing.T) {
b := make([]byte, 1<<31+1)
b[1<<31] = 1
i := bytes.IndexByte(b, 1)
if i != 1<<31 {
t.Errorf("IndexByte(b, 1) = %d; want %d", i, 1<<31)
}
}

View File

@@ -7,12 +7,10 @@ package bytes_test
import (
. "bytes"
"fmt"
"internal/asan"
"internal/testenv"
"iter"
"math"
"math/rand"
"slices"
"reflect"
"strings"
"testing"
"unicode"
@@ -20,6 +18,18 @@ import (
"unsafe"
)
func eq(a, b []string) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
if a[i] != b[i] {
return false
}
}
return true
}
func sliceOfString(s [][]byte) []string {
result := make([]string, len(s))
for i, v := range s {
@@ -28,37 +38,6 @@ func sliceOfString(s [][]byte) []string {
return result
}
func collect(t *testing.T, seq iter.Seq[[]byte]) [][]byte {
out := slices.Collect(seq)
out1 := slices.Collect(seq)
if !slices.Equal(sliceOfString(out), sliceOfString(out1)) {
t.Fatalf("inconsistent seq:\n%s\n%s", out, out1)
}
return out
}
type LinesTest struct {
a string
b []string
}
var linesTests = []LinesTest{
{a: "abc\nabc\n", b: []string{"abc\n", "abc\n"}},
{a: "abc\r\nabc", b: []string{"abc\r\n", "abc"}},
{a: "abc\r\n", b: []string{"abc\r\n"}},
{a: "\nabc", b: []string{"\n", "abc"}},
{a: "\nabc\n\n", b: []string{"\n", "abc\n", "\n"}},
}
func TestLines(t *testing.T) {
for _, s := range linesTests {
result := sliceOfString(slices.Collect(Lines([]byte(s.a))))
if !slices.Equal(result, s.b) {
t.Errorf(`slices.Collect(Lines(%q)) = %q; want %q`, s.a, result, s.b)
}
}
}
// For ease of reading, the test cases use strings that are converted to byte
// slices before invoking the functions.
@@ -198,11 +177,6 @@ var indexTests = []BinOpTest{
{"oxoxoxoxoxoxoxoxoxoxoxox", "oy", -1},
// test fallback to Rabin-Karp.
{"000000000000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000000001", 5},
// test fallback to IndexRune
{"oxoxoxoxoxoxoxoxoxoxox☺", "☺", 22},
// invalid UTF-8 byte sequence (must be longer than bytealg.MaxBruteForce to
// test that we don't use IndexRune)
{"xx0123456789012345678901234567890123456789012345678901234567890120123456789012345678901234567890123456xxx\xed\x9f\xc0", "\xed\x9f\xc0", 105},
}
var lastIndexTests = []BinOpTest{
@@ -451,31 +425,6 @@ func TestIndexRune(t *testing.T) {
{"some_text=some_value", '=', 9},
{"☺a", 'a', 3},
{"a☻☺b", '☺', 4},
{"𠀳𠀗𠀾𠁄𠀧𠁆𠁂𠀫𠀖𠀪𠀲𠀴𠁀𠀨𠀿", '𠀿', 56},
// 2 bytes
{"ӆ", 'ӆ', 0},
{"a", 'ӆ', -1},
{" ӆ", 'ӆ', 2},
{" a", 'ӆ', -1},
{strings.Repeat("ц", 64) + "ӆ", 'ӆ', 128}, // test cutover
{strings.Repeat("ц", 64), 'ӆ', -1},
// 3 bytes
{"Ꚁ", 'Ꚁ', 0},
{"a", 'Ꚁ', -1},
{" Ꚁ", 'Ꚁ', 2},
{" a", 'Ꚁ', -1},
{strings.Repeat("Ꙁ", 64) + "Ꚁ", 'Ꚁ', 192}, // test cutover
{strings.Repeat("Ꙁ", 64) + "Ꚁ", '䚀', -1}, // 'Ꚁ' and '䚀' share the same last two bytes
// 4 bytes
{"𡌀", '𡌀', 0},
{"a", '𡌀', -1},
{" 𡌀", '𡌀', 2},
{" a", '𡌀', -1},
{strings.Repeat("𡋀", 64) + "𡌀", '𡌀', 256}, // test cutover
{strings.Repeat("𡋀", 64) + "𡌀", '𣌀', -1}, // '𡌀' and '𣌀' share the same last two bytes
// RuneError should match any invalid UTF-8 byte sequence.
{"<22>", '<27>', 0},
@@ -489,13 +438,6 @@ func TestIndexRune(t *testing.T) {
{"a☺b☻c☹d\xe2\x98<39>\xff<66>\xed\xa0\x80", -1, -1},
{"a☺b☻c☹d\xe2\x98<39>\xff<66>\xed\xa0\x80", 0xD800, -1}, // Surrogate pair
{"a☺b☻c☹d\xe2\x98<39>\xff<66>\xed\xa0\x80", utf8.MaxRune + 1, -1},
// Test the cutover to bytealg.Index when it is triggered in
// the middle of rune that contains consecutive runs of equal bytes.
{"aaaaa\U000bc104", '\U000bc104', 17}, // cutover: (n + 16) / 8
{"aaaaa鄄", '鄄', 17},
{"aaa\U000bc104", '\U000bc104', 18}, // cutover: 4 + n>>4
{"aaa鄄", '鄄', 18},
}
for _, tt := range tests {
if got := IndexRune([]byte(tt.in), tt.rune); got != tt.want {
@@ -643,21 +585,6 @@ func BenchmarkIndexRuneASCII(b *testing.B) {
benchBytes(b, indexSizes, bmIndexRuneASCII(IndexRune))
}
func BenchmarkIndexRuneUnicode(b *testing.B) {
b.Run("Latin", func(b *testing.B) {
// Latin is mostly 1, 2, 3 byte runes.
benchBytes(b, indexSizes, bmIndexRuneUnicode(unicode.Latin, 'é'))
})
b.Run("Cyrillic", func(b *testing.B) {
// Cyrillic is mostly 2 and 3 byte runes.
benchBytes(b, indexSizes, bmIndexRuneUnicode(unicode.Cyrillic, 'Ꙁ'))
})
b.Run("Han", func(b *testing.B) {
// Han consists only of 3 and 4 byte runes.
benchBytes(b, indexSizes, bmIndexRuneUnicode(unicode.Han, '𠀿'))
})
}
func bmIndexRuneASCII(index func([]byte, rune) int) func(b *testing.B, n int) {
return func(b *testing.B, n int) {
buf := bmbuf[0:n]
@@ -688,61 +615,6 @@ func bmIndexRune(index func([]byte, rune) int) func(b *testing.B, n int) {
}
}
func bmIndexRuneUnicode(rt *unicode.RangeTable, needle rune) func(b *testing.B, n int) {
var rs []rune
for _, r16 := range rt.R16 {
for r := rune(r16.Lo); r <= rune(r16.Hi); r += rune(r16.Stride) {
if r != needle {
rs = append(rs, r)
}
}
}
for _, r32 := range rt.R32 {
for r := rune(r32.Lo); r <= rune(r32.Hi); r += rune(r32.Stride) {
if r != needle {
rs = append(rs, r)
}
}
}
// Shuffle the runes so that they are not in descending order.
// The sort is deterministic since this is used for benchmarks,
// which need to be repeatable.
rr := rand.New(rand.NewSource(1))
rr.Shuffle(len(rs), func(i, j int) {
rs[i], rs[j] = rs[j], rs[i]
})
uchars := string(rs)
return func(b *testing.B, n int) {
buf := bmbuf[0:n]
o := copy(buf, uchars)
for o < len(buf) {
o += copy(buf[o:], uchars)
}
// Make space for the needle rune at the end of buf.
m := utf8.RuneLen(needle)
for o := m; o > 0; {
_, sz := utf8.DecodeLastRune(buf)
copy(buf[len(buf)-sz:], "\x00\x00\x00\x00")
buf = buf[:len(buf)-sz]
o -= sz
}
buf = utf8.AppendRune(buf[:n-m], needle)
n -= m // adjust for rune len
for i := 0; i < b.N; i++ {
j := IndexRune(buf, needle)
if j != n {
b.Fatal("bad index", j)
}
}
for i := range buf {
buf[i] = '\x00'
}
}
}
func BenchmarkEqual(b *testing.B) {
b.Run("0", func(b *testing.B) {
var buf [4]byte
@@ -757,11 +629,6 @@ func BenchmarkEqual(b *testing.B) {
})
sizes := []int{1, 6, 9, 15, 16, 20, 32, 4 << 10, 4 << 20, 64 << 20}
b.Run("same", func(b *testing.B) {
benchBytes(b, sizes, bmEqual(func(a, b []byte) bool { return Equal(a, a) }))
})
benchBytes(b, sizes, bmEqual(Equal))
}
@@ -891,7 +758,9 @@ func BenchmarkCountSingle(b *testing.B) {
b.Fatal("bad count", j, expect)
}
}
clear(buf)
for i := 0; i < len(buf); i++ {
buf[i] = 0
}
})
}
@@ -934,18 +803,10 @@ func TestSplit(t *testing.T) {
}
result := sliceOfString(a)
if !slices.Equal(result, tt.a) {
if !eq(result, tt.a) {
t.Errorf(`Split(%q, %q, %d) = %v; want %v`, tt.s, tt.sep, tt.n, result, tt.a)
continue
}
if tt.n < 0 {
b := sliceOfString(slices.Collect(SplitSeq([]byte(tt.s), []byte(tt.sep))))
if !slices.Equal(b, tt.a) {
t.Errorf(`collect(SplitSeq(%q, %q)) = %v; want %v`, tt.s, tt.sep, b, tt.a)
}
}
if tt.n == 0 || len(a) == 0 {
continue
}
@@ -959,9 +820,9 @@ func TestSplit(t *testing.T) {
t.Errorf(`Join(Split(%q, %q, %d), %q) = %q`, tt.s, tt.sep, tt.n, tt.sep, s)
}
if tt.n < 0 {
b := sliceOfString(Split([]byte(tt.s), []byte(tt.sep)))
if !slices.Equal(result, b) {
t.Errorf("Split disagrees with SplitN(%q, %q, %d) = %v; want %v", tt.s, tt.sep, tt.n, b, a)
b := Split([]byte(tt.s), []byte(tt.sep))
if !reflect.DeepEqual(a, b) {
t.Errorf("Split disagrees withSplitN(%q, %q, %d) = %v; want %v", tt.s, tt.sep, tt.n, b, a)
}
}
if len(a) > 0 {
@@ -1000,18 +861,11 @@ func TestSplitAfter(t *testing.T) {
}
result := sliceOfString(a)
if !slices.Equal(result, tt.a) {
if !eq(result, tt.a) {
t.Errorf(`Split(%q, %q, %d) = %v; want %v`, tt.s, tt.sep, tt.n, result, tt.a)
continue
}
if tt.n < 0 {
b := sliceOfString(slices.Collect(SplitAfterSeq([]byte(tt.s), []byte(tt.sep))))
if !slices.Equal(b, tt.a) {
t.Errorf(`collect(SplitAfterSeq(%q, %q)) = %v; want %v`, tt.s, tt.sep, b, tt.a)
}
}
if want := tt.a[len(tt.a)-1] + "z"; string(x) != want {
t.Errorf("last appended result was %s; want %s", x, want)
}
@@ -1021,9 +875,9 @@ func TestSplitAfter(t *testing.T) {
t.Errorf(`Join(Split(%q, %q, %d), %q) = %q`, tt.s, tt.sep, tt.n, tt.sep, s)
}
if tt.n < 0 {
b := sliceOfString(SplitAfter([]byte(tt.s), []byte(tt.sep)))
if !slices.Equal(result, b) {
t.Errorf("SplitAfter disagrees with SplitAfterN(%q, %q, %d) = %v; want %v", tt.s, tt.sep, tt.n, b, a)
b := SplitAfter([]byte(tt.s), []byte(tt.sep))
if !reflect.DeepEqual(a, b) {
t.Errorf("SplitAfter disagrees withSplitAfterN(%q, %q, %d) = %v; want %v", tt.s, tt.sep, tt.n, b, a)
}
}
}
@@ -1060,16 +914,11 @@ func TestFields(t *testing.T) {
}
result := sliceOfString(a)
if !slices.Equal(result, tt.a) {
if !eq(result, tt.a) {
t.Errorf("Fields(%q) = %v; want %v", tt.s, a, tt.a)
continue
}
result2 := sliceOfString(collect(t, FieldsSeq([]byte(tt.s))))
if !slices.Equal(result2, tt.a) {
t.Errorf(`collect(FieldsSeq(%q)) = %v; want %v`, tt.s, result2, tt.a)
}
if string(b) != tt.s {
t.Errorf("slice changed to %s; want %s", string(b), tt.s)
}
@@ -1085,7 +934,7 @@ func TestFieldsFunc(t *testing.T) {
for _, tt := range fieldstests {
a := FieldsFunc([]byte(tt.s), unicode.IsSpace)
result := sliceOfString(a)
if !slices.Equal(result, tt.a) {
if !eq(result, tt.a) {
t.Errorf("FieldsFunc(%q, unicode.IsSpace) = %v; want %v", tt.s, a, tt.a)
continue
}
@@ -1108,15 +957,10 @@ func TestFieldsFunc(t *testing.T) {
}
result := sliceOfString(a)
if !slices.Equal(result, tt.a) {
if !eq(result, tt.a) {
t.Errorf("FieldsFunc(%q) = %v, want %v", tt.s, a, tt.a)
}
result2 := sliceOfString(collect(t, FieldsFuncSeq([]byte(tt.s), pred)))
if !slices.Equal(result2, tt.a) {
t.Errorf(`collect(FieldsFuncSeq(%q)) = %v; want %v`, tt.s, result2, tt.a)
}
if string(b) != tt.s {
t.Errorf("slice changed to %s; want %s", b, tt.s)
}
@@ -1224,7 +1068,7 @@ func TestMap(t *testing.T) {
// Run a couple of awful growth/shrinkage tests
a := tenRunes('a')
// 1. Grow. This triggers two reallocations in Map.
// 1. Grow. This triggers two reallocations in Map.
maxRune := func(r rune) rune { return unicode.MaxRune }
m := Map(maxRune, []byte(a))
expect := tenRunes(unicode.MaxRune)
@@ -1393,48 +1237,45 @@ func repeat(b []byte, count int) (err error) {
// See Issue golang.org/issue/16237
func TestRepeatCatchesOverflow(t *testing.T) {
type testCase struct {
tests := [...]struct {
s string
count int
errStr string
}
runTestCases := func(prefix string, tests []testCase) {
for i, tt := range tests {
err := repeat([]byte(tt.s), tt.count)
if tt.errStr == "" {
if err != nil {
t.Errorf("#%d panicked %v", i, err)
}
continue
}
if err == nil || !strings.Contains(err.Error(), tt.errStr) {
t.Errorf("%s#%d got %q want %q", prefix, i, err, tt.errStr)
}
}
}
const maxInt = int(^uint(0) >> 1)
runTestCases("", []testCase{
}{
0: {"--", -2147483647, "negative"},
1: {"", maxInt, ""},
1: {"", int(^uint(0) >> 1), ""},
2: {"-", 10, ""},
3: {"gopher", 0, ""},
4: {"-", -1, "negative"},
5: {"--", -102, "negative"},
6: {string(make([]byte, 255)), int((^uint(0))/255 + 1), "overflow"},
})
const is64Bit = 1<<(^uintptr(0)>>63)/2 != 0
if !is64Bit {
return
}
runTestCases("64-bit", []testCase{
0: {"-", maxInt, "out of range"},
})
for i, tt := range tests {
err := repeat([]byte(tt.s), tt.count)
if tt.errStr == "" {
if err != nil {
t.Errorf("#%d panicked %v", i, err)
}
continue
}
if err == nil || !strings.Contains(err.Error(), tt.errStr) {
t.Errorf("#%d expected %q got %q", i, tt.errStr, err)
}
}
}
func runesEqual(a, b []rune) bool {
if len(a) != len(b) {
return false
}
for i, r := range a {
if r != b[i] {
return false
}
}
return true
}
type RunesTest struct {
@@ -1457,7 +1298,7 @@ func TestRunes(t *testing.T) {
for _, tt := range RunesTests {
tin := []byte(tt.in)
a := Runes(tin)
if !slices.Equal(a, tt.out) {
if !runesEqual(a, tt.out) {
t.Errorf("Runes(%q) = %v; want %v", tin, a, tt.out)
continue
}
@@ -1785,20 +1626,9 @@ var ReplaceTests = []ReplaceTest{
func TestReplace(t *testing.T) {
for _, tt := range ReplaceTests {
var (
in = []byte(tt.in)
old = []byte(tt.old)
new = []byte(tt.new)
)
if !asan.Enabled {
allocs := testing.AllocsPerRun(10, func() { Replace(in, old, new, tt.n) })
if allocs > 1 {
t.Errorf("Replace(%q, %q, %q, %d) allocates %.2f objects", tt.in, tt.old, tt.new, tt.n, allocs)
}
}
in = append(in, "<spare>"...)
in := append([]byte(tt.in), "<spare>"...)
in = in[:len(tt.in)]
out := Replace(in, old, new, tt.n)
out := Replace(in, []byte(tt.old), []byte(tt.new), tt.n)
if s := string(out); s != tt.out {
t.Errorf("Replace(%q, %q, %q, %d) = %q, want %q", tt.in, tt.old, tt.new, tt.n, s, tt.out)
}
@@ -1806,7 +1636,7 @@ func TestReplace(t *testing.T) {
t.Errorf("Replace(%q, %q, %q, %d) didn't copy", tt.in, tt.old, tt.new, tt.n)
}
if tt.n == -1 {
out := ReplaceAll(in, old, new)
out := ReplaceAll(in, []byte(tt.old), []byte(tt.new))
if s := string(out); s != tt.out {
t.Errorf("ReplaceAll(%q, %q, %q) = %q, want %q", tt.in, tt.old, tt.new, s, tt.out)
}
@@ -1814,69 +1644,6 @@ func TestReplace(t *testing.T) {
}
}
func FuzzReplace(f *testing.F) {
for _, tt := range ReplaceTests {
f.Add([]byte(tt.in), []byte(tt.old), []byte(tt.new), tt.n)
}
f.Fuzz(func(t *testing.T, in, old, new []byte, n int) {
differentImpl := func(in, old, new []byte, n int) []byte {
var out Buffer
if n < 0 {
n = math.MaxInt
}
for i := 0; i < len(in); {
if n == 0 {
out.Write(in[i:])
break
}
if HasPrefix(in[i:], old) {
out.Write(new)
i += len(old)
n--
if len(old) != 0 {
continue
}
if i == len(in) {
break
}
}
if len(old) == 0 {
_, length := utf8.DecodeRune(in[i:])
out.Write(in[i : i+length])
i += length
} else {
out.WriteByte(in[i])
i++
}
}
if len(old) == 0 && n != 0 {
out.Write(new)
}
return out.Bytes()
}
if simple, replace := differentImpl(in, old, new, n), Replace(in, old, new, n); !slices.Equal(simple, replace) {
t.Errorf("The two implementations do not match %q != %q for Replace(%q, %q, %q, %d)", simple, replace, in, old, new, n)
}
})
}
func BenchmarkReplace(b *testing.B) {
for _, tt := range ReplaceTests {
desc := fmt.Sprintf("%q %q %q %d", tt.in, tt.old, tt.new, tt.n)
var (
in = []byte(tt.in)
old = []byte(tt.old)
new = []byte(tt.new)
)
b.Run(desc, func(b *testing.B) {
b.ReportAllocs()
for b.Loop() {
Replace(in, old, new, tt.n)
}
})
}
}
type TitleTest struct {
in, out string
}
@@ -2126,9 +1893,8 @@ func TestContainsFunc(t *testing.T) {
var makeFieldsInput = func() []byte {
x := make([]byte, 1<<20)
// Input is ~10% space, ~10% 2-byte UTF-8, rest ASCII non-space.
r := rand.New(rand.NewSource(99))
for i := range x {
switch r.Intn(10) {
switch rand.Intn(10) {
case 0:
x[i] = ' '
case 1:
@@ -2147,9 +1913,8 @@ var makeFieldsInput = func() []byte {
var makeFieldsInputASCII = func() []byte {
x := make([]byte, 1<<20)
// Input is ~10% space, rest ASCII non-space.
r := rand.New(rand.NewSource(99))
for i := range x {
if r.Intn(10) == 0 {
if rand.Intn(10) == 0 {
x[i] = ' '
} else {
x[i] = 'x'
@@ -2246,9 +2011,8 @@ func makeBenchInputHard() []byte {
"hello", "world",
}
x := make([]byte, 0, 1<<20)
r := rand.New(rand.NewSource(99))
for {
i := r.Intn(len(tokens))
i := rand.Intn(len(tokens))
if len(x)+len(tokens[i]) >= 1<<20 {
break
}
@@ -2260,11 +2024,6 @@ func makeBenchInputHard() []byte {
var benchInputHard = makeBenchInputHard()
func benchmarkIndexHard(b *testing.B, sep []byte) {
n := Index(benchInputHard, sep)
if n < 0 {
n = len(benchInputHard)
}
b.SetBytes(int64(n))
for i := 0; i < b.N; i++ {
Index(benchInputHard, sep)
}

View File

@@ -10,7 +10,7 @@ import (
"fmt"
"io"
"os"
"slices"
"sort"
"strconv"
"unicode"
)
@@ -102,7 +102,7 @@ func ExampleBuffer_Read() {
fmt.Println(n)
fmt.Println(b.String())
fmt.Println(string(rdbuf))
// Output:
// Output
// 1
// bcde
// a
@@ -118,7 +118,7 @@ func ExampleBuffer_ReadByte() {
}
fmt.Println(c)
fmt.Println(b.String())
// Output:
// Output
// 97
// bcde
}
@@ -165,8 +165,11 @@ func ExampleCompare_search() {
// Binary search to find a matching byte slice.
var needle []byte
var haystack [][]byte // Assume sorted
_, found := slices.BinarySearchFunc(haystack, needle, bytes.Compare)
if found {
i := sort.Search(len(haystack), func(i int) bool {
// Return haystack[i] >= needle.
return bytes.Compare(haystack[i], needle) >= 0
})
if i < len(haystack) && bytes.Equal(haystack[i], needle) {
// Found it!
}
}
@@ -245,9 +248,9 @@ func ExampleCut() {
}
func ExampleCutPrefix() {
show := func(s, prefix string) {
after, found := bytes.CutPrefix([]byte(s), []byte(prefix))
fmt.Printf("CutPrefix(%q, %q) = %q, %v\n", s, prefix, after, found)
show := func(s, sep string) {
after, found := bytes.CutPrefix([]byte(s), []byte(sep))
fmt.Printf("CutPrefix(%q, %q) = %q, %v\n", s, sep, after, found)
}
show("Gopher", "Go")
show("Gopher", "ph")
@@ -257,9 +260,9 @@ func ExampleCutPrefix() {
}
func ExampleCutSuffix() {
show := func(s, suffix string) {
before, found := bytes.CutSuffix([]byte(s), []byte(suffix))
fmt.Printf("CutSuffix(%q, %q) = %q, %v\n", s, suffix, before, found)
show := func(s, sep string) {
before, found := bytes.CutSuffix([]byte(s), []byte(sep))
fmt.Printf("CutSuffix(%q, %q) = %q, %v\n", s, sep, before, found)
}
show("Gopher", "Go")
show("Gopher", "er")
@@ -502,10 +505,10 @@ func ExampleTitle() {
func ExampleToTitle() {
fmt.Printf("%s\n", bytes.ToTitle([]byte("loud noises")))
fmt.Printf("%s\n", bytes.ToTitle([]byte("брат")))
fmt.Printf("%s\n", bytes.ToTitle([]byte("хлеб")))
// Output:
// LOUD NOISES
// БРАТ
// ХЛЕБ
}
func ExampleToTitleSpecial() {
@@ -628,93 +631,3 @@ func ExampleToUpperSpecial() {
// Original : ahoj vývojári golang
// ToUpper : AHOJ VÝVOJÁRİ GOLANG
}
func ExampleLines() {
text := []byte("Hello\nWorld\nGo Programming\n")
for line := range bytes.Lines(text) {
fmt.Printf("%q\n", line)
}
// Output:
// "Hello\n"
// "World\n"
// "Go Programming\n"
}
func ExampleSplitSeq() {
s := []byte("a,b,c,d")
for part := range bytes.SplitSeq(s, []byte(",")) {
fmt.Printf("%q\n", part)
}
// Output:
// "a"
// "b"
// "c"
// "d"
}
func ExampleSplitAfterSeq() {
s := []byte("a,b,c,d")
for part := range bytes.SplitAfterSeq(s, []byte(",")) {
fmt.Printf("%q\n", part)
}
// Output:
// "a,"
// "b,"
// "c,"
// "d"
}
func ExampleFieldsSeq() {
text := []byte("The quick brown fox")
fmt.Println("Split byte slice into fields:")
for word := range bytes.FieldsSeq(text) {
fmt.Printf("%q\n", word)
}
textWithSpaces := []byte(" lots of spaces ")
fmt.Println("\nSplit byte slice with multiple spaces:")
for word := range bytes.FieldsSeq(textWithSpaces) {
fmt.Printf("%q\n", word)
}
// Output:
// Split byte slice into fields:
// "The"
// "quick"
// "brown"
// "fox"
//
// Split byte slice with multiple spaces:
// "lots"
// "of"
// "spaces"
}
func ExampleFieldsFuncSeq() {
text := []byte("The quick brown fox")
fmt.Println("Split on whitespace(similar to FieldsSeq):")
for word := range bytes.FieldsFuncSeq(text, unicode.IsSpace) {
fmt.Printf("%q\n", word)
}
mixedText := []byte("abc123def456ghi")
fmt.Println("\nSplit on digits:")
for word := range bytes.FieldsFuncSeq(mixedText, unicode.IsDigit) {
fmt.Printf("%q\n", word)
}
// Output:
// Split on whitespace(similar to FieldsSeq):
// "The"
// "quick"
// "brown"
// "fox"
//
// Split on digits:
// "abc"
// "def"
// "ghi"
}

View File

@@ -1,137 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bytes
import (
"iter"
"unicode"
"unicode/utf8"
)
// Lines returns an iterator over the newline-terminated lines in the byte slice s.
// The lines yielded by the iterator include their terminating newlines.
// If s is empty, the iterator yields no lines at all.
// If s does not end in a newline, the final yielded line will not end in a newline.
// It returns a single-use iterator.
func Lines(s []byte) iter.Seq[[]byte] {
return func(yield func([]byte) bool) {
for len(s) > 0 {
var line []byte
if i := IndexByte(s, '\n'); i >= 0 {
line, s = s[:i+1], s[i+1:]
} else {
line, s = s, nil
}
if !yield(line[:len(line):len(line)]) {
return
}
}
}
}
// splitSeq is SplitSeq or SplitAfterSeq, configured by how many
// bytes of sep to include in the results (none or all).
func splitSeq(s, sep []byte, sepSave int) iter.Seq[[]byte] {
return func(yield func([]byte) bool) {
if len(sep) == 0 {
for len(s) > 0 {
_, size := utf8.DecodeRune(s)
if !yield(s[:size:size]) {
return
}
s = s[size:]
}
return
}
for {
i := Index(s, sep)
if i < 0 {
break
}
frag := s[:i+sepSave]
if !yield(frag[:len(frag):len(frag)]) {
return
}
s = s[i+len(sep):]
}
yield(s[:len(s):len(s)])
}
}
// SplitSeq returns an iterator over all subslices of s separated by sep.
// The iterator yields the same subslices that would be returned by [Split](s, sep),
// but without constructing a new slice containing the subslices.
// It returns a single-use iterator.
func SplitSeq(s, sep []byte) iter.Seq[[]byte] {
return splitSeq(s, sep, 0)
}
// SplitAfterSeq returns an iterator over subslices of s split after each instance of sep.
// The iterator yields the same subslices that would be returned by [SplitAfter](s, sep),
// but without constructing a new slice containing the subslices.
// It returns a single-use iterator.
func SplitAfterSeq(s, sep []byte) iter.Seq[[]byte] {
return splitSeq(s, sep, len(sep))
}
// FieldsSeq returns an iterator over subslices of s split around runs of
// whitespace characters, as defined by [unicode.IsSpace].
// The iterator yields the same subslices that would be returned by [Fields](s),
// but without constructing a new slice containing the subslices.
func FieldsSeq(s []byte) iter.Seq[[]byte] {
return func(yield func([]byte) bool) {
start := -1
for i := 0; i < len(s); {
size := 1
r := rune(s[i])
isSpace := asciiSpace[s[i]] != 0
if r >= utf8.RuneSelf {
r, size = utf8.DecodeRune(s[i:])
isSpace = unicode.IsSpace(r)
}
if isSpace {
if start >= 0 {
if !yield(s[start:i:i]) {
return
}
start = -1
}
} else if start < 0 {
start = i
}
i += size
}
if start >= 0 {
yield(s[start:len(s):len(s)])
}
}
}
// FieldsFuncSeq returns an iterator over subslices of s split around runs of
// Unicode code points satisfying f(c).
// The iterator yields the same subslices that would be returned by [FieldsFunc](s),
// but without constructing a new slice containing the subslices.
func FieldsFuncSeq(s []byte, f func(rune) bool) iter.Seq[[]byte] {
return func(yield func([]byte) bool) {
start := -1
for i := 0; i < len(s); {
r, size := utf8.DecodeRune(s[i:])
if f(r) {
if start >= 0 {
if !yield(s[start:i:i]) {
return
}
start = -1
}
} else if start < 0 {
start = i
}
i += size
}
if start >= 0 {
yield(s[start:len(s):len(s)])
}
}
}

View File

@@ -1,56 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bytes_test
import (
. "bytes"
"testing"
)
func BenchmarkSplitSeqEmptySeparator(b *testing.B) {
for range b.N {
for range SplitSeq(benchInputHard, nil) {
}
}
}
func BenchmarkSplitSeqSingleByteSeparator(b *testing.B) {
sep := []byte("/")
for range b.N {
for range SplitSeq(benchInputHard, sep) {
}
}
}
func BenchmarkSplitSeqMultiByteSeparator(b *testing.B) {
sep := []byte("hello")
for range b.N {
for range SplitSeq(benchInputHard, sep) {
}
}
}
func BenchmarkSplitAfterSeqEmptySeparator(b *testing.B) {
for range b.N {
for range SplitAfterSeq(benchInputHard, nil) {
}
}
}
func BenchmarkSplitAfterSeqSingleByteSeparator(b *testing.B) {
sep := []byte("/")
for range b.N {
for range SplitAfterSeq(benchInputHard, sep) {
}
}
}
func BenchmarkSplitAfterSeqMultiByteSeparator(b *testing.B) {
sep := []byte("hello")
for range b.N {
for range SplitAfterSeq(benchInputHard, sep) {
}
}
}

View File

@@ -10,8 +10,8 @@ import (
"unicode/utf8"
)
// A Reader implements the [io.Reader], [io.ReaderAt], [io.WriterTo], [io.Seeker],
// [io.ByteScanner], and [io.RuneScanner] interfaces by reading from
// A Reader implements the io.Reader, io.ReaderAt, io.WriterTo, io.Seeker,
// io.ByteScanner, and io.RuneScanner interfaces by reading from
// a byte slice.
// Unlike a [Buffer], a Reader is read-only and supports seeking.
// The zero value for Reader operates like a Reader of an empty slice.
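
A brief sketch of the seek-and-read-at behaviour the Reader doc comment describes:

    package main

    import (
        "bytes"
        "fmt"
        "io"
    )

    func main() {
        r := bytes.NewReader([]byte("0123456789"))

        // Seek, then read the remainder.
        if _, err := r.Seek(5, io.SeekStart); err != nil {
            fmt.Println(err)
            return
        }
        rest, _ := io.ReadAll(r)
        fmt.Printf("%s\n", rest) // 56789

        // ReadAt works at an absolute offset and does not move the read position.
        buf := make([]byte, 3)
        if _, err := r.ReadAt(buf, 2); err != nil {
            fmt.Println(err)
            return
        }
        fmt.Printf("%s\n", buf) // 234
    }
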
@@ -152,8 +152,8 @@ func (r *Reader) WriteTo(w io.Writer) (n int64, err error) {
return
}
// Reset resets the [Reader] to be reading from b.
// Reset resets the [Reader.Reader] to be reading from b.
func (r *Reader) Reset(b []byte) { *r = Reader{b, 0, -1} }
// NewReader returns a new [Reader] reading from b.
// NewReader returns a new [Reader.Reader] reading from b.
func NewReader(b []byte) *Reader { return &Reader{b, 0, -1} }

Some files were not shown because too many files have changed in this diff.