Compare commits

..

2 Commits

Author SHA1 Message Date
Than McIntosh
71aaa8bde1 [dev.inline] merge with master at 894d24d617
Change-Id: I845eec08108c69228ebcba921f8a807a376d3fae
2023-07-07 16:49:15 -04:00
Than McIntosh
3aba453b66 [dev.inline] add back in codereview.cfg
Add back in an appropriately set up codereview.cfg for this work
branch.

Change-Id: I0e9f649da31c6ea1cbf8ddc1d906c20c41248721
Reviewed-on: https://go-review.googlesource.com/c/go/+/507157
Reviewed-by: Dmitri Shuralyov <dmitshur@golang.org>
TryBot-Bypass: Than McIntosh <thanm@google.com>
Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
2023-06-29 17:09:13 +00:00
4722 changed files with 94899 additions and 337797 deletions

.github/ISSUE_TEMPLATE/00-bug.md vendored Normal file

@@ -0,0 +1,45 @@
---
name: Bugs
about: The go command, standard library, or anything else
title: "affected/package: "
---
<!--
Please answer these questions before submitting your issue. Thanks!
-->
### What version of Go are you using (`go version`)?
<pre>
$ go version
</pre>
### Does this issue reproduce with the latest release?
### What operating system and processor architecture are you using (`go env`)?
<details><summary><code>go env</code> Output</summary><br><pre>
$ go env
</pre></details>
### What did you do?
<!--
If possible, provide a recipe for reproducing the error.
A complete runnable program is good.
A link on go.dev/play is best.
-->
### What did you expect to see?
### What did you see instead?


@@ -1,94 +0,0 @@
# https://docs.github.com/en/communities/using-templates-to-encourage-useful-issues-and-pull-requests/configuring-issue-templates-for-your-repository#creating-issue-forms
# https://docs.github.com/en/communities/using-templates-to-encourage-useful-issues-and-pull-requests/syntax-for-githubs-form-schema
name: Bugs
description: The go command, standard library, or anything else
title: "import/path: issue title"
body:
- type: markdown
attributes:
value: |
Thanks for helping us improve! 🙏 Please answer these questions and provide as much information as possible about your problem.
- type: input
id: go-version
attributes:
label: Go version
description: |
What version of Go are you using (`go version`)?
Note: we only [support](https://go.dev/doc/devel/release#policy) the two most recent major releases.
placeholder: ex. go version go1.20.7 darwin/arm64
validations:
required: true
- type: textarea
id: go-env
attributes:
label: "Output of `go env` in your module/workspace:"
placeholder: |
GO111MODULE=""
GOARCH="arm64"
GOBIN="/Users/gopher/go/bin"
GOCACHE="/Users/gopher/go/cache"
GOENV="/Users/gopher/Library/Application Support/go/env"
GOEXE=""
GOEXPERIMENT=""
GOFLAGS=""
GOHOSTARCH="arm64"
GOHOSTOS="darwin"
GOINSECURE=""
GOMODCACHE="/Users/gopher/go/pkg/mod"
GONOPROXY=""
GONOSUMDB=""
GOOS="darwin"
GOPATH="/Users/gopher/go"
GOPRIVATE=""
GOPROXY="https://proxy.golang.org,direct"
GOROOT="/usr/local/go"
GOSUMDB="sum.golang.org"
GOTMPDIR=""
GOTOOLDIR="/usr/local/go/pkg/tool/darwin_arm64"
GOVCS=""
GOVERSION="go1.20.7"
GCCGO="gccgo"
AR="ar"
CC="clang"
CXX="clang++"
CGO_ENABLED="1"
GOMOD="/dev/null"
GOWORK=""
CGO_CFLAGS="-O2 -g"
CGO_CPPFLAGS=""
CGO_CXXFLAGS="-O2 -g"
CGO_FFLAGS="-O2 -g"
CGO_LDFLAGS="-O2 -g"
PKG_CONFIG="pkg-config"
GOGCCFLAGS="-fPIC -arch arm64 -pthread -fno-caret-diagnostics -Qunused-arguments -fmessage-length=0 -fdebug-prefix-map=/var/folders/44/nbbyll_10jd0z8rj_qxm43740000gn/T/go-build2331607515=/tmp/go-build -gno-record-gcc-switches -fno-common"
render: shell
validations:
required: true
- type: textarea
id: what-did-you-do
attributes:
label: "What did you do?"
description: "If possible, provide a recipe for reproducing the error. A complete runnable program is good. A link on [go.dev/play](https://go.dev/play) is best."
validations:
required: true
- type: textarea
id: actual-behavior
attributes:
label: "What did you see happen?"
description: Command invocations and their associated output, functions with their arguments and return results, full stacktraces for panics (upload a file if it is very long), etc. Prefer copying text output over using screenshots.
validations:
required: true
- type: textarea
id: expected-behavior
attributes:
label: "What did you expect to see?"
description: Why is the current output incorrect, and any additional context we may need to understand the issue.
validations:
required: true

.github/ISSUE_TEMPLATE/01-pkgsite.md vendored Normal file

@@ -0,0 +1,49 @@
---
name: Pkg.go.dev bugs or feature requests
about: Issues or feature requests for the documentation site
title: "x/pkgsite: "
labels: pkgsite
---
<!--
Please answer these questions before submitting your issue. Thanks!
-->
### What is the URL of the page with the issue?
### What is your user agent?
<!--
You can find your user agent here:
https://www.google.com/search?q=what+is+my+user+agent
-->
### Screenshot
<!--
Please paste a screenshot of the page.
-->
### What did you do?
<!--
If possible, provide a recipe for reproducing the error.
Starting with a Private/Incognito tab/window may help rule out problematic browser extensions.
-->
### What did you expect to see?
### What did you see instead?


@@ -1,47 +0,0 @@
name: Pkg.go.dev bugs or feature requests
description: Issues or feature requests for the documentation site
title: "x/pkgsite: issue title"
labels: ["pkgsite"]
body:
- type: markdown
attributes:
value: "Please answer these questions before submitting your issue. Thanks!"
- type: input
id: url
attributes:
label: "What is the URL of the page with the issue?"
validations:
required: true
- type: input
id: user-agent
attributes:
label: "What is your user agent?"
description: "You can find your user agent here: https://www.google.com/search?q=what+is+my+user+agent"
validations:
required: true
- type: textarea
id: screenshot
attributes:
label: "Screenshot"
description: "Please paste a screenshot of the page."
validations:
required: false
- type: textarea
id: what-did-you-do
attributes:
label: "What did you do?"
description: "If possible, provide a recipe for reproducing the error. Starting with a Private/Incognito tab/window may help rule out problematic browser extensions."
validations:
required: true
- type: textarea
id: actual-behavior
attributes:
label: "What did you see happen?"
validations:
required: true
- type: textarea
id: expected-behavior
attributes:
label: "What did you expect to see?"
validations:
required: true


@@ -0,0 +1,39 @@
---
name: Pkg.go.dev package removal request
about: Request a package be removed from the documentation site (pkg.go.dev)
title: "x/pkgsite: package removal request for [type path here]"
labels: pkgsite/package-removal
---
<!--
Please answer these questions before submitting your issue. Thanks!
-->
### What is the path of the package that you would like to have removed?
<!---
We can remove packages with a shared path prefix.
For example, a request for "github.com/author" would remove all pkg.go.dev pages with that package path prefix.
--->
### Are you the owner of this package?
<!---
Only the package owners can request to have their packages removed from pkg.go.dev.
--->
### What is the reason that you could not retract this package instead?
<!---
If you would like to have your module removed from pkg.go.dev, we recommend that you retract them, so that they can be removed from the go command and proxy.golang.org as well.
Retracting a module version involves adding a retract directive to your go.mod file and publishing a new version. For example: https://github.com/jba/retract-demo/blob/main/go.mod#L5-L8
See https://pkg.go.dev/about#removing-a-package for additional tips on retractions.
--->


@@ -1,42 +0,0 @@
name: Pkg.go.dev package removal request
description: Request a package be removed from the documentation site (pkg.go.dev)
title: "x/pkgsite: package removal request for [type path here]"
labels: ["pkgsite/package-removal"]
body:
- type: markdown
attributes:
value: "Please answer these questions before submitting your issue. Thanks!"
- type: input
id: package-path
attributes:
label: "What is the path of the package that you would like to have removed?"
description: |
We can remove packages with a shared path prefix.
For example, a request for 'github.com/author' would remove all pkg.go.dev pages with that package path prefix.
validations:
required: true
- type: textarea
id: package-owner
attributes:
label: "Are you the owner of this package?"
description: |
Only the package owners can request to have their packages removed from pkg.go.dev.
If the package path doesn't include your github username, please provide some other form of proof of ownership.
validations:
required: true
- type: textarea
id: retraction-reason
attributes:
label: "What is the reason that you could not retract this package instead?"
description: |
Requesting we remove a module here only hides the generated documentation on pkg.go.dev.
It does not affect the behaviour of proxy.golang.org or the go command.
Instead we recommend using the retract directive which will be processed by all 3 of the above.
If you have deleted your repo, please recreate it and publish a retraction.
Retracting a module version involves adding a retract directive to your go.mod file and publishing a new version.
For example: https://github.com/jba/retract-demo/blob/main/go.mod#L5-L8.
See https://pkg.go.dev/about#removing-a-package for additional tips on retractions.
validations:
required: true

.github/ISSUE_TEMPLATE/03-gopls.md vendored Normal file

@@ -0,0 +1,61 @@
---
name: Gopls bugs or feature requests
about: Issues or feature requests for the Go language server (gopls)
title: "x/tools/gopls: "
labels: gopls Tools
---
<!--
Please answer these questions before submitting your issue. Thanks!
-->
### gopls version
<!--
Output of `gopls -v version` on the command line
-->
### go env
<!--
Output of `go env` on the command line in your workspace directory
-->
### What did you do?
<!--
If possible, provide a recipe for reproducing the error.
A complete runnable program is good.
A link on go.dev/play is better.
A failing unit test is the best.
-->
### What did you expect to see?
### What did you see instead?
### Editor and settings
<!--
Your editor and any settings you have configured (for example, your VSCode settings.json file)
-->
### Logs
<!--
If possible please include gopls logs. Instructions for capturing them can be found here:
https://github.com/golang/tools/blob/master/gopls/doc/troubleshooting.md#capture-logs
-->


@@ -1,56 +0,0 @@
name: Gopls bugs or feature requests
description: Issues or feature requests for the Go language server (gopls)
title: "x/tools/gopls: issue title"
labels: ["gopls", "Tools"]
body:
- type: markdown
attributes:
value: "Please answer these questions before submitting your issue. Thanks!"
- type: textarea
id: gopls-version
attributes:
label: "gopls version"
description: "Output of `gopls -v version` on the command line"
validations:
required: true
- type: textarea
id: go-env
attributes:
label: "go env"
description: "Output of `go env` on the command line in your workspace directory"
render: shell
validations:
required: true
- type: textarea
id: what-did-you-do
attributes:
label: "What did you do?"
description: "If possible, provide a recipe for reproducing the error. A complete runnable program is good. A link on [go.dev/play](https://go.dev/play) is better. A failing unit test is the best."
validations:
required: true
- type: textarea
id: actual-behavior
attributes:
label: "What did you see happen?"
validations:
required: true
- type: textarea
id: expected-behavior
attributes:
label: "What did you expect to see?"
validations:
required: true
- type: textarea
id: editor-and-settings
attributes:
label: "Editor and settings"
description: "Your editor and any settings you have configured (for example, your VSCode settings.json file)"
validations:
required: false
- type: textarea
id: logs
attributes:
label: "Logs"
description: "If possible please include gopls logs. Instructions for capturing them can be found here: https://github.com/golang/tools/blob/master/gopls/doc/troubleshooting.md#capture-logs"
validations:
required: false

.github/ISSUE_TEMPLATE/04-vuln.md vendored Normal file

@@ -0,0 +1,51 @@
---
name: Go vulnerability management - bugs and feature requests
about: Issues or feature requests about Go vulnerability management
title: "x/vuln: "
labels: "vulncheck or vulndb"
---
<!--
Please answer these questions before submitting your issue. Thanks!
To add a new vulnerability to the Go vulnerability database
(https://vuln.go.dev), see https://go.dev/s/vulndb-report-new.
To report an issue about a report, see https://go.dev/s/vulndb-report-feedback.
-->
### What version of Go are you using (`go version`)?
<pre>
$ go version
</pre>
### Does this issue reproduce at the latest version of golang.org/x/vuln?
### What operating system and processor architecture are you using (`go env`)?
<details><summary><code>go env</code> Output</summary><br><pre>
$ go env
</pre></details>
### What did you do?
<!--
If possible, provide a recipe for reproducing the error.
A complete runnable program is good.
A link on go.dev/play is best.
-->
### What did you expect to see?
### What did you see instead?


@@ -1,52 +0,0 @@
name: Go vulnerability management - bugs and feature requests
description: Issues or feature requests about Go vulnerability management
title: "x/vuln: issue title"
labels: ["vulncheck or vulndb"]
body:
- type: markdown
attributes:
value: "Please answer these questions before submitting your issue. Thanks! To add a new vulnerability to the Go vulnerability database (https://vuln.go.dev), see https://go.dev/s/vulndb-report-new. To report an issue about a report, see https://go.dev/s/vulndb-report-feedback."
- type: textarea
id: govulncheck-version
attributes:
label: govulncheck version
description: What version of govulncheck are you using (`govulncheck -version`)?
placeholder: |
Go: devel go1.22-0262ea1ff9 Thu Oct 26 18:46:50 2023 +0000
Scanner: govulncheck@v1.0.2-0.20231108200754-fcf7dff7b242
DB: https://vuln.go.dev
DB updated: 2023-11-21 15:39:17 +0000 UTC
validations:
required: true
- type: textarea
id: reproduce-latest-version
attributes:
label: "Does this issue reproduce at the latest version of golang.org/x/vuln?"
validations:
required: true
- type: textarea
id: go-env
attributes:
label: "Output of `go env` in your module/workspace:"
render: shell
validations:
required: true
- type: textarea
id: what-did-you-do
attributes:
label: "What did you do?"
description: "If possible, provide a recipe for reproducing the error. A complete runnable program is good. A link on [go.dev/play](https://go.dev/play) is best."
validations:
required: true
- type: textarea
id: actual-behavior
attributes:
label: "What did you see happen?"
validations:
required: true
- type: textarea
id: expected-behavior
attributes:
label: "What did you expect to see?"
validations:
required: true

.github/ISSUE_TEMPLATE/10-proposal.md vendored Normal file

@@ -0,0 +1,13 @@
---
name: Proposals
about: New external API or other notable changes
title: "proposal: affected/package: "
labels: Proposal
---
<!--
Our proposal process is documented here:
https://go.dev/s/proposal-process
-->


@@ -1,15 +0,0 @@
name: Proposals
description: New external API or other notable changes
title: "proposal: import/path: proposal title"
labels: ["Proposal"]
body:
- type: markdown
attributes:
value: "Our proposal process is documented here: https://go.dev/s/proposal-process"
- type: textarea
id: proposal-details
attributes:
label: "Proposal Details"
description: "Please provide the details of your proposal here."
validations:
required: true


@@ -0,0 +1,52 @@
---
name: Language Change Proposals
about: Changes to the language
title: "proposal: Go 2: "
labels: Proposal Go2 LanguageChange
---
<!--
Our process for evaluating language changes can be found here:
https://go.googlesource.com/proposal/+/refs/heads/master#language-changes
-->
### Author background
- **Would you consider yourself a novice, intermediate, or experienced Go programmer?**
- **What other languages do you have experience with?**
### Related proposals
- **Has this idea, or one like it, been proposed before?**
- **If so, how does this proposal differ?**
- **Does this affect error handling?**
- **If so, how does this differ from previous error handling proposals?**
- **Is this about generics?**
- **If so, how does this relate to the accepted design and other generics proposals?**
### Proposal
- **What is the proposed change?**
- **Who does this proposal help, and why?**
- **Please describe as precisely as possible the change to the language.**
- **What would change in the language spec?**
- **Please also describe the change informally, as in a class teaching Go.**
- **Is this change backward compatible?**
- Breaking the Go 1 compatibility guarantee is a large cost and requires a large benefit.
Show example code before and after the change.
- **Before**
- **After**
- **Orthogonality: how does this change interact or overlap with existing features?**
- **Is the goal of this change a performance improvement?**
- **If so, what quantifiable improvement should we expect?**
- **How would we measure it?**
### Costs
- **Would this change make Go easier or harder to learn, and why?**
- **What is the cost of this proposal? (Every language change has a cost).**
- **How many tools (such as vet, gopls, gofmt, goimports, etc.) would be affected?**
- **What is the compile time cost?**
- **What is the run time cost?**
- **Can you describe a possible implementation?**
- **Do you have a prototype? (This is not required.)**


@@ -1,165 +0,0 @@
name: Language Change Proposals
description: Changes to the language
labels: ["Proposal", "v2", "LanguageChange"]
title: "proposal: Go 2: proposal title"
body:
- type: markdown
attributes:
value: |
## Our process for evaluating language changes can be found [here](https://go.googlesource.com/proposal/+/refs/heads/master#language-changes)
- type: dropdown
id: author-go-experience
attributes:
label: "Go Programming Experience"
description: "Would you consider yourself a novice, intermediate, or experienced Go programmer?"
options:
- "Novice"
- "Intermediate"
- "Experienced"
default: 1
- type: input
id: author-other-languages-experience
attributes:
label: "Other Languages Experience"
description: "What other languages do you have experience with?"
placeholder: "Go, Python, JS, Rust"
validations:
required: false
- type: checkboxes
id: related-idea
attributes:
label: "Related Idea"
options:
- label: "Has this idea, or one like it, been proposed before?"
- label: "Does this affect error handling?"
- label: "Is this about generics?"
- label: "Is this change backward compatible? Breaking the Go 1 compatibility guarantee is a large cost and requires a large benefit"
- type: textarea
id: related-proposals
attributes:
label: Has this idea, or one like it, been proposed before?
description: If so, how does this proposal differ?
placeholder: |
Yes or No
If yes,
1. Mention the related proposals
2. then describe how this proposal differs
validations:
required: true
- type: textarea
id: error-handling-proposal
attributes:
label: Does this affect error handling?
description: If so, how does this differ from previous error handling proposals?
placeholder: |
Yes or No
If yes,
1.how does this differ from previous error handling proposals?
validations:
required: true
- type: textarea
id: generics-proposal
attributes:
label: Is this about generics?
description: If so, how does this relate to the accepted design and other generics proposals?
placeholder: |
Yes or No
If yes,
1. how does this relate to the accepted design and other generics proposals?
validations:
required: true
- type: textarea
id: proposal
attributes:
label: "Proposal"
description: "What is the proposed change? Who does this proposal help, and why? Please describe as precisely as possible the change to the language."
validations:
required: true
- type: textarea
id: language-spec-changes
attributes:
label: "Language Spec Changes"
description: "What would change in the language spec?"
validations:
required: false
- type: textarea
id: informal-change
attributes:
label: "Informal Change"
description: "Please also describe the change informally, as in a class teaching Go."
validations:
required: false
- type: textarea
id: go-backwards-compatiblity
attributes:
label: Is this change backward compatible?
description: Breaking the Go 1 compatibility guarantee is a large cost and requires a large benefit.
placeholder: |
Yes or No
If yes,
1. Show example code before and after the change.
validations:
required: true
- type: textarea
id: orthogonality
attributes:
label: "Orthogonality: How does this change interact or overlap with existing features?"
description: "Is the goal of this change a performance improvement? If so, what quantifiable improvement should we expect? How would we measure it?"
validations:
required: false
- type: textarea
id: learning-curve
attributes:
label: "Would this change make Go easier or harder to learn, and why?"
- type: textarea
id: cost-description
attributes:
label: "Cost Description"
description: "What is the cost of this proposal? (Every language change has a cost)"
- type: input
id: go-toolchain
attributes:
label: Changes to Go ToolChain
description: "How many tools (such as vet, gopls, gofmt, goimports, etc.) would be affected? "
validations:
required: false
- type: input
id: perf-costs
attributes:
label: Performance Costs
description: "What is the compile time cost? What is the run time cost? "
validations:
required: false
- type: textarea
id: prototype
attributes:
label: "Prototype"
description: "Can you describe a possible implementation?"
validations:
required: false


@@ -1,30 +0,0 @@
name: Go Telemetry Proposals
description: Changes to the telemetry upload configuration
title: "x/telemetry/config: proposal title"
labels: ["Telemetry-Proposal"]
projects: ["golang/29"]
body:
- type: textarea
attributes:
label: Summary
description: >
What change are you proposing to the upload configuration, and why?
For new upload configuration, which new counters will be collected, what
do they measure, and why is it important to collect them?
Note that uploaded data must not carry sensitive user information.
See [go.dev/doc/telemetry#proposals](https://go.dev/doc/telemetry#proposals)
for more details on telemetry proposals.
validations:
required: true
- type: input
attributes:
label: Proposed Config Change
description: >
A CL containing proposed changes to the
[config.txt](https://go.googlesource.com/telemetry/+/master/internal/chartconfig/config.txt)
chart configuration.
See the [chartconfig](https://pkg.go.dev/golang.org/x/telemetry/internal/chartconfig)
package for an explanation of the chart config format.
For an example change, see [CL 564619](https://go.dev/cl/564619).
validations:
required: true


@@ -1,4 +1,4 @@
blank_issues_enabled: true
blank_issues_enabled: false
contact_links:
- name: Questions
about: Please use one of the forums for questions or general discussions


@@ -4,7 +4,7 @@ Go is an open source programming language that makes it easy to build simple,
reliable, and efficient software.
![Gopher image](https://golang.org/doc/gopher/fiveyears.jpg)
*Gopher image by [Renee French][rf], licensed under [Creative Commons 4.0 Attribution license][cc4-by].*
*Gopher image by [Renee French][rf], licensed under [Creative Commons 4.0 Attributions license][cc4-by].*
Our canonical Git repository is located at https://go.googlesource.com/go.
There is a mirror of the repository at https://github.com/golang/go.


@@ -10,4 +10,4 @@ part of that page.
## Reporting a Vulnerability
See https://go.dev/security/policy for how to report a vulnerability.
See https://go.dev/security for how to report a vulnerability.


@@ -1,2 +0,0 @@
go1.23.3
time 2024-11-06T18:46:45Z


@@ -21,6 +21,3 @@ warning output from the go api tool. Each file should be named
nnnnn.txt, after the issue number for the accepted proposal.
(The #nnnnn suffix must also appear at the end of each line in the file;
that will be preserved when next/*.txt is concatenated into go1.XX.txt.)
When you add a file to the api/next directory, you must add at least one file
under doc/next. See doc/README.md for details.


@@ -598,7 +598,3 @@ pkg syscall (freebsd-arm64-cgo), const SYS_MKNODAT = 498
pkg syscall (freebsd-arm64-cgo), const SYS_STAT = 188
pkg syscall (freebsd-arm64-cgo), const SYS_STAT ideal-int
pkg syscall (freebsd-arm64-cgo), const SYS_STATFS = 396
pkg syscall (openbsd-386), const ELAST = 91
pkg syscall (openbsd-386-cgo), const ELAST = 91
pkg syscall (openbsd-amd64), const ELAST = 91
pkg syscall (openbsd-amd64-cgo), const ELAST = 91


@@ -60,9 +60,7 @@ pkg crypto/tls, method (*QUICConn) Close() error #44886
pkg crypto/tls, method (*QUICConn) ConnectionState() ConnectionState #44886
pkg crypto/tls, method (*QUICConn) HandleData(QUICEncryptionLevel, []uint8) error #44886
pkg crypto/tls, method (*QUICConn) NextEvent() QUICEvent #44886
pkg crypto/tls, method (*QUICConn) SendSessionTicket(QUICSessionTicketOptions) error #60107
pkg crypto/tls, type QUICSessionTicketOptions struct #60107
pkg crypto/tls, type QUICSessionTicketOptions struct, EarlyData bool #60107
pkg crypto/tls, method (*QUICConn) SendSessionTicket(bool) error #60107
pkg crypto/tls, method (*QUICConn) SetTransportParameters([]uint8) #44886
pkg crypto/tls, method (*QUICConn) Start(context.Context) error #44886
pkg crypto/tls, method (QUICEncryptionLevel) String() string #44886
@@ -167,12 +165,7 @@ pkg errors, var ErrUnsupported error #41198
pkg flag, func BoolFunc(string, string, func(string) error) #53747
pkg flag, method (*FlagSet) BoolFunc(string, string, func(string) error) #53747
pkg go/ast, func IsGenerated(*File) bool #28089
pkg go/ast, func NewPackage //deprecated #52463
pkg go/ast, type File struct, GoVersion string #59033
pkg go/ast, type Importer //deprecated #52463
pkg go/ast, type Object //deprecated #52463
pkg go/ast, type Package //deprecated #52463
pkg go/ast, type Scope //deprecated #52463
pkg go/build/constraint, func GoVersion(Expr) string #59033
pkg go/build, type Directive struct #56986
pkg go/build, type Directive struct, Pos token.Position #56986
@@ -226,18 +219,18 @@ pkg log/slog, func Any(string, interface{}) Attr #56345
pkg log/slog, func AnyValue(interface{}) Value #56345
pkg log/slog, func Bool(string, bool) Attr #56345
pkg log/slog, func BoolValue(bool) Value #56345
pkg log/slog, func DebugContext(context.Context, string, ...interface{}) #61200
pkg log/slog, func DebugCtx(context.Context, string, ...interface{}) #56345
pkg log/slog, func Debug(string, ...interface{}) #56345
pkg log/slog, func Default() *Logger #56345
pkg log/slog, func Duration(string, time.Duration) Attr #56345
pkg log/slog, func DurationValue(time.Duration) Value #56345
pkg log/slog, func ErrorContext(context.Context, string, ...interface{}) #61200
pkg log/slog, func ErrorCtx(context.Context, string, ...interface{}) #56345
pkg log/slog, func Error(string, ...interface{}) #56345
pkg log/slog, func Float64(string, float64) Attr #56345
pkg log/slog, func Float64Value(float64) Value #56345
pkg log/slog, func Group(string, ...interface{}) Attr #59204
pkg log/slog, func GroupValue(...Attr) Value #56345
pkg log/slog, func InfoContext(context.Context, string, ...interface{}) #61200
pkg log/slog, func InfoCtx(context.Context, string, ...interface{}) #56345
pkg log/slog, func Info(string, ...interface{}) #56345
pkg log/slog, func Int64(string, int64) Attr #56345
pkg log/slog, func Int64Value(int64) Value #56345
@@ -257,7 +250,7 @@ pkg log/slog, func Time(string, time.Time) Attr #56345
pkg log/slog, func TimeValue(time.Time) Value #56345
pkg log/slog, func Uint64(string, uint64) Attr #56345
pkg log/slog, func Uint64Value(uint64) Value #56345
pkg log/slog, func WarnContext(context.Context, string, ...interface{}) #61200
pkg log/slog, func WarnCtx(context.Context, string, ...interface{}) #56345
pkg log/slog, func Warn(string, ...interface{}) #56345
pkg log/slog, func With(...interface{}) *Logger #56345
pkg log/slog, method (Attr) Equal(Attr) bool #56345
@@ -278,17 +271,17 @@ pkg log/slog, method (*LevelVar) MarshalText() ([]uint8, error) #56345
pkg log/slog, method (*LevelVar) Set(Level) #56345
pkg log/slog, method (*LevelVar) String() string #56345
pkg log/slog, method (*LevelVar) UnmarshalText([]uint8) error #56345
pkg log/slog, method (*Logger) DebugContext(context.Context, string, ...interface{}) #61200
pkg log/slog, method (*Logger) DebugCtx(context.Context, string, ...interface{}) #56345
pkg log/slog, method (*Logger) Debug(string, ...interface{}) #56345
pkg log/slog, method (*Logger) Enabled(context.Context, Level) bool #56345
pkg log/slog, method (*Logger) ErrorContext(context.Context, string, ...interface{}) #61200
pkg log/slog, method (*Logger) ErrorCtx(context.Context, string, ...interface{}) #56345
pkg log/slog, method (*Logger) Error(string, ...interface{}) #56345
pkg log/slog, method (*Logger) Handler() Handler #56345
pkg log/slog, method (*Logger) InfoContext(context.Context, string, ...interface{}) #61200
pkg log/slog, method (*Logger) InfoCtx(context.Context, string, ...interface{}) #56345
pkg log/slog, method (*Logger) Info(string, ...interface{}) #56345
pkg log/slog, method (*Logger) LogAttrs(context.Context, Level, string, ...Attr) #56345
pkg log/slog, method (*Logger) Log(context.Context, Level, string, ...interface{}) #56345
pkg log/slog, method (*Logger) WarnContext(context.Context, string, ...interface{}) #61200
pkg log/slog, method (*Logger) WarnCtx(context.Context, string, ...interface{}) #56345
pkg log/slog, method (*Logger) Warn(string, ...interface{}) #56345
pkg log/slog, method (*Logger) WithGroup(string) *Logger #56345
pkg log/slog, method (*Logger) With(...interface{}) *Logger #56345
@@ -351,6 +344,8 @@ pkg maps, func Copy[$0 interface{ ~map[$2]$3 }, $1 interface{ ~map[$2]$3 }, $2 c
pkg maps, func DeleteFunc[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0, func($1, $2) bool) #57436
pkg maps, func Equal[$0 interface{ ~map[$2]$3 }, $1 interface{ ~map[$2]$3 }, $2 comparable, $3 comparable]($0, $1) bool #57436
pkg maps, func EqualFunc[$0 interface{ ~map[$2]$3 }, $1 interface{ ~map[$2]$4 }, $2 comparable, $3 interface{}, $4 interface{}]($0, $1, func($3, $4) bool) bool #57436
pkg maps, func Keys[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0) []$1 #57436
pkg maps, func Values[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0) []$2 #57436
pkg math/big, method (*Int) Float64() (float64, Accuracy) #56984
pkg net/http, method (*ProtocolError) Is(error) bool #41198
pkg net/http, method (*ResponseController) EnableFullDuplex() error #57786


@@ -1,135 +0,0 @@
pkg archive/tar, method (*Writer) AddFS(fs.FS) error #58000
pkg archive/zip, method (*Writer) AddFS(fs.FS) error #54898
pkg cmp, func Or[$0 comparable](...$0) $0 #60204
pkg crypto/x509, func OIDFromInts([]uint64) (OID, error) #60665
pkg crypto/x509, method (*CertPool) AddCertWithConstraint(*Certificate, func([]*Certificate) error) #57178
pkg crypto/x509, method (OID) Equal(OID) bool #60665
pkg crypto/x509, method (OID) EqualASN1OID(asn1.ObjectIdentifier) bool #60665
pkg crypto/x509, method (OID) String() string #60665
pkg crypto/x509, type Certificate struct, Policies []OID #60665
pkg crypto/x509, type OID struct #60665
pkg database/sql, method (*Null[$0]) Scan(interface{}) error #60370
pkg database/sql, method (Null[$0]) Value() (driver.Value, error) #60370
pkg database/sql, type Null[$0 interface{}] struct #60370
pkg database/sql, type Null[$0 interface{}] struct, V $0 #60370
pkg database/sql, type Null[$0 interface{}] struct, Valid bool #60370
pkg debug/elf, const R_LARCH_64_PCREL = 109 #63725
pkg debug/elf, const R_LARCH_64_PCREL R_LARCH #63725
pkg debug/elf, const R_LARCH_ADD6 = 105 #63725
pkg debug/elf, const R_LARCH_ADD6 R_LARCH #63725
pkg debug/elf, const R_LARCH_ADD_ULEB128 = 107 #63725
pkg debug/elf, const R_LARCH_ADD_ULEB128 R_LARCH #63725
pkg debug/elf, const R_LARCH_ALIGN = 102 #63725
pkg debug/elf, const R_LARCH_ALIGN R_LARCH #63725
pkg debug/elf, const R_LARCH_CFA = 104 #63725
pkg debug/elf, const R_LARCH_CFA R_LARCH #63725
pkg debug/elf, const R_LARCH_DELETE = 101 #63725
pkg debug/elf, const R_LARCH_DELETE R_LARCH #63725
pkg debug/elf, const R_LARCH_PCREL20_S2 = 103 #63725
pkg debug/elf, const R_LARCH_PCREL20_S2 R_LARCH #63725
pkg debug/elf, const R_LARCH_SUB6 = 106 #63725
pkg debug/elf, const R_LARCH_SUB6 R_LARCH #63725
pkg debug/elf, const R_LARCH_SUB_ULEB128 = 108 #63725
pkg debug/elf, const R_LARCH_SUB_ULEB128 R_LARCH #63725
pkg debug/elf, const R_MIPS_PC32 = 248 #61974
pkg debug/elf, const R_MIPS_PC32 R_MIPS #61974
pkg encoding/base32, method (*Encoding) AppendDecode([]uint8, []uint8) ([]uint8, error) #53693
pkg encoding/base32, method (*Encoding) AppendEncode([]uint8, []uint8) []uint8 #53693
pkg encoding/base64, method (*Encoding) AppendDecode([]uint8, []uint8) ([]uint8, error) #53693
pkg encoding/base64, method (*Encoding) AppendEncode([]uint8, []uint8) []uint8 #53693
pkg encoding/hex, func AppendDecode([]uint8, []uint8) ([]uint8, error) #53693
pkg encoding/hex, func AppendEncode([]uint8, []uint8) []uint8 #53693
pkg go/ast, func NewPackage //deprecated #52463
pkg go/ast, func Unparen(Expr) Expr #60061
pkg go/ast, type Importer //deprecated #52463
pkg go/ast, type Object //deprecated #52463
pkg go/ast, type Package //deprecated #52463
pkg go/ast, type Scope //deprecated #52463
pkg go/types, func NewAlias(*TypeName, Type) *Alias #63223
pkg go/types, func Unalias(Type) Type #63223
pkg go/types, method (*Alias) Obj() *TypeName #63223
pkg go/types, method (*Alias) String() string #63223
pkg go/types, method (*Alias) Underlying() Type #63223
pkg go/types, method (*Info) PkgNameOf(*ast.ImportSpec) *PkgName #62037
pkg go/types, method (Checker) PkgNameOf(*ast.ImportSpec) *PkgName #62037
pkg go/types, type Alias struct #63223
pkg go/types, type Info struct, FileVersions map[*ast.File]string #62605
pkg go/version, func Compare(string, string) int #62039
pkg go/version, func IsValid(string) bool #62039
pkg go/version, func Lang(string) string #62039
pkg html/template, const ErrJSTemplate //deprecated #61619
pkg io, method (*SectionReader) Outer() (ReaderAt, int64, int64) #61870
pkg log/slog, func SetLogLoggerLevel(Level) Level #62418
pkg math/big, method (*Rat) FloatPrec() (int, bool) #50489
pkg math/rand/v2, func ExpFloat64() float64 #61716
pkg math/rand/v2, func Float32() float32 #61716
pkg math/rand/v2, func Float64() float64 #61716
pkg math/rand/v2, func Int() int #61716
pkg math/rand/v2, func Int32() int32 #61716
pkg math/rand/v2, func Int32N(int32) int32 #61716
pkg math/rand/v2, func Int64() int64 #61716
pkg math/rand/v2, func Int64N(int64) int64 #61716
pkg math/rand/v2, func IntN(int) int #61716
pkg math/rand/v2, func N[$0 intType]($0) $0 #61716
pkg math/rand/v2, func New(Source) *Rand #61716
pkg math/rand/v2, func NewChaCha8([32]uint8) *ChaCha8 #61716
pkg math/rand/v2, func NewPCG(uint64, uint64) *PCG #61716
pkg math/rand/v2, func NewZipf(*Rand, float64, float64, uint64) *Zipf #61716
pkg math/rand/v2, func NormFloat64() float64 #61716
pkg math/rand/v2, func Perm(int) []int #61716
pkg math/rand/v2, func Shuffle(int, func(int, int)) #61716
pkg math/rand/v2, func Uint32() uint32 #61716
pkg math/rand/v2, func Uint32N(uint32) uint32 #61716
pkg math/rand/v2, func Uint64() uint64 #61716
pkg math/rand/v2, func Uint64N(uint64) uint64 #61716
pkg math/rand/v2, func UintN(uint) uint #61716
pkg math/rand/v2, method (*ChaCha8) MarshalBinary() ([]uint8, error) #61716
pkg math/rand/v2, method (*ChaCha8) Seed([32]uint8) #61716
pkg math/rand/v2, method (*ChaCha8) Uint64() uint64 #61716
pkg math/rand/v2, method (*ChaCha8) UnmarshalBinary([]uint8) error #61716
pkg math/rand/v2, method (*PCG) MarshalBinary() ([]uint8, error) #61716
pkg math/rand/v2, method (*PCG) Seed(uint64, uint64) #61716
pkg math/rand/v2, method (*PCG) Uint64() uint64 #61716
pkg math/rand/v2, method (*PCG) UnmarshalBinary([]uint8) error #61716
pkg math/rand/v2, method (*Rand) ExpFloat64() float64 #61716
pkg math/rand/v2, method (*Rand) Float32() float32 #61716
pkg math/rand/v2, method (*Rand) Float64() float64 #61716
pkg math/rand/v2, method (*Rand) Int() int #61716
pkg math/rand/v2, method (*Rand) Int32() int32 #61716
pkg math/rand/v2, method (*Rand) Int32N(int32) int32 #61716
pkg math/rand/v2, method (*Rand) Int64() int64 #61716
pkg math/rand/v2, method (*Rand) Int64N(int64) int64 #61716
pkg math/rand/v2, method (*Rand) IntN(int) int #61716
pkg math/rand/v2, method (*Rand) NormFloat64() float64 #61716
pkg math/rand/v2, method (*Rand) Perm(int) []int #61716
pkg math/rand/v2, method (*Rand) Shuffle(int, func(int, int)) #61716
pkg math/rand/v2, method (*Rand) Uint32() uint32 #61716
pkg math/rand/v2, method (*Rand) Uint32N(uint32) uint32 #61716
pkg math/rand/v2, method (*Rand) Uint64() uint64 #61716
pkg math/rand/v2, method (*Rand) Uint64N(uint64) uint64 #61716
pkg math/rand/v2, method (*Rand) UintN(uint) uint #61716
pkg math/rand/v2, method (*Zipf) Uint64() uint64 #61716
pkg math/rand/v2, type ChaCha8 struct #61716
pkg math/rand/v2, type PCG struct #61716
pkg math/rand/v2, type Rand struct #61716
pkg math/rand/v2, type Source interface { Uint64 } #61716
pkg math/rand/v2, type Source interface, Uint64() uint64 #61716
pkg math/rand/v2, type Zipf struct #61716
pkg net, method (*TCPConn) WriteTo(io.Writer) (int64, error) #58808
pkg net/http, func FileServerFS(fs.FS) Handler #51971
pkg net/http, func NewFileTransportFS(fs.FS) RoundTripper #51971
pkg net/http, func ServeFileFS(ResponseWriter, *Request, fs.FS, string) #51971
pkg net/http, method (*Request) PathValue(string) string #61410
pkg net/http, method (*Request) SetPathValue(string, string) #61410
pkg net/netip, method (AddrPort) Compare(AddrPort) int #61642
pkg os, method (*File) WriteTo(io.Writer) (int64, error) #58808
pkg reflect, func PtrTo //deprecated #59599
pkg reflect, func TypeFor[$0 interface{}]() Type #60088
pkg slices, func Concat[$0 interface{ ~[]$1 }, $1 interface{}](...$0) $0 #56353
pkg syscall (linux-386), type SysProcAttr struct, PidFD *int #51246
pkg syscall (linux-386-cgo), type SysProcAttr struct, PidFD *int #51246
pkg syscall (linux-amd64), type SysProcAttr struct, PidFD *int #51246
pkg syscall (linux-amd64-cgo), type SysProcAttr struct, PidFD *int #51246
pkg syscall (linux-arm), type SysProcAttr struct, PidFD *int #51246
pkg syscall (linux-arm-cgo), type SysProcAttr struct, PidFD *int #51246
pkg testing/slogtest, func Run(*testing.T, func(*testing.T) slog.Handler, func(*testing.T) map[string]interface{}) #61758


@@ -1,158 +0,0 @@
pkg archive/tar, type FileInfoNames interface { Gname, IsDir, ModTime, Mode, Name, Size, Sys, Uname } #50102
pkg archive/tar, type FileInfoNames interface, Gname() (string, error) #50102
pkg archive/tar, type FileInfoNames interface, IsDir() bool #50102
pkg archive/tar, type FileInfoNames interface, ModTime() time.Time #50102
pkg archive/tar, type FileInfoNames interface, Mode() fs.FileMode #50102
pkg archive/tar, type FileInfoNames interface, Name() string #50102
pkg archive/tar, type FileInfoNames interface, Size() int64 #50102
pkg archive/tar, type FileInfoNames interface, Sys() interface{} #50102
pkg archive/tar, type FileInfoNames interface, Uname() (string, error) #50102
pkg crypto/tls, const QUICResumeSession = 8 #63691
pkg crypto/tls, const QUICResumeSession QUICEventKind #63691
pkg crypto/tls, const QUICStoreSession = 9 #63691
pkg crypto/tls, const QUICStoreSession QUICEventKind #63691
pkg crypto/tls, method (*ECHRejectionError) Error() string #63369
pkg crypto/tls, method (*QUICConn) StoreSession(*SessionState) error #63691
pkg crypto/tls, type Config struct, EncryptedClientHelloConfigList []uint8 #63369
pkg crypto/tls, type Config struct, EncryptedClientHelloRejectionVerify func(ConnectionState) error #63369
pkg crypto/tls, type ConnectionState struct, ECHAccepted bool #63369
pkg crypto/tls, type ECHRejectionError struct #63369
pkg crypto/tls, type ECHRejectionError struct, RetryConfigList []uint8 #63369
pkg crypto/tls, type QUICConfig struct, EnableSessionEvents bool #63691
pkg crypto/tls, type QUICEvent struct, SessionState *SessionState #63691
pkg crypto/tls, type QUICSessionTicketOptions struct, Extra [][]uint8 #63691
pkg crypto/x509, func ParseOID(string) (OID, error) #66249
pkg crypto/x509, method (*OID) UnmarshalBinary([]uint8) error #66249
pkg crypto/x509, method (*OID) UnmarshalText([]uint8) error #66249
pkg crypto/x509, method (OID) MarshalBinary() ([]uint8, error) #66249
pkg crypto/x509, method (OID) MarshalText() ([]uint8, error) #66249
pkg debug/elf, const PT_OPENBSD_NOBTCFI = 1705237480 #66054
pkg debug/elf, const PT_OPENBSD_NOBTCFI ProgType #66054
pkg debug/elf, const STT_GNU_IFUNC = 10 #66836
pkg debug/elf, const STT_GNU_IFUNC SymType #66836
pkg debug/elf, const STT_RELC = 8 #66836
pkg debug/elf, const STT_RELC SymType #66836
pkg debug/elf, const STT_SRELC = 9 #66836
pkg debug/elf, const STT_SRELC SymType #66836
pkg encoding/binary, func Append([]uint8, ByteOrder, interface{}) ([]uint8, error) #60023
pkg encoding/binary, func Decode([]uint8, ByteOrder, interface{}) (int, error) #60023
pkg encoding/binary, func Encode([]uint8, ByteOrder, interface{}) (int, error) #60023
pkg go/ast, func Preorder(Node) iter.Seq[Node] #66339
pkg go/types, method (*Alias) Origin() *Alias #67143
pkg go/types, method (*Alias) Rhs() Type #66559
pkg go/types, method (*Alias) SetTypeParams([]*TypeParam) #67143
pkg go/types, method (*Alias) TypeArgs() *TypeList #67143
pkg go/types, method (*Alias) TypeParams() *TypeParamList #67143
pkg go/types, method (*Func) Signature() *Signature #65772
pkg iter, func Pull2[$0 interface{}, $1 interface{}](Seq2[$0, $1]) (func() ($0, $1, bool), func()) #61897
pkg iter, func Pull[$0 interface{}](Seq[$0]) (func() ($0, bool), func()) #61897
pkg iter, type Seq2[$0 interface{}, $1 interface{}] func(func($0, $1) bool) #61897
pkg iter, type Seq[$0 interface{}] func(func($0) bool) #61897
pkg maps, func All[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0) iter.Seq2[$1, $2] #61900
pkg maps, func Collect[$0 comparable, $1 interface{}](iter.Seq2[$0, $1]) map[$0]$1 #61900
pkg maps, func Insert[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0, iter.Seq2[$1, $2]) #61900
pkg maps, func Keys[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0) iter.Seq[$1] #61900
pkg maps, func Values[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0) iter.Seq[$2] #61900
pkg math/rand/v2, func Uint() uint #61716
pkg math/rand/v2, method (*ChaCha8) Read([]uint8) (int, error) #67059
pkg math/rand/v2, method (*Rand) Uint() uint #61716
pkg net, method (*DNSError) Unwrap() error #63116
pkg net, method (*TCPConn) SetKeepAliveConfig(KeepAliveConfig) error #62254
pkg net, type DNSError struct, UnwrapErr error #63116
pkg net, type Dialer struct, KeepAliveConfig KeepAliveConfig #62254
pkg net, type KeepAliveConfig struct #62254
pkg net, type KeepAliveConfig struct, Count int #62254
pkg net, type KeepAliveConfig struct, Enable bool #62254
pkg net, type KeepAliveConfig struct, Idle time.Duration #62254
pkg net, type KeepAliveConfig struct, Interval time.Duration #62254
pkg net, type ListenConfig struct, KeepAliveConfig KeepAliveConfig #62254
pkg net/http, func ParseCookie(string) ([]*Cookie, error) #66008
pkg net/http, func ParseSetCookie(string) (*Cookie, error) #66008
pkg net/http, method (*Request) CookiesNamed(string) []*Cookie #61472
pkg net/http, type Cookie struct, Partitioned bool #62490
pkg net/http, type Cookie struct, Quoted bool #46443
pkg net/http, type Request struct, Pattern string #66405
pkg net/http/httptest, func NewRequestWithContext(context.Context, string, string, io.Reader) *http.Request #59473
pkg os, func CopyFS(string, fs.FS) error #62484
pkg path/filepath, func Localize(string) (string, error) #57151
pkg reflect, func SliceAt(Type, unsafe.Pointer, int) Value #61308
pkg reflect, method (Value) Seq() iter.Seq[Value] #66056
pkg reflect, method (Value) Seq2() iter.Seq2[Value, Value] #66056
pkg reflect, type Type interface, CanSeq() bool #66056
pkg reflect, type Type interface, CanSeq2() bool #66056
pkg reflect, type Type interface, OverflowComplex(complex128) bool #60427
pkg reflect, type Type interface, OverflowFloat(float64) bool #60427
pkg reflect, type Type interface, OverflowInt(int64) bool #60427
pkg reflect, type Type interface, OverflowUint(uint64) bool #60427
pkg runtime/debug, func SetCrashOutput(*os.File, CrashOptions) error #42888
pkg runtime/debug, type CrashOptions struct #67182
pkg slices, func All[$0 interface{ ~[]$1 }, $1 interface{}]($0) iter.Seq2[int, $1] #61899
pkg slices, func AppendSeq[$0 interface{ ~[]$1 }, $1 interface{}]($0, iter.Seq[$1]) $0 #61899
pkg slices, func Backward[$0 interface{ ~[]$1 }, $1 interface{}]($0) iter.Seq2[int, $1] #61899
pkg slices, func Chunk[$0 interface{ ~[]$1 }, $1 interface{}]($0, int) iter.Seq[$0] #53987
pkg slices, func Collect[$0 interface{}](iter.Seq[$0]) []$0 #61899
pkg slices, func Repeat[$0 interface{ ~[]$1 }, $1 interface{}]($0, int) $0 #65238
pkg slices, func SortedFunc[$0 interface{}](iter.Seq[$0], func($0, $0) int) []$0 #61899
pkg slices, func SortedStableFunc[$0 interface{}](iter.Seq[$0], func($0, $0) int) []$0 #61899
pkg slices, func Sorted[$0 cmp.Ordered](iter.Seq[$0]) []$0 #61899
pkg slices, func Values[$0 interface{ ~[]$1 }, $1 interface{}]($0) iter.Seq[$1] #61899
pkg structs, type HostLayout struct #66408
pkg sync, method (*Map) Clear() #61696
pkg sync/atomic, func AndInt32(*int32, int32) int32 #61395
pkg sync/atomic, func AndInt64(*int64, int64) int64 #61395
pkg sync/atomic, func AndUint32(*uint32, uint32) uint32 #61395
pkg sync/atomic, func AndUint64(*uint64, uint64) uint64 #61395
pkg sync/atomic, func AndUintptr(*uintptr, uintptr) uintptr #61395
pkg sync/atomic, func OrInt32(*int32, int32) int32 #61395
pkg sync/atomic, func OrInt64(*int64, int64) int64 #61395
pkg sync/atomic, func OrUint32(*uint32, uint32) uint32 #61395
pkg sync/atomic, func OrUint64(*uint64, uint64) uint64 #61395
pkg sync/atomic, func OrUintptr(*uintptr, uintptr) uintptr #61395
pkg sync/atomic, method (*Int32) And(int32) int32 #61395
pkg sync/atomic, method (*Int32) Or(int32) int32 #61395
pkg sync/atomic, method (*Int64) And(int64) int64 #61395
pkg sync/atomic, method (*Int64) Or(int64) int64 #61395
pkg sync/atomic, method (*Uint32) And(uint32) uint32 #61395
pkg sync/atomic, method (*Uint32) Or(uint32) uint32 #61395
pkg sync/atomic, method (*Uint64) And(uint64) uint64 #61395
pkg sync/atomic, method (*Uint64) Or(uint64) uint64 #61395
pkg sync/atomic, method (*Uintptr) And(uintptr) uintptr #61395
pkg sync/atomic, method (*Uintptr) Or(uintptr) uintptr #61395
pkg syscall (openbsd-386), const EBADMSG = 92 #67998
pkg syscall (openbsd-386), const ELAST = 95 #67998
pkg syscall (openbsd-386), const ENOTRECOVERABLE = 93 #67998
pkg syscall (openbsd-386), const ENOTRECOVERABLE Errno #67998
pkg syscall (openbsd-386), const EOWNERDEAD = 94 #67998
pkg syscall (openbsd-386), const EOWNERDEAD Errno #67998
pkg syscall (openbsd-386), const EPROTO = 95 #67998
pkg syscall (openbsd-386-cgo), const EBADMSG = 92 #67998
pkg syscall (openbsd-386-cgo), const ELAST = 95 #67998
pkg syscall (openbsd-386-cgo), const ENOTRECOVERABLE = 93 #67998
pkg syscall (openbsd-386-cgo), const ENOTRECOVERABLE Errno #67998
pkg syscall (openbsd-386-cgo), const EOWNERDEAD = 94 #67998
pkg syscall (openbsd-386-cgo), const EOWNERDEAD Errno #67998
pkg syscall (openbsd-386-cgo), const EPROTO = 95 #67998
pkg syscall (openbsd-amd64), const EBADMSG = 92 #67998
pkg syscall (openbsd-amd64), const ELAST = 95 #67998
pkg syscall (openbsd-amd64), const ENOTRECOVERABLE = 93 #67998
pkg syscall (openbsd-amd64), const ENOTRECOVERABLE Errno #67998
pkg syscall (openbsd-amd64), const EOWNERDEAD = 94 #67998
pkg syscall (openbsd-amd64), const EOWNERDEAD Errno #67998
pkg syscall (openbsd-amd64), const EPROTO = 95 #67998
pkg syscall (openbsd-amd64-cgo), const EBADMSG = 92 #67998
pkg syscall (openbsd-amd64-cgo), const ELAST = 95 #67998
pkg syscall (openbsd-amd64-cgo), const ENOTRECOVERABLE = 93 #67998
pkg syscall (openbsd-amd64-cgo), const ENOTRECOVERABLE Errno #67998
pkg syscall (openbsd-amd64-cgo), const EOWNERDEAD = 94 #67998
pkg syscall (openbsd-amd64-cgo), const EOWNERDEAD Errno #67998
pkg syscall (openbsd-amd64-cgo), const EPROTO = 95 #67998
pkg syscall (windows-386), const WSAENOPROTOOPT = 10042 #62254
pkg syscall (windows-386), const WSAENOPROTOOPT Errno #62254
pkg syscall (windows-amd64), const WSAENOPROTOOPT = 10042 #62254
pkg syscall (windows-amd64), const WSAENOPROTOOPT Errno #62254
pkg syscall, const EBADMSG Errno #67998
pkg syscall, const EPROTO Errno #67998
pkg unicode/utf16, func RuneLen(int32) int #44940
pkg unique, func Make[$0 comparable]($0) Handle[$0] #62483
pkg unique, method (Handle[$0]) Value() $0 #62483
pkg unique, type Handle[$0 comparable] struct #62483


@@ -1,2 +1,2 @@
branch: release-branch.go1.23
branch: dev.inline
parent-branch: master


@@ -1,75 +0,0 @@
# Release Notes
The `initial` and `next` subdirectories of this directory are for release notes.
## For developers
Release notes should be added to `next` by editing existing files or creating
new files. **Do not add RELNOTE=yes comments in CLs.** Instead, add a file to
the CL (or ask the author to do so).
At the end of the development cycle, the files will be merged by being
concatenated in sorted order by pathname. Files in the directory matching the
glob "*stdlib/*minor" are treated specially. They should be in subdirectories
corresponding to standard library package paths, and headings for those package
paths will be generated automatically.
Files in this repo's `api/next` directory must have corresponding files in
`doc/next/*stdlib/*minor`.
The files should be in the subdirectory for the package with the new
API, and should be named after the issue number of the API proposal.
For example, if the directory `6-stdlib/99-minor` is present,
then an `api/next` file with the line
pkg net/http, function F #12345
should have a corresponding file named `doc/next/6-stdlib/99-minor/net/http/12345.md`.
At a minimum, that file should contain either a full sentence or a TODO,
ideally referring to a person with the responsibility to complete the note.
If your CL addresses an accepted proposal, mention the proposal issue number in
your release note in the form `/issue/NUMBER`. A link to the issue in the text
will have this form (see below). If you don't want to mention the issue in the
text, add it as a comment:
```
<!-- go.dev/issue/12345 -->
```
If an accepted proposal is mentioned in a CL but not in the release notes, it will be
flagged as a TODO by the automated tooling. That is true even for proposals that add API.
Use the following forms in your markdown:
[http.Request] # symbol documentation; auto-linked as in Go doc strings
[Request] # short form, for symbols in the package being documented
[net/http] # package link
[#12345](/issue/12345) # GitHub issues
[CL 6789](/cl/6789) # Gerrit changelists
To preview `next` content in merged form using a local instance of the website, run:
```
go run golang.org/x/website/cmd/golangorg@latest -goroot=..
```
Then open http://localhost:6060/doc/next. Refresh the page to see your latest edits.
## For the release team
The `relnote` tool, at `golang.org/x/build/cmd/relnote`, operates on the files
in `doc/next`.
As a release cycle nears completion, run `relnote todo` to get a list of
unfinished release note work.
To prepare the release notes for a release, run `relnote generate`.
That will merge the `.md` files in `next` into a single file.
Atomically (as close to it as possible) add that file to `_content/doc` directory
of the website repository and remove the `doc/next` directory in this repository.
To begin the next release development cycle, populate the contents of `next`
with those of `initial`. From the repo root:
> cd doc
> cp -r initial/* next
Then edit `next/1-intro.md` to refer to the next version.


@@ -464,23 +464,6 @@ Function is the outermost frame of the call stack. Traceback should stop at this
</li>
</ul>
<h3 id="special-instructions">Special instructions</h3>
<p>
The <code>PCALIGN</code> pseudo-instruction is used to indicate that the next instruction should be aligned
to a specified boundary by padding with no-op instructions.
</p>
<p>
It is currently supported on arm64, amd64, ppc64, loong64 and riscv64.
For example, the start of the <code>MOVD</code> instruction below is aligned to 32 bytes:
<pre>
PCALIGN $32
MOVD $2, R0
</pre>
</p>
<h3 id="data-offsets">Interacting with Go types and constants</h3>
<p>


@@ -7,11 +7,8 @@
<h2 id="Introduction">Introduction</h2>
<p>
This is the reference manual for the Go programming language as it was for
language version 1.17, in October 2021, before the introduction of generics.
It is provided for historical interest.
The current reference manual can be found <a href="/doc/go_spec.html">here</a>.
For more information and other documents, see <a href="/">go.dev</a>.
This is a reference manual for the Go programming language. For
more information and other documents, see <a href="/">golang.org</a>.
</p>
<p>
@@ -656,7 +653,7 @@ and are discussed in that section.
<p>
Numeric constants represent exact values of arbitrary precision and do not overflow.
Consequently, there are no constants denoting the IEEE 754 negative zero, infinity,
Consequently, there are no constants denoting the IEEE-754 negative zero, infinity,
and not-a-number values.
</p>
@@ -882,8 +879,8 @@ int16 the set of all signed 16-bit integers (-32768 to 32767)
int32 the set of all signed 32-bit integers (-2147483648 to 2147483647)
int64 the set of all signed 64-bit integers (-9223372036854775808 to 9223372036854775807)
float32 the set of all IEEE 754 32-bit floating-point numbers
float64 the set of all IEEE 754 64-bit floating-point numbers
float32 the set of all IEEE-754 32-bit floating-point numbers
float64 the set of all IEEE-754 64-bit floating-point numbers
complex64 the set of all complex numbers with float32 real and imaginary parts
complex128 the set of all complex numbers with float64 real and imaginary parts
@@ -917,7 +914,7 @@ are required when different numeric types are mixed in an expression
or assignment. For instance, <code>int32</code> and <code>int</code>
are not the same type even though they may have the same size on a
particular architecture.
</p>
<h3 id="String_types">String types</h3>
@@ -1454,7 +1451,6 @@ maps grow to accommodate the number of items
stored in them, with the exception of <code>nil</code> maps.
A <code>nil</code> map is equivalent to an empty map except that no elements
may be added.
</p>
<h3 id="Channel_types">Channel types</h3>
@@ -3645,8 +3641,6 @@ As the <code>++</code> and <code>--</code> operators form
statements, not expressions, they fall
outside the operator hierarchy.
As a consequence, statement <code>*p++</code> is the same as <code>(*p)++</code>.
</p>
<p>
There are five precedence levels for binary operators.
Multiplication operators bind strongest, followed by addition
@@ -3814,7 +3808,7 @@ For floating-point and complex numbers,
<code>+x</code> is the same as <code>x</code>,
while <code>-x</code> is the negation of <code>x</code>.
The result of a floating-point or complex division by zero is not specified beyond the
IEEE 754 standard; whether a <a href="#Run_time_panics">run-time panic</a>
IEEE-754 standard; whether a <a href="#Run_time_panics">run-time panic</a>
occurs is implementation-specific.
</p>
@@ -3904,7 +3898,7 @@ These terms and the result of the comparisons are defined as follows:
<li>
Floating-point values are comparable and ordered,
as defined by the IEEE 754 standard.
as defined by the IEEE-754 standard.
</li>
<li>
@@ -4252,7 +4246,7 @@ When converting an integer or floating-point number to a floating-point type,
or a complex number to another complex type, the result value is rounded
to the precision specified by the destination type.
For instance, the value of a variable <code>x</code> of type <code>float32</code>
may be stored using additional precision beyond that of an IEEE 754 32-bit number,
may be stored using additional precision beyond that of an IEEE-754 32-bit number,
but float32(x) represents the result of rounding <code>x</code>'s value to
32-bit precision. Similarly, <code>x + 0.1</code> may use more than 32 bits
of precision, but <code>float32(x + 0.1)</code> does not.
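The conversion-rounding rule in the hunk above can be seen with a minimal Go sketch (an illustration, not part of the diff): rounding the same untyped constant to float32 and to float64 yields two different values, so widening the float32 result back to float64 does not recover the float64 rounding.

```
package main

import "fmt"

func main() {
	const c = 0.1 // untyped constant, exact value

	f64 := float64(c) // rounded to 64-bit precision
	f32 := float32(c) // rounded to 32-bit precision

	// The two roundings differ, so the comparison is false.
	fmt.Println(f64 == float64(f32)) // false
	fmt.Printf("%.20g\n%.20g\n", f64, float64(f32))
}
```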

doc/go1.21.html Normal file

File diff suppressed because it is too large.

View File

@@ -98,12 +98,12 @@ which in turn are made up of memory operations.
A <i>memory operation</i> is modeled by four details:
</p>
<ul>
<li>its kind, indicating whether it is an ordinary data read, an ordinary data write,
or a <i>synchronizing operation</i> such as an atomic data access,
a mutex operation, or a channel operation,</li>
<li>its location in the program,</li>
<li>the memory location or variable being accessed, and</li>
<li>the values read or written by the operation.</li>
<li>its kind, indicating whether it is an ordinary data read, an ordinary data write,
or a <i>synchronizing operation</i> such as an atomic data access,
a mutex operation, or a channel operation,
<li>its location in the program,
<li>the memory location or variable being accessed, and
<li>the values read or written by the operation.
</ul>
<p>
Some memory operations are <i>read-like</i>, including read, atomic read, mutex lock, and channel receive.
@@ -159,11 +159,10 @@ union of the sequenced before and synchronized before relations.
For an ordinary (non-synchronizing) data read <i>r</i> on a memory location <i>x</i>,
<i>W</i>(<i>r</i>) must be a write <i>w</i> that is <i>visible</i> to <i>r</i>,
where visible means that both of the following hold:
</p>
<ol>
<li><i>w</i> happens before <i>r</i>.</li>
<li><i>w</i> does not happen before any other write <i>w'</i> (to <i>x</i>) that happens before <i>r</i>.</li>
<li><i>w</i> happens before <i>r</i>.
<li><i>w</i> does not happen before any other write <i>w'</i> (to <i>x</i>) that happens before <i>r</i>.
</ol>
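For illustration, a minimal Go sketch of these two conditions, assuming the synchronizing operation is a channel close:

```go
package main

import "fmt"

var x string

func main() {
	done := make(chan struct{})
	go func() {
		x = "hello" // the write w to x
		close(done) // synchronizing operation
	}()
	<-done         // close(done) happens before this receive completes
	fmt.Println(x) // the read r; w is visible, so this prints "hello"
}
```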
<p>
@@ -222,7 +221,7 @@ for programs that do contain races.
</p>
<p>
Any implementation can, upon detecting a data race,
First, any implementation can, upon detecting a data race,
report the race and halt execution of the program.
Implementations using ThreadSanitizer
(accessed with “<code>go</code> <code>build</code> <code>-race</code>”)
@@ -230,18 +229,7 @@ do exactly this.
</p>
<p>
A read of an array, struct, or complex number
may be implemented as a read of each individual sub-value
(array element, struct field, or real/imaginary component),
in any order.
Similarly, a write of an array, struct, or complex number
may be implemented as a write of each individual sub-value,
in any order.
</p>
<p>
A read <i>r</i> of a memory location <i>x</i>
holding a value
Otherwise, a read <i>r</i> of a memory location <i>x</i>
that is not larger than a machine word must observe
some write <i>w</i> such that <i>r</i> does not happen before <i>w</i>
and there is no write <i>w'</i> such that <i>w</i> happens before <i>w'</i>

File diff suppressed because it is too large

View File

@@ -88,38 +88,14 @@ Because this method of setting GODEBUG defaults was introduced only in Go 1.21,
programs listing versions of Go earlier than Go 1.20 are configured to match Go 1.20,
not the older version.
To override these defaults, starting in Go 1.23, the work module's `go.mod`
or the workspace's `go.work` can list one or more `godebug` lines:
godebug (
default=go1.21
panicnil=1
asynctimerchan=0
)
The special key `default` indicates a Go version to take unspecified
settings from. This allows setting the GODEBUG defaults separately
from the Go language version in the module.
In this example, the program is asking for Go 1.21 semantics and
then asking for the old pre-Go 1.21 `panic(nil)` behavior and the
new Go 1.23 `asynctimerchan=0` behavior.
Only the work module's `go.mod` is consulted for `godebug` directives.
Any directives in required dependency modules are ignored.
It is an error to list a `godebug` with an unrecognized setting.
(Toolchains older than Go 1.23 reject all `godebug` lines, since they do not
understand `godebug` at all.)
The defaults from the `go` and `godebug` lines apply to all main
packages that are built. For more fine-grained control,
starting in Go 1.21, a main package's source files
To override these defaults, a main package's source files
can include one or more `//go:debug` directives at the top of the file
(preceding the `package` statement).
The `godebug` lines in the previous example would be written:
Continuing the `panicnil` example, if the module or workspace is updated
to say `go` `1.21`, the program can opt back into the old `panic(nil)`
behavior by including this directive:
//go:debug default=go1.21
//go:debug panicnil=1
//go:debug asynctimerchan=0
Starting in Go 1.21, the Go toolchain treats a `//go:debug` directive
with an unrecognized GODEBUG setting as an invalid program.
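For illustration, a sketch of how such directives sit at the top of a main package's source file; the settings shown are only examples:

```go
// Example main package opting into specific GODEBUG defaults.
// The //go:debug directives must appear before the package clause.

//go:debug panicnil=1
//go:debug asynctimerchan=0

package main

func main() {
	// ...
}
```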
@@ -150,133 +126,6 @@ for example,
see the [runtime documentation](/pkg/runtime#hdr-Environment_Variables)
and the [go command documentation](/cmd/go#hdr-Build_and_test_caching).
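For example, a hedged sketch of checking how often a program relied on a non-default GODEBUG behavior through the `/godebug/non-default-behavior/<name>:events` metrics exposed by runtime/metrics (the `panicnil` name here is just an example):

```go
package main

import (
	"fmt"
	"runtime/metrics"
)

func main() {
	sample := []metrics.Sample{{Name: "/godebug/non-default-behavior/panicnil:events"}}
	metrics.Read(sample)
	if sample[0].Value.Kind() == metrics.KindUint64 {
		fmt.Println("non-default panicnil behaviors:", sample[0].Value.Uint64())
	}
}
```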
### Go 1.23
Go 1.23 changed the channels created by package time to be unbuffered
(synchronous), which makes correct use of the [`Timer.Stop`](/pkg/time/#Timer.Stop)
and [`Timer.Reset`](/pkg/time/#Timer.Reset) method results much easier.
The [`asynctimerchan` setting](/pkg/time/#NewTimer) disables this change.
There are no runtime metrics for this change.
This setting may be removed in a future release, Go 1.27 at the earliest.
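A short sketch of the pattern this change simplifies, assuming a select between a timer and another channel:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	in := make(chan string)
	go func() {
		time.Sleep(50 * time.Millisecond)
		in <- "message"
	}()

	t := time.NewTimer(200 * time.Millisecond)
	defer t.Stop()

	select {
	case <-t.C:
		fmt.Println("timed out")
	case msg := <-in:
		fmt.Println("got", msg)
		// With the unbuffered timer channels, Reset can be used here
		// without first draining t.C; under asynctimerchan=1 a stale
		// value could remain buffered in the channel.
		t.Reset(200 * time.Millisecond)
	}
}
```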
Go 1.23 changed the mode bits reported by [`os.Lstat`](/pkg/os#Lstat) and [`os.Stat`](/pkg/os#Stat)
for reparse points, which can be controlled with the `winsymlink` setting.
As of Go 1.23 (`winsymlink=1`), mount points no longer have [`os.ModeSymlink`](/pkg/os#ModeSymlink)
set, and reparse points that are not symlinks, Unix sockets, or dedup files now
always have [`os.ModeIrregular`](/pkg/os#ModeIrregular) set. As a result of these changes,
[`filepath.EvalSymlinks`](/pkg/path/filepath#EvalSymlinks) no longer evaluates
mount points, which was a source of many inconsistencies and bugs.
At previous versions (`winsymlink=0`), mount points are treated as symlinks,
and other reparse points with non-default [`os.ModeType`](/pkg/os#ModeType) bits
(such as [`os.ModeDir`](/pkg/os#ModeDir)) do not have the `ModeIrregular` bit set.
Go 1.23 changed [`os.Readlink`](/pkg/os#Readlink) and [`filepath.EvalSymlinks`](/pkg/path/filepath#EvalSymlinks)
to avoid trying to normalize volumes to drive letters, which was not always even possible.
This behavior is controlled by the `winreadlinkvolume` setting.
For Go 1.23, it defaults to `winreadlinkvolume=1`.
Previous versions default to `winreadlinkvolume=0`.
Go 1.23 enabled the experimental post-quantum key exchange mechanism
X25519Kyber768Draft00 by default. The default can be reverted using the
[`tlskyber` setting](/pkg/crypto/tls/#Config.CurvePreferences).
Go 1.23 changed the behavior of
[crypto/x509.ParseCertificate](/pkg/crypto/x509/#ParseCertificate) to reject
serial numbers that are negative. This change can be reverted with
the [`x509negativeserial` setting](/pkg/crypto/x509/#ParseCertificate).
Go 1.23 re-enabled support in html/template for ECMAScript 6 template literals by default.
The [`jstmpllitinterp` setting](/pkg/html/template#hdr-Security_Model) no longer has
any effect.
Go 1.23 changed the default TLS cipher suites used by clients and servers when
not explicitly configured, removing 3DES cipher suites. The default can be reverted
using the [`tls3des` setting](/pkg/crypto/tls/#Config.CipherSuites).
Go 1.23 changed the behavior of [`tls.X509KeyPair`](/pkg/crypto/tls#X509KeyPair)
and [`tls.LoadX509KeyPair`](/pkg/crypto/tls#LoadX509KeyPair) to populate the
Leaf field of the returned [`tls.Certificate`](/pkg/crypto/tls#Certificate).
This behavior is controlled by the `x509keypairleaf` setting. For Go 1.23, it
defaults to `x509keypairleaf=1`. Previous versions default to
`x509keypairleaf=0`.
Go 1.23 changed
[`net/http.ServeContent`](/pkg/net/http#ServeContent),
[`net/http.ServeFile`](/pkg/net/http#ServeFile), and
[`net/http.ServeFS`](/pkg/net/http#ServeFS) to
remove Cache-Control, Content-Encoding, Etag, and Last-Modified headers
when serving an error. This behavior is controlled by
the [`httpservecontentkeepheaders` setting](/pkg/net/http#ServeContent).
Using `httpservecontentkeepheaders=1` restores the pre-Go 1.23 behavior.
### Go 1.22
Go 1.22 adds a configurable limit to control the maximum acceptable RSA key size
that can be used in TLS handshakes, controlled by the [`tlsmaxrsasize` setting](/pkg/crypto/tls#Conn.Handshake).
The default is tlsmaxrsasize=8192, limiting RSA to 8192-bit keys. To avoid
denial of service attacks, this setting and default was backported to Go
1.19.13, Go 1.20.8, and Go 1.21.1.
Go 1.22 made it an error for a request or response read by a net/http
client or server to have an empty Content-Length header.
This behavior is controlled by the `httplaxcontentlength` setting.
Go 1.22 changed the behavior of ServeMux to accept extended
patterns and unescape both patterns and request paths by segment.
This behavior can be controlled by the
[`httpmuxgo121` setting](/pkg/net/http/#ServeMux).
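A minimal sketch of the extended patterns this setting controls (method-qualified patterns and path wildcards, introduced in Go 1.22):

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	mux := http.NewServeMux()
	// Extended pattern: method-qualified with a path wildcard.
	// With httpmuxgo121=1 the old matching rules apply instead.
	mux.HandleFunc("GET /items/{id}", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "item %s\n", r.PathValue("id"))
	})
	log.Fatal(http.ListenAndServe("localhost:8080", mux))
}
```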
Go 1.22 added the [Alias type](/pkg/go/types#Alias) to [go/types](/pkg/go/types)
for the explicit representation of [type aliases](/ref/spec#Type_declarations).
Whether the type checker produces `Alias` types or not is controlled by the
[`gotypesalias` setting](/pkg/go/types#Alias).
For Go 1.22 it defaults to `gotypesalias=0`.
For Go 1.23, `gotypesalias=1` will become the default.
This setting will be removed in a future release, Go 1.27 at the earliest.
Go 1.22 changed the default minimum TLS version supported by both servers
and clients to TLS 1.2. The default can be reverted to TLS 1.0 using the
[`tls10server` setting](/pkg/crypto/tls/#Config).
Go 1.22 changed the default TLS cipher suites used by clients and servers when
not explicitly configured, removing the cipher suites which used RSA based key
exchange. The default can be reverted using the [`tlsrsakex` setting](/pkg/crypto/tls/#Config).
Go 1.22 disabled
[`ConnectionState.ExportKeyingMaterial`](/pkg/crypto/tls/#ConnectionState.ExportKeyingMaterial)
when the connection supports neither TLS 1.3 nor Extended Master Secret
(implemented in Go 1.21). It can be reenabled with the [`tlsunsafeekm`
setting](/pkg/crypto/tls/#ConnectionState.ExportKeyingMaterial).
Go 1.22 changed how the runtime interacts with transparent huge pages on Linux.
In particular, a common default Linux kernel configuration can result in
significant memory overheads, and Go 1.22 no longer works around this default.
To work around this issue without adjusting kernel settings, transparent huge
pages can be disabled for Go memory with the
[`disablethp` setting](/pkg/runtime#hdr-Environment_Variables).
This behavior was backported to Go 1.21.1, but the setting is only available
starting with Go 1.21.6.
This setting may be removed in a future release, and users impacted by this issue
should adjust their Linux configuration according to the recommendations in the
[GC guide](/doc/gc-guide#Linux_transparent_huge_pages), or switch to a Linux
distribution that disables transparent huge pages altogether.
Go 1.22 added contention on runtime-internal locks to the [`mutex`
profile](/pkg/runtime/pprof#Profile). Contention on these locks is always
reported at `runtime._LostContendedRuntimeLock`. Complete stack traces of
runtime locks can be enabled with the [`runtimecontentionstacks`
setting](/pkg/runtime#hdr-Environment_Variable). These stack traces have
non-standard semantics, see setting documentation for details.
Go 1.22 added a new [`crypto/x509.Certificate`](/pkg/crypto/x509/#Certificate)
field, [`Policies`](/pkg/crypto/x509/#Certificate.Policies), which supports
certificate policy OIDs with components larger than 31 bits. By default this
field is only used during parsing, when it is populated with policy OIDs, but
not used during marshaling. It can be used to marshal these larger OIDs, instead
of the existing PolicyIdentifiers field, by using the
[`x509usepolicies` setting](/pkg/crypto/x509/#CreateCertificate).
### Go 1.21
Go 1.21 made it a run-time error to call `panic` with a nil interface value,
@@ -293,10 +142,6 @@ forms, controlled by the
respectively.
This behavior was backported to Go 1.19.8+ and Go 1.20.3+.
Go 1.21 adds the support of Multipath TCP but it is only used if the application
explicitly asked for it. This behavior can be controlled by the
[`multipathtcp` setting](/pkg/net#Dialer.SetMultipathTCP).
There is no plan to remove any of these settings.
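A hedged sketch of a client explicitly asking for Multipath TCP; the address is a placeholder, and whether MPTCP is actually used still depends on operating system support:

```go
package main

import (
	"log"
	"net"
)

func main() {
	var d net.Dialer
	d.SetMultipathTCP(true) // explicit opt-in added in Go 1.21
	conn, err := d.Dial("tcp", "example.com:80")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	log.Println("connected to", conn.RemoteAddr())
}
```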
### Go 1.20
@@ -331,17 +176,10 @@ Go 1.19 made it an error for path lookups to resolve to binaries in the current
controlled by the [`execerrdot` setting](/pkg/os/exec#hdr-Executables_in_the_current_directory).
There is no plan to remove this setting.
Go 1.19 started sending EDNS0 additional headers on DNS requests.
This can reportedly break the DNS server provided on some routers,
such as CenturyLink Zyxel C3000Z.
This can be changed by the [`netedns0` setting](/pkg/net#hdr-Name_Resolution).
This setting is available in Go 1.21.12, Go 1.22.5, Go 1.23, and later.
There is no plan to remove this setting.
### Go 1.18
Go 1.18 removed support for SHA1 in most X.509 certificates,
controlled by the [`x509sha1` setting](/pkg/crypto/x509#InsecureAlgorithmError).
controlled by the [`x509sha1` setting](/crypto/x509#InsecureAlgorithmError).
This setting will be removed in a future release, Go 1.22 at the earliest.
### Go 1.10

View File

@@ -1,14 +0,0 @@
<!--
NOTE: In this document and others in this directory, the convention is to
set fixed-width phrases with non-fixed-width spaces, as in
`hello` `world`.
-->
<style>
main ul li { margin: 0.5em 0; }
</style>
## DRAFT RELEASE NOTES — Introduction to Go 1.N {#introduction}
**Go 1.N is not yet released. These are work-in-progress release notes.
Go 1.N is expected to be released in {Month} {Year}.**

View File

@@ -1,3 +0,0 @@
## Changes to the language {#language}

View File

@@ -1,6 +0,0 @@
## Tools {#tools}
### Go command {#go-command}
### Cgo {#cgo}

View File

@@ -1 +0,0 @@
## Runtime {#runtime}

View File

@@ -1,7 +0,0 @@
## Compiler {#compiler}
## Assembler {#assembler}
## Linker {#linker}

View File

@@ -1,2 +0,0 @@
## Standard library {#library}

View File

@@ -1,3 +0,0 @@
### Minor changes to the library {#minor_library_changes}

View File

@@ -1 +0,0 @@
API changes and other small changes to the standard library go here.

View File

@@ -1,2 +0,0 @@
## Ports {#ports}

View File

@@ -31,7 +31,7 @@ import (
)
func usage() {
fmt.Fprintf(os.Stderr, "usage: go run mkzip.go zoneinfo.zip\n")
fmt.Fprintf(os.Stderr, "usage: go run mkzip.go ../../zoneinfo.zip\n")
os.Exit(2)
}

View File

@@ -24,8 +24,8 @@
# in the CL match the update.bash in the CL.
# Versions to use.
CODE=2024a
DATA=2024a
CODE=2023c
DATA=2023c
set -e

Binary file not shown.

View File

@@ -3,4 +3,4 @@
// tests and tools.
module misc
go 1.22
go 1.21

View File

@@ -204,7 +204,6 @@ func runMain() (int, error) {
`; export GOPROXY=` + os.Getenv("GOPROXY") +
`; export GOCACHE="` + deviceRoot + `/gocache"` +
`; export PATH="` + deviceGoroot + `/bin":$PATH` +
`; export HOME="` + deviceRoot + `/home"` +
`; cd "` + deviceCwd + `"` +
"; '" + deviceBin + "' " + strings.Join(os.Args[2:], " ")
code, err := adbRun(cmd)

View File

@@ -1,22 +1,19 @@
#!/bin/sh
# This script configures clang to target the iOS simulator. If you'd like to
# build for real iOS devices, change SDK to "iphoneos" and PLATFORM to "ios".
# This uses the latest available iOS SDK, which is recommended. To select a
# specific SDK, run 'xcodebuild -showsdks' to see the available SDKs and replace
# iphonesimulator with one of them.
SDK=iphonesimulator
PLATFORM=ios-simulator
# This uses the latest available iOS SDK, which is recommended.
# To select a specific SDK, run 'xcodebuild -showsdks'
# to see the available SDKs and replace iphoneos with one of them.
if [ "$GOARCH" == "arm64" ]; then
SDK=iphoneos
PLATFORM=ios
CLANGARCH="arm64"
else
SDK=iphonesimulator
PLATFORM=ios-simulator
CLANGARCH="x86_64"
fi
SDK_PATH=`xcrun --sdk $SDK --show-sdk-path`
export IPHONEOS_DEPLOYMENT_TARGET=5.1
# cmd/cgo doesn't support llvm-gcc-4.2, so we have to use clang.
CLANG=`xcrun --sdk $SDK --find clang`

View File

@@ -3,6 +3,7 @@
// license that can be found in the LICENSE file.
//go:build ignore
// +build ignore
// detect attempts to autodetect the correct
// values of the environment variables

View File

@@ -1,21 +1,44 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This program can be used as go_ios_$GOARCH_exec by the Go tool. It executes
// binaries on the iOS Simulator using the XCode toolchain.
// This program can be used as go_ios_$GOARCH_exec by the Go tool.
// It executes binaries on an iOS device using the XCode toolchain
// and the ios-deploy program: https://github.com/phonegap/ios-deploy
//
// This script supports an extra flag, -lldb, that pauses execution
// just before the main program begins and allows the user to control
// the remote lldb session. This flag is appended to the end of the
// script's arguments and is not passed through to the underlying
// binary.
//
// This script requires that three environment variables be set:
//
// GOIOS_DEV_ID: The codesigning developer id or certificate identifier
// GOIOS_APP_ID: The provisioning app id prefix. Must support wildcard app ids.
// GOIOS_TEAM_ID: The team id that owns the app id prefix.
//
// $GOROOT/misc/ios contains a script, detect.go, that attempts to autodetect these.
package main
import (
"bytes"
"encoding/xml"
"errors"
"fmt"
"go/build"
"io"
"log"
"net"
"os"
"os/exec"
"os/signal"
"path/filepath"
"runtime"
"strconv"
"strings"
"syscall"
"time"
)
const debug = false
@@ -87,8 +110,18 @@ func runMain() (int, error) {
return 1, err
}
err = runOnSimulator(appdir)
if goarch := os.Getenv("GOARCH"); goarch == "arm64" {
err = runOnDevice(appdir)
} else {
err = runOnSimulator(appdir)
}
if err != nil {
// If the lldb driver completed with an exit code, use that.
if err, ok := err.(*exec.ExitError); ok {
if ws, ok := err.Sys().(interface{ ExitStatus() int }); ok {
return ws.ExitStatus(), nil
}
}
return 1, err
}
return 0, nil
@@ -102,6 +135,61 @@ func runOnSimulator(appdir string) error {
return runSimulator(appdir, bundleID, os.Args[2:])
}
func runOnDevice(appdir string) error {
// e.g. B393DDEB490947F5A463FD074299B6C0AXXXXXXX
devID = getenv("GOIOS_DEV_ID")
// e.g. Z8B3JBXXXX.org.golang.sample, Z8B3JBXXXX prefix is available at
// https://developer.apple.com/membercenter/index.action#accountSummary as Team ID.
appID = getenv("GOIOS_APP_ID")
// e.g. Z8B3JBXXXX, available at
// https://developer.apple.com/membercenter/index.action#accountSummary as Team ID.
teamID = getenv("GOIOS_TEAM_ID")
// Device IDs as listed with ios-deploy -c.
deviceID = os.Getenv("GOIOS_DEVICE_ID")
if _, id, ok := strings.Cut(appID, "."); ok {
bundleID = id
}
if err := signApp(appdir); err != nil {
return err
}
if err := uninstallDevice(bundleID); err != nil {
return err
}
if err := installDevice(appdir); err != nil {
return err
}
if err := mountDevImage(); err != nil {
return err
}
// Kill any hanging debug bridges that might take up port 3222.
exec.Command("killall", "idevicedebugserverproxy").Run()
closer, err := startDebugBridge()
if err != nil {
return err
}
defer closer()
return runDevice(appdir, bundleID, os.Args[2:])
}
func getenv(envvar string) string {
s := os.Getenv(envvar)
if s == "" {
log.Fatalf("%s not set\nrun $GOROOT/misc/ios/detect.go to attempt to autodetect", envvar)
}
return s
}
func assembleApp(appdir, bin string) error {
if err := os.MkdirAll(appdir, 0755); err != nil {
return err
@@ -129,6 +217,236 @@ func assembleApp(appdir, bin string) error {
return nil
}
func signApp(appdir string) error {
entitlementsPath := filepath.Join(tmpdir, "Entitlements.plist")
cmd := exec.Command(
"codesign",
"-f",
"-s", devID,
"--entitlements", entitlementsPath,
appdir,
)
if debug {
log.Println(strings.Join(cmd.Args, " "))
}
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
return fmt.Errorf("codesign: %v", err)
}
return nil
}
// mountDevImage ensures a developer image is mounted on the device.
// The image contains the device lldb server for idevicedebugserverproxy
// to connect to.
func mountDevImage() error {
// Check for existing mount.
cmd := idevCmd(exec.Command("ideviceimagemounter", "-l", "-x"))
out, err := cmd.CombinedOutput()
if err != nil {
os.Stderr.Write(out)
return fmt.Errorf("ideviceimagemounter: %v", err)
}
var info struct {
Dict struct {
Data []byte `xml:",innerxml"`
} `xml:"dict"`
}
if err := xml.Unmarshal(out, &info); err != nil {
return fmt.Errorf("mountDevImage: failed to decode mount information: %v", err)
}
dict, err := parsePlistDict(info.Dict.Data)
if err != nil {
return fmt.Errorf("mountDevImage: failed to parse mount information: %v", err)
}
if dict["ImagePresent"] == "true" && dict["Status"] == "Complete" {
return nil
}
// Some devices only give us an ImageSignature key.
if _, exists := dict["ImageSignature"]; exists {
return nil
}
// No image is mounted. Find a suitable image.
imgPath, err := findDevImage()
if err != nil {
return err
}
sigPath := imgPath + ".signature"
cmd = idevCmd(exec.Command("ideviceimagemounter", imgPath, sigPath))
if out, err := cmd.CombinedOutput(); err != nil {
os.Stderr.Write(out)
return fmt.Errorf("ideviceimagemounter: %v", err)
}
return nil
}
// findDevImage uses the device iOS version and build to locate a suitable
// developer image.
func findDevImage() (string, error) {
cmd := idevCmd(exec.Command("ideviceinfo"))
out, err := cmd.Output()
if err != nil {
return "", fmt.Errorf("ideviceinfo: %v", err)
}
var iosVer, buildVer string
lines := bytes.Split(out, []byte("\n"))
for _, line := range lines {
key, val, ok := strings.Cut(string(line), ": ")
if !ok {
continue
}
switch key {
case "ProductVersion":
iosVer = val
case "BuildVersion":
buildVer = val
}
}
if iosVer == "" || buildVer == "" {
return "", errors.New("failed to parse ideviceinfo output")
}
verSplit := strings.Split(iosVer, ".")
if len(verSplit) > 2 {
// Developer images are specific to major.minor ios version.
// Cut off the patch version.
iosVer = strings.Join(verSplit[:2], ".")
}
sdkBase := "/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/DeviceSupport"
patterns := []string{fmt.Sprintf("%s (%s)", iosVer, buildVer), fmt.Sprintf("%s (*)", iosVer), fmt.Sprintf("%s*", iosVer)}
for _, pattern := range patterns {
matches, err := filepath.Glob(filepath.Join(sdkBase, pattern, "DeveloperDiskImage.dmg"))
if err != nil {
return "", fmt.Errorf("findDevImage: %v", err)
}
if len(matches) > 0 {
return matches[0], nil
}
}
return "", fmt.Errorf("failed to find matching developer image for iOS version %s build %s", iosVer, buildVer)
}
// startDebugBridge ensures that the idevicedebugserverproxy runs on
// port 3222.
func startDebugBridge() (func(), error) {
errChan := make(chan error, 1)
cmd := idevCmd(exec.Command("idevicedebugserverproxy", "3222"))
var stderr bytes.Buffer
cmd.Stderr = &stderr
if err := cmd.Start(); err != nil {
return nil, fmt.Errorf("idevicedebugserverproxy: %v", err)
}
go func() {
if err := cmd.Wait(); err != nil {
if _, ok := err.(*exec.ExitError); ok {
errChan <- fmt.Errorf("idevicedebugserverproxy: %s", stderr.Bytes())
} else {
errChan <- fmt.Errorf("idevicedebugserverproxy: %v", err)
}
}
errChan <- nil
}()
closer := func() {
cmd.Process.Kill()
<-errChan
}
// Dial localhost:3222 to ensure the proxy is ready.
delay := time.Second / 4
for attempt := 0; attempt < 5; attempt++ {
conn, err := net.DialTimeout("tcp", "localhost:3222", 5*time.Second)
if err == nil {
conn.Close()
return closer, nil
}
select {
case <-time.After(delay):
delay *= 2
case err := <-errChan:
return nil, err
}
}
closer()
return nil, errors.New("failed to set up idevicedebugserverproxy")
}
// findDeviceAppPath returns the device path to the app with the
// given bundle ID. It parses the output of ideviceinstaller -l -o xml,
// looking for the bundle ID and the corresponding Path value.
func findDeviceAppPath(bundleID string) (string, error) {
cmd := idevCmd(exec.Command("ideviceinstaller", "-l", "-o", "xml"))
out, err := cmd.CombinedOutput()
if err != nil {
os.Stderr.Write(out)
return "", fmt.Errorf("ideviceinstaller: -l -o xml %v", err)
}
var list struct {
Apps []struct {
Data []byte `xml:",innerxml"`
} `xml:"array>dict"`
}
if err := xml.Unmarshal(out, &list); err != nil {
return "", fmt.Errorf("failed to parse ideviceinstaller output: %v", err)
}
for _, app := range list.Apps {
values, err := parsePlistDict(app.Data)
if err != nil {
return "", fmt.Errorf("findDeviceAppPath: failed to parse app dict: %v", err)
}
if values["CFBundleIdentifier"] == bundleID {
if path, ok := values["Path"]; ok {
return path, nil
}
}
}
return "", fmt.Errorf("failed to find device path for bundle: %s", bundleID)
}
// Parse an XML-encoded plist. Plist values are mapped to strings.
func parsePlistDict(dict []byte) (map[string]string, error) {
d := xml.NewDecoder(bytes.NewReader(dict))
values := make(map[string]string)
var key string
var hasKey bool
for {
tok, err := d.Token()
if err == io.EOF {
break
}
if err != nil {
return nil, err
}
if tok, ok := tok.(xml.StartElement); ok {
if tok.Name.Local == "key" {
if err := d.DecodeElement(&key, &tok); err != nil {
return nil, err
}
hasKey = true
} else if hasKey {
var val string
var err error
switch n := tok.Name.Local; n {
case "true", "false":
// Bools are represented as <true/> and <false/>.
val = n
err = d.Skip()
default:
err = d.DecodeElement(&val, &tok)
}
if err != nil {
return nil, err
}
values[key] = val
hasKey = false
} else {
if err := d.Skip(); err != nil {
return nil, err
}
}
}
}
return values, nil
}
func installSimulator(appdir string) error {
cmd := exec.Command(
"xcrun", "simctl", "install",
@@ -142,20 +460,138 @@ func installSimulator(appdir string) error {
return nil
}
func runSimulator(appdir, bundleID string, args []string) error {
xcrunArgs := []string{"simctl", "spawn",
"booted",
appdir + "/gotest",
func uninstallDevice(bundleID string) error {
cmd := idevCmd(exec.Command(
"ideviceinstaller",
"-U", bundleID,
))
if out, err := cmd.CombinedOutput(); err != nil {
os.Stderr.Write(out)
return fmt.Errorf("ideviceinstaller -U %q: %s", bundleID, err)
}
xcrunArgs = append(xcrunArgs, args...)
cmd := exec.Command("xcrun", xcrunArgs...)
cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
err := cmd.Run()
return nil
}
func installDevice(appdir string) error {
attempt := 0
for {
cmd := idevCmd(exec.Command(
"ideviceinstaller",
"-i", appdir,
))
if out, err := cmd.CombinedOutput(); err != nil {
// Sometimes, installing the app fails for some reason.
// Give the device a few seconds and try again.
if attempt < 5 {
time.Sleep(5 * time.Second)
attempt++
continue
}
os.Stderr.Write(out)
return fmt.Errorf("ideviceinstaller -i %q: %v (%d attempts)", appdir, err, attempt)
}
return nil
}
}
func idevCmd(cmd *exec.Cmd) *exec.Cmd {
if deviceID != "" {
// Inject -u device_id after the executable, but before the arguments.
args := []string{cmd.Args[0], "-u", deviceID}
cmd.Args = append(args, cmd.Args[1:]...)
}
return cmd
}
func runSimulator(appdir, bundleID string, args []string) error {
cmd := exec.Command(
"xcrun", "simctl", "launch",
"--wait-for-debugger",
"booted",
bundleID,
)
out, err := cmd.CombinedOutput()
if err != nil {
os.Stderr.Write(out)
return fmt.Errorf("xcrun simctl launch booted %q: %v", bundleID, err)
}
var processID int
var ignore string
if _, err := fmt.Sscanf(string(out), "%s %d", &ignore, &processID); err != nil {
return fmt.Errorf("runSimulator: couldn't find processID from `simctl launch`: %v (%q)", err, out)
}
_, err = runLLDB("ios-simulator", appdir, strconv.Itoa(processID), args)
return err
}
return nil
func runDevice(appdir, bundleID string, args []string) error {
attempt := 0
for {
// The device app path reported by the device might be stale, so retry
// the lookup of the device path along with the lldb launching below.
deviceapp, err := findDeviceAppPath(bundleID)
if err != nil {
// The device app path might not yet exist for a newly installed app.
if attempt == 5 {
return err
}
attempt++
time.Sleep(5 * time.Second)
continue
}
out, err := runLLDB("remote-ios", appdir, deviceapp, args)
// If the program was not started it can be retried without papering over
// real test failures.
started := bytes.HasPrefix(out, []byte("lldb: running program"))
if started || err == nil || attempt == 5 {
return err
}
// Sometimes, the app was not yet ready to launch or the device path was
// stale. Retry.
attempt++
time.Sleep(5 * time.Second)
}
}
func runLLDB(target, appdir, deviceapp string, args []string) ([]byte, error) {
var env []string
for _, e := range os.Environ() {
// Don't override TMPDIR, HOME, GOCACHE on the device.
if strings.HasPrefix(e, "TMPDIR=") || strings.HasPrefix(e, "HOME=") || strings.HasPrefix(e, "GOCACHE=") {
continue
}
env = append(env, e)
}
lldb := exec.Command(
"python",
"-", // Read script from stdin.
target,
appdir,
deviceapp,
)
lldb.Args = append(lldb.Args, args...)
lldb.Env = env
lldb.Stdin = strings.NewReader(lldbDriver)
lldb.Stdout = os.Stdout
var out bytes.Buffer
lldb.Stderr = io.MultiWriter(&out, os.Stderr)
err := lldb.Start()
if err == nil {
// Forward SIGQUIT to the lldb driver which in turn will forward
// to the running program.
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGQUIT)
proc := lldb.Process
go func() {
for sig := range sigs {
proc.Signal(sig)
}
}()
err = lldb.Wait()
signal.Stop(sigs)
close(sigs)
}
return out.Bytes(), err
}
func copyLocalDir(dst, src string) error {
@@ -364,3 +800,112 @@ const resourceRules = `<?xml version="1.0" encoding="UTF-8"?>
</dict>
</plist>
`
const lldbDriver = `
import sys
import os
import signal
platform, exe, device_exe_or_pid, args = sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4:]
env = []
for k, v in os.environ.items():
env.append(k + "=" + v)
sys.path.append('/Applications/Xcode.app/Contents/SharedFrameworks/LLDB.framework/Resources/Python')
import lldb
debugger = lldb.SBDebugger.Create()
debugger.SetAsync(True)
debugger.SkipLLDBInitFiles(True)
err = lldb.SBError()
target = debugger.CreateTarget(exe, None, platform, True, err)
if not target.IsValid() or not err.Success():
sys.stderr.write("lldb: failed to set up target: %s\n" % (err))
sys.exit(1)
listener = debugger.GetListener()
if platform == 'remote-ios':
target.modules[0].SetPlatformFileSpec(lldb.SBFileSpec(device_exe_or_pid))
process = target.ConnectRemote(listener, 'connect://localhost:3222', None, err)
else:
process = target.AttachToProcessWithID(listener, int(device_exe_or_pid), err)
if not err.Success():
sys.stderr.write("lldb: failed to connect to remote target %s: %s\n" % (device_exe_or_pid, err))
sys.exit(1)
# Don't stop on signals.
sigs = process.GetUnixSignals()
for i in range(0, sigs.GetNumSignals()):
sig = sigs.GetSignalAtIndex(i)
sigs.SetShouldStop(sig, False)
sigs.SetShouldNotify(sig, False)
event = lldb.SBEvent()
running = False
prev_handler = None
def signal_handler(signal, frame):
process.Signal(signal)
def run_program():
# Forward SIGQUIT to the program.
prev_handler = signal.signal(signal.SIGQUIT, signal_handler)
# Tell the Go driver that the program is running and should not be retried.
sys.stderr.write("lldb: running program\n")
running = True
# Process is stopped at attach/launch. Let it run.
process.Continue()
if platform != 'remote-ios':
# For the local emulator the program is ready to run.
# For remote device runs, we need to wait for eStateConnected,
# below.
run_program()
while True:
if not listener.WaitForEvent(1, event):
continue
if not lldb.SBProcess.EventIsProcessEvent(event):
continue
if running:
# Pass through stdout and stderr.
while True:
out = process.GetSTDOUT(8192)
if not out:
break
sys.stdout.write(out)
while True:
out = process.GetSTDERR(8192)
if not out:
break
sys.stderr.write(out)
state = process.GetStateFromEvent(event)
if state in [lldb.eStateCrashed, lldb.eStateDetached, lldb.eStateUnloaded, lldb.eStateExited]:
if running:
signal.signal(signal.SIGQUIT, prev_handler)
break
elif state == lldb.eStateConnected:
if platform == 'remote-ios':
process.RemoteLaunch(args, env, None, None, None, None, 0, False, err)
if not err.Success():
sys.stderr.write("lldb: failed to launch remote process: %s\n" % (err))
process.Kill()
debugger.Terminate()
sys.exit(1)
run_program()
exitStatus = process.GetExitStatus()
exitDesc = process.GetExitDescription()
process.Kill()
debugger.Terminate()
if exitStatus == 0 and exitDesc is not None:
# Ensure tests fail when killed by a signal.
exitStatus = 123
sys.exit(exitStatus)
`

View File

@@ -10,11 +10,11 @@ case "$GOWASIRUNTIME" in
"wasmer")
exec wasmer run --dir=/ --env PWD="$PWD" --env PATH="$PATH" ${GOWASIRUNTIMEARGS:-} "$1" -- "${@:2}"
;;
"wazero")
exec wazero run -mount /:/ -env-inherit -cachedir "${TMPDIR:-/tmp}"/wazero ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}"
"wasmtime")
exec wasmtime run --dir=/ --env PWD="$PWD" --env PATH="$PATH" --max-wasm-stack 1048576 ${GOWASIRUNTIMEARGS:-} "$1" -- "${@:2}"
;;
"wasmtime" | "")
exec wasmtime run --dir=/ --env PWD="$PWD" --env PATH="$PATH" -W max-wasm-stack=1048576 ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}"
"wazero" | "")
exec wazero run -mount /:/ -env-inherit -cachedir "${TMPDIR:-/tmp}"/wazero ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}"
;;
*)
echo "Unknown Go WASI runtime specified: $GOWASIRUNTIME"

View File

@@ -31,14 +31,14 @@ Maintaining vendor directories
Before updating vendor directories, ensure that module mode is enabled.
Make sure that GO111MODULE is not set in the environment, or that it is
set to 'on' or 'auto', and if you use a go.work file, set GOWORK=off.
set to 'on' or 'auto'.
Requirements may be added, updated, and removed with 'go get'.
The vendor directory may be updated with 'go mod vendor'.
A typical sequence might be:
cd src # or src/cmd
go get golang.org/x/net@master
cd src
go get golang.org/x/net@latest
go mod tidy
go mod vendor

View File

@@ -10,4 +10,4 @@ if [ ! -f make.bash ]; then
fi
. ./make.bash "$@" --no-banner
bash run.bash --no-rebuild
"$GOTOOLDIR/dist" banner # print build info
$GOTOOLDIR/dist banner # print build info

View File

@@ -612,7 +612,7 @@ func (fi headerFileInfo) String() string {
}
// sysStat, if non-nil, populates h from system-dependent fields of fi.
var sysStat func(fi fs.FileInfo, h *Header, doNameLookups bool) error
var sysStat func(fi fs.FileInfo, h *Header) error
const (
// Mode constants from the USTAR spec:
@@ -632,17 +632,13 @@ const (
c_ISSOCK = 0140000 // Socket
)
// FileInfoHeader creates a partially-populated [Header] from fi.
// FileInfoHeader creates a partially-populated Header from fi.
// If fi describes a symlink, FileInfoHeader records link as the link target.
// If fi describes a directory, a slash is appended to the name.
//
// Since fs.FileInfo's Name method only returns the base name of
// the file it describes, it may be necessary to modify Header.Name
// to provide the full path name of the file.
//
// If fi implements [FileInfoNames]
// Header.Gname and Header.Uname
// are provided by the methods of the interface.
func FileInfoHeader(fi fs.FileInfo, link string) (*Header, error) {
if fi == nil {
return nil, errors.New("archive/tar: FileInfo is nil")
@@ -715,36 +711,12 @@ func FileInfoHeader(fi fs.FileInfo, link string) (*Header, error) {
}
}
}
var doNameLookups = true
if iface, ok := fi.(FileInfoNames); ok {
doNameLookups = false
var err error
h.Gname, err = iface.Gname()
if err != nil {
return nil, err
}
h.Uname, err = iface.Uname()
if err != nil {
return nil, err
}
}
if sysStat != nil {
return h, sysStat(fi, h, doNameLookups)
return h, sysStat(fi, h)
}
return h, nil
}
// FileInfoNames extends [fs.FileInfo].
// Passing an instance of this to [FileInfoHeader] permits the caller
// to avoid a system-dependent name lookup by specifying the Uname and Gname directly.
type FileInfoNames interface {
fs.FileInfo
// Uname should give a user name.
Uname() (string, error)
// Gname should give a group name.
Gname() (string, error)
}
// isHeaderOnlyType checks if the given type flag is of the type that has no
// data section even if a size is specified.
func isHeaderOnlyType(flag byte) bool {
@@ -755,3 +727,10 @@ func isHeaderOnlyType(flag byte) bool {
return false
}
}
func min(a, b int64) int64 {
if a < b {
return a
}
return b
}

View File

@@ -33,7 +33,7 @@ import "strings"
// sub-second times | no | yes | no
// sparse files | no | yes | yes
//
// The table's upper portion shows the [Header] fields, where each format reports
// The table's upper portion shows the Header fields, where each format reports
// the maximum number of bytes allowed for each string field and
// the integer type used to store each numeric field
// (where timestamps are stored as the number of seconds since the Unix epoch).

View File

@@ -35,7 +35,7 @@ type fileReader interface {
WriteTo(io.Writer) (int64, error)
}
// NewReader creates a new [Reader] reading from r.
// NewReader creates a new Reader reading from r.
func NewReader(r io.Reader) *Reader {
return &Reader{r: r, curr: &regFileReader{r, 0}}
}
@@ -47,10 +47,10 @@ func NewReader(r io.Reader) *Reader {
//
// If Next encounters a non-local name (as defined by [filepath.IsLocal])
// and the GODEBUG environment variable contains `tarinsecurepath=0`,
// Next returns the header with an [ErrInsecurePath] error.
// Next returns the header with an ErrInsecurePath error.
// A future version of Go may introduce this behavior by default.
// Programs that want to accept non-local names can ignore
// the [ErrInsecurePath] error and use the returned header.
// the ErrInsecurePath error and use the returned header.
func (tr *Reader) Next() (*Header, error) {
if tr.err != nil {
return nil, tr.err
@@ -623,14 +623,14 @@ func readGNUSparseMap0x1(paxHdrs map[string]string) (sparseDatas, error) {
// Read reads from the current file in the tar archive.
// It returns (0, io.EOF) when it reaches the end of that file,
// until [Next] is called to advance to the next file.
// until Next is called to advance to the next file.
//
// If the current file is sparse, then the regions marked as a hole
// are read back as NUL-bytes.
//
// Calling Read on special types like [TypeLink], [TypeSymlink], [TypeChar],
// [TypeBlock], [TypeDir], and [TypeFifo] returns (0, [io.EOF]) regardless of what
// the [Header.Size] claims.
// Calling Read on special types like TypeLink, TypeSymlink, TypeChar,
// TypeBlock, TypeDir, and TypeFifo returns (0, io.EOF) regardless of what
// the Header.Size claims.
func (tr *Reader) Read(b []byte) (int, error) {
if tr.err != nil {
return 0, tr.err
@@ -811,7 +811,9 @@ func (sr sparseFileReader) physicalRemaining() int64 {
type zeroReader struct{}
func (zeroReader) Read(b []byte) (int, error) {
clear(b)
for i := range b {
b[i] = 0
}
return len(b), nil
}

View File

@@ -23,30 +23,30 @@ func init() {
// The downside is that renaming uname or gname by the OS never takes effect.
var userMap, groupMap sync.Map // map[int]string
func statUnix(fi fs.FileInfo, h *Header, doNameLookups bool) error {
func statUnix(fi fs.FileInfo, h *Header) error {
sys, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return nil
}
h.Uid = int(sys.Uid)
h.Gid = int(sys.Gid)
if doNameLookups {
// Best effort at populating Uname and Gname.
// The os/user functions may fail for any number of reasons
// (not implemented on that platform, cgo not enabled, etc).
if u, ok := userMap.Load(h.Uid); ok {
h.Uname = u.(string)
} else if u, err := user.LookupId(strconv.Itoa(h.Uid)); err == nil {
h.Uname = u.Username
userMap.Store(h.Uid, h.Uname)
}
if g, ok := groupMap.Load(h.Gid); ok {
h.Gname = g.(string)
} else if g, err := user.LookupGroupId(strconv.Itoa(h.Gid)); err == nil {
h.Gname = g.Name
groupMap.Store(h.Gid, h.Gname)
}
// Best effort at populating Uname and Gname.
// The os/user functions may fail for any number of reasons
// (not implemented on that platform, cgo not enabled, etc).
if u, ok := userMap.Load(h.Uid); ok {
h.Uname = u.(string)
} else if u, err := user.LookupId(strconv.Itoa(h.Uid)); err == nil {
h.Uname = u.Username
userMap.Store(h.Uid, h.Uname)
}
if g, ok := groupMap.Load(h.Gid); ok {
h.Gname = g.(string)
} else if g, err := user.LookupGroupId(strconv.Itoa(h.Gid)); err == nil {
h.Gname = g.Name
groupMap.Store(h.Gid, h.Gname)
}
h.AccessTime = statAtime(sys)
h.ChangeTime = statCtime(sys)

View File

@@ -73,7 +73,7 @@ func (f *formatter) formatString(b []byte, s string) {
// in the V7 path field as a directory even though the full path
// recorded elsewhere (e.g., via PAX record) contains no trailing slash.
if len(s) > len(b) && b[len(b)-1] == '/' {
n := len(strings.TrimRight(s[:len(b)-1], "/"))
n := len(strings.TrimRight(s[:len(b)], "/"))
b[n] = 0 // Replace trailing slash with NUL terminator
}
}

View File

@@ -848,53 +848,3 @@ func Benchmark(b *testing.B) {
})
}
var _ fileInfoNames = fileInfoNames{}
type fileInfoNames struct{}
func (f *fileInfoNames) Name() string {
return "tmp"
}
func (f *fileInfoNames) Size() int64 {
return 0
}
func (f *fileInfoNames) Mode() fs.FileMode {
return 0777
}
func (f *fileInfoNames) ModTime() time.Time {
return time.Time{}
}
func (f *fileInfoNames) IsDir() bool {
return false
}
func (f *fileInfoNames) Sys() any {
return nil
}
func (f *fileInfoNames) Uname() (string, error) {
return "Uname", nil
}
func (f *fileInfoNames) Gname() (string, error) {
return "Gname", nil
}
func TestFileInfoHeaderUseFileInfoNames(t *testing.T) {
info := &fileInfoNames{}
header, err := FileInfoHeader(info, "")
if err != nil {
t.Fatal(err)
}
if header.Uname != "Uname" {
t.Fatalf("header.Uname: got %s, want %s", header.Uname, "Uname")
}
if header.Gname != "Gname" {
t.Fatalf("header.Gname: got %s, want %s", header.Gname, "Gname")
}
}

View File

@@ -5,18 +5,16 @@
package tar
import (
"errors"
"fmt"
"io"
"io/fs"
"path"
"slices"
"sort"
"strings"
"time"
)
// Writer provides sequential writing of a tar archive.
// [Writer.WriteHeader] begins a new file with the provided [Header],
// Write.WriteHeader begins a new file with the provided Header,
// and then Writer can be treated as an io.Writer to supply that file's data.
type Writer struct {
w io.Writer
@@ -46,7 +44,7 @@ type fileWriter interface {
// Flush finishes writing the current file's block padding.
// The current file must be fully written before Flush can be called.
//
// This is unnecessary as the next call to [Writer.WriteHeader] or [Writer.Close]
// This is unnecessary as the next call to WriteHeader or Close
// will implicitly flush out the file's padding.
func (tw *Writer) Flush() error {
if tw.err != nil {
@@ -174,7 +172,7 @@ func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error {
for k := range paxHdrs {
keys = append(keys, k)
}
slices.Sort(keys)
sort.Strings(keys)
// Write each record to a buffer.
var buf strings.Builder
@@ -405,43 +403,6 @@ func (tw *Writer) writeRawHeader(blk *block, size int64, flag byte) error {
return nil
}
// AddFS adds the files from fs.FS to the archive.
// It walks the directory tree starting at the root of the filesystem
// adding each file to the tar archive while maintaining the directory structure.
func (tw *Writer) AddFS(fsys fs.FS) error {
return fs.WalkDir(fsys, ".", func(name string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
if d.IsDir() {
return nil
}
info, err := d.Info()
if err != nil {
return err
}
// TODO(#49580): Handle symlinks when fs.ReadLinkFS is available.
if !info.Mode().IsRegular() {
return errors.New("tar: cannot add non-regular file")
}
h, err := FileInfoHeader(info, "")
if err != nil {
return err
}
h.Name = name
if err := tw.WriteHeader(h); err != nil {
return err
}
f, err := fsys.Open(name)
if err != nil {
return err
}
defer f.Close()
_, err = io.Copy(tw, f)
return err
})
}
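// Usage sketch (illustrative only; "some/dir" is a hypothetical path):
//
//	var buf bytes.Buffer
//	tw := tar.NewWriter(&buf)
//	if err := tw.AddFS(os.DirFS("some/dir")); err != nil {
//		log.Fatal(err)
//	}
//	if err := tw.Close(); err != nil {
//		log.Fatal(err)
//	}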
// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
// If the path is not splittable, then it will return ("", "", false).
func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
@@ -464,12 +425,12 @@ func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
}
// Write writes to the current file in the tar archive.
// Write returns the error [ErrWriteTooLong] if more than
// Header.Size bytes are written after [Writer.WriteHeader].
// Write returns the error ErrWriteTooLong if more than
// Header.Size bytes are written after WriteHeader.
//
// Calling Write on special types like [TypeLink], [TypeSymlink], [TypeChar],
// [TypeBlock], [TypeDir], and [TypeFifo] returns (0, [ErrWriteTooLong]) regardless
// of what the [Header.Size] claims.
// Calling Write on special types like TypeLink, TypeSymlink, TypeChar,
// TypeBlock, TypeDir, and TypeFifo returns (0, ErrWriteTooLong) regardless
// of what the Header.Size claims.
func (tw *Writer) Write(b []byte) (int, error) {
if tw.err != nil {
return 0, tw.err
@@ -503,7 +464,7 @@ func (tw *Writer) readFrom(r io.Reader) (int64, error) {
}
// Close closes the tar archive by flushing the padding, and writing the footer.
// If the current file (from a prior call to [Writer.WriteHeader]) is not fully written,
// If the current file (from a prior call to WriteHeader) is not fully written,
// then this returns an error.
func (tw *Writer) Close() error {
if tw.err == ErrWriteAfterClose {

View File

@@ -9,14 +9,12 @@ import (
"encoding/hex"
"errors"
"io"
"io/fs"
"os"
"path"
"reflect"
"slices"
"sort"
"strings"
"testing"
"testing/fstest"
"testing/iotest"
"time"
)
@@ -581,10 +579,10 @@ func TestPaxSymlink(t *testing.T) {
t.Fatal(err)
}
hdr, err := FileInfoHeader(fileinfo, "")
hdr.Typeflag = TypeSymlink
if err != nil {
t.Fatalf("os.Stat:1 %v", err)
}
hdr.Typeflag = TypeSymlink
// Force a PAX long linkname to be written
longLinkname := strings.Repeat("1234567890/1234567890", 10)
hdr.Linkname = longLinkname
@@ -749,7 +747,7 @@ func TestPaxHeadersSorted(t *testing.T) {
bytes.Index(buf.Bytes(), []byte("foo=foo")),
bytes.Index(buf.Bytes(), []byte("qux=qux")),
}
if !slices.IsSorted(indices) {
if !sort.IntsAreSorted(indices) {
t.Fatal("PAX headers are not sorted")
}
}
@@ -761,10 +759,10 @@ func TestUSTARLongName(t *testing.T) {
t.Fatal(err)
}
hdr, err := FileInfoHeader(fileinfo, "")
hdr.Typeflag = TypeDir
if err != nil {
t.Fatalf("os.Stat:1 %v", err)
}
hdr.Typeflag = TypeDir
// Force a PAX long name to be written. The name was taken from a practical example
// that fails, with every character replaced by a digit to anonymize the sample.
longName := "/0000_0000000/00000-000000000/0000_0000000/00000-0000000000000/0000_0000000/00000-0000000-00000000/0000_0000000/00000000/0000_0000000/000/0000_0000000/00000000v00/0000_0000000/000000/0000_0000000/0000000/0000_0000000/00000y-00/0000/0000/00000000/0x000000/"
@@ -1335,67 +1333,3 @@ func TestFileWriter(t *testing.T) {
}
}
}
func TestWriterAddFS(t *testing.T) {
fsys := fstest.MapFS{
"file.go": {Data: []byte("hello")},
"subfolder/another.go": {Data: []byte("world")},
}
var buf bytes.Buffer
tw := NewWriter(&buf)
if err := tw.AddFS(fsys); err != nil {
t.Fatal(err)
}
// Test that we can get the files back from the archive
tr := NewReader(&buf)
entries, err := fsys.ReadDir(".")
if err != nil {
t.Fatal(err)
}
var curfname string
for _, entry := range entries {
curfname = entry.Name()
if entry.IsDir() {
curfname += "/"
continue
}
hdr, err := tr.Next()
if err == io.EOF {
break // End of archive
}
if err != nil {
t.Fatal(err)
}
data, err := io.ReadAll(tr)
if err != nil {
t.Fatal(err)
}
if hdr.Name != curfname {
t.Fatalf("got filename %v, want %v",
curfname, hdr.Name)
}
origdata := fsys[curfname].Data
if string(data) != string(origdata) {
t.Fatalf("got file content %v, want %v",
data, origdata)
}
}
}
func TestWriterAddFSNonRegularFiles(t *testing.T) {
fsys := fstest.MapFS{
"device": {Data: []byte("hello"), Mode: 0755 | fs.ModeDevice},
"symlink": {Data: []byte("world"), Mode: 0755 | fs.ModeSymlink},
}
var buf bytes.Buffer
tw := NewWriter(&buf)
if err := tw.AddFS(fsys); err == nil {
t.Fatal("expected error, got nil")
}
}

View File

@@ -16,7 +16,7 @@ import (
"os"
"path"
"path/filepath"
"slices"
"sort"
"strings"
"sync"
"time"
@@ -48,15 +48,15 @@ type Reader struct {
fileList []fileListEntry
}
// A ReadCloser is a [Reader] that must be closed when no longer needed.
// A ReadCloser is a Reader that must be closed when no longer needed.
type ReadCloser struct {
f *os.File
Reader
}
// A File is a single file in a ZIP archive.
// The file information is in the embedded [FileHeader].
// The file content can be accessed by calling [File.Open].
// The file information is in the embedded FileHeader.
// The file content can be accessed by calling Open.
type File struct {
FileHeader
zip *Reader
@@ -93,16 +93,16 @@ func OpenReader(name string) (*ReadCloser, error) {
return r, err
}
// NewReader returns a new [Reader] reading from r, which is assumed to
// NewReader returns a new Reader reading from r, which is assumed to
// have the given size in bytes.
//
// If any file inside the archive uses a non-local name
// (as defined by [filepath.IsLocal]) or a name containing backslashes
// and the GODEBUG environment variable contains `zipinsecurepath=0`,
// NewReader returns the reader with an [ErrInsecurePath] error.
// NewReader returns the reader with an ErrInsecurePath error.
// A future version of Go may introduce this behavior by default.
// Programs that want to accept non-local names can ignore
// the [ErrInsecurePath] error and use the returned reader.
// the ErrInsecurePath error and use the returned reader.
func NewReader(r io.ReaderAt, size int64) (*Reader, error) {
if size < 0 {
return nil, errors.New("zip: size cannot be negative")
@@ -178,7 +178,7 @@ func (r *Reader) init(rdr io.ReaderAt, size int64) error {
// RegisterDecompressor registers or overrides a custom decompressor for a
// specific method ID. If a decompressor for a given method is not found,
// [Reader] will default to looking up the decompressor at the package level.
// Reader will default to looking up the decompressor at the package level.
func (r *Reader) RegisterDecompressor(method uint16, dcomp Decompressor) {
if r.decompressors == nil {
r.decompressors = make(map[uint16]Decompressor)
@@ -202,7 +202,7 @@ func (rc *ReadCloser) Close() error {
// DataOffset returns the offset of the file's possibly-compressed
// data, relative to the beginning of the zip file.
//
// Most callers should instead use [File.Open], which transparently
// Most callers should instead use Open, which transparently
// decompresses data and verifies checksums.
func (f *File) DataOffset() (offset int64, err error) {
bodyOffset, err := f.findBodyOffset()
@@ -212,7 +212,7 @@ func (f *File) DataOffset() (offset int64, err error) {
return f.headerOffset + bodyOffset, nil
}
// Open returns a [ReadCloser] that provides access to the [File]'s contents.
// Open returns a ReadCloser that provides access to the File's contents.
// Multiple files may be read concurrently.
func (f *File) Open() (io.ReadCloser, error) {
bodyOffset, err := f.findBodyOffset()
@@ -255,7 +255,7 @@ func (f *File) Open() (io.ReadCloser, error) {
return rc, nil
}
// OpenRaw returns a [Reader] that provides access to the [File]'s contents without
// OpenRaw returns a Reader that provides access to the File's contents without
// decompression.
func (f *File) OpenRaw() (io.Reader, error) {
bodyOffset, err := f.findBodyOffset()
@@ -469,8 +469,8 @@ parseExtras:
const ticksPerSecond = 1e7 // Windows timestamp resolution
ts := int64(attrBuf.uint64()) // ModTime since Windows epoch
secs := ts / ticksPerSecond
nsecs := (1e9 / ticksPerSecond) * (ts % ticksPerSecond)
secs := int64(ts / ticksPerSecond)
nsecs := (1e9 / ticksPerSecond) * int64(ts%ticksPerSecond)
epoch := time.Date(1601, time.January, 1, 0, 0, 0, 0, time.UTC)
modified = time.Unix(epoch.Unix()+secs, nsecs)
}
@@ -699,13 +699,9 @@ func findSignatureInBlock(b []byte) int {
if b[i] == 'P' && b[i+1] == 'K' && b[i+2] == 0x05 && b[i+3] == 0x06 {
// n is length of comment
n := int(b[i+directoryEndLen-2]) | int(b[i+directoryEndLen-1])<<8
if n+directoryEndLen+i > len(b) {
// Truncated comment.
// Some parsers (such as Info-ZIP) ignore the truncated comment
// rather than treating it as a hard error.
return -1
if n+directoryEndLen+i <= len(b) {
return i
}
return i
}
}
return -1
@@ -862,19 +858,14 @@ func (r *Reader) initFileList() {
}
}
slices.SortFunc(r.fileList, func(a, b fileListEntry) int {
return fileEntryCompare(a.name, b.name)
})
sort.Slice(r.fileList, func(i, j int) bool { return fileEntryLess(r.fileList[i].name, r.fileList[j].name) })
})
}
func fileEntryCompare(x, y string) int {
func fileEntryLess(x, y string) bool {
xdir, xelem, _ := split(x)
ydir, yelem, _ := split(y)
if xdir != ydir {
return strings.Compare(xdir, ydir)
}
return strings.Compare(xelem, yelem)
return xdir < ydir || xdir == ydir && xelem < yelem
}
// Open opens the named file in the ZIP archive,
@@ -925,12 +916,9 @@ func (r *Reader) openLookup(name string) *fileListEntry {
dir, elem, _ := split(name)
files := r.fileList
i, _ := slices.BinarySearchFunc(files, dir, func(a fileListEntry, dir string) (ret int) {
idir, ielem, _ := split(a.name)
if dir != idir {
return strings.Compare(idir, dir)
}
return strings.Compare(ielem, elem)
i := sort.Search(len(files), func(i int) bool {
idir, ielem, _ := split(files[i].name)
return idir > dir || idir == dir && ielem >= elem
})
if i < len(files) {
fname := files[i].name
@@ -943,21 +931,13 @@ func (r *Reader) openLookup(name string) *fileListEntry {
func (r *Reader) openReadDir(dir string) []fileListEntry {
files := r.fileList
i, _ := slices.BinarySearchFunc(files, dir, func(a fileListEntry, dir string) int {
idir, _, _ := split(a.name)
if dir != idir {
return strings.Compare(idir, dir)
}
// find the first entry with dir
return +1
i := sort.Search(len(files), func(i int) bool {
idir, _, _ := split(files[i].name)
return idir >= dir
})
j, _ := slices.BinarySearchFunc(files, dir, func(a fileListEntry, dir string) int {
jdir, _, _ := split(a.name)
if dir != jdir {
return strings.Compare(jdir, dir)
}
// find the last entry with dir
return -1
j := sort.Search(len(files), func(j int) bool {
jdir, _, _ := split(files[j].name)
return jdir > dir
})
return files[i:j]
}

View File

@@ -570,14 +570,6 @@ var tests = []ZipTest{
},
},
},
// Issue 66869: Don't skip over an EOCDR with a truncated comment.
// The test file sneakily hides a second EOCDR before the first one;
// previously we would extract one file ("file") from this archive,
// while most other tools would reject the file or extract a different one ("FILE").
{
Name: "comment-truncated.zip",
Error: ErrFormat,
},
}
func TestReader(t *testing.T) {
@@ -912,7 +904,9 @@ func returnRecursiveZip() (r io.ReaderAt, size int64) {
// type zeros struct{}
//
// func (zeros) Read(b []byte) (int, error) {
// clear(b)
// for i := range b {
// b[i] = 0
// }
// return len(b), nil
// }
//
@@ -1192,7 +1186,7 @@ func TestIssue12449(t *testing.T) {
0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00,
}
// Read in the archive.
_, err := NewReader(bytes.NewReader(data), int64(len(data)))
_, err := NewReader(bytes.NewReader([]byte(data)), int64(len(data)))
if err != nil {
t.Errorf("Error reading the archive: %v", err)
}
@@ -1339,7 +1333,7 @@ func TestCVE202127919(t *testing.T) {
0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x39, 0x00,
0x00, 0x00, 0x59, 0x00, 0x00, 0x00, 0x00, 0x00,
}
r, err := NewReader(bytes.NewReader(data), int64(len(data)))
r, err := NewReader(bytes.NewReader([]byte(data)), int64(len(data)))
if err != ErrInsecurePath {
t.Fatalf("Error reading the archive: %v", err)
}
@@ -1565,7 +1559,7 @@ func TestCVE202141772(t *testing.T) {
0x00, 0x04, 0x00, 0x04, 0x00, 0x31, 0x01, 0x00,
0x00, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00,
}
r, err := NewReader(bytes.NewReader(data), int64(len(data)))
r, err := NewReader(bytes.NewReader([]byte(data)), int64(len(data)))
if err != ErrInsecurePath {
t.Fatalf("Error reading the archive: %v", err)
}
@@ -1828,7 +1822,7 @@ func TestBaseOffsetPlusOverflow(t *testing.T) {
}
}()
// Previously, this would trigger a panic as we attempt to read from
// an io.SectionReader which would access a slice at a negative offset
// a io.SectionReader which would access a slice at a negative offset
// as the section reader offset & size were < 0.
NewReader(bytes.NewReader(data), int64(len(data))+1875)
}

View File

@@ -19,7 +19,7 @@ import (
type Compressor func(w io.Writer) (io.WriteCloser, error)
// A Decompressor returns a new decompressing reader, reading from r.
// The [io.ReadCloser]'s Close method must be used to release associated resources.
// The ReadCloser's Close method must be used to release associated resources.
// The Decompressor itself must be safe to invoke from multiple goroutines
// simultaneously, but each returned reader will be used only by
// one goroutine at a time.
@@ -115,7 +115,7 @@ func init() {
}
// RegisterDecompressor allows custom decompressors for a specified method ID.
// The common methods [Store] and [Deflate] are built in.
// The common methods Store and Deflate are built in.
func RegisterDecompressor(method uint16, dcomp Decompressor) {
if _, dup := decompressors.LoadOrStore(method, dcomp); dup {
panic("decompressor already registered")
@@ -123,7 +123,7 @@ func RegisterDecompressor(method uint16, dcomp Decompressor) {
}
// RegisterCompressor registers custom compressors for a specified method ID.
// The common methods [Store] and [Deflate] are built in.
// The common methods Store and Deflate are built in.
func RegisterCompressor(method uint16, comp Compressor) {
if _, dup := compressors.LoadOrStore(method, comp); dup {
panic("compressor already registered")

View File

@@ -17,7 +17,7 @@ for normal archives both fields will be the same. For files requiring
the ZIP64 format the 32 bit fields will be 0xffffffff and the 64 bit
fields must be used instead.
[ZIP specification]: https://support.pkware.com/pkzip/appnote
[ZIP specification]: https://www.pkware.com/appnote
*/
package zip
@@ -82,7 +82,7 @@ const (
// FileHeader describes a file within a ZIP file.
// See the [ZIP specification] for details.
//
// [ZIP specification]: https://support.pkware.com/pkzip/appnote
// [ZIP specification]: https://www.pkware.com/appnote
type FileHeader struct {
// Name is the name of the file.
//
@@ -143,9 +143,9 @@ type FileHeader struct {
// Deprecated: Use CompressedSize64 instead.
CompressedSize uint32
// UncompressedSize is the uncompressed size of the file in bytes.
// UncompressedSize is the compressed size of the file in bytes.
// If either the uncompressed or compressed size of the file
// does not fit in 32 bits, UncompressedSize is set to ^uint32(0).
// does not fit in 32 bits, CompressedSize is set to ^uint32(0).
//
// Deprecated: Use UncompressedSize64 instead.
UncompressedSize uint32
@@ -160,12 +160,12 @@ type FileHeader struct {
ExternalAttrs uint32 // Meaning depends on CreatorVersion
}
// FileInfo returns an fs.FileInfo for the [FileHeader].
// FileInfo returns an fs.FileInfo for the FileHeader.
func (h *FileHeader) FileInfo() fs.FileInfo {
return headerFileInfo{h}
}
// headerFileInfo implements [fs.FileInfo].
// headerFileInfo implements fs.FileInfo.
type headerFileInfo struct {
fh *FileHeader
}
@@ -194,7 +194,7 @@ func (fi headerFileInfo) String() string {
return fs.FormatFileInfo(fi)
}
// FileInfoHeader creates a partially-populated [FileHeader] from an
// FileInfoHeader creates a partially-populated FileHeader from an
// fs.FileInfo.
// Because fs.FileInfo's Name method returns only the base name of
// the file it describes, it may be necessary to modify the Name field
@@ -245,7 +245,7 @@ func timeZone(offset time.Duration) *time.Location {
// msDosTimeToTime converts an MS-DOS date and time into a time.Time.
// The resolution is 2s.
// See: https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-dosdatetimetofiletime
// See: https://msdn.microsoft.com/en-us/library/ms724247(v=VS.85).aspx
func msDosTimeToTime(dosDate, dosTime uint16) time.Time {
return time.Date(
// date bits 0-4: day of month; 5-8: month; 9-15: years since 1980
@@ -265,7 +265,7 @@ func msDosTimeToTime(dosDate, dosTime uint16) time.Time {
// timeToMsDosTime converts a time.Time to an MS-DOS date and time.
// The resolution is 2s.
// See: https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-filetimetodosdatetime
// See: https://msdn.microsoft.com/en-us/library/ms724274(v=VS.85).aspx
func timeToMsDosTime(t time.Time) (fDate uint16, fTime uint16) {
fDate = uint16(t.Day() + int(t.Month())<<5 + (t.Year()-1980)<<9)
fTime = uint16(t.Second()/2 + t.Minute()<<5 + t.Hour()<<11)
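
Both conversions use the classic MS-DOS packing spelled out in the code: the date word holds day (bits 0-4), month (5-8), and years since 1980 (9-15); the time word holds seconds/2 (0-4), minutes (5-10), and hours (11-15). A standalone round-trip sketch of that layout, with hypothetical packDOS/unpackDOS helpers rather than the unexported functions above:

package main

import (
    "fmt"
    "time"
)

// packDOS mirrors the 2-second-resolution MS-DOS encoding described above.
func packDOS(t time.Time) (date, tod uint16) {
    date = uint16(t.Day() | int(t.Month())<<5 | (t.Year()-1980)<<9)
    tod = uint16(t.Second()/2 | t.Minute()<<5 | t.Hour()<<11)
    return
}

// unpackDOS reads the same bit fields back out.
func unpackDOS(date, tod uint16) time.Time {
    return time.Date(
        1980+int(date>>9),       // years since 1980
        time.Month(date>>5&0xf), // month
        int(date&0x1f),          // day
        int(tod>>11),            // hour
        int(tod>>5&0x3f),        // minute
        int(tod&0x1f)*2,         // seconds are stored halved
        0, time.UTC)
}

func main() {
    t := time.Date(2023, time.July, 7, 16, 49, 14, 0, time.UTC)
    d, tm := packDOS(t)
    fmt.Printf("packed: %#04x %#04x\n", d, tm)
    fmt.Println(unpackDOS(d, tm)) // 2023-07-07 16:49:14 +0000 UTC
}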
@@ -273,17 +273,17 @@ func timeToMsDosTime(t time.Time) (fDate uint16, fTime uint16) {
}
// ModTime returns the modification time in UTC using the legacy
// [ModifiedDate] and [ModifiedTime] fields.
// ModifiedDate and ModifiedTime fields.
//
// Deprecated: Use [Modified] instead.
// Deprecated: Use Modified instead.
func (h *FileHeader) ModTime() time.Time {
return msDosTimeToTime(h.ModifiedDate, h.ModifiedTime)
}
// SetModTime sets the [Modified], [ModifiedTime], and [ModifiedDate] fields
// SetModTime sets the Modified, ModifiedTime, and ModifiedDate fields
// to the given time in UTC.
//
// Deprecated: Use [Modified] instead.
// Deprecated: Use Modified instead.
func (h *FileHeader) SetModTime(t time.Time) {
t = t.UTC() // Convert to UTC for compatibility
h.Modified = t
@@ -309,7 +309,7 @@ const (
msdosReadOnly = 0x01
)
// Mode returns the permission and mode bits for the [FileHeader].
// Mode returns the permission and mode bits for the FileHeader.
func (h *FileHeader) Mode() (mode fs.FileMode) {
switch h.CreatorVersion >> 8 {
case creatorUnix, creatorMacOSX:
@@ -323,7 +323,7 @@ func (h *FileHeader) Mode() (mode fs.FileMode) {
return mode
}
// SetMode changes the permission and mode bits for the [FileHeader].
// SetMode changes the permission and mode bits for the FileHeader.
func (h *FileHeader) SetMode(mode fs.FileMode) {
h.CreatorVersion = h.CreatorVersion&0xff | creatorUnix<<8
h.ExternalAttrs = fileModeToUnixMode(mode) << 16

Binary file not shown.

View File

@@ -11,7 +11,6 @@ import (
"hash"
"hash/crc32"
"io"
"io/fs"
"strings"
"unicode/utf8"
)
@@ -41,7 +40,7 @@ type header struct {
raw bool
}
// NewWriter returns a new [Writer] writing a zip file to w.
// NewWriter returns a new Writer writing a zip file to w.
func NewWriter(w io.Writer) *Writer {
return &Writer{cw: &countWriter{w: bufio.NewWriter(w)}}
}
@@ -64,7 +63,7 @@ func (w *Writer) Flush() error {
}
// SetComment sets the end-of-central-directory comment field.
// It can only be called before [Writer.Close].
// It can only be called before Close.
func (w *Writer) SetComment(comment string) error {
if len(comment) > uint16max {
return errors.New("zip: Writer.Comment too long")
@@ -208,15 +207,14 @@ func (w *Writer) Close() error {
}
// Create adds a file to the zip file using the provided name.
// It returns a [Writer] to which the file contents should be written.
// The file contents will be compressed using the [Deflate] method.
// It returns a Writer to which the file contents should be written.
// The file contents will be compressed using the Deflate method.
// The name must be a relative path: it must not start with a drive
// letter (e.g. C:) or leading slash, and only forward slashes are
// allowed. To create a directory instead of a file, add a trailing
// slash to the name. Duplicate names will not overwrite previous entries
// and are appended to the zip file.
// The file's contents must be written to the [io.Writer] before the next
// call to [Writer.Create], [Writer.CreateHeader], or [Writer.Close].
// slash to the name.
// The file's contents must be written to the io.Writer before the next
// call to Create, CreateHeader, or Close.
func (w *Writer) Create(name string) (io.Writer, error) {
header := &FileHeader{
Name: name,
@@ -263,13 +261,13 @@ func (w *Writer) prepare(fh *FileHeader) error {
return nil
}
// CreateHeader adds a file to the zip archive using the provided [FileHeader]
// for the file metadata. [Writer] takes ownership of fh and may mutate
// its fields. The caller must not modify fh after calling [Writer.CreateHeader].
// CreateHeader adds a file to the zip archive using the provided FileHeader
// for the file metadata. Writer takes ownership of fh and may mutate
// its fields. The caller must not modify fh after calling CreateHeader.
//
// This returns a [Writer] to which the file contents should be written.
// This returns a Writer to which the file contents should be written.
// The file's contents must be written to the io.Writer before the next
// call to [Writer.Create], [Writer.CreateHeader], [Writer.CreateRaw], or [Writer.Close].
// call to Create, CreateHeader, CreateRaw, or Close.
func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) {
if err := w.prepare(fh); err != nil {
return nil, err
@@ -407,8 +405,8 @@ func writeHeader(w io.Writer, h *header) error {
// flags.
if h.raw && !h.hasDataDescriptor() {
b.uint32(h.CRC32)
b.uint32(uint32(min(h.CompressedSize64, uint32max)))
b.uint32(uint32(min(h.UncompressedSize64, uint32max)))
b.uint32(uint32(min64(h.CompressedSize64, uint32max)))
b.uint32(uint32(min64(h.UncompressedSize64, uint32max)))
} else {
// When this package handle the compression, these values are
// always written to the trailing data descriptor.
@@ -428,23 +426,26 @@ func writeHeader(w io.Writer, h *header) error {
return err
}
// CreateRaw adds a file to the zip archive using the provided [FileHeader] and
// returns a [Writer] to which the file contents should be written. The file's
// contents must be written to the io.Writer before the next call to [Writer.Create],
// [Writer.CreateHeader], [Writer.CreateRaw], or [Writer.Close].
func min64(x, y uint64) uint64 {
if x < y {
return x
}
return y
}
// CreateRaw adds a file to the zip archive using the provided FileHeader and
// returns a Writer to which the file contents should be written. The file's
// contents must be written to the io.Writer before the next call to Create,
// CreateHeader, CreateRaw, or Close.
//
// In contrast to [Writer.CreateHeader], the bytes passed to Writer are not compressed.
//
// CreateRaw's argument is stored in w. If the argument is a pointer to the embedded
// [FileHeader] in a [File] obtained from a [Reader] created from in-memory data,
// then w will refer to all of that memory.
// In contrast to CreateHeader, the bytes passed to Writer are not compressed.
func (w *Writer) CreateRaw(fh *FileHeader) (io.Writer, error) {
if err := w.prepare(fh); err != nil {
return nil, err
}
fh.CompressedSize = uint32(min(fh.CompressedSize64, uint32max))
fh.UncompressedSize = uint32(min(fh.UncompressedSize64, uint32max))
fh.CompressedSize = uint32(min64(fh.CompressedSize64, uint32max))
fh.UncompressedSize = uint32(min64(fh.UncompressedSize64, uint32max))
h := &header{
FileHeader: fh,
@@ -469,17 +470,14 @@ func (w *Writer) CreateRaw(fh *FileHeader) (io.Writer, error) {
return fw, nil
}
// Copy copies the file f (obtained from a [Reader]) into w. It copies the raw
// Copy copies the file f (obtained from a Reader) into w. It copies the raw
// form directly bypassing decompression, compression, and validation.
func (w *Writer) Copy(f *File) error {
r, err := f.OpenRaw()
if err != nil {
return err
}
// Copy the FileHeader so w doesn't store a pointer to the data
// of f's entire archive. See #65499.
fh := f.FileHeader
fw, err := w.CreateRaw(&fh)
fw, err := w.CreateRaw(&f.FileHeader)
if err != nil {
return err
}
@@ -488,7 +486,7 @@ func (w *Writer) Copy(f *File) error {
}
// RegisterCompressor registers or overrides a custom compressor for a specific
// method ID. If a compressor for a given method is not found, [Writer] will
// method ID. If a compressor for a given method is not found, Writer will
// default to looking up the compressor at the package level.
func (w *Writer) RegisterCompressor(method uint16, comp Compressor) {
if w.compressors == nil {
@@ -497,44 +495,6 @@ func (w *Writer) RegisterCompressor(method uint16, comp Compressor) {
w.compressors[method] = comp
}
// AddFS adds the files from fs.FS to the archive.
// It walks the directory tree starting at the root of the filesystem
// adding each file to the zip using deflate while maintaining the directory structure.
func (w *Writer) AddFS(fsys fs.FS) error {
return fs.WalkDir(fsys, ".", func(name string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
if d.IsDir() {
return nil
}
info, err := d.Info()
if err != nil {
return err
}
if !info.Mode().IsRegular() {
return errors.New("zip: cannot add non-regular file")
}
h, err := FileInfoHeader(info)
if err != nil {
return err
}
h.Name = name
h.Method = Deflate
fw, err := w.CreateHeader(h)
if err != nil {
return err
}
f, err := fsys.Open(name)
if err != nil {
return err
}
defer f.Close()
_, err = io.Copy(fw, f)
return err
})
}
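
The AddFS implementation shown above (present on only one side of this diff) walks the filesystem and deflates every regular file while keeping the directory structure. A hedged usage sketch, assuming a Go release that ships Writer.AddFS, with testing/fstest standing in for a real directory tree:

package main

import (
    "archive/zip"
    "bytes"
    "log"
    "testing/fstest"
)

func main() {
    fsys := fstest.MapFS{
        "readme.md":   {Data: []byte("hello")},
        "src/main.go": {Data: []byte("package main")},
    }

    buf := new(bytes.Buffer)
    zw := zip.NewWriter(buf)
    if err := zw.AddFS(fsys); err != nil { // walks fsys, adding each regular file with Deflate
        log.Fatal(err)
    }
    if err := zw.Close(); err != nil {
        log.Fatal(err)
    }
    log.Printf("archive is %d bytes", buf.Len())
}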
func (w *Writer) compressor(method uint16) Compressor {
comp := w.compressors[method]
if comp == nil {
@@ -609,7 +569,7 @@ func (w *fileWriter) writeDataDescriptor() error {
}
// Write data descriptor. This is more complicated than one would
// think, see e.g. comments in zipfile.c:putextended() and
// https://bugs.openjdk.org/browse/JDK-7073588.
// http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=7073588.
// The approach here is to write 8 byte sizes if needed without
// adding a zip64 extra in the local header (too late anyway).
var buf []byte

View File

@@ -16,7 +16,6 @@ import (
"os"
"strings"
"testing"
"testing/fstest"
"time"
)
@@ -603,71 +602,3 @@ func BenchmarkCompressedZipGarbage(b *testing.B) {
}
})
}
func writeTestsToFS(tests []WriteTest) fs.FS {
fsys := fstest.MapFS{}
for _, wt := range tests {
fsys[wt.Name] = &fstest.MapFile{
Data: wt.Data,
Mode: wt.Mode,
}
}
return fsys
}
func TestWriterAddFS(t *testing.T) {
buf := new(bytes.Buffer)
w := NewWriter(buf)
tests := []WriteTest{
{
Name: "file.go",
Data: []byte("hello"),
Mode: 0644,
},
{
Name: "subfolder/another.go",
Data: []byte("world"),
Mode: 0644,
},
}
err := w.AddFS(writeTestsToFS(tests))
if err != nil {
t.Fatal(err)
}
if err := w.Close(); err != nil {
t.Fatal(err)
}
// read it back
r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
if err != nil {
t.Fatal(err)
}
for i, wt := range tests {
testReadFile(t, r.File[i], &wt)
}
}
func TestIssue61875(t *testing.T) {
buf := new(bytes.Buffer)
w := NewWriter(buf)
tests := []WriteTest{
{
Name: "symlink",
Data: []byte("../link/target"),
Method: Deflate,
Mode: 0755 | fs.ModeSymlink,
},
{
Name: "device",
Data: []byte(""),
Method: Deflate,
Mode: 0755 | fs.ModeDevice,
},
}
err := w.AddFS(writeTestsToFS(tests))
if err == nil {
t.Errorf("expected error, got nil")
}
}

View File

@@ -8,14 +8,13 @@ package zip
import (
"bytes"
"cmp"
"errors"
"fmt"
"hash"
"internal/testenv"
"io"
"runtime"
"slices"
"sort"
"strings"
"testing"
"time"
@@ -199,6 +198,13 @@ func (r *rleBuffer) Write(p []byte) (n int, err error) {
return len(p), nil
}
func min(x, y int64) int64 {
if x < y {
return x
}
return y
}
func memset(a []byte, b byte) {
if len(a) == 0 {
return
@@ -215,8 +221,9 @@ func (r *rleBuffer) ReadAt(p []byte, off int64) (n int, err error) {
if len(p) == 0 {
return
}
skipParts, _ := slices.BinarySearchFunc(r.buf, off, func(rb repeatedByte, off int64) int {
return cmp.Compare(rb.off+rb.n, off)
skipParts := sort.Search(len(r.buf), func(i int) bool {
part := &r.buf[i]
return part.off+part.n > off
})
parts := r.buf[skipParts:]
if len(parts) > 0 {
@@ -590,7 +597,7 @@ func testZip64(t testing.TB, size int64) *rleBuffer {
}
// read back zip file and check that we get to the end of it
r, err := NewReader(buf, buf.Size())
r, err := NewReader(buf, int64(buf.Size()))
if err != nil {
t.Fatal("reader:", err)
}
@@ -814,6 +821,8 @@ func TestSuffixSaver(t *testing.T) {
type zeros struct{}
func (zeros) Read(p []byte) (int, error) {
clear(p)
for i := range p {
p[i] = 0
}
return len(p), nil
}

View File

@@ -41,21 +41,24 @@ type Reader struct {
const minReadBufferSize = 16
const maxConsecutiveEmptyReads = 100
// NewReaderSize returns a new [Reader] whose buffer has at least the specified
// size. If the argument io.Reader is already a [Reader] with large enough
// size, it returns the underlying [Reader].
// NewReaderSize returns a new Reader whose buffer has at least the specified
// size. If the argument io.Reader is already a Reader with large enough
// size, it returns the underlying Reader.
func NewReaderSize(rd io.Reader, size int) *Reader {
// Is it already a Reader?
b, ok := rd.(*Reader)
if ok && len(b.buf) >= size {
return b
}
if size < minReadBufferSize {
size = minReadBufferSize
}
r := new(Reader)
r.reset(make([]byte, max(size, minReadBufferSize)), rd)
r.reset(make([]byte, size), rd)
return r
}
// NewReader returns a new [Reader] whose buffer has the default size.
// NewReader returns a new Reader whose buffer has the default size.
func NewReader(rd io.Reader) *Reader {
return NewReaderSize(rd, defaultBufSize)
}
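
Since NewReaderSize hands back its argument whenever it is already a *Reader with a large enough buffer, wrapping is effectively idempotent, and requests below the minimum of 16 shown above are rounded up. A quick illustration:

package main

import (
    "bufio"
    "fmt"
    "strings"
)

func main() {
    base := strings.NewReader("some input")

    small := bufio.NewReaderSize(base, 16)   // at the minimum buffer size
    same := bufio.NewReaderSize(small, 16)   // already big enough: returned as-is
    bigger := bufio.NewReaderSize(small, 64) // too small: a new Reader wraps small

    fmt.Println(same == small, bigger == small) // true false
    fmt.Println(small.Size(), bigger.Size())    // 16 64
}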
@@ -65,9 +68,9 @@ func (b *Reader) Size() int { return len(b.buf) }
// Reset discards any buffered data, resets all state, and switches
// the buffered reader to read from r.
// Calling Reset on the zero value of [Reader] initializes the internal buffer
// Calling Reset on the zero value of Reader initializes the internal buffer
// to the default size.
// Calling b.Reset(b) (that is, resetting a [Reader] to itself) does nothing.
// Calling b.Reset(b) (that is, resetting a Reader to itself) does nothing.
func (b *Reader) Reset(r io.Reader) {
// If a Reader r is passed to NewReader, NewReader will return r.
// Different layers of code may do that, and then later pass r
@@ -132,9 +135,9 @@ func (b *Reader) readErr() error {
// Peek returns the next n bytes without advancing the reader. The bytes stop
// being valid at the next read call. If Peek returns fewer than n bytes, it
// also returns an error explaining why the read is short. The error is
// [ErrBufferFull] if n is larger than b's buffer size.
// ErrBufferFull if n is larger than b's buffer size.
//
// Calling Peek prevents a [Reader.UnreadByte] or [Reader.UnreadRune] call from succeeding
// Calling Peek prevents a UnreadByte or UnreadRune call from succeeding
// until the next read operation.
func (b *Reader) Peek(n int) ([]byte, error) {
if n < 0 {
@@ -204,10 +207,10 @@ func (b *Reader) Discard(n int) (discarded int, err error) {
// Read reads data into p.
// It returns the number of bytes read into p.
// The bytes are taken from at most one Read on the underlying [Reader],
// The bytes are taken from at most one Read on the underlying Reader,
// hence n may be less than len(p).
// To read exactly len(p) bytes, use io.ReadFull(b, p).
// If the underlying [Reader] can return a non-zero count with io.EOF,
// If the underlying Reader can return a non-zero count with io.EOF,
// then this Read method can do so as well; see the [io.Reader] docs.
func (b *Reader) Read(p []byte) (n int, err error) {
n = len(p)
@@ -277,7 +280,7 @@ func (b *Reader) ReadByte() (byte, error) {
// UnreadByte unreads the last byte. Only the most recently read byte can be unread.
//
// UnreadByte returns an error if the most recent method called on the
// [Reader] was not a read operation. Notably, [Reader.Peek], [Reader.Discard], and [Reader.WriteTo] are not
// Reader was not a read operation. Notably, Peek, Discard, and WriteTo are not
// considered read operations.
func (b *Reader) UnreadByte() error {
if b.lastByte < 0 || b.r == 0 && b.w > 0 {
@@ -318,8 +321,8 @@ func (b *Reader) ReadRune() (r rune, size int, err error) {
}
// UnreadRune unreads the last rune. If the most recent method called on
// the [Reader] was not a [Reader.ReadRune], [Reader.UnreadRune] returns an error. (In this
// regard it is stricter than [Reader.UnreadByte], which will unread the last byte
// the Reader was not a ReadRune, UnreadRune returns an error. (In this
// regard it is stricter than UnreadByte, which will unread the last byte
// from any read operation.)
func (b *Reader) UnreadRune() error {
if b.lastRuneSize < 0 || b.r < b.lastRuneSize {
@@ -339,10 +342,10 @@ func (b *Reader) Buffered() int { return b.w - b.r }
// The bytes stop being valid at the next read.
// If ReadSlice encounters an error before finding a delimiter,
// it returns all the data in the buffer and the error itself (often io.EOF).
// ReadSlice fails with error [ErrBufferFull] if the buffer fills without a delim.
// ReadSlice fails with error ErrBufferFull if the buffer fills without a delim.
// Because the data returned from ReadSlice will be overwritten
// by the next I/O operation, most clients should use
// [Reader.ReadBytes] or ReadString instead.
// ReadBytes or ReadString instead.
// ReadSlice returns err != nil if and only if line does not end in delim.
func (b *Reader) ReadSlice(delim byte) (line []byte, err error) {
s := 0 // search start index
@@ -386,7 +389,7 @@ func (b *Reader) ReadSlice(delim byte) (line []byte, err error) {
}
// ReadLine is a low-level line-reading primitive. Most callers should use
// [Reader.ReadBytes]('\n') or [Reader.ReadString]('\n') instead or use a [Scanner].
// ReadBytes('\n') or ReadString('\n') instead or use a Scanner.
//
// ReadLine tries to return a single line, not including the end-of-line bytes.
// If the line was too long for the buffer then isPrefix is set and the
@@ -398,7 +401,7 @@ func (b *Reader) ReadSlice(delim byte) (line []byte, err error) {
//
// The text returned from ReadLine does not include the line end ("\r\n" or "\n").
// No indication or error is given if the input ends without a final line end.
// Calling [Reader.UnreadByte] after ReadLine will always unread the last byte read
// Calling UnreadByte after ReadLine will always unread the last byte read
// (possibly a character belonging to the line end) even if that byte is not
// part of the line returned by ReadLine.
func (b *Reader) ReadLine() (line []byte, isPrefix bool, err error) {
@@ -508,9 +511,9 @@ func (b *Reader) ReadString(delim byte) (string, error) {
}
// WriteTo implements io.WriterTo.
// This may make multiple calls to the [Reader.Read] method of the underlying [Reader].
// If the underlying reader supports the [Reader.WriteTo] method,
// this calls the underlying [Reader.WriteTo] without buffering.
// This may make multiple calls to the Read method of the underlying Reader.
// If the underlying reader supports the WriteTo method,
// this calls the underlying WriteTo without buffering.
func (b *Reader) WriteTo(w io.Writer) (n int64, err error) {
b.lastByte = -1
b.lastRuneSize = -1
@@ -555,7 +558,7 @@ func (b *Reader) WriteTo(w io.Writer) (n int64, err error) {
var errNegativeWrite = errors.New("bufio: writer returned negative count from Write")
// writeBuf writes the [Reader]'s buffer to the writer.
// writeBuf writes the Reader's buffer to the writer.
func (b *Reader) writeBuf(w io.Writer) (int64, error) {
n, err := w.Write(b.buf[b.r:b.w])
if n < 0 {
@@ -567,12 +570,12 @@ func (b *Reader) writeBuf(w io.Writer) (int64, error) {
// buffered output
// Writer implements buffering for an [io.Writer] object.
// If an error occurs writing to a [Writer], no more data will be
// accepted and all subsequent writes, and [Writer.Flush], will return the error.
// Writer implements buffering for an io.Writer object.
// If an error occurs writing to a Writer, no more data will be
// accepted and all subsequent writes, and Flush, will return the error.
// After all data has been written, the client should call the
// [Writer.Flush] method to guarantee all data has been forwarded to
// the underlying [io.Writer].
// Flush method to guarantee all data has been forwarded to
// the underlying io.Writer.
type Writer struct {
err error
buf []byte
@@ -580,9 +583,9 @@ type Writer struct {
wr io.Writer
}
// NewWriterSize returns a new [Writer] whose buffer has at least the specified
// size. If the argument io.Writer is already a [Writer] with large enough
// size, it returns the underlying [Writer].
// NewWriterSize returns a new Writer whose buffer has at least the specified
// size. If the argument io.Writer is already a Writer with large enough
// size, it returns the underlying Writer.
func NewWriterSize(w io.Writer, size int) *Writer {
// Is it already a Writer?
b, ok := w.(*Writer)
@@ -598,9 +601,9 @@ func NewWriterSize(w io.Writer, size int) *Writer {
}
}
// NewWriter returns a new [Writer] whose buffer has the default size.
// If the argument io.Writer is already a [Writer] with large enough buffer size,
// it returns the underlying [Writer].
// NewWriter returns a new Writer whose buffer has the default size.
// If the argument io.Writer is already a Writer with large enough buffer size,
// it returns the underlying Writer.
func NewWriter(w io.Writer) *Writer {
return NewWriterSize(w, defaultBufSize)
}
@@ -610,9 +613,9 @@ func (b *Writer) Size() int { return len(b.buf) }
// Reset discards any unflushed buffered data, clears any error, and
// resets b to write its output to w.
// Calling Reset on the zero value of [Writer] initializes the internal buffer
// Calling Reset on the zero value of Writer initializes the internal buffer
// to the default size.
// Calling w.Reset(w) (that is, resetting a [Writer] to itself) does nothing.
// Calling w.Reset(w) (that is, resetting a Writer to itself) does nothing.
func (b *Writer) Reset(w io.Writer) {
// If a Writer w is passed to NewWriter, NewWriter will return w.
// Different layers of code may do that, and then later pass w
@@ -628,7 +631,7 @@ func (b *Writer) Reset(w io.Writer) {
b.wr = w
}
// Flush writes any buffered data to the underlying [io.Writer].
// Flush writes any buffered data to the underlying io.Writer.
func (b *Writer) Flush() error {
if b.err != nil {
return b.err
@@ -657,7 +660,7 @@ func (b *Writer) Available() int { return len(b.buf) - b.n }
// AvailableBuffer returns an empty buffer with b.Available() capacity.
// This buffer is intended to be appended to and
// passed to an immediately succeeding [Writer.Write] call.
// passed to an immediately succeeding Write call.
// The buffer is only valid until the next write operation on b.
func (b *Writer) AvailableBuffer() []byte {
return b.buf[b.n:][:0]
@@ -774,7 +777,7 @@ func (b *Writer) WriteString(s string) (int, error) {
return nn, nil
}
// ReadFrom implements [io.ReaderFrom]. If the underlying writer
// ReadFrom implements io.ReaderFrom. If the underlying writer
// supports the ReadFrom method, this calls the underlying ReadFrom.
// If there is buffered data and an underlying ReadFrom, this fills
// the buffer and writes it before calling ReadFrom.
@@ -826,14 +829,14 @@ func (b *Writer) ReadFrom(r io.Reader) (n int64, err error) {
// buffered input and output
// ReadWriter stores pointers to a [Reader] and a [Writer].
// It implements [io.ReadWriter].
// ReadWriter stores pointers to a Reader and a Writer.
// It implements io.ReadWriter.
type ReadWriter struct {
*Reader
*Writer
}
// NewReadWriter allocates a new [ReadWriter] that dispatches to r and w.
// NewReadWriter allocates a new ReadWriter that dispatches to r and w.
func NewReadWriter(r *Reader, w *Writer) *ReadWriter {
return &ReadWriter{r, w}
}

View File

@@ -6,7 +6,6 @@ package bufio_test
import (
"bufio"
"bytes"
"fmt"
"os"
"strconv"
@@ -138,36 +137,3 @@ func ExampleScanner_emptyFinalToken() {
}
// Output: "1" "2" "3" "4" ""
}
// Use a Scanner with a custom split function to parse a comma-separated
// list with an empty final value but stops at the token "STOP".
func ExampleScanner_earlyStop() {
onComma := func(data []byte, atEOF bool) (advance int, token []byte, err error) {
i := bytes.IndexByte(data, ',')
if i == -1 {
if !atEOF {
return 0, nil, nil
}
// If we have reached the end, return the last token.
return 0, data, bufio.ErrFinalToken
}
// If the token is "STOP", stop the scanning and ignore the rest.
if string(data[:i]) == "STOP" {
return i + 1, nil, bufio.ErrFinalToken
}
// Otherwise, return the token before the comma.
return i + 1, data[:i], nil
}
const input = "1,2,STOP,4,"
scanner := bufio.NewScanner(strings.NewReader(input))
scanner.Split(onComma)
for scanner.Scan() {
fmt.Printf("Got a token %q\n", scanner.Text())
}
if err := scanner.Err(); err != nil {
fmt.Fprintln(os.Stderr, "reading input:", err)
}
// Output:
// Got a token "1"
// Got a token "2"
}

View File

@@ -13,19 +13,19 @@ import (
// Scanner provides a convenient interface for reading data such as
// a file of newline-delimited lines of text. Successive calls to
// the [Scanner.Scan] method will step through the 'tokens' of a file, skipping
// the Scan method will step through the 'tokens' of a file, skipping
// the bytes between the tokens. The specification of a token is
// defined by a split function of type [SplitFunc]; the default split
// function breaks the input into lines with line termination stripped. [Scanner.Split]
// defined by a split function of type SplitFunc; the default split
// function breaks the input into lines with line termination stripped. Split
// functions are defined in this package for scanning a file into
// lines, bytes, UTF-8-encoded runes, and space-delimited words. The
// client may instead provide a custom split function.
//
// Scanning stops unrecoverably at EOF, the first I/O error, or a token too
// large to fit in the [Scanner.Buffer]. When a scan stops, the reader may have
// large to fit in the buffer. When a scan stops, the reader may have
// advanced arbitrarily far past the last token. Programs that need more
// control over error handling or large tokens, or must run sequential scans
// on a reader, should use [bufio.Reader] instead.
// on a reader, should use bufio.Reader instead.
type Scanner struct {
r io.Reader // The reader provided by the client.
split SplitFunc // The function to split the tokens.
@@ -42,23 +42,21 @@ type Scanner struct {
// SplitFunc is the signature of the split function used to tokenize the
// input. The arguments are an initial substring of the remaining unprocessed
// data and a flag, atEOF, that reports whether the [Reader] has no more data
// data and a flag, atEOF, that reports whether the Reader has no more data
// to give. The return values are the number of bytes to advance the input
// and the next token to return to the user, if any, plus an error, if any.
//
// Scanning stops if the function returns an error, in which case some of
// the input may be discarded. If that error is [ErrFinalToken], scanning
// stops with no error. A non-nil token delivered with [ErrFinalToken]
// will be the last token, and a nil token with [ErrFinalToken]
// immediately stops the scanning.
// the input may be discarded. If that error is ErrFinalToken, scanning
// stops with no error.
//
// Otherwise, the [Scanner] advances the input. If the token is not nil,
// the [Scanner] returns it to the user. If the token is nil, the
// Otherwise, the Scanner advances the input. If the token is not nil,
// the Scanner returns it to the user. If the token is nil, the
// Scanner reads more data and continues scanning; if there is no more
// data--if atEOF was true--the [Scanner] returns. If the data does not
// data--if atEOF was true--the Scanner returns. If the data does not
// yet hold a complete token, for instance if it has no newline while
// scanning lines, a [SplitFunc] can return (0, nil, nil) to signal the
// [Scanner] to read more data into the slice and try again with a
// scanning lines, a SplitFunc can return (0, nil, nil) to signal the
// Scanner to read more data into the slice and try again with a
// longer slice starting at the same point in the input.
//
// The function is never called with an empty data slice unless atEOF
@@ -76,7 +74,7 @@ var (
const (
// MaxScanTokenSize is the maximum size used to buffer a token
// unless the user provides an explicit buffer with [Scanner.Buffer].
// unless the user provides an explicit buffer with Scanner.Buffer.
// The actual maximum token size may be smaller as the buffer
// may need to include, for instance, a newline.
MaxScanTokenSize = 64 * 1024
@@ -84,8 +82,8 @@ const (
startBufSize = 4096 // Size of initial allocation for buffer.
)
// NewScanner returns a new [Scanner] to read from r.
// The split function defaults to [ScanLines].
// NewScanner returns a new Scanner to read from r.
// The split function defaults to ScanLines.
func NewScanner(r io.Reader) *Scanner {
return &Scanner{
r: r,
@@ -94,7 +92,7 @@ func NewScanner(r io.Reader) *Scanner {
}
}
// Err returns the first non-EOF error that was encountered by the [Scanner].
// Err returns the first non-EOF error that was encountered by the Scanner.
func (s *Scanner) Err() error {
if s.err == io.EOF {
return nil
@@ -102,36 +100,34 @@ func (s *Scanner) Err() error {
return s.err
}
// Bytes returns the most recent token generated by a call to [Scanner.Scan].
// Bytes returns the most recent token generated by a call to Scan.
// The underlying array may point to data that will be overwritten
// by a subsequent call to Scan. It does no allocation.
func (s *Scanner) Bytes() []byte {
return s.token
}
// Text returns the most recent token generated by a call to [Scanner.Scan]
// Text returns the most recent token generated by a call to Scan
// as a newly allocated string holding its bytes.
func (s *Scanner) Text() string {
return string(s.token)
}
// ErrFinalToken is a special sentinel error value. It is intended to be
// returned by a Split function to indicate that the scanning should stop
// with no error. If the token being delivered with this error is not nil,
// the token is the last token.
//
// returned by a Split function to indicate that the token being delivered
// with the error is the last token and scanning should stop after this one.
// After ErrFinalToken is received by Scan, scanning stops with no error.
// The value is useful to stop processing early or when it is necessary to
// deliver a final empty token (which is different from a nil token).
// One could achieve the same behavior with a custom error value but
// providing one here is tidier.
// deliver a final empty token. One could achieve the same behavior
// with a custom error value but providing one here is tidier.
// See the emptyFinalToken example for a use of this value.
var ErrFinalToken = errors.New("final token")
// Scan advances the [Scanner] to the next token, which will then be
// available through the [Scanner.Bytes] or [Scanner.Text] method. It returns false when
// there are no more tokens, either by reaching the end of the input or an error.
// After Scan returns false, the [Scanner.Err] method will return any error that
// occurred during scanning, except that if it was [io.EOF], [Scanner.Err]
// Scan advances the Scanner to the next token, which will then be
// available through the Bytes or Text method. It returns false when the
// scan stops, either by reaching the end of the input or an error.
// After Scan returns false, the Err method will return any error that
// occurred during scanning, except that if it was io.EOF, Err
// will return nil.
// Scan panics if the split function returns too many empty
// tokens without advancing the input. This is a common error mode for
@@ -152,10 +148,7 @@ func (s *Scanner) Scan() bool {
if err == ErrFinalToken {
s.token = token
s.done = true
// When token is not nil, it means the scanning stops
// with a trailing token, and thus the return value
// should be true to indicate the existence of the token.
return token != nil
return true
}
s.setErr(err)
return false
@@ -205,7 +198,9 @@ func (s *Scanner) Scan() bool {
if newSize == 0 {
newSize = startBufSize
}
newSize = min(newSize, s.maxTokenSize)
if newSize > s.maxTokenSize {
newSize = s.maxTokenSize
}
newBuf := make([]byte, newSize)
copy(newBuf, s.buf[s.start:s.end])
s.buf = newBuf
@@ -260,13 +255,13 @@ func (s *Scanner) setErr(err error) {
}
}
// Buffer sets the initial buffer to use when scanning
// and the maximum size of buffer that may be allocated during scanning.
// The maximum token size must be less than the larger of max and cap(buf).
// If max <= cap(buf), [Scanner.Scan] will use this buffer only and do no allocation.
// Buffer sets the initial buffer to use when scanning and the maximum
// size of buffer that may be allocated during scanning. The maximum
// token size is the larger of max and cap(buf). If max <= cap(buf),
// Scan will use this buffer only and do no allocation.
//
// By default, [Scanner.Scan] uses an internal buffer and sets the
// maximum token size to [MaxScanTokenSize].
// By default, Scan uses an internal buffer and sets the
// maximum token size to MaxScanTokenSize.
//
// Buffer panics if it is called after scanning has started.
func (s *Scanner) Buffer(buf []byte, max int) {
@@ -277,8 +272,8 @@ func (s *Scanner) Buffer(buf []byte, max int) {
s.maxTokenSize = max
}
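
Under either wording, max caps how far the internal buffer may grow, so a token larger than that cap ends the scan with ErrTooLong instead of being returned. A short sketch of the two outcomes:

package main

import (
    "bufio"
    "errors"
    "fmt"
    "strings"
)

func main() {
    long := strings.Repeat("x", 100) + "\n"

    // A 64-byte cap: the 100-byte line cannot fit, so Scan stops with ErrTooLong.
    sc := bufio.NewScanner(strings.NewReader(long))
    sc.Buffer(make([]byte, 16), 64)
    fmt.Println(sc.Scan(), errors.Is(sc.Err(), bufio.ErrTooLong)) // false true

    // A generous cap lets the same line through.
    sc = bufio.NewScanner(strings.NewReader(long))
    sc.Buffer(make([]byte, 16), 1<<20)
    fmt.Println(sc.Scan(), len(sc.Text())) // true 100
}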
// Split sets the split function for the [Scanner].
// The default split function is [ScanLines].
// Split sets the split function for the Scanner.
// The default split function is ScanLines.
//
// Split panics if it is called after scanning has started.
func (s *Scanner) Split(split SplitFunc) {
@@ -290,7 +285,7 @@ func (s *Scanner) Split(split SplitFunc) {
// Split functions
// ScanBytes is a split function for a [Scanner] that returns each byte as a token.
// ScanBytes is a split function for a Scanner that returns each byte as a token.
func ScanBytes(data []byte, atEOF bool) (advance int, token []byte, err error) {
if atEOF && len(data) == 0 {
return 0, nil, nil
@@ -300,7 +295,7 @@ func ScanBytes(data []byte, atEOF bool) (advance int, token []byte, err error) {
var errorRune = []byte(string(utf8.RuneError))
// ScanRunes is a split function for a [Scanner] that returns each
// ScanRunes is a split function for a Scanner that returns each
// UTF-8-encoded rune as a token. The sequence of runes returned is
// equivalent to that from a range loop over the input as a string, which
// means that erroneous UTF-8 encodings translate to U+FFFD = "\xef\xbf\xbd".
@@ -346,7 +341,7 @@ func dropCR(data []byte) []byte {
return data
}
// ScanLines is a split function for a [Scanner] that returns each line of
// ScanLines is a split function for a Scanner that returns each line of
// text, stripped of any trailing end-of-line marker. The returned line may
// be empty. The end-of-line marker is one optional carriage return followed
// by one mandatory newline. In regular expression notation, it is `\r?\n`.
@@ -393,7 +388,7 @@ func isSpace(r rune) bool {
return false
}
// ScanWords is a split function for a [Scanner] that returns each
// ScanWords is a split function for a Scanner that returns each
// space-separated word of text, with surrounding spaces deleted. It will
// never return an empty string. The definition of space is set by
// unicode.IsSpace.

View File

@@ -68,7 +68,7 @@ func TestScanRune(t *testing.T) {
var i, runeCount int
var expect rune
// Use a string range loop to validate the sequence of runes.
for i, expect = range test {
for i, expect = range string(test) {
if !s.Scan() {
break
}

View File

@@ -6,12 +6,8 @@
# Usage: buildall.bash [-e] [pattern]
#
# buildall.bash builds the standard library for all Go-supported
# architectures.
#
# Originally the Go build system used it as a smoke test to quickly
# flag portability issues in builders named "misc-compile" or "all-compile".
# As of CL 464955, the build system uses make.bash -compile-only instead,
# so this script no longer runs in any automated fashion.
# architectures. It is used by the "misc-compile" trybot builders,
# as a smoke test to quickly flag portability issues.
#
# Options:
# -e: stop at first failure
@@ -41,7 +37,7 @@ GOROOT="$(cd .. && pwd)"
gettargets() {
../bin/go tool dist list | sed -e 's|/|-|' |
grep -E -v '^(android|ios)' # need C toolchain even for cross-compiling
egrep -v '^(android|ios)' # need C toolchain even for cross-compiling
echo linux-arm-arm5
}

View File

@@ -53,10 +53,10 @@ type int32 int32
// Range: -9223372036854775808 through 9223372036854775807.
type int64 int64
// float32 is the set of all IEEE 754 32-bit floating-point numbers.
// float32 is the set of all IEEE-754 32-bit floating-point numbers.
type float32 float32
// float64 is the set of all IEEE 754 64-bit floating-point numbers.
// float64 is the set of all IEEE-754 64-bit floating-point numbers.
type float64 float64
// complex64 is the set of all complex numbers with float32 real and
@@ -284,10 +284,9 @@ func panic(v any)
// by restoring normal execution and retrieves the error value passed to the
// call of panic. If recover is called outside the deferred function it will
// not stop a panicking sequence. In this case, or when the goroutine is not
// panicking, recover returns nil.
//
// Prior to Go 1.21, recover would also return nil if panic is called with
// a nil argument. See [panic] for details.
// panicking, or if the argument supplied to panic was nil, recover returns
// nil. Thus the return value from recover reports whether the goroutine is
// panicking.
func recover() any
// The print built-in function formats its arguments in an
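
The wording that mentions Go 1.21 refers to the change where panic(nil) is converted to a non-nil *runtime.PanicNilError, so a nil result from recover once again means the goroutine was not panicking (unless the panicnil GODEBUG opt-out is set). A small hedged sketch with a hypothetical safeCall helper:

package main

import "fmt"

// safeCall runs f and returns whatever recover observed; nil if f returned normally.
func safeCall(f func()) (recovered any) {
    defer func() { recovered = recover() }()
    f()
    return nil
}

func main() {
    fmt.Println(safeCall(func() {}))                // <nil>
    fmt.Println(safeCall(func() { panic("boom") })) // boom
    // Under Go 1.21+ semantics this prints a *runtime.PanicNilError value;
    // older toolchains (or GODEBUG=panicnil=1) print <nil>.
    fmt.Println(safeCall(func() { panic(nil) }))
}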

View File

@@ -98,18 +98,3 @@ func TestIndexNearPageBoundary(t *testing.T) {
}
q[len(q)-1] = 0
}
func TestCountNearPageBoundary(t *testing.T) {
t.Parallel()
b := dangerousSlice(t)
for i := range b {
c := Count(b[i:], []byte{1})
if c != 0 {
t.Fatalf("Count(b[%d:], {1})=%d, want 0\n", i, c)
}
c = Count(b[:i], []byte{0})
if c != i {
t.Fatalf("Count(b[:%d], {0})=%d, want %d\n", i, c, i)
}
}
}

View File

@@ -15,7 +15,7 @@ import (
// smallBufferSize is an initial allocation minimal capacity.
const smallBufferSize = 64
// A Buffer is a variable-sized buffer of bytes with [Buffer.Read] and [Buffer.Write] methods.
// A Buffer is a variable-sized buffer of bytes with Read and Write methods.
// The zero value for Buffer is an empty buffer ready to use.
type Buffer struct {
buf []byte // contents are the bytes buf[off : len(buf)]
@@ -48,21 +48,21 @@ const maxInt = int(^uint(0) >> 1)
// Bytes returns a slice of length b.Len() holding the unread portion of the buffer.
// The slice is valid for use only until the next buffer modification (that is,
// only until the next call to a method like [Buffer.Read], [Buffer.Write], [Buffer.Reset], or [Buffer.Truncate]).
// only until the next call to a method like Read, Write, Reset, or Truncate).
// The slice aliases the buffer content at least until the next buffer modification,
// so immediate changes to the slice will affect the result of future reads.
func (b *Buffer) Bytes() []byte { return b.buf[b.off:] }
// AvailableBuffer returns an empty buffer with b.Available() capacity.
// This buffer is intended to be appended to and
// passed to an immediately succeeding [Buffer.Write] call.
// passed to an immediately succeeding Write call.
// The buffer is only valid until the next write operation on b.
func (b *Buffer) AvailableBuffer() []byte { return b.buf[len(b.buf):] }
// String returns the contents of the unread portion of the buffer
// as a string. If the [Buffer] is a nil pointer, it returns "<nil>".
// as a string. If the Buffer is a nil pointer, it returns "<nil>".
//
// To build strings more efficiently, see the [strings.Builder] type.
// To build strings more efficiently, see the strings.Builder type.
func (b *Buffer) String() string {
if b == nil {
// Special case, useful in debugging.
@@ -102,7 +102,7 @@ func (b *Buffer) Truncate(n int) {
// Reset resets the buffer to be empty,
// but it retains the underlying storage for use by future writes.
// Reset is the same as [Buffer.Truncate](0).
// Reset is the same as Truncate(0).
func (b *Buffer) Reset() {
b.buf = b.buf[:0]
b.off = 0
@@ -160,7 +160,7 @@ func (b *Buffer) grow(n int) int {
// another n bytes. After Grow(n), at least n bytes can be written to the
// buffer without another allocation.
// If n is negative, Grow will panic.
// If the buffer can't grow it will panic with [ErrTooLarge].
// If the buffer can't grow it will panic with ErrTooLarge.
func (b *Buffer) Grow(n int) {
if n < 0 {
panic("bytes.Buffer.Grow: negative count")
@@ -171,7 +171,7 @@ func (b *Buffer) Grow(n int) {
// Write appends the contents of p to the buffer, growing the buffer as
// needed. The return value n is the length of p; err is always nil. If the
// buffer becomes too large, Write will panic with [ErrTooLarge].
// buffer becomes too large, Write will panic with ErrTooLarge.
func (b *Buffer) Write(p []byte) (n int, err error) {
b.lastRead = opInvalid
m, ok := b.tryGrowByReslice(len(p))
@@ -183,7 +183,7 @@ func (b *Buffer) Write(p []byte) (n int, err error) {
// WriteString appends the contents of s to the buffer, growing the buffer as
// needed. The return value n is the length of s; err is always nil. If the
// buffer becomes too large, WriteString will panic with [ErrTooLarge].
// buffer becomes too large, WriteString will panic with ErrTooLarge.
func (b *Buffer) WriteString(s string) (n int, err error) {
b.lastRead = opInvalid
m, ok := b.tryGrowByReslice(len(s))
@@ -193,16 +193,16 @@ func (b *Buffer) WriteString(s string) (n int, err error) {
return copy(b.buf[m:], s), nil
}
// MinRead is the minimum slice size passed to a [Buffer.Read] call by
// [Buffer.ReadFrom]. As long as the [Buffer] has at least MinRead bytes beyond
// what is required to hold the contents of r, [Buffer.ReadFrom] will not grow the
// MinRead is the minimum slice size passed to a Read call by
// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond
// what is required to hold the contents of r, ReadFrom will not grow the
// underlying buffer.
const MinRead = 512
// ReadFrom reads data from r until EOF and appends it to the buffer, growing
// the buffer as needed. The return value n is the number of bytes read. Any
// error except io.EOF encountered during the read is also returned. If the
// buffer becomes too large, ReadFrom will panic with [ErrTooLarge].
// buffer becomes too large, ReadFrom will panic with ErrTooLarge.
func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
b.lastRead = opInvalid
for {
@@ -253,7 +253,7 @@ func growSlice(b []byte, n int) []byte {
// WriteTo writes data to w until the buffer is drained or an error occurs.
// The return value n is the number of bytes written; it always fits into an
// int, but it is int64 to match the [io.WriterTo] interface. Any error
// int, but it is int64 to match the io.WriterTo interface. Any error
// encountered during the write is also returned.
func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
b.lastRead = opInvalid
@@ -279,9 +279,9 @@ func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
}
// WriteByte appends the byte c to the buffer, growing the buffer as needed.
// The returned error is always nil, but is included to match [bufio.Writer]'s
// The returned error is always nil, but is included to match bufio.Writer's
// WriteByte. If the buffer becomes too large, WriteByte will panic with
// [ErrTooLarge].
// ErrTooLarge.
func (b *Buffer) WriteByte(c byte) error {
b.lastRead = opInvalid
m, ok := b.tryGrowByReslice(1)
@@ -294,8 +294,8 @@ func (b *Buffer) WriteByte(c byte) error {
// WriteRune appends the UTF-8 encoding of Unicode code point r to the
// buffer, returning its length and an error, which is always nil but is
// included to match [bufio.Writer]'s WriteRune. The buffer is grown as needed;
// if it becomes too large, WriteRune will panic with [ErrTooLarge].
// included to match bufio.Writer's WriteRune. The buffer is grown as needed;
// if it becomes too large, WriteRune will panic with ErrTooLarge.
func (b *Buffer) WriteRune(r rune) (n int, err error) {
// Compare as uint32 to correctly handle negative runes.
if uint32(r) < utf8.RuneSelf {
@@ -313,7 +313,7 @@ func (b *Buffer) WriteRune(r rune) (n int, err error) {
// Read reads the next len(p) bytes from the buffer or until the buffer
// is drained. The return value n is the number of bytes read. If the
// buffer has no data to return, err is [io.EOF] (unless len(p) is zero);
// buffer has no data to return, err is io.EOF (unless len(p) is zero);
// otherwise it is nil.
func (b *Buffer) Read(p []byte) (n int, err error) {
b.lastRead = opInvalid
@@ -334,7 +334,7 @@ func (b *Buffer) Read(p []byte) (n int, err error) {
}
// Next returns a slice containing the next n bytes from the buffer,
// advancing the buffer as if the bytes had been returned by [Buffer.Read].
// advancing the buffer as if the bytes had been returned by Read.
// If there are fewer than n bytes in the buffer, Next returns the entire buffer.
// The slice is only valid until the next call to a read or write method.
func (b *Buffer) Next(n int) []byte {
@@ -352,7 +352,7 @@ func (b *Buffer) Next(n int) []byte {
}
// ReadByte reads and returns the next byte from the buffer.
// If no byte is available, it returns error [io.EOF].
// If no byte is available, it returns error io.EOF.
func (b *Buffer) ReadByte() (byte, error) {
if b.empty() {
// Buffer is empty, reset to recover space.
@@ -388,10 +388,10 @@ func (b *Buffer) ReadRune() (r rune, size int, err error) {
return r, n, nil
}
// UnreadRune unreads the last rune returned by [Buffer.ReadRune].
// UnreadRune unreads the last rune returned by ReadRune.
// If the most recent read or write operation on the buffer was
// not a successful [Buffer.ReadRune], UnreadRune returns an error. (In this regard
// it is stricter than [Buffer.UnreadByte], which will unread the last byte
// not a successful ReadRune, UnreadRune returns an error. (In this regard
// it is stricter than UnreadByte, which will unread the last byte
// from any read operation.)
func (b *Buffer) UnreadRune() error {
if b.lastRead <= opInvalid {
@@ -424,7 +424,7 @@ func (b *Buffer) UnreadByte() error {
// ReadBytes reads until the first occurrence of delim in the input,
// returning a slice containing the data up to and including the delimiter.
// If ReadBytes encounters an error before finding a delimiter,
// it returns the data read before the error and the error itself (often [io.EOF]).
// it returns the data read before the error and the error itself (often io.EOF).
// ReadBytes returns err != nil if and only if the returned data does not end in
// delim.
func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) {
@@ -452,7 +452,7 @@ func (b *Buffer) readSlice(delim byte) (line []byte, err error) {
// ReadString reads until the first occurrence of delim in the input,
// returning a string containing the data up to and including the delimiter.
// If ReadString encounters an error before finding a delimiter,
// it returns the data read before the error and the error itself (often [io.EOF]).
// it returns the data read before the error and the error itself (often io.EOF).
// ReadString returns err != nil if and only if the returned data does not end
// in delim.
func (b *Buffer) ReadString(delim byte) (line string, err error) {
@@ -460,23 +460,23 @@ func (b *Buffer) ReadString(delim byte) (line string, err error) {
return string(slice), err
}
// NewBuffer creates and initializes a new [Buffer] using buf as its
// initial contents. The new [Buffer] takes ownership of buf, and the
// NewBuffer creates and initializes a new Buffer using buf as its
// initial contents. The new Buffer takes ownership of buf, and the
// caller should not use buf after this call. NewBuffer is intended to
// prepare a [Buffer] to read existing data. It can also be used to set
// prepare a Buffer to read existing data. It can also be used to set
// the initial size of the internal buffer for writing. To do that,
// buf should have the desired capacity but a length of zero.
//
// In most cases, new([Buffer]) (or just declaring a [Buffer] variable) is
// sufficient to initialize a [Buffer].
// In most cases, new(Buffer) (or just declaring a Buffer variable) is
// sufficient to initialize a Buffer.
func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} }
// NewBufferString creates and initializes a new [Buffer] using string s as its
// NewBufferString creates and initializes a new Buffer using string s as its
// initial contents. It is intended to prepare a buffer to read an existing
// string.
//
// In most cases, new([Buffer]) (or just declaring a [Buffer] variable) is
// sufficient to initialize a [Buffer].
// In most cases, new(Buffer) (or just declaring a Buffer variable) is
// sufficient to initialize a Buffer.
func NewBufferString(s string) *Buffer {
return &Buffer{buf: []byte(s)}
}

View File

@@ -7,7 +7,6 @@ package bytes_test
import (
. "bytes"
"fmt"
"internal/testenv"
"io"
"math/rand"
"strconv"
@@ -95,22 +94,6 @@ func TestNewBuffer(t *testing.T) {
check(t, "NewBuffer", buf, testString)
}
var buf Buffer
// Calling NewBuffer and immediately shallow copying the Buffer struct
// should not result in any allocations.
// This can be used to reset the underlying []byte of an existing Buffer.
func TestNewBufferShallow(t *testing.T) {
testenv.SkipIfOptimizationOff(t)
n := testing.AllocsPerRun(1000, func() {
buf = *NewBuffer(testBytes)
})
if n > 0 {
t.Errorf("allocations occurred while shallow copying")
}
check(t, "NewBuffer", &buf, testString)
}
func TestNewBufferString(t *testing.T) {
buf := NewBufferString(testString)
check(t, "NewBufferString", buf, testString)

View File

@@ -10,7 +10,6 @@ import (
"internal/bytealg"
"unicode"
"unicode/utf8"
_ "unsafe" // for linkname
)
// Equal reports whether a and b
@@ -113,7 +112,7 @@ func LastIndex(s, sep []byte) int {
case n == 0:
return len(s)
case n == 1:
return bytealg.LastIndexByte(s, sep[0])
return LastIndexByte(s, sep[0])
case n == len(s):
if Equal(s, sep) {
return 0
@@ -122,18 +121,41 @@ func LastIndex(s, sep []byte) int {
case n > len(s):
return -1
}
return bytealg.LastIndexRabinKarp(s, sep)
// Rabin-Karp search from the end of the string
hashss, pow := bytealg.HashStrRevBytes(sep)
last := len(s) - n
var h uint32
for i := len(s) - 1; i >= last; i-- {
h = h*bytealg.PrimeRK + uint32(s[i])
}
if h == hashss && Equal(s[last:], sep) {
return last
}
for i := last - 1; i >= 0; i-- {
h *= bytealg.PrimeRK
h += uint32(s[i])
h -= pow * uint32(s[i+n])
if h == hashss && Equal(s[i:i+n], sep) {
return i
}
}
return -1
}
// LastIndexByte returns the index of the last instance of c in s, or -1 if c is not present in s.
func LastIndexByte(s []byte, c byte) int {
return bytealg.LastIndexByte(s, c)
for i := len(s) - 1; i >= 0; i-- {
if s[i] == c {
return i
}
}
return -1
}
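
One side of this hunk inlines a reverse Rabin-Karp search: hash sep and the trailing window of s with h = h*prime + b, then slide the window left by adding the incoming byte and subtracting pow = prime^len(sep) times the byte that falls out. A standalone sketch of that recurrence, using an illustrative multiplier and checked against bytes.LastIndex:

package main

import (
    "bytes"
    "fmt"
)

const prime = 16777619 // rolling-hash multiplier (illustrative constant)

// lastIndexRK mirrors the reverse Rabin-Karp loop shown above.
func lastIndexRK(s, sep []byte) int {
    n := len(sep)
    if n == 0 {
        return len(s)
    }
    if n > len(s) {
        return -1
    }
    // Hash of sep scanned right to left, plus prime^n for removing bytes.
    var hashsep, pow uint32 = 0, 1
    for i := n - 1; i >= 0; i-- {
        hashsep = hashsep*prime + uint32(sep[i])
    }
    for i := 0; i < n; i++ {
        pow *= prime
    }
    last := len(s) - n
    var h uint32
    for i := len(s) - 1; i >= last; i-- {
        h = h*prime + uint32(s[i])
    }
    if h == hashsep && bytes.Equal(s[last:], sep) {
        return last
    }
    for i := last - 1; i >= 0; i-- {
        h = h*prime + uint32(s[i]) - pow*uint32(s[i+n]) // roll the window left
        if h == hashsep && bytes.Equal(s[i:i+n], sep) {
            return i
        }
    }
    return -1
}

func main() {
    s := []byte("abracadabra")
    fmt.Println(lastIndexRK(s, []byte("abra")), bytes.LastIndex(s, []byte("abra"))) // 7 7
}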
// IndexRune interprets s as a sequence of UTF-8-encoded code points.
// It returns the byte index of the first occurrence in s of the given rune.
// It returns -1 if rune is not present in s.
// If r is [utf8.RuneError], it returns the first instance of any
// If r is utf8.RuneError, it returns the first instance of any
// invalid UTF-8 byte sequence.
func IndexRune(s []byte, r rune) int {
switch {
@@ -355,20 +377,22 @@ func genSplit(s, sep []byte, sepSave, n int) [][]byte {
// the subslices between those separators.
// If sep is empty, SplitN splits after each UTF-8 sequence.
// The count determines the number of subslices to return:
// - n > 0: at most n subslices; the last subslice will be the unsplit remainder;
// - n == 0: the result is nil (zero subslices);
// - n < 0: all subslices.
//
// To split around the first instance of a separator, see [Cut].
// n > 0: at most n subslices; the last subslice will be the unsplit remainder.
// n == 0: the result is nil (zero subslices)
// n < 0: all subslices
//
// To split around the first instance of a separator, see Cut.
func SplitN(s, sep []byte, n int) [][]byte { return genSplit(s, sep, 0, n) }
// SplitAfterN slices s into subslices after each instance of sep and
// returns a slice of those subslices.
// If sep is empty, SplitAfterN splits after each UTF-8 sequence.
// The count determines the number of subslices to return:
// - n > 0: at most n subslices; the last subslice will be the unsplit remainder;
// - n == 0: the result is nil (zero subslices);
// - n < 0: all subslices.
//
// n > 0: at most n subslices; the last subslice will be the unsplit remainder.
// n == 0: the result is nil (zero subslices)
// n < 0: all subslices
func SplitAfterN(s, sep []byte, n int) [][]byte {
return genSplit(s, sep, len(sep), n)
}
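
The count semantics are the same for both functions; only whether the separator stays attached differs. A quick demonstration, including the Cut alternative referred to above:

package main

import (
    "bytes"
    "fmt"
)

func main() {
    s := []byte("a,b,c,d")
    fmt.Printf("%q\n", bytes.SplitN(s, []byte(","), 2))      // ["a" "b,c,d"]
    fmt.Printf("%q\n", bytes.SplitN(s, []byte(","), 0))      // []
    fmt.Printf("%q\n", bytes.SplitN(s, []byte(","), -1))     // ["a" "b" "c" "d"]
    fmt.Printf("%q\n", bytes.SplitAfterN(s, []byte(","), 2)) // ["a," "b,c,d"]

    before, after, found := bytes.Cut(s, []byte(","))
    fmt.Printf("%q %q %v\n", before, after, found) // "a" "b,c,d" true
}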
@@ -378,7 +402,7 @@ func SplitAfterN(s, sep []byte, n int) [][]byte {
// If sep is empty, Split splits after each UTF-8 sequence.
// It is equivalent to SplitN with a count of -1.
//
// To split around the first instance of a separator, see [Cut].
// To split around the first instance of a separator, see Cut.
func Split(s, sep []byte) [][]byte { return genSplit(s, sep, 0, -1) }
// SplitAfter slices s into all subslices after each instance of sep and
@@ -393,7 +417,7 @@ var asciiSpace = [256]uint8{'\t': 1, '\n': 1, '\v': 1, '\f': 1, '\r': 1, ' ': 1}
// Fields interprets s as a sequence of UTF-8-encoded code points.
// It splits the slice s around each instance of one or more consecutive white space
// characters, as defined by [unicode.IsSpace], returning a slice of subslices of s or an
// characters, as defined by unicode.IsSpace, returning a slice of subslices of s or an
// empty slice if s contains only white space.
func Fields(s []byte) [][]byte {
// First count the fields.
@@ -524,7 +548,7 @@ func Join(s [][]byte, sep []byte) []byte {
n += len(v)
}
b := bytealg.MakeNoZero(n)[:n:n]
b := bytealg.MakeNoZero(n)
bp := copy(b, s[0])
for _, v := range s[1:] {
bp += copy(b[bp:], sep)
@@ -533,12 +557,12 @@ func Join(s [][]byte, sep []byte) []byte {
return b
}
// HasPrefix reports whether the byte slice s begins with prefix.
// HasPrefix tests whether the byte slice s begins with prefix.
func HasPrefix(s, prefix []byte) bool {
return len(s) >= len(prefix) && Equal(s[0:len(prefix)], prefix)
}
// HasSuffix reports whether the byte slice s ends with suffix.
// HasSuffix tests whether the byte slice s ends with suffix.
func HasSuffix(s, suffix []byte) bool {
return len(s) >= len(suffix) && Equal(s[len(s)-len(suffix):], suffix)
}
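A small illustrative sketch (not part of the diff) of the HasPrefix and HasSuffix predicates documented above.

package bytes_test

import (
	"bytes"
	"fmt"
)

func ExampleHasPrefix() {
	fmt.Println(bytes.HasPrefix([]byte("Gopher"), []byte("Go")))
	fmt.Println(bytes.HasPrefix([]byte("Gopher"), []byte("C")))
	fmt.Println(bytes.HasSuffix([]byte("Amigo"), []byte("go")))
	// Output:
	// true
	// false
	// true
}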
@@ -567,18 +591,6 @@ func Map(mapping func(r rune) rune, s []byte) []byte {
return b
}
// Despite being an exported symbol,
// Repeat is linknamed by widely used packages.
// Notable members of the hall of shame include:
// - gitee.com/quant1x/num
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
// Note that this comment is not part of the doc comment.
//
//go:linkname Repeat
// Repeat returns a new byte slice consisting of count copies of b.
//
// It panics if count is negative or if the result of (len(b) * count)
@@ -594,7 +606,7 @@ func Repeat(b []byte, count int) []byte {
if count < 0 {
panic("bytes: negative Repeat count")
}
if len(b) > maxInt/count {
if len(b) >= maxInt/count {
panic("bytes: Repeat output length overflow")
}
n := len(b) * count
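Per the checks above, Repeat panics on a negative count or when len(b) * count overflows int; for ordinary inputs it behaves as in this illustrative sketch (not taken from the diff).

package bytes_test

import (
	"bytes"
	"fmt"
)

func ExampleRepeat() {
	fmt.Println(string(bytes.Repeat([]byte("na"), 4)) + " batman")
	// Output:
	// nananana batman
}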
@@ -621,7 +633,7 @@ func Repeat(b []byte, count int) []byte {
chunkMax = len(b)
}
}
nb := bytealg.MakeNoZero(n)[:n:n]
nb := bytealg.MakeNoZero(n)
bp := copy(nb, b)
for bp < n {
chunk := bp
@@ -651,7 +663,7 @@ func ToUpper(s []byte) []byte {
// Just return a copy.
return append([]byte(""), s...)
}
b := bytealg.MakeNoZero(len(s))[:len(s):len(s)]
b := bytealg.MakeNoZero(len(s))
for i := 0; i < len(s); i++ {
c := s[i]
if 'a' <= c && c <= 'z' {
@@ -681,7 +693,7 @@ func ToLower(s []byte) []byte {
if !hasUpper {
return append([]byte(""), s...)
}
b := bytealg.MakeNoZero(len(s))[:len(s):len(s)]
b := bytealg.MakeNoZero(len(s))
for i := 0; i < len(s); i++ {
c := s[i]
if 'A' <= c && c <= 'Z' {
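The two hunks above touch the ASCII fast paths of ToUpper and ToLower; their user-visible behavior, shown purely for reference (illustrative, not part of the diff):

package bytes_test

import (
	"bytes"
	"fmt"
)

func ExampleToUpper() {
	fmt.Printf("%s %s\n", bytes.ToUpper([]byte("Gopher")), bytes.ToLower([]byte("SILENCE")))
	// Output:
	// GOPHER silence
}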
@@ -1324,7 +1336,7 @@ func Index(s, sep []byte) int {
// we should cutover at even larger average skips,
// because Equal becomes that much more expensive.
// This code does not take that effect into account.
j := bytealg.IndexRabinKarp(s[i:], sep)
j := bytealg.IndexRabinKarpBytes(s[i:], sep)
if j < 0 {
return -1
}
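The hunk above swaps the internal Rabin-Karp helper used by Index; at the API level, Index looks like this illustrative sketch (not from the diff).

package bytes_test

import (
	"bytes"
	"fmt"
)

func ExampleIndex() {
	fmt.Println(bytes.Index([]byte("chicken"), []byte("ken")))
	fmt.Println(bytes.Index([]byte("chicken"), []byte("dmr")))
	// Output:
	// 4
	// -1
}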

View File

@@ -1,21 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build js && wasm
package bytes_test
import (
"bytes"
"testing"
)
func TestIssue65571(t *testing.T) {
b := make([]byte, 1<<31+1)
b[1<<31] = 1
i := bytes.IndexByte(b, 1)
if i != 1<<31 {
t.Errorf("IndexByte(b, 1) = %d; want %d", i, 1<<31)
}
}
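The test above guards IndexByte on slices larger than 2 GiB (issue 65571); everyday usage, shown here only for illustration (not part of the diff), is much simpler.

package bytes_test

import (
	"bytes"
	"fmt"
)

func ExampleIndexByte() {
	fmt.Println(bytes.IndexByte([]byte("golang"), 'g'))
	fmt.Println(bytes.IndexByte([]byte("golang"), 'x'))
	// Output:
	// 0
	// -1
}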

View File

@@ -629,11 +629,6 @@ func BenchmarkEqual(b *testing.B) {
})
sizes := []int{1, 6, 9, 15, 16, 20, 32, 4 << 10, 4 << 20, 64 << 20}
b.Run("same", func(b *testing.B) {
benchBytes(b, sizes, bmEqual(func(a, b []byte) bool { return Equal(a, a) }))
})
benchBytes(b, sizes, bmEqual(Equal))
}
@@ -1242,48 +1237,33 @@ func repeat(b []byte, count int) (err error) {
// See Issue golang.org/issue/16237
func TestRepeatCatchesOverflow(t *testing.T) {
type testCase struct {
tests := [...]struct {
s string
count int
errStr string
}
runTestCases := func(prefix string, tests []testCase) {
for i, tt := range tests {
err := repeat([]byte(tt.s), tt.count)
if tt.errStr == "" {
if err != nil {
t.Errorf("#%d panicked %v", i, err)
}
continue
}
if err == nil || !strings.Contains(err.Error(), tt.errStr) {
t.Errorf("%s#%d got %q want %q", prefix, i, err, tt.errStr)
}
}
}
const maxInt = int(^uint(0) >> 1)
runTestCases("", []testCase{
}{
0: {"--", -2147483647, "negative"},
1: {"", maxInt, ""},
1: {"", int(^uint(0) >> 1), ""},
2: {"-", 10, ""},
3: {"gopher", 0, ""},
4: {"-", -1, "negative"},
5: {"--", -102, "negative"},
6: {string(make([]byte, 255)), int((^uint(0))/255 + 1), "overflow"},
})
const is64Bit = 1<<(^uintptr(0)>>63)/2 != 0
if !is64Bit {
return
}
runTestCases("64-bit", []testCase{
0: {"-", maxInt, "out of range"},
})
for i, tt := range tests {
err := repeat([]byte(tt.s), tt.count)
if tt.errStr == "" {
if err != nil {
t.Errorf("#%d panicked %v", i, err)
}
continue
}
if err == nil || !strings.Contains(err.Error(), tt.errStr) {
t.Errorf("#%d expected %q got %q", i, tt.errStr, err)
}
}
}
func runesEqual(a, b []rune) bool {

View File

@@ -7,6 +7,7 @@ package bytes_test
import (
. "bytes"
"fmt"
"internal/testenv"
"testing"
)
@@ -72,7 +73,7 @@ func TestCompareBytes(t *testing.T) {
}
lengths = append(lengths, 256, 512, 1024, 1333, 4095, 4096, 4097)
if !testing.Short() {
if !testing.Short() || testenv.Builder() != "" {
lengths = append(lengths, 65535, 65536, 65537, 99999)
}

View File

@@ -10,7 +10,7 @@ import (
"fmt"
"io"
"os"
"slices"
"sort"
"strconv"
"unicode"
)
@@ -81,9 +81,9 @@ func ExampleBuffer_Next() {
var b bytes.Buffer
b.Grow(64)
b.Write([]byte("abcde"))
fmt.Printf("%s\n", b.Next(2))
fmt.Printf("%s\n", b.Next(2))
fmt.Printf("%s", b.Next(2))
fmt.Printf("%s\n", string(b.Next(2)))
fmt.Printf("%s\n", string(b.Next(2)))
fmt.Printf("%s", string(b.Next(2)))
// Output:
// ab
// cd
@@ -102,7 +102,7 @@ func ExampleBuffer_Read() {
fmt.Println(n)
fmt.Println(b.String())
fmt.Println(string(rdbuf))
// Output:
// Output
// 1
// bcde
// a
@@ -118,7 +118,7 @@ func ExampleBuffer_ReadByte() {
}
fmt.Println(c)
fmt.Println(b.String())
// Output:
// Output
// 97
// bcde
}
@@ -165,8 +165,11 @@ func ExampleCompare_search() {
// Binary search to find a matching byte slice.
var needle []byte
var haystack [][]byte // Assume sorted
_, found := slices.BinarySearchFunc(haystack, needle, bytes.Compare)
if found {
i := sort.Search(len(haystack), func(i int) bool {
// Return haystack[i] >= needle.
return bytes.Compare(haystack[i], needle) >= 0
})
if i < len(haystack) && bytes.Equal(haystack[i], needle) {
// Found it!
}
}
@@ -209,17 +212,6 @@ func ExampleContainsRune() {
// false
}
func ExampleContainsFunc() {
f := func(r rune) bool {
return r >= 'a' && r <= 'z'
}
fmt.Println(bytes.ContainsFunc([]byte("HELLO"), f))
fmt.Println(bytes.ContainsFunc([]byte("World"), f))
// Output:
// false
// true
}
func ExampleCount() {
fmt.Println(bytes.Count([]byte("cheese"), []byte("e")))
fmt.Println(bytes.Count([]byte("five"), []byte(""))) // before & after each rune

View File

@@ -10,10 +10,10 @@ import (
"unicode/utf8"
)
// A Reader implements the [io.Reader], [io.ReaderAt], [io.WriterTo], [io.Seeker],
// [io.ByteScanner], and [io.RuneScanner] interfaces by reading from
// A Reader implements the io.Reader, io.ReaderAt, io.WriterTo, io.Seeker,
// io.ByteScanner, and io.RuneScanner interfaces by reading from
// a byte slice.
// Unlike a [Buffer], a Reader is read-only and supports seeking.
// Unlike a Buffer, a Reader is read-only and supports seeking.
// The zero value for Reader operates like a Reader of an empty slice.
type Reader struct {
s []byte
@@ -31,11 +31,11 @@ func (r *Reader) Len() int {
}
// Size returns the original length of the underlying byte slice.
// Size is the number of bytes available for reading via [Reader.ReadAt].
// The result is unaffected by any method calls except [Reader.Reset].
// Size is the number of bytes available for reading via ReadAt.
// The result is unaffected by any method calls except Reset.
func (r *Reader) Size() int64 { return int64(len(r.s)) }
// Read implements the [io.Reader] interface.
// Read implements the io.Reader interface.
func (r *Reader) Read(b []byte) (n int, err error) {
if r.i >= int64(len(r.s)) {
return 0, io.EOF
@@ -46,7 +46,7 @@ func (r *Reader) Read(b []byte) (n int, err error) {
return
}
// ReadAt implements the [io.ReaderAt] interface.
// ReadAt implements the io.ReaderAt interface.
func (r *Reader) ReadAt(b []byte, off int64) (n int, err error) {
// cannot modify state - see io.ReaderAt
if off < 0 {
@@ -62,7 +62,7 @@ func (r *Reader) ReadAt(b []byte, off int64) (n int, err error) {
return
}
// ReadByte implements the [io.ByteReader] interface.
// ReadByte implements the io.ByteReader interface.
func (r *Reader) ReadByte() (byte, error) {
r.prevRune = -1
if r.i >= int64(len(r.s)) {
@@ -73,7 +73,7 @@ func (r *Reader) ReadByte() (byte, error) {
return b, nil
}
// UnreadByte complements [Reader.ReadByte] in implementing the [io.ByteScanner] interface.
// UnreadByte complements ReadByte in implementing the io.ByteScanner interface.
func (r *Reader) UnreadByte() error {
if r.i <= 0 {
return errors.New("bytes.Reader.UnreadByte: at beginning of slice")
@@ -83,7 +83,7 @@ func (r *Reader) UnreadByte() error {
return nil
}
// ReadRune implements the [io.RuneReader] interface.
// ReadRune implements the io.RuneReader interface.
func (r *Reader) ReadRune() (ch rune, size int, err error) {
if r.i >= int64(len(r.s)) {
r.prevRune = -1
@@ -99,7 +99,7 @@ func (r *Reader) ReadRune() (ch rune, size int, err error) {
return
}
// UnreadRune complements [Reader.ReadRune] in implementing the [io.RuneScanner] interface.
// UnreadRune complements ReadRune in implementing the io.RuneScanner interface.
func (r *Reader) UnreadRune() error {
if r.i <= 0 {
return errors.New("bytes.Reader.UnreadRune: at beginning of slice")
@@ -112,7 +112,7 @@ func (r *Reader) UnreadRune() error {
return nil
}
// Seek implements the [io.Seeker] interface.
// Seek implements the io.Seeker interface.
func (r *Reader) Seek(offset int64, whence int) (int64, error) {
r.prevRune = -1
var abs int64
@@ -133,7 +133,7 @@ func (r *Reader) Seek(offset int64, whence int) (int64, error) {
return abs, nil
}
// WriteTo implements the [io.WriterTo] interface.
// WriteTo implements the io.WriterTo interface.
func (r *Reader) WriteTo(w io.Writer) (n int64, err error) {
r.prevRune = -1
if r.i >= int64(len(r.s)) {
@@ -152,8 +152,8 @@ func (r *Reader) WriteTo(w io.Writer) (n int64, err error) {
return
}
// Reset resets the [Reader] to be reading from b.
// Reset resets the Reader to be reading from b.
func (r *Reader) Reset(b []byte) { *r = Reader{b, 0, -1} }
// NewReader returns a new [Reader] reading from b.
// NewReader returns a new Reader reading from b.
func NewReader(b []byte) *Reader { return &Reader{b, 0, -1} }
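The paired doc-comment lines above differ only in doc-link bracketing; as context, an illustrative sketch of a Reader in use (standard behavior assumed; the printed Size/Len values are worked out by hand).

package bytes_test

import (
	"bytes"
	"fmt"
	"io"
)

func ExampleNewReader() {
	r := bytes.NewReader([]byte("Hello, Reader!"))

	buf := make([]byte, 4)
	n, _ := r.Read(buf) // io.Reader
	fmt.Printf("%d %q\n", n, buf[:n])

	r.Seek(0, io.SeekStart) // io.Seeker: rewind to the start
	ch, size, _ := r.ReadRune() // io.RuneReader
	fmt.Printf("%c %d\n", ch, size)

	fmt.Println(r.Size(), r.Len()) // 14 bytes total, 13 left unread
	// Output:
	// 4 "Hell"
	// H 1
	// 14 13
}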

View File

@@ -109,18 +109,32 @@ func testAddr2Line(t *testing.T, dbgExePath, addr string) {
srcPath = filepath.FromSlash(srcPath)
fi2, err := os.Stat(srcPath)
// If GOROOT_FINAL is set and srcPath is not the file we expect, perhaps
// srcPath has had GOROOT_FINAL substituted for GOROOT and GOROOT hasn't been
// moved to its final location yet. If so, try the original location instead.
if gorootFinal := os.Getenv("GOROOT_FINAL"); gorootFinal != "" &&
(os.IsNotExist(err) || (err == nil && !os.SameFile(fi1, fi2))) {
// srcPath is clean, but GOROOT_FINAL itself might not be.
// (See https://golang.org/issue/41447.)
gorootFinal = filepath.Clean(gorootFinal)
if strings.HasPrefix(srcPath, gorootFinal) {
fi2, err = os.Stat(runtime.GOROOT() + strings.TrimPrefix(srcPath, gorootFinal))
}
}
if err != nil {
t.Fatalf("Stat failed: %v", err)
}
if !os.SameFile(fi1, fi2) {
t.Fatalf("addr2line_test.go and %s are not same file", srcPath)
}
if want := "124"; srcLineNo != want {
t.Fatalf("line number = %v; want %s", srcLineNo, want)
if srcLineNo != "138" {
t.Fatalf("line number = %v; want 138", srcLineNo)
}
}
// This is line 123. The test depends on that.
// This is line 137. The test depends on that.
func TestAddr2Line(t *testing.T) {
testenv.MustHaveGoBuild(t)

View File

@@ -28,7 +28,6 @@ import (
"strings"
"cmd/internal/objfile"
"cmd/internal/telemetry/counter"
)
func printUsage(w *os.File) {
@@ -46,7 +45,6 @@ func usage() {
func main() {
log.SetFlags(0)
log.SetPrefix("addr2line: ")
counter.Open()
// pprof expects this behavior when checking for addr2line
if len(os.Args) > 1 && os.Args[1] == "--help" {
@@ -56,8 +54,6 @@ func main() {
flag.Usage = usage
flag.Parse()
counter.Inc("addr2line/invocations")
counter.CountFlags("addr2line/flag:", *flag.CommandLine)
if flag.NArg() != 1 {
usage()
}

View File

@@ -2,10 +2,9 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This package computes the exported API of a set of Go packages.
// Package api computes the exported API of a set of Go packages.
// It is only a test, not a command, nor a usefully importable package.
package main
package api
import (
"bufio"
@@ -106,7 +105,7 @@ func Check(t *testing.T) {
}
var nextFiles []string
if v := runtime.Version(); strings.Contains(v, "devel") || strings.Contains(v, "beta") {
if strings.Contains(runtime.Version(), "devel") {
next, err := filepath.Glob(filepath.Join(testenv.GOROOT(t), "api/next/*.txt"))
if err != nil {
t.Fatal(err)
@@ -490,8 +489,7 @@ func (w *Walker) loadImports() {
if w.context.Dir != "" {
cmd.Dir = w.context.Dir
}
cmd.Stderr = os.Stderr
out, err := cmd.Output()
out, err := cmd.CombinedOutput()
if err != nil {
log.Fatalf("loading imports: %v\n%s", err, out)
}
@@ -843,9 +841,6 @@ func (w *Walker) writeType(buf *bytes.Buffer, typ types.Type) {
buf.WriteString(s)
w.writeType(buf, typ.Elem())
case *types.Alias:
w.writeType(buf, types.Unalias(typ))
case *types.Named:
obj := typ.Obj()
pkg := obj.Pkg()
@@ -854,16 +849,6 @@ func (w *Walker) writeType(buf *bytes.Buffer, typ types.Type) {
buf.WriteByte('.')
}
buf.WriteString(typ.Obj().Name())
if targs := typ.TypeArgs(); targs.Len() > 0 {
buf.WriteByte('[')
for i := 0; i < targs.Len(); i++ {
if i > 0 {
buf.WriteString(", ")
}
w.writeType(buf, targs.At(i))
}
buf.WriteByte(']')
}
case *types.TypeParam:
// Type parameter names may change, so use a placeholder instead.
@@ -970,17 +955,17 @@ func (w *Walker) emitType(obj *types.TypeName) {
if w.isDeprecated(obj) {
w.emitf("type %s //deprecated", name)
}
typ := obj.Type()
if obj.IsAlias() {
w.emitf("type %s = %s", name, w.typeString(typ))
return
}
if tparams := obj.Type().(*types.Named).TypeParams(); tparams != nil {
var buf bytes.Buffer
buf.WriteString(name)
w.writeTypeParams(&buf, tparams, true)
name = buf.String()
}
typ := obj.Type()
if obj.IsAlias() {
w.emitf("type %s = %s", name, w.typeString(typ))
return
}
switch typ := typ.Underlying().(type) {
case *types.Struct:
w.emitStructType(name, typ)

View File

@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
package api
import (
"flag"
@@ -285,25 +285,6 @@ func TestIssue41358(t *testing.T) {
}
}
func TestIssue64958(t *testing.T) {
defer func() {
if x := recover(); x != nil {
t.Errorf("expected no panic; recovered %v", x)
}
}()
testenv.MustHaveGoBuild(t)
for _, context := range contexts {
w := NewWalker(context, "testdata/src/issue64958")
pkg, err := w.importFrom("p", "", 0)
if err != nil {
t.Errorf("expected no error importing; got %T", err)
}
w.export(pkg)
}
}
func TestCheck(t *testing.T) {
if !*flagCheck {
t.Skip("-check not specified")

View File

@@ -4,7 +4,7 @@
//go:build boringcrypto
package main
package api
import (
"fmt"

View File

@@ -1,3 +0,0 @@
package p
type BasicAlias = uint8

View File

@@ -1,4 +1,4 @@
pkg p4, func NewPair[$0 interface{ M }, $1 interface{ ~int }]($0, $1) Pair[$0, $1]
pkg p4, func NewPair[$0 interface{ M }, $1 interface{ ~int }]($0, $1) Pair
pkg p4, method (Pair[$0, $1]) Second() $1
pkg p4, method (Pair[$0, $1]) First() $0
pkg p4, type Pair[$0 interface{ M }, $1 interface{ ~int }] struct

View File

@@ -33,8 +33,6 @@ Flags:
Dump instructions as they are parsed.
-dynlink
Support references to Go symbols defined in other shared libraries.
-e
No limit on number of errors reported.
-gensymabis
Write symbol ABI information to output file. Don't assemble.
-o file
@@ -47,8 +45,6 @@ Flags:
Enable spectre mitigations in list (all, ret).
-trimpath prefix
Remove prefix from recorded source file paths.
-v
Print debug output.
Input language:

View File

@@ -55,10 +55,6 @@ func IsLoong64RDTIME(op obj.As) bool {
return false
}
func IsLoong64AMO(op obj.As) bool {
return loong64.IsAtomicInst(op)
}
func loong64RegisterNumber(name string, n int16) (int16, bool) {
switch name {
case "F":

View File

@@ -25,7 +25,7 @@ func jumpPPC64(word string) bool {
// one of the CMP instructions that require special handling.
func IsPPC64CMP(op obj.As) bool {
switch op {
case ppc64.ACMP, ppc64.ACMPU, ppc64.ACMPW, ppc64.ACMPWU, ppc64.AFCMPO, ppc64.AFCMPU:
case ppc64.ACMP, ppc64.ACMPU, ppc64.ACMPW, ppc64.ACMPWU, ppc64.AFCMPU:
return true
}
return false

View File

@@ -16,7 +16,6 @@ import (
"cmd/asm/internal/lex"
"cmd/internal/obj"
"cmd/internal/obj/ppc64"
"cmd/internal/obj/riscv"
"cmd/internal/obj/x86"
"cmd/internal/sys"
)
@@ -47,11 +46,7 @@ func (p *Parser) append(prog *obj.Prog, cond string, doLabel bool) {
p.errorf("%v", err)
return
}
case sys.RISCV64:
if err := riscv.ParseSuffix(prog, cond); err != nil {
p.errorf("unrecognized suffix .%q", cond)
return
}
default:
p.errorf("unrecognized suffix .%q", cond)
return
@@ -450,7 +445,7 @@ func (p *Parser) asmJump(op obj.As, cond string, a []obj.Addr) {
// BC x,CR0EQ,...
// BC x,CR1LT,...
// BC x,CR1GT,...
// The first and second cases demonstrate a symbol name which is
// The first and second case demonstrate a symbol name which is
// effectively discarded. In these cases, the offset determines
// the CR bit.
prog.Reg = a[1].Reg
@@ -669,17 +664,9 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) {
prog.Reg = p.getRegister(prog, op, &a[1])
prog.To = a[2]
case sys.Loong64:
switch {
// Loong64 atomic instructions with one input and two outputs.
case arch.IsLoong64AMO(op):
prog.From = a[0]
prog.To = a[1]
prog.RegTo2 = a[2].Reg
default:
prog.From = a[0]
prog.Reg = p.getRegister(prog, op, &a[1])
prog.To = a[2]
}
prog.From = a[0]
prog.Reg = p.getRegister(prog, op, &a[1])
prog.To = a[2]
case sys.ARM:
// Special cases.
if arch.IsARMSTREX(op) {
@@ -922,7 +909,7 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) {
p.append(prog, cond, true)
}
// symbolName returns the symbol name, or an error string if none is available.
// symbolName returns the symbol name, or an error string if none if available.
func symbolName(addr *obj.Addr) string {
if addr.Sym != nil {
return addr.Sym.Name

View File

@@ -30,7 +30,7 @@ func testEndToEnd(t *testing.T, goarch, file string) {
architecture, ctxt := setArch(goarch)
architecture.Init(ctxt)
lexer := lex.NewLexer(input)
parser := NewParser(ctxt, architecture, lexer)
parser := NewParser(ctxt, architecture, lexer, false)
pList := new(obj.Plist)
var ok bool
testOut = new(strings.Builder) // The assembler writes test output to this buffer.
@@ -68,11 +68,6 @@ Diff:
continue
}
// Ignore GLOBL.
if strings.HasPrefix(line, "GLOBL ") {
continue
}
// The general form of a test input line is:
// // comment
// INST args [// printed form] [// hex encoding]
@@ -141,17 +136,11 @@ Diff:
// Turn relative (PC) into absolute (PC) automatically,
// so that most branch instructions don't need comments
// giving the absolute form.
if len(f) > 0 && strings.Contains(printed, "(PC)") {
index := len(f) - 1
suf := "(PC)"
for !strings.HasSuffix(f[index], suf) {
index--
suf = "(PC),"
}
str := f[index]
n, err := strconv.Atoi(str[:len(str)-len(suf)])
if len(f) > 0 && strings.HasSuffix(printed, "(PC)") {
last := f[len(f)-1]
n, err := strconv.Atoi(last[:len(last)-len("(PC)")])
if err == nil {
f[index] = fmt.Sprintf("%d%s", seq+n, suf)
f[len(f)-1] = fmt.Sprintf("%d(PC)", seq+n)
}
}
@@ -197,7 +186,7 @@ Diff:
t.Errorf(format, args...)
ok = false
}
obj.Flushplist(ctxt, pList, nil)
obj.Flushplist(ctxt, pList, nil, "")
for p := top; p != nil; p = p.Link {
if p.As == obj.ATEXT {
@@ -283,9 +272,8 @@ var (
func testErrors(t *testing.T, goarch, file string, flags ...string) {
input := filepath.Join("testdata", file+".s")
architecture, ctxt := setArch(goarch)
architecture.Init(ctxt)
lexer := lex.NewLexer(input)
parser := NewParser(ctxt, architecture, lexer)
parser := NewParser(ctxt, architecture, lexer, false)
pList := new(obj.Plist)
var ok bool
ctxt.Bso = bufio.NewWriter(os.Stdout)
@@ -311,7 +299,7 @@ func testErrors(t *testing.T, goarch, file string, flags ...string) {
}
}
pList.Firstpc, ok = parser.Parse()
obj.Flushplist(ctxt, pList, nil)
obj.Flushplist(ctxt, pList, nil, "")
if ok && !failed {
t.Errorf("asm: %s had no errors", file)
}
@@ -378,10 +366,10 @@ func Test386EndToEnd(t *testing.T) {
}
func TestARMEndToEnd(t *testing.T) {
defer func(old int) { buildcfg.GOARM.Version = old }(buildcfg.GOARM.Version)
defer func(old int) { buildcfg.GOARM = old }(buildcfg.GOARM)
for _, goarm := range []int{5, 6, 7} {
t.Logf("GOARM=%d", goarm)
buildcfg.GOARM.Version = goarm
buildcfg.GOARM = goarm
testEndToEnd(t, "arm", "arm")
if goarm == 6 {
testEndToEnd(t, "arm", "armv6")

View File

@@ -57,7 +57,7 @@ var exprTests = []exprTest{
}
func TestExpr(t *testing.T) {
p := NewParser(nil, nil, nil) // Expression evaluation uses none of these fields of the parser.
p := NewParser(nil, nil, nil, false) // Expression evaluation uses none of these fields of the parser.
for i, test := range exprTests {
p.start(lex.Tokenize(test.input))
result := int64(p.expr())
@@ -113,7 +113,7 @@ func TestBadExpr(t *testing.T) {
}
func runBadTest(i int, test badExprTest, t *testing.T) (err error) {
p := NewParser(nil, nil, nil) // Expression evaluation uses none of these fields of the parser.
p := NewParser(nil, nil, nil, false) // Expression evaluation uses none of these fields of the parser.
p.start(lex.Tokenize(test.input))
return tryParse(t, func() {
p.expr()

View File

@@ -39,7 +39,7 @@ func testBadInstParser(t *testing.T, goarch string, tests []badInstTest) {
for i, test := range tests {
arch, ctxt := setArch(goarch)
tokenizer := lex.NewTokenizer("", strings.NewReader(test.input+"\n"), nil)
parser := NewParser(ctxt, arch, tokenizer)
parser := NewParser(ctxt, arch, tokenizer, false)
err := tryParse(t, func() {
parser.Parse()

View File

@@ -23,14 +23,12 @@ func setArch(goarch string) (*arch.Arch, *obj.Link) {
if architecture == nil {
panic("asm: unrecognized architecture " + goarch)
}
ctxt := obj.Linknew(architecture.LinkArch)
ctxt.Pkgpath = "pkg"
return architecture, ctxt
return architecture, obj.Linknew(architecture.LinkArch)
}
func newParser(goarch string) *Parser {
architecture, ctxt := setArch(goarch)
return NewParser(ctxt, architecture, nil)
return NewParser(ctxt, architecture, nil, false)
}
// tryParse executes parse func in panicOnError=true context.
@@ -78,7 +76,7 @@ func testOperandParser(t *testing.T, parser *Parser, tests []operandTest) {
addr := obj.Addr{}
parser.operand(&addr)
var result string
if parser.allowABI {
if parser.compilingRuntime {
result = obj.DconvWithABIDetail(&emptyProg, &addr)
} else {
result = obj.Dconv(&emptyProg, &addr)
@@ -93,7 +91,7 @@ func TestAMD64OperandParser(t *testing.T) {
parser := newParser("amd64")
testOperandParser(t, parser, amd64OperandTests)
testBadOperandParser(t, parser, amd64BadOperandTests)
parser.allowABI = true
parser.compilingRuntime = true
testOperandParser(t, parser, amd64RuntimeOperandTests)
testBadOperandParser(t, parser, amd64BadOperandRuntimeTests)
}
@@ -306,8 +304,8 @@ var amd64OperandTests = []operandTest{
{"x·y+8(SB)", "x.y+8(SB)"},
{"x·y+8(SP)", "x.y+8(SP)"},
{"y+56(FP)", "y+56(FP)"},
{"·AddUint32(SB)", "pkg.AddUint32(SB)"},
{"·callReflect(SB)", "pkg.callReflect(SB)"},
{"·AddUint32(SB)", "\"\".AddUint32(SB)"},
{"·callReflect(SB)", "\"\".callReflect(SB)"},
{"[X0-X0]", "[X0-X0]"},
{"[ Z9 - Z12 ]", "[Z9-Z12]"},
{"[X0-AX]", "[X0-AX]"},
@@ -393,8 +391,8 @@ var x86OperandTests = []operandTest{
{"sec+4(FP)", "sec+4(FP)"},
{"shifts<>(SB)(CX*8)", "shifts<>(SB)(CX*8)"},
{"x+4(FP)", "x+4(FP)"},
{"·AddUint32(SB)", "pkg.AddUint32(SB)"},
{"·reflectcall(SB)", "pkg.reflectcall(SB)"},
{"·AddUint32(SB)", "\"\".AddUint32(SB)"},
{"·reflectcall(SB)", "\"\".reflectcall(SB)"},
{"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms.
}
@@ -442,7 +440,7 @@ var armOperandTests = []operandTest{
{"gosave<>(SB)", "gosave<>(SB)"},
{"retlo+12(FP)", "retlo+12(FP)"},
{"runtime·gogo(SB)", "runtime.gogo(SB)"},
{"·AddUint32(SB)", "pkg.AddUint32(SB)"},
{"·AddUint32(SB)", "\"\".AddUint32(SB)"},
{"(R1, R3)", "(R1, R3)"},
{"[R0,R1,g,R15", ""}, // Issue 11764 - asm hung parsing ']' missing register lists.
{"[):[o-FP", ""}, // Issue 12469 - there was no infinite loop for ARM; these are just sanity checks.
@@ -631,8 +629,8 @@ var ppc64OperandTests = []operandTest{
{"g", "g"},
{"ret+8(FP)", "ret+8(FP)"},
{"runtime·abort(SB)", "runtime.abort(SB)"},
{"·AddUint32(SB)", "pkg.AddUint32(SB)"},
{"·trunc(SB)", "pkg.trunc(SB)"},
{"·AddUint32(SB)", "\"\".AddUint32(SB)"},
{"·trunc(SB)", "\"\".trunc(SB)"},
{"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms.
}
@@ -664,7 +662,7 @@ var arm64OperandTests = []operandTest{
{"$(8-1)", "$7"},
{"a+0(FP)", "a(FP)"},
{"a1+8(FP)", "a1+8(FP)"},
{"·AddInt32(SB)", `pkg.AddInt32(SB)`},
{"·AddInt32(SB)", `"".AddInt32(SB)`},
{"runtime·divWVW(SB)", "runtime.divWVW(SB)"},
{"$argframe+0(FP)", "$argframe(FP)"},
{"$asmcgocall<>(SB)", "$asmcgocall<>(SB)"},
@@ -765,8 +763,8 @@ var mips64OperandTests = []operandTest{
{"RSB", "R28"},
{"ret+8(FP)", "ret+8(FP)"},
{"runtime·abort(SB)", "runtime.abort(SB)"},
{"·AddUint32(SB)", "pkg.AddUint32(SB)"},
{"·trunc(SB)", "pkg.trunc(SB)"},
{"·AddUint32(SB)", "\"\".AddUint32(SB)"},
{"·trunc(SB)", "\"\".trunc(SB)"},
{"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms.
}
@@ -848,8 +846,8 @@ var mipsOperandTests = []operandTest{
{"g", "g"},
{"ret+8(FP)", "ret+8(FP)"},
{"runtime·abort(SB)", "runtime.abort(SB)"},
{"·AddUint32(SB)", "pkg.AddUint32(SB)"},
{"·trunc(SB)", "pkg.trunc(SB)"},
{"·AddUint32(SB)", "\"\".AddUint32(SB)"},
{"·trunc(SB)", "\"\".trunc(SB)"},
{"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms.
}
@@ -930,8 +928,8 @@ var loong64OperandTests = []operandTest{
{"g", "g"},
{"ret+8(FP)", "ret+8(FP)"},
{"runtime·abort(SB)", "runtime.abort(SB)"},
{"·AddUint32(SB)", "pkg.AddUint32(SB)"},
{"·trunc(SB)", "pkg.trunc(SB)"},
{"·AddUint32(SB)", "\"\".AddUint32(SB)"},
{"·trunc(SB)", "\"\".trunc(SB)"},
{"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms.
}
@@ -1028,7 +1026,7 @@ var s390xOperandTests = []operandTest{
{"g", "g"},
{"ret+8(FP)", "ret+8(FP)"},
{"runtime·abort(SB)", "runtime.abort(SB)"},
{"·AddUint32(SB)", "pkg.AddUint32(SB)"},
{"·trunc(SB)", "pkg.trunc(SB)"},
{"·AddUint32(SB)", "\"\".AddUint32(SB)"},
{"·trunc(SB)", "\"\".trunc(SB)"},
{"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms.
}

View File

@@ -12,7 +12,6 @@ import (
"log"
"os"
"strconv"
"strings"
"text/scanner"
"unicode/utf8"
@@ -22,33 +21,31 @@ import (
"cmd/internal/obj"
"cmd/internal/obj/arm64"
"cmd/internal/obj/x86"
"cmd/internal/objabi"
"cmd/internal/src"
"cmd/internal/sys"
)
type Parser struct {
lex lex.TokenReader
lineNum int // Line number in source file.
errorLine int // Line number of last error.
errorCount int // Number of errors.
sawCode bool // saw code in this file (as opposed to comments and blank lines)
pc int64 // virtual PC; count of Progs; doesn't advance for GLOBL or DATA.
input []lex.Token
inputPos int
pendingLabels []string // Labels to attach to next instruction.
labels map[string]*obj.Prog
toPatch []Patch
addr []obj.Addr
arch *arch.Arch
ctxt *obj.Link
firstProg *obj.Prog
lastProg *obj.Prog
dataAddr map[string]int64 // Most recent address for DATA for this symbol.
isJump bool // Instruction being assembled is a jump.
allowABI bool // Whether ABI selectors are allowed.
pkgPrefix string // Prefix to add to local symbols.
errorWriter io.Writer
lex lex.TokenReader
lineNum int // Line number in source file.
errorLine int // Line number of last error.
errorCount int // Number of errors.
sawCode bool // saw code in this file (as opposed to comments and blank lines)
pc int64 // virtual PC; count of Progs; doesn't advance for GLOBL or DATA.
input []lex.Token
inputPos int
pendingLabels []string // Labels to attach to next instruction.
labels map[string]*obj.Prog
toPatch []Patch
addr []obj.Addr
arch *arch.Arch
ctxt *obj.Link
firstProg *obj.Prog
lastProg *obj.Prog
dataAddr map[string]int64 // Most recent address for DATA for this symbol.
isJump bool // Instruction being assembled is a jump.
compilingRuntime bool
errorWriter io.Writer
}
type Patch struct {
@@ -56,20 +53,15 @@ type Patch struct {
label string
}
func NewParser(ctxt *obj.Link, ar *arch.Arch, lexer lex.TokenReader) *Parser {
pkgPrefix := obj.UnlinkablePkg
if ctxt != nil {
pkgPrefix = objabi.PathToPrefix(ctxt.Pkgpath)
}
func NewParser(ctxt *obj.Link, ar *arch.Arch, lexer lex.TokenReader, compilingRuntime bool) *Parser {
return &Parser{
ctxt: ctxt,
arch: ar,
lex: lexer,
labels: make(map[string]*obj.Prog),
dataAddr: make(map[string]int64),
errorWriter: os.Stderr,
allowABI: ctxt != nil && objabi.LookupPkgSpecial(ctxt.Pkgpath).AllowAsmABI,
pkgPrefix: pkgPrefix,
ctxt: ctxt,
arch: ar,
lex: lexer,
labels: make(map[string]*obj.Prog),
dataAddr: make(map[string]int64),
errorWriter: os.Stderr,
compilingRuntime: compilingRuntime,
}
}
@@ -217,8 +209,8 @@ next:
for {
tok = p.nextToken()
if len(operands) == 0 && len(items) == 0 {
if p.arch.InFamily(sys.ARM, sys.ARM64, sys.AMD64, sys.I386, sys.RISCV64) && tok == '.' {
// Suffixes: ARM conditionals, RISCV rounding mode or x86 modifiers.
if p.arch.InFamily(sys.ARM, sys.ARM64, sys.AMD64, sys.I386) && tok == '.' {
// Suffixes: ARM conditionals or x86 modifiers.
tok = p.nextToken()
str := p.lex.Text()
if tok != scanner.Ident {
@@ -409,7 +401,7 @@ func (p *Parser) operand(a *obj.Addr) {
fallthrough
default:
// We have a symbol. Parse $sym±offset(symkind)
p.symbolReference(a, p.qualifySymbol(name), prefix)
p.symbolReference(a, name, prefix)
}
// fmt.Printf("SYM %s\n", obj.Dconv(&emptyProg, 0, a))
if p.peek() == scanner.EOF {
@@ -777,16 +769,6 @@ func (p *Parser) registerExtension(a *obj.Addr, name string, prefix rune) {
}
}
// qualifySymbol returns name as a package-qualified symbol name. If
// name starts with a period, qualifySymbol prepends the package
// prefix. Otherwise it returns name unchanged.
func (p *Parser) qualifySymbol(name string) string {
if strings.HasPrefix(name, ".") {
name = p.pkgPrefix + name
}
return name
}
// symbolReference parses a symbol that is known not to be a register.
func (p *Parser) symbolReference(a *obj.Addr, name string, prefix rune) {
// Identifier is a name.
@@ -882,7 +864,7 @@ func (p *Parser) symRefAttrs(name string, issueError bool) (bool, obj.ABI) {
isStatic = true
} else if tok == scanner.Ident {
abistr := p.get(scanner.Ident).String()
if !p.allowABI {
if !p.compilingRuntime {
if issueError {
p.errorf("ABI selector only permitted when compiling runtime, reference was to %q", name)
}
@@ -919,7 +901,6 @@ func (p *Parser) funcAddress() (string, obj.ABI, bool) {
if tok.ScanToken != scanner.Ident || p.atStartOfRegister(name) {
return "", obj.ABI0, false
}
name = p.qualifySymbol(name)
// Parse optional <> (indicates a static symbol) or
// <ABIxxx> (selecting text symbol with specific ABI).
noErrMsg := false
@@ -947,7 +928,7 @@ func (p *Parser) funcAddress() (string, obj.ABI, bool) {
}
// registerIndirect parses the general form of a register indirection.
// It can be (R1), (R2*scale), (R1)(R2*scale), (R1)(R2.SXTX<<3) or (R1)(R2<<3)
// It is can be (R1), (R2*scale), (R1)(R2*scale), (R1)(R2.SXTX<<3) or (R1)(R2<<3)
// where R1 may be a simple register or register pair R:R or (R, R) or (R+R).
// Or it might be a pseudo-indirection like (FP).
// We are sitting on the opening parenthesis.
@@ -1205,7 +1186,7 @@ func (p *Parser) registerListX86(a *obj.Addr) {
a.Offset = x86.EncodeRegisterRange(lo, hi)
}
// registerNumber is ARM-specific. It returns the number of the specified register.
// register number is ARM-specific. It returns the number of the specified register.
func (p *Parser) registerNumber(name string) uint16 {
if p.arch.Family == sys.ARM && name == "g" {
return 10

View File

@@ -64,16 +64,16 @@ func TestErroneous(t *testing.T) {
}
testcats := []struct {
allowABI bool
tests []errtest
compilingRuntime bool
tests []errtest
}{
{
allowABI: false,
tests: nonRuntimeTests,
compilingRuntime: false,
tests: nonRuntimeTests,
},
{
allowABI: true,
tests: runtimeTests,
compilingRuntime: true,
tests: runtimeTests,
},
}
@@ -85,7 +85,7 @@ func TestErroneous(t *testing.T) {
for _, cat := range testcats {
for _, test := range cat.tests {
parser.allowABI = cat.allowABI
parser.compilingRuntime = cat.compilingRuntime
parser.errorCount = 0
parser.lineNum++
if !parser.pseudo(test.pseudo, tokenize(test.operands)) {

Some files were not shown because too many files have changed in this diff.