mirror of https://github.com/golang/go.git
synced 2026-01-30 23:52:05 +03:00

Compare commits: go1.24.1...dev.inline (2 commits)

| Author | SHA1 | Date |
|---|---|---|
| | 71aaa8bde1 | |
| | 3aba453b66 | |
.github/ISSUE_TEMPLATE/00-bug.md (vendored, new file): 45 changed lines
@@ -0,0 +1,45 @@
---
name: Bugs
about: The go command, standard library, or anything else
title: "affected/package: "
---

<!--
Please answer these questions before submitting your issue. Thanks!
-->

### What version of Go are you using (`go version`)?

<pre>
$ go version

</pre>

### Does this issue reproduce with the latest release?



### What operating system and processor architecture are you using (`go env`)?

<details><summary><code>go env</code> Output</summary><br><pre>
$ go env

</pre></details>

### What did you do?

<!--
If possible, provide a recipe for reproducing the error.
A complete runnable program is good.
A link on go.dev/play is best.
-->



### What did you expect to see?



### What did you see instead?

.github/ISSUE_TEMPLATE/00-bug.yml (vendored, deleted): 94 changed lines
@@ -1,94 +0,0 @@
# https://docs.github.com/en/communities/using-templates-to-encourage-useful-issues-and-pull-requests/configuring-issue-templates-for-your-repository#creating-issue-forms
# https://docs.github.com/en/communities/using-templates-to-encourage-useful-issues-and-pull-requests/syntax-for-githubs-form-schema
name: Bugs
description: The go command, standard library, or anything else
title: "import/path: issue title"

body:
  - type: markdown
    attributes:
      value: |
        Thanks for helping us improve! 🙏 Please answer these questions and provide as much information as possible about your problem.

  - type: input
    id: go-version
    attributes:
      label: Go version
      description: |
        What version of Go are you using (`go version`)?

        Note: we only [support](https://go.dev/doc/devel/release#policy) the two most recent major releases.
      placeholder: ex. go version go1.20.7 darwin/arm64
    validations:
      required: true

  - type: textarea
    id: go-env
    attributes:
      label: "Output of `go env` in your module/workspace:"
      placeholder: |
        GO111MODULE=""
        GOARCH="arm64"
        GOBIN="/Users/gopher/go/bin"
        GOCACHE="/Users/gopher/go/cache"
        GOENV="/Users/gopher/Library/Application Support/go/env"
        GOEXE=""
        GOEXPERIMENT=""
        GOFLAGS=""
        GOHOSTARCH="arm64"
        GOHOSTOS="darwin"
        GOINSECURE=""
        GOMODCACHE="/Users/gopher/go/pkg/mod"
        GONOPROXY=""
        GONOSUMDB=""
        GOOS="darwin"
        GOPATH="/Users/gopher/go"
        GOPRIVATE=""
        GOPROXY="https://proxy.golang.org,direct"
        GOROOT="/usr/local/go"
        GOSUMDB="sum.golang.org"
        GOTMPDIR=""
        GOTOOLDIR="/usr/local/go/pkg/tool/darwin_arm64"
        GOVCS=""
        GOVERSION="go1.20.7"
        GCCGO="gccgo"
        AR="ar"
        CC="clang"
        CXX="clang++"
        CGO_ENABLED="1"
        GOMOD="/dev/null"
        GOWORK=""
        CGO_CFLAGS="-O2 -g"
        CGO_CPPFLAGS=""
        CGO_CXXFLAGS="-O2 -g"
        CGO_FFLAGS="-O2 -g"
        CGO_LDFLAGS="-O2 -g"
        PKG_CONFIG="pkg-config"
        GOGCCFLAGS="-fPIC -arch arm64 -pthread -fno-caret-diagnostics -Qunused-arguments -fmessage-length=0 -fdebug-prefix-map=/var/folders/44/nbbyll_10jd0z8rj_qxm43740000gn/T/go-build2331607515=/tmp/go-build -gno-record-gcc-switches -fno-common"
      render: shell
    validations:
      required: true

  - type: textarea
    id: what-did-you-do
    attributes:
      label: "What did you do?"
      description: "If possible, provide a recipe for reproducing the error. A complete runnable program is good. A link on [go.dev/play](https://go.dev/play) is best."
    validations:
      required: true

  - type: textarea
    id: actual-behavior
    attributes:
      label: "What did you see happen?"
      description: Command invocations and their associated output, functions with their arguments and return results, full stacktraces for panics (upload a file if it is very long), etc. Prefer copying text output over using screenshots.
    validations:
      required: true

  - type: textarea
    id: expected-behavior
    attributes:
      label: "What did you expect to see?"
      description: Why is the current output incorrect, and any additional context we may need to understand the issue.
    validations:
      required: true
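Both versions of the bug template ask for a runnable reproduction ("A complete runnable program is good. A link on go.dev/play is best."). As a generic, purely illustrative sketch of what such a minimal reproduction can look like (this program is invented for illustration and is not part of the diff):

```go
// repro.go: a self-contained program demonstrating the observed behavior.
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Replace this with the smallest input that still triggers the bug.
	got := strings.Repeat("go", 3)
	fmt.Println(got) // Report this as "what did you see" vs. "what did you expect".
}
```

Trimming the program to the smallest input that still misbehaves makes the "What did you see happen?" and "What did you expect to see?" answers easy to verify.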
.github/ISSUE_TEMPLATE/01-pkgsite.md (vendored, new file): 49 changed lines
@@ -0,0 +1,49 @@
---
name: Pkg.go.dev bugs or feature requests
about: Issues or feature requests for the documentation site
title: "x/pkgsite: "
labels: pkgsite
---

<!--
Please answer these questions before submitting your issue. Thanks!
-->

### What is the URL of the page with the issue?



### What is your user agent?

<!--
You can find your user agent here:
https://www.google.com/search?q=what+is+my+user+agent
-->



### Screenshot

<!--
Please paste a screenshot of the page.
-->



### What did you do?

<!--
If possible, provide a recipe for reproducing the error.

Starting with a Private/Incognito tab/window may help rule out problematic browser extensions.
-->



### What did you expect to see?



### What did you see instead?

.github/ISSUE_TEMPLATE/01-pkgsite.yml (vendored, deleted): 47 changed lines
@@ -1,47 +0,0 @@
name: Pkg.go.dev bugs or feature requests
description: Issues or feature requests for the documentation site
title: "x/pkgsite: issue title"
labels: ["pkgsite"]
body:
  - type: markdown
    attributes:
      value: "Please answer these questions before submitting your issue. Thanks!"
  - type: input
    id: url
    attributes:
      label: "What is the URL of the page with the issue?"
    validations:
      required: true
  - type: input
    id: user-agent
    attributes:
      label: "What is your user agent?"
      description: "You can find your user agent here: https://www.google.com/search?q=what+is+my+user+agent"
    validations:
      required: true
  - type: textarea
    id: screenshot
    attributes:
      label: "Screenshot"
      description: "Please paste a screenshot of the page."
    validations:
      required: false
  - type: textarea
    id: what-did-you-do
    attributes:
      label: "What did you do?"
      description: "If possible, provide a recipe for reproducing the error. Starting with a Private/Incognito tab/window may help rule out problematic browser extensions."
    validations:
      required: true
  - type: textarea
    id: actual-behavior
    attributes:
      label: "What did you see happen?"
    validations:
      required: true
  - type: textarea
    id: expected-behavior
    attributes:
      label: "What did you expect to see?"
    validations:
      required: true
.github/ISSUE_TEMPLATE/02-pkgsite-removal.md (vendored, new file): 39 changed lines
@@ -0,0 +1,39 @@
---
name: Pkg.go.dev package removal request
about: Request a package be removed from the documentation site (pkg.go.dev)
title: "x/pkgsite: package removal request for [type path here]"
labels: pkgsite/package-removal
---

<!--
Please answer these questions before submitting your issue. Thanks!
-->

### What is the path of the package that you would like to have removed?

<!---
We can remove packages with a shared path prefix.
For example, a request for "github.com/author" would remove all pkg.go.dev pages with that package path prefix.
--->



### Are you the owner of this package?

<!---
Only the package owners can request to have their packages removed from pkg.go.dev.
--->



### What is the reason that you could not retract this package instead?

<!---
If you would like to have your module removed from pkg.go.dev, we recommend that you retract them, so that they can be removed from the go command and proxy.golang.org as well.

Retracting a module version involves adding a retract directive to your go.mod file and publishing a new version. For example: https://github.com/jba/retract-demo/blob/main/go.mod#L5-L8

See https://pkg.go.dev/about#removing-a-package for additional tips on retractions.
--->

.github/ISSUE_TEMPLATE/02-pkgsite-removal.yml (vendored, deleted): 42 changed lines
@@ -1,42 +0,0 @@
name: Pkg.go.dev package removal request
description: Request a package be removed from the documentation site (pkg.go.dev)
title: "x/pkgsite: package removal request for [type path here]"
labels: ["pkgsite/package-removal"]
body:
  - type: markdown
    attributes:
      value: "Please answer these questions before submitting your issue. Thanks!"
  - type: input
    id: package-path
    attributes:
      label: "What is the path of the package that you would like to have removed?"
      description: |
        We can remove packages with a shared path prefix.
        For example, a request for 'github.com/author' would remove all pkg.go.dev pages with that package path prefix.
    validations:
      required: true
  - type: textarea
    id: package-owner
    attributes:
      label: "Are you the owner of this package?"
      description: |
        Only the package owners can request to have their packages removed from pkg.go.dev.
        If the package path doesn't include your github username, please provide some other form of proof of ownership.
    validations:
      required: true
  - type: textarea
    id: retraction-reason
    attributes:
      label: "What is the reason that you could not retract this package instead?"
      description: |
        Requesting we remove a module here only hides the generated documentation on pkg.go.dev.
        It does not affect the behaviour of proxy.golang.org or the go command.
        Instead we recommend using the retract directive which will be processed by all 3 of the above.

        If you have deleted your repo, please recreate it and publish a retraction.

        Retracting a module version involves adding a retract directive to your go.mod file and publishing a new version.
        For example: https://github.com/jba/retract-demo/blob/main/go.mod#L5-L8.
        See https://pkg.go.dev/about#removing-a-package for additional tips on retractions.
    validations:
      required: true
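Both removal templates point to the retract directive as the preferred alternative to deletion. As a rough illustration of what that looks like (the module path and version numbers below are made up for this sketch, not taken from the diff), a go.mod that retracts bad releases reads:

```go.mod
module example.com/hypothetical/mymod

go 1.21

retract (
	v1.0.1 // Published accidentally.
	[v1.1.0, v1.1.2] // Broken builds; use v1.1.3 or later.
)
```

After a new version containing this directive is published, the retracted versions are flagged by the go command, skipped by `@latest` resolution, and hidden by default on pkg.go.dev.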
.github/ISSUE_TEMPLATE/03-gopls.md (vendored, new file): 61 changed lines
@@ -0,0 +1,61 @@
---
name: Gopls bugs or feature requests
about: Issues or feature requests for the Go language server (gopls)
title: "x/tools/gopls: "
labels: gopls Tools
---

<!--
Please answer these questions before submitting your issue. Thanks!
-->

### gopls version

<!--
Output of `gopls -v version` on the command line
-->



### go env

<!--
Output of `go env` on the command line in your workspace directory
-->


### What did you do?

<!--
If possible, provide a recipe for reproducing the error.
A complete runnable program is good.
A link on go.dev/play is better.
A failing unit test is the best.
-->



### What did you expect to see?



### What did you see instead?



### Editor and settings

<!--
Your editor and any settings you have configured (for example, your VSCode settings.json file)
-->



### Logs

<!--
If possible please include gopls logs. Instructions for capturing them can be found here:
https://github.com/golang/tools/blob/master/gopls/doc/troubleshooting.md#capture-logs
-->

.github/ISSUE_TEMPLATE/03-gopls.yml (vendored, deleted): 56 changed lines
@@ -1,56 +0,0 @@
name: Gopls bugs or feature requests
description: Issues or feature requests for the Go language server (gopls)
title: "x/tools/gopls: issue title"
labels: ["gopls", "Tools"]
body:
  - type: markdown
    attributes:
      value: "Please answer these questions before submitting your issue. Thanks!"
  - type: textarea
    id: gopls-version
    attributes:
      label: "gopls version"
      description: "Output of `gopls -v version` on the command line"
    validations:
      required: true
  - type: textarea
    id: go-env
    attributes:
      label: "go env"
      description: "Output of `go env` on the command line in your workspace directory"
      render: shell
    validations:
      required: true
  - type: textarea
    id: what-did-you-do
    attributes:
      label: "What did you do?"
      description: "If possible, provide a recipe for reproducing the error. A complete runnable program is good. A link on [go.dev/play](https://go.dev/play) is better. A failing unit test is the best."
    validations:
      required: true
  - type: textarea
    id: actual-behavior
    attributes:
      label: "What did you see happen?"
    validations:
      required: true
  - type: textarea
    id: expected-behavior
    attributes:
      label: "What did you expect to see?"
    validations:
      required: true
  - type: textarea
    id: editor-and-settings
    attributes:
      label: "Editor and settings"
      description: "Your editor and any settings you have configured (for example, your VSCode settings.json file)"
    validations:
      required: false
  - type: textarea
    id: logs
    attributes:
      label: "Logs"
      description: "If possible please include gopls logs. Instructions for capturing them can be found here: https://github.com/golang/tools/blob/master/gopls/doc/troubleshooting.md#capture-logs"
    validations:
      required: false
.github/ISSUE_TEMPLATE/04-vuln.md (vendored, new file): 51 changed lines
@@ -0,0 +1,51 @@
---
name: Go vulnerability management - bugs and feature requests
about: Issues or feature requests about Go vulnerability management
title: "x/vuln: "
labels: "vulncheck or vulndb"
---

<!--
Please answer these questions before submitting your issue. Thanks!

To add a new vulnerability to the Go vulnerability database
(https://vuln.go.dev), see https://go.dev/s/vulndb-report-new.

To report an issue about a report, see https://go.dev/s/vulndb-report-feedback.
-->

### What version of Go are you using (`go version`)?

<pre>
$ go version

</pre>

### Does this issue reproduce at the latest version of golang.org/x/vuln?



### What operating system and processor architecture are you using (`go env`)?

<details><summary><code>go env</code> Output</summary><br><pre>
$ go env

</pre></details>

### What did you do?

<!--
If possible, provide a recipe for reproducing the error.
A complete runnable program is good.
A link on go.dev/play is best.
-->



### What did you expect to see?



### What did you see instead?

.github/ISSUE_TEMPLATE/04-vuln.yml (vendored, deleted): 52 changed lines
@@ -1,52 +0,0 @@
name: Go vulnerability management - bugs and feature requests
description: Issues or feature requests about Go vulnerability management
title: "x/vuln: issue title"
labels: ["vulncheck or vulndb"]
body:
  - type: markdown
    attributes:
      value: "Please answer these questions before submitting your issue. Thanks! To add a new vulnerability to the Go vulnerability database (https://vuln.go.dev), see https://go.dev/s/vulndb-report-new. To report an issue about a report, see https://go.dev/s/vulndb-report-feedback."
  - type: textarea
    id: govulncheck-version
    attributes:
      label: govulncheck version
      description: What version of govulncheck are you using (`govulncheck -version`)?
      placeholder: |
        Go: devel go1.22-0262ea1ff9 Thu Oct 26 18:46:50 2023 +0000
        Scanner: govulncheck@v1.0.2-0.20231108200754-fcf7dff7b242
        DB: https://vuln.go.dev
        DB updated: 2023-11-21 15:39:17 +0000 UTC
    validations:
      required: true
  - type: textarea
    id: reproduce-latest-version
    attributes:
      label: "Does this issue reproduce at the latest version of golang.org/x/vuln?"
    validations:
      required: true
  - type: textarea
    id: go-env
    attributes:
      label: "Output of `go env` in your module/workspace:"
      render: shell
    validations:
      required: true
  - type: textarea
    id: what-did-you-do
    attributes:
      label: "What did you do?"
      description: "If possible, provide a recipe for reproducing the error. A complete runnable program is good. A link on [go.dev/play](https://go.dev/play) is best."
    validations:
      required: true
  - type: textarea
    id: actual-behavior
    attributes:
      label: "What did you see happen?"
    validations:
      required: true
  - type: textarea
    id: expected-behavior
    attributes:
      label: "What did you expect to see?"
    validations:
      required: true
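Both vulnerability templates assume the reporter has already run govulncheck. For readers unfamiliar with the tool, a typical session (illustrative only, not part of this diff) looks like:

```sh
# Install the scanner and print its version, as the form requests.
go install golang.org/x/vuln/cmd/govulncheck@latest
govulncheck -version

# Scan the current module; findings reference report IDs from https://vuln.go.dev.
govulncheck ./...
```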
.github/ISSUE_TEMPLATE/10-proposal.md (vendored, new file): 13 changed lines
@@ -0,0 +1,13 @@
---
name: Proposals
about: New external API or other notable changes
title: "proposal: affected/package: "
labels: Proposal
---

<!--
Our proposal process is documented here:
https://go.dev/s/proposal-process
-->

.github/ISSUE_TEMPLATE/10-proposal.yml (vendored, deleted): 15 changed lines
@@ -1,15 +0,0 @@
name: Proposals
description: New external API or other notable changes
title: "proposal: import/path: proposal title"
labels: ["Proposal"]
body:
  - type: markdown
    attributes:
      value: "Our proposal process is documented here: https://go.dev/s/proposal-process"
  - type: textarea
    id: proposal-details
    attributes:
      label: "Proposal Details"
      description: "Please provide the details of your proposal here."
    validations:
      required: true
.github/ISSUE_TEMPLATE/11-language-change.md (vendored, new file): 52 changed lines
@@ -0,0 +1,52 @@
---
name: Language Change Proposals
about: Changes to the language
title: "proposal: Go 2: "
labels: Proposal Go2 LanguageChange
---

<!--
Our process for evaluating language changes can be found here:
https://go.googlesource.com/proposal/+/refs/heads/master#language-changes
-->

### Author background

- **Would you consider yourself a novice, intermediate, or experienced Go programmer?**
- **What other languages do you have experience with?**

### Related proposals

- **Has this idea, or one like it, been proposed before?**
  - **If so, how does this proposal differ?**
- **Does this affect error handling?**
  - **If so, how does this differ from previous error handling proposals?**
- **Is this about generics?**
  - **If so, how does this relate to the accepted design and other generics proposals?**

### Proposal

- **What is the proposed change?**
- **Who does this proposal help, and why?**
- **Please describe as precisely as possible the change to the language.**
  - **What would change in the language spec?**
  - **Please also describe the change informally, as in a class teaching Go.**
- **Is this change backward compatible?**
  - Breaking the Go 1 compatibility guarantee is a large cost and requires a large benefit.
    Show example code before and after the change.
    - **Before**
    - **After**
- **Orthogonality: how does this change interact or overlap with existing features?**
- **Is the goal of this change a performance improvement?**
  - **If so, what quantifiable improvement should we expect?**
  - **How would we measure it?**

### Costs

- **Would this change make Go easier or harder to learn, and why?**
- **What is the cost of this proposal? (Every language change has a cost).**
- **How many tools (such as vet, gopls, gofmt, goimports, etc.) would be affected?**
- **What is the compile time cost?**
- **What is the run time cost?**
- **Can you describe a possible implementation?**
- **Do you have a prototype? (This is not required.)**
.github/ISSUE_TEMPLATE/11-language-change.yml (vendored, deleted): 165 changed lines
@@ -1,165 +0,0 @@
name: Language Change Proposals
description: Changes to the language
labels: ["Proposal", "LanguageChange", "LanguageChangeReview"]
title: "proposal: spec: proposal title"


body:
  - type: markdown
    attributes:
      value: |
        ## Our process for evaluating language changes can be found [here](https://go.googlesource.com/proposal/+/refs/heads/master#language-changes)

  - type: dropdown
    id: author-go-experience
    attributes:
      label: "Go Programming Experience"
      description: "Would you consider yourself a novice, intermediate, or experienced Go programmer?"
      options:
        - "Novice"
        - "Intermediate"
        - "Experienced"
      default: 1

  - type: input
    id: author-other-languages-experience
    attributes:
      label: "Other Languages Experience"
      description: "What other languages do you have experience with?"
      placeholder: "Go, Python, JS, Rust"
    validations:
      required: false

  - type: checkboxes
    id: related-idea
    attributes:
      label: "Related Idea"
      options:
        - label: "Has this idea, or one like it, been proposed before?"
        - label: "Does this affect error handling?"
        - label: "Is this about generics?"
        - label: "Is this change backward compatible? Breaking the Go 1 compatibility guarantee is a large cost and requires a large benefit"

  - type: textarea
    id: related-proposals
    attributes:
      label: Has this idea, or one like it, been proposed before?
      description: If so, how does this proposal differ?
      placeholder: |
        Yes or No

        If yes,
        1. Mention the related proposals
        2. then describe how this proposal differs
    validations:
      required: true

  - type: textarea
    id: error-handling-proposal
    attributes:
      label: Does this affect error handling?
      description: If so, how does this differ from previous error handling proposals?
      placeholder: |
        Yes or No

        If yes,
        1.how does this differ from previous error handling proposals?

    validations:
      required: true

  - type: textarea
    id: generics-proposal
    attributes:
      label: Is this about generics?
      description: If so, how does this relate to the accepted design and other generics proposals?
      placeholder: |
        Yes or No

        If yes,
        1. how does this relate to the accepted design and other generics proposals?

    validations:
      required: true

  - type: textarea
    id: proposal
    attributes:
      label: "Proposal"
      description: "What is the proposed change? Who does this proposal help, and why? Please describe as precisely as possible the change to the language."
    validations:
      required: true

  - type: textarea
    id: language-spec-changes
    attributes:
      label: "Language Spec Changes"
      description: "What would change in the language spec?"
    validations:
      required: false

  - type: textarea
    id: informal-change
    attributes:
      label: "Informal Change"
      description: "Please also describe the change informally, as in a class teaching Go."
    validations:
      required: false

  - type: textarea
    id: go-backwards-compatiblity
    attributes:
      label: Is this change backward compatible?
      description: Breaking the Go 1 compatibility guarantee is a large cost and requires a large benefit.
      placeholder: |
        Yes or No

        If yes,
        1. Show example code before and after the change.

    validations:
      required: true

  - type: textarea
    id: orthogonality
    attributes:
      label: "Orthogonality: How does this change interact or overlap with existing features?"
      description: "Is the goal of this change a performance improvement? If so, what quantifiable improvement should we expect? How would we measure it?"
    validations:
      required: false

  - type: textarea
    id: learning-curve
    attributes:
      label: "Would this change make Go easier or harder to learn, and why?"

  - type: textarea
    id: cost-description
    attributes:
      label: "Cost Description"
      description: "What is the cost of this proposal? (Every language change has a cost)"

  - type: input
    id: go-toolchain
    attributes:
      label: Changes to Go ToolChain
      description: "How many tools (such as vet, gopls, gofmt, goimports, etc.) would be affected? "
    validations:
      required: false

  - type: input
    id: perf-costs
    attributes:
      label: Performance Costs
      description: "What is the compile time cost? What is the run time cost? "
    validations:
      required: false

  - type: textarea
    id: prototype
    attributes:
      label: "Prototype"
      description: "Can you describe a possible implementation?"
    validations:
      required: false
.github/ISSUE_TEMPLATE/12-telemetry.yml (vendored, deleted): 30 changed lines
@@ -1,30 +0,0 @@
name: Go Telemetry Proposals
description: Changes to the telemetry upload configuration
title: "x/telemetry/config: proposal title"
labels: ["Telemetry-Proposal"]
projects: ["golang/29"]
body:
  - type: textarea
    attributes:
      label: Summary
      description: >
        What change are you proposing to the upload configuration, and why?
        For new upload configuration, which new counters will be collected, what
        do they measure, and why is it important to collect them?
        Note that uploaded data must not carry sensitive user information.
        See [go.dev/doc/telemetry#proposals](https://go.dev/doc/telemetry#proposals)
        for more details on telemetry proposals.
    validations:
      required: true
  - type: input
    attributes:
      label: Proposed Config Change
      description: >
        A CL containing proposed changes to the
        [config.txt](https://go.googlesource.com/telemetry/+/master/internal/chartconfig/config.txt)
        chart configuration.
        See the [chartconfig](https://pkg.go.dev/golang.org/x/telemetry/internal/chartconfig)
        package for an explanation of the chart config format.
        For an example change, see [CL 564619](https://go.dev/cl/564619).
    validations:
      required: true
.github/ISSUE_TEMPLATE/config.yml (vendored): 2 changed lines
@@ -1,4 +1,4 @@
blank_issues_enabled: true
blank_issues_enabled: false
contact_links:
  - name: Questions
    about: Please use one of the forums for questions or general discussions
.gitignore (vendored): 2 changed lines
@@ -37,7 +37,7 @@ _testmain.go
/src/go/build/zcgo.go
/src/go/doc/headscan
/src/internal/buildcfg/zbootstrap.go
/src/internal/runtime/sys/zversion.go
/src/runtime/internal/sys/zversion.go
/src/unicode/maketables
/src/time/tzdata/zzipdata.go
/test.out
LICENSE: 4 changed lines
@@ -1,4 +1,4 @@
Copyright 2009 The Go Authors.
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google LLC nor the names of its
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
README.md
@@ -4,7 +4,7 @@ Go is an open source programming language that makes it easy to build simple,
reliable, and efficient software.

![Gopher image]()
*Gopher image by [Renee French][rf], licensed under [Creative Commons 4.0 Attribution license][cc4-by].*
*Gopher image by [Renee French][rf], licensed under [Creative Commons 4.0 Attributions license][cc4-by].*

Our canonical Git repository is located at https://go.googlesource.com/go.
There is a mirror of the repository at https://github.com/golang/go.
SECURITY.md
@@ -10,4 +10,4 @@ part of that page.

## Reporting a Vulnerability

See https://go.dev/security/policy for how to report a vulnerability.
See https://go.dev/security for how to report a vulnerability.
api/README
@@ -21,6 +21,3 @@ warning output from the go api tool. Each file should be named
nnnnn.txt, after the issue number for the accepted proposal.
(The #nnnnn suffix must also appear at the end of each line in the file;
that will be preserved when next/*.txt is concatenated into go1.XX.txt.)

When you add a file to the api/next directory, you must add at least one file
under doc/next. See doc/README.md for details.
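As an illustration of the convention this README describes (the issue number and API below are invented for the sketch, not taken from this diff), a file named api/next/12345.txt would contain lines such as:

```
pkg strings, func HypotheticalNewAPI(string) string #12345
```

Every line carries the trailing #12345 suffix so the entry stays attributed to its proposal when next/*.txt is concatenated into go1.XX.txt.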
@@ -598,7 +598,3 @@ pkg syscall (freebsd-arm64-cgo), const SYS_MKNODAT = 498
pkg syscall (freebsd-arm64-cgo), const SYS_STAT = 188
pkg syscall (freebsd-arm64-cgo), const SYS_STAT ideal-int
pkg syscall (freebsd-arm64-cgo), const SYS_STATFS = 396
pkg syscall (openbsd-386), const ELAST = 91
pkg syscall (openbsd-386-cgo), const ELAST = 91
pkg syscall (openbsd-amd64), const ELAST = 91
pkg syscall (openbsd-amd64-cgo), const ELAST = 91
@@ -60,9 +60,7 @@ pkg crypto/tls, method (*QUICConn) Close() error #44886
pkg crypto/tls, method (*QUICConn) ConnectionState() ConnectionState #44886
pkg crypto/tls, method (*QUICConn) HandleData(QUICEncryptionLevel, []uint8) error #44886
pkg crypto/tls, method (*QUICConn) NextEvent() QUICEvent #44886
pkg crypto/tls, method (*QUICConn) SendSessionTicket(QUICSessionTicketOptions) error #60107
pkg crypto/tls, type QUICSessionTicketOptions struct #60107
pkg crypto/tls, type QUICSessionTicketOptions struct, EarlyData bool #60107
pkg crypto/tls, method (*QUICConn) SendSessionTicket(bool) error #60107
pkg crypto/tls, method (*QUICConn) SetTransportParameters([]uint8) #44886
pkg crypto/tls, method (*QUICConn) Start(context.Context) error #44886
pkg crypto/tls, method (QUICEncryptionLevel) String() string #44886
@@ -167,12 +165,7 @@ pkg errors, var ErrUnsupported error #41198
pkg flag, func BoolFunc(string, string, func(string) error) #53747
pkg flag, method (*FlagSet) BoolFunc(string, string, func(string) error) #53747
pkg go/ast, func IsGenerated(*File) bool #28089
pkg go/ast, func NewPackage //deprecated #52463
pkg go/ast, type File struct, GoVersion string #59033
pkg go/ast, type Importer //deprecated #52463
pkg go/ast, type Object //deprecated #52463
pkg go/ast, type Package //deprecated #52463
pkg go/ast, type Scope //deprecated #52463
pkg go/build/constraint, func GoVersion(Expr) string #59033
pkg go/build, type Directive struct #56986
pkg go/build, type Directive struct, Pos token.Position #56986
@@ -226,18 +219,18 @@ pkg log/slog, func Any(string, interface{}) Attr #56345
pkg log/slog, func AnyValue(interface{}) Value #56345
pkg log/slog, func Bool(string, bool) Attr #56345
pkg log/slog, func BoolValue(bool) Value #56345
pkg log/slog, func DebugContext(context.Context, string, ...interface{}) #61200
pkg log/slog, func DebugCtx(context.Context, string, ...interface{}) #56345
pkg log/slog, func Debug(string, ...interface{}) #56345
pkg log/slog, func Default() *Logger #56345
pkg log/slog, func Duration(string, time.Duration) Attr #56345
pkg log/slog, func DurationValue(time.Duration) Value #56345
pkg log/slog, func ErrorContext(context.Context, string, ...interface{}) #61200
pkg log/slog, func ErrorCtx(context.Context, string, ...interface{}) #56345
pkg log/slog, func Error(string, ...interface{}) #56345
pkg log/slog, func Float64(string, float64) Attr #56345
pkg log/slog, func Float64Value(float64) Value #56345
pkg log/slog, func Group(string, ...interface{}) Attr #59204
pkg log/slog, func GroupValue(...Attr) Value #56345
pkg log/slog, func InfoContext(context.Context, string, ...interface{}) #61200
pkg log/slog, func InfoCtx(context.Context, string, ...interface{}) #56345
pkg log/slog, func Info(string, ...interface{}) #56345
pkg log/slog, func Int64(string, int64) Attr #56345
pkg log/slog, func Int64Value(int64) Value #56345
@@ -257,7 +250,7 @@ pkg log/slog, func Time(string, time.Time) Attr #56345
pkg log/slog, func TimeValue(time.Time) Value #56345
pkg log/slog, func Uint64(string, uint64) Attr #56345
pkg log/slog, func Uint64Value(uint64) Value #56345
pkg log/slog, func WarnContext(context.Context, string, ...interface{}) #61200
pkg log/slog, func WarnCtx(context.Context, string, ...interface{}) #56345
pkg log/slog, func Warn(string, ...interface{}) #56345
pkg log/slog, func With(...interface{}) *Logger #56345
pkg log/slog, method (Attr) Equal(Attr) bool #56345
@@ -278,17 +271,17 @@ pkg log/slog, method (*LevelVar) MarshalText() ([]uint8, error) #56345
pkg log/slog, method (*LevelVar) Set(Level) #56345
pkg log/slog, method (*LevelVar) String() string #56345
pkg log/slog, method (*LevelVar) UnmarshalText([]uint8) error #56345
pkg log/slog, method (*Logger) DebugContext(context.Context, string, ...interface{}) #61200
pkg log/slog, method (*Logger) DebugCtx(context.Context, string, ...interface{}) #56345
pkg log/slog, method (*Logger) Debug(string, ...interface{}) #56345
pkg log/slog, method (*Logger) Enabled(context.Context, Level) bool #56345
pkg log/slog, method (*Logger) ErrorContext(context.Context, string, ...interface{}) #61200
pkg log/slog, method (*Logger) ErrorCtx(context.Context, string, ...interface{}) #56345
pkg log/slog, method (*Logger) Error(string, ...interface{}) #56345
pkg log/slog, method (*Logger) Handler() Handler #56345
pkg log/slog, method (*Logger) InfoContext(context.Context, string, ...interface{}) #61200
pkg log/slog, method (*Logger) InfoCtx(context.Context, string, ...interface{}) #56345
pkg log/slog, method (*Logger) Info(string, ...interface{}) #56345
pkg log/slog, method (*Logger) LogAttrs(context.Context, Level, string, ...Attr) #56345
pkg log/slog, method (*Logger) Log(context.Context, Level, string, ...interface{}) #56345
pkg log/slog, method (*Logger) WarnContext(context.Context, string, ...interface{}) #61200
pkg log/slog, method (*Logger) WarnCtx(context.Context, string, ...interface{}) #56345
pkg log/slog, method (*Logger) Warn(string, ...interface{}) #56345
pkg log/slog, method (*Logger) WithGroup(string) *Logger #56345
pkg log/slog, method (*Logger) With(...interface{}) *Logger #56345
@@ -351,6 +344,8 @@ pkg maps, func Copy[$0 interface{ ~map[$2]$3 }, $1 interface{ ~map[$2]$3 }, $2 c
pkg maps, func DeleteFunc[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0, func($1, $2) bool) #57436
pkg maps, func Equal[$0 interface{ ~map[$2]$3 }, $1 interface{ ~map[$2]$3 }, $2 comparable, $3 comparable]($0, $1) bool #57436
pkg maps, func EqualFunc[$0 interface{ ~map[$2]$3 }, $1 interface{ ~map[$2]$4 }, $2 comparable, $3 interface{}, $4 interface{}]($0, $1, func($3, $4) bool) bool #57436
pkg maps, func Keys[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0) []$1 #57436
pkg maps, func Values[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0) []$2 #57436
pkg math/big, method (*Int) Float64() (float64, Accuracy) #56984
pkg net/http, method (*ProtocolError) Is(error) bool #41198
pkg net/http, method (*ResponseController) EnableFullDuplex() error #57786
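The log/slog hunks above list both the older *Ctx helpers and their *Context replacements (proposal #61200 renamed DebugCtx, InfoCtx, WarnCtx, and ErrorCtx before Go 1.21 shipped). Purely as an illustration of that surviving API, standard library only and not code from this diff:

```go
package main

import (
	"context"
	"log/slog"
)

func main() {
	ctx := context.Background()
	logger := slog.Default().With("component", "example")

	// The *Context methods replaced the earlier *Ctx names.
	logger.InfoContext(ctx, "request handled", slog.Int64("bytes", 42))
	logger.DebugContext(ctx, "details", slog.Bool("cached", false))
}
```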
api/go1.22.txt (deleted): 135 changed lines
@@ -1,135 +0,0 @@
pkg archive/tar, method (*Writer) AddFS(fs.FS) error #58000
pkg archive/zip, method (*Writer) AddFS(fs.FS) error #54898
pkg cmp, func Or[$0 comparable](...$0) $0 #60204
pkg crypto/x509, func OIDFromInts([]uint64) (OID, error) #60665
pkg crypto/x509, method (*CertPool) AddCertWithConstraint(*Certificate, func([]*Certificate) error) #57178
pkg crypto/x509, method (OID) Equal(OID) bool #60665
pkg crypto/x509, method (OID) EqualASN1OID(asn1.ObjectIdentifier) bool #60665
pkg crypto/x509, method (OID) String() string #60665
pkg crypto/x509, type Certificate struct, Policies []OID #60665
pkg crypto/x509, type OID struct #60665
pkg database/sql, method (*Null[$0]) Scan(interface{}) error #60370
pkg database/sql, method (Null[$0]) Value() (driver.Value, error) #60370
pkg database/sql, type Null[$0 interface{}] struct #60370
pkg database/sql, type Null[$0 interface{}] struct, V $0 #60370
pkg database/sql, type Null[$0 interface{}] struct, Valid bool #60370
pkg debug/elf, const R_LARCH_64_PCREL = 109 #63725
pkg debug/elf, const R_LARCH_64_PCREL R_LARCH #63725
pkg debug/elf, const R_LARCH_ADD6 = 105 #63725
pkg debug/elf, const R_LARCH_ADD6 R_LARCH #63725
pkg debug/elf, const R_LARCH_ADD_ULEB128 = 107 #63725
pkg debug/elf, const R_LARCH_ADD_ULEB128 R_LARCH #63725
pkg debug/elf, const R_LARCH_ALIGN = 102 #63725
pkg debug/elf, const R_LARCH_ALIGN R_LARCH #63725
pkg debug/elf, const R_LARCH_CFA = 104 #63725
pkg debug/elf, const R_LARCH_CFA R_LARCH #63725
pkg debug/elf, const R_LARCH_DELETE = 101 #63725
pkg debug/elf, const R_LARCH_DELETE R_LARCH #63725
pkg debug/elf, const R_LARCH_PCREL20_S2 = 103 #63725
pkg debug/elf, const R_LARCH_PCREL20_S2 R_LARCH #63725
pkg debug/elf, const R_LARCH_SUB6 = 106 #63725
pkg debug/elf, const R_LARCH_SUB6 R_LARCH #63725
pkg debug/elf, const R_LARCH_SUB_ULEB128 = 108 #63725
pkg debug/elf, const R_LARCH_SUB_ULEB128 R_LARCH #63725
pkg debug/elf, const R_MIPS_PC32 = 248 #61974
pkg debug/elf, const R_MIPS_PC32 R_MIPS #61974
pkg encoding/base32, method (*Encoding) AppendDecode([]uint8, []uint8) ([]uint8, error) #53693
pkg encoding/base32, method (*Encoding) AppendEncode([]uint8, []uint8) []uint8 #53693
pkg encoding/base64, method (*Encoding) AppendDecode([]uint8, []uint8) ([]uint8, error) #53693
pkg encoding/base64, method (*Encoding) AppendEncode([]uint8, []uint8) []uint8 #53693
pkg encoding/hex, func AppendDecode([]uint8, []uint8) ([]uint8, error) #53693
pkg encoding/hex, func AppendEncode([]uint8, []uint8) []uint8 #53693
pkg go/ast, func NewPackage //deprecated #52463
pkg go/ast, func Unparen(Expr) Expr #60061
pkg go/ast, type Importer //deprecated #52463
pkg go/ast, type Object //deprecated #52463
pkg go/ast, type Package //deprecated #52463
pkg go/ast, type Scope //deprecated #52463
pkg go/types, func NewAlias(*TypeName, Type) *Alias #63223
pkg go/types, func Unalias(Type) Type #63223
pkg go/types, method (*Alias) Obj() *TypeName #63223
pkg go/types, method (*Alias) String() string #63223
pkg go/types, method (*Alias) Underlying() Type #63223
pkg go/types, method (*Info) PkgNameOf(*ast.ImportSpec) *PkgName #62037
pkg go/types, method (Checker) PkgNameOf(*ast.ImportSpec) *PkgName #62037
pkg go/types, type Alias struct #63223
pkg go/types, type Info struct, FileVersions map[*ast.File]string #62605
pkg go/version, func Compare(string, string) int #62039
pkg go/version, func IsValid(string) bool #62039
pkg go/version, func Lang(string) string #62039
pkg html/template, const ErrJSTemplate //deprecated #61619
pkg io, method (*SectionReader) Outer() (ReaderAt, int64, int64) #61870
pkg log/slog, func SetLogLoggerLevel(Level) Level #62418
pkg math/big, method (*Rat) FloatPrec() (int, bool) #50489
pkg math/rand/v2, func ExpFloat64() float64 #61716
pkg math/rand/v2, func Float32() float32 #61716
pkg math/rand/v2, func Float64() float64 #61716
pkg math/rand/v2, func Int() int #61716
pkg math/rand/v2, func Int32() int32 #61716
pkg math/rand/v2, func Int32N(int32) int32 #61716
pkg math/rand/v2, func Int64() int64 #61716
pkg math/rand/v2, func Int64N(int64) int64 #61716
pkg math/rand/v2, func IntN(int) int #61716
pkg math/rand/v2, func N[$0 intType]($0) $0 #61716
pkg math/rand/v2, func New(Source) *Rand #61716
pkg math/rand/v2, func NewChaCha8([32]uint8) *ChaCha8 #61716
pkg math/rand/v2, func NewPCG(uint64, uint64) *PCG #61716
pkg math/rand/v2, func NewZipf(*Rand, float64, float64, uint64) *Zipf #61716
pkg math/rand/v2, func NormFloat64() float64 #61716
pkg math/rand/v2, func Perm(int) []int #61716
pkg math/rand/v2, func Shuffle(int, func(int, int)) #61716
pkg math/rand/v2, func Uint32() uint32 #61716
pkg math/rand/v2, func Uint32N(uint32) uint32 #61716
pkg math/rand/v2, func Uint64() uint64 #61716
pkg math/rand/v2, func Uint64N(uint64) uint64 #61716
pkg math/rand/v2, func UintN(uint) uint #61716
pkg math/rand/v2, method (*ChaCha8) MarshalBinary() ([]uint8, error) #61716
pkg math/rand/v2, method (*ChaCha8) Seed([32]uint8) #61716
pkg math/rand/v2, method (*ChaCha8) Uint64() uint64 #61716
pkg math/rand/v2, method (*ChaCha8) UnmarshalBinary([]uint8) error #61716
pkg math/rand/v2, method (*PCG) MarshalBinary() ([]uint8, error) #61716
pkg math/rand/v2, method (*PCG) Seed(uint64, uint64) #61716
pkg math/rand/v2, method (*PCG) Uint64() uint64 #61716
pkg math/rand/v2, method (*PCG) UnmarshalBinary([]uint8) error #61716
pkg math/rand/v2, method (*Rand) ExpFloat64() float64 #61716
pkg math/rand/v2, method (*Rand) Float32() float32 #61716
pkg math/rand/v2, method (*Rand) Float64() float64 #61716
pkg math/rand/v2, method (*Rand) Int() int #61716
pkg math/rand/v2, method (*Rand) Int32() int32 #61716
pkg math/rand/v2, method (*Rand) Int32N(int32) int32 #61716
pkg math/rand/v2, method (*Rand) Int64() int64 #61716
pkg math/rand/v2, method (*Rand) Int64N(int64) int64 #61716
pkg math/rand/v2, method (*Rand) IntN(int) int #61716
pkg math/rand/v2, method (*Rand) NormFloat64() float64 #61716
pkg math/rand/v2, method (*Rand) Perm(int) []int #61716
pkg math/rand/v2, method (*Rand) Shuffle(int, func(int, int)) #61716
pkg math/rand/v2, method (*Rand) Uint32() uint32 #61716
pkg math/rand/v2, method (*Rand) Uint32N(uint32) uint32 #61716
pkg math/rand/v2, method (*Rand) Uint64() uint64 #61716
pkg math/rand/v2, method (*Rand) Uint64N(uint64) uint64 #61716
pkg math/rand/v2, method (*Rand) UintN(uint) uint #61716
pkg math/rand/v2, method (*Zipf) Uint64() uint64 #61716
pkg math/rand/v2, type ChaCha8 struct #61716
pkg math/rand/v2, type PCG struct #61716
pkg math/rand/v2, type Rand struct #61716
pkg math/rand/v2, type Source interface { Uint64 } #61716
pkg math/rand/v2, type Source interface, Uint64() uint64 #61716
pkg math/rand/v2, type Zipf struct #61716
pkg net, method (*TCPConn) WriteTo(io.Writer) (int64, error) #58808
pkg net/http, func FileServerFS(fs.FS) Handler #51971
pkg net/http, func NewFileTransportFS(fs.FS) RoundTripper #51971
pkg net/http, func ServeFileFS(ResponseWriter, *Request, fs.FS, string) #51971
pkg net/http, method (*Request) PathValue(string) string #61410
pkg net/http, method (*Request) SetPathValue(string, string) #61410
pkg net/netip, method (AddrPort) Compare(AddrPort) int #61642
pkg os, method (*File) WriteTo(io.Writer) (int64, error) #58808
pkg reflect, func PtrTo //deprecated #59599
pkg reflect, func TypeFor[$0 interface{}]() Type #60088
pkg slices, func Concat[$0 interface{ ~[]$1 }, $1 interface{}](...$0) $0 #56353
pkg syscall (linux-386), type SysProcAttr struct, PidFD *int #51246
pkg syscall (linux-386-cgo), type SysProcAttr struct, PidFD *int #51246
pkg syscall (linux-amd64), type SysProcAttr struct, PidFD *int #51246
pkg syscall (linux-amd64-cgo), type SysProcAttr struct, PidFD *int #51246
pkg syscall (linux-arm), type SysProcAttr struct, PidFD *int #51246
pkg syscall (linux-arm-cgo), type SysProcAttr struct, PidFD *int #51246
pkg testing/slogtest, func Run(*testing.T, func(*testing.T) slog.Handler, func(*testing.T) map[string]interface{}) #61758
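The api/go1.22.txt listing above is dominated by the math/rand/v2 package. As a small, purely illustrative use of that API (not code from this diff):

```go
package main

import (
	"fmt"
	"math/rand/v2"
)

func main() {
	// Top-level helpers draw from an automatically seeded global generator.
	fmt.Println(rand.IntN(10))  // uniform int in [0, 10)
	fmt.Println(rand.Float64()) // uniform float64 in [0, 1)

	// A reproducible stream uses an explicit PCG source.
	r := rand.New(rand.NewPCG(1, 2))
	fmt.Println(r.Perm(5)) // deterministic permutation of 0..4
}
```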
158
api/go1.23.txt
158
api/go1.23.txt
@@ -1,158 +0,0 @@
|
||||
pkg archive/tar, type FileInfoNames interface { Gname, IsDir, ModTime, Mode, Name, Size, Sys, Uname } #50102
|
||||
pkg archive/tar, type FileInfoNames interface, Gname() (string, error) #50102
|
||||
pkg archive/tar, type FileInfoNames interface, IsDir() bool #50102
|
||||
pkg archive/tar, type FileInfoNames interface, ModTime() time.Time #50102
|
||||
pkg archive/tar, type FileInfoNames interface, Mode() fs.FileMode #50102
|
||||
pkg archive/tar, type FileInfoNames interface, Name() string #50102
|
||||
pkg archive/tar, type FileInfoNames interface, Size() int64 #50102
|
||||
pkg archive/tar, type FileInfoNames interface, Sys() interface{} #50102
|
||||
pkg archive/tar, type FileInfoNames interface, Uname() (string, error) #50102
|
||||
pkg crypto/tls, const QUICResumeSession = 8 #63691
|
||||
pkg crypto/tls, const QUICResumeSession QUICEventKind #63691
|
||||
pkg crypto/tls, const QUICStoreSession = 9 #63691
|
||||
pkg crypto/tls, const QUICStoreSession QUICEventKind #63691
|
||||
pkg crypto/tls, method (*ECHRejectionError) Error() string #63369
|
||||
pkg crypto/tls, method (*QUICConn) StoreSession(*SessionState) error #63691
|
||||
pkg crypto/tls, type Config struct, EncryptedClientHelloConfigList []uint8 #63369
|
||||
pkg crypto/tls, type Config struct, EncryptedClientHelloRejectionVerify func(ConnectionState) error #63369
|
||||
pkg crypto/tls, type ConnectionState struct, ECHAccepted bool #63369
|
||||
pkg crypto/tls, type ECHRejectionError struct #63369
|
||||
pkg crypto/tls, type ECHRejectionError struct, RetryConfigList []uint8 #63369
|
||||
pkg crypto/tls, type QUICConfig struct, EnableSessionEvents bool #63691
|
||||
pkg crypto/tls, type QUICEvent struct, SessionState *SessionState #63691
|
||||
pkg crypto/tls, type QUICSessionTicketOptions struct, Extra [][]uint8 #63691
|
||||
pkg crypto/x509, func ParseOID(string) (OID, error) #66249
|
||||
pkg crypto/x509, method (*OID) UnmarshalBinary([]uint8) error #66249
|
||||
pkg crypto/x509, method (*OID) UnmarshalText([]uint8) error #66249
|
||||
pkg crypto/x509, method (OID) MarshalBinary() ([]uint8, error) #66249
|
||||
pkg crypto/x509, method (OID) MarshalText() ([]uint8, error) #66249
|
||||
pkg debug/elf, const PT_OPENBSD_NOBTCFI = 1705237480 #66054
|
||||
pkg debug/elf, const PT_OPENBSD_NOBTCFI ProgType #66054
|
||||
pkg debug/elf, const STT_GNU_IFUNC = 10 #66836
|
||||
pkg debug/elf, const STT_GNU_IFUNC SymType #66836
|
||||
pkg debug/elf, const STT_RELC = 8 #66836
|
||||
pkg debug/elf, const STT_RELC SymType #66836
|
||||
pkg debug/elf, const STT_SRELC = 9 #66836
|
||||
pkg debug/elf, const STT_SRELC SymType #66836
|
||||
pkg encoding/binary, func Append([]uint8, ByteOrder, interface{}) ([]uint8, error) #60023
|
||||
pkg encoding/binary, func Decode([]uint8, ByteOrder, interface{}) (int, error) #60023
|
||||
pkg encoding/binary, func Encode([]uint8, ByteOrder, interface{}) (int, error) #60023
|
||||
pkg go/ast, func Preorder(Node) iter.Seq[Node] #66339
|
||||
pkg go/types, method (*Alias) Origin() *Alias #67143
|
||||
pkg go/types, method (*Alias) Rhs() Type #66559
|
||||
pkg go/types, method (*Alias) SetTypeParams([]*TypeParam) #67143
|
||||
pkg go/types, method (*Alias) TypeArgs() *TypeList #67143
|
||||
pkg go/types, method (*Alias) TypeParams() *TypeParamList #67143
|
||||
pkg go/types, method (*Func) Signature() *Signature #65772
|
||||
pkg iter, func Pull2[$0 interface{}, $1 interface{}](Seq2[$0, $1]) (func() ($0, $1, bool), func()) #61897
|
||||
pkg iter, func Pull[$0 interface{}](Seq[$0]) (func() ($0, bool), func()) #61897
|
||||
pkg iter, type Seq2[$0 interface{}, $1 interface{}] func(func($0, $1) bool) #61897
|
||||
pkg iter, type Seq[$0 interface{}] func(func($0) bool) #61897
|
||||
pkg maps, func All[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0) iter.Seq2[$1, $2] #61900
|
||||
pkg maps, func Collect[$0 comparable, $1 interface{}](iter.Seq2[$0, $1]) map[$0]$1 #61900
|
||||
pkg maps, func Insert[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0, iter.Seq2[$1, $2]) #61900
|
||||
pkg maps, func Keys[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0) iter.Seq[$1] #61900
|
||||
pkg maps, func Values[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0) iter.Seq[$2] #61900
|
||||
pkg math/rand/v2, func Uint() uint #61716
|
||||
pkg math/rand/v2, method (*ChaCha8) Read([]uint8) (int, error) #67059
|
||||
pkg math/rand/v2, method (*Rand) Uint() uint #61716
|
||||
pkg net, method (*DNSError) Unwrap() error #63116
|
||||
pkg net, method (*TCPConn) SetKeepAliveConfig(KeepAliveConfig) error #62254
|
||||
pkg net, type DNSError struct, UnwrapErr error #63116
|
||||
pkg net, type Dialer struct, KeepAliveConfig KeepAliveConfig #62254
|
||||
pkg net, type KeepAliveConfig struct #62254
|
||||
pkg net, type KeepAliveConfig struct, Count int #62254
|
||||
pkg net, type KeepAliveConfig struct, Enable bool #62254
|
||||
pkg net, type KeepAliveConfig struct, Idle time.Duration #62254
|
||||
pkg net, type KeepAliveConfig struct, Interval time.Duration #62254
|
||||
pkg net, type ListenConfig struct, KeepAliveConfig KeepAliveConfig #62254
|
||||
pkg net/http, func ParseCookie(string) ([]*Cookie, error) #66008
|
||||
pkg net/http, func ParseSetCookie(string) (*Cookie, error) #66008
|
||||
pkg net/http, method (*Request) CookiesNamed(string) []*Cookie #61472
|
||||
pkg net/http, type Cookie struct, Partitioned bool #62490
|
||||
pkg net/http, type Cookie struct, Quoted bool #46443
|
||||
pkg net/http, type Request struct, Pattern string #66405
|
||||
pkg net/http/httptest, func NewRequestWithContext(context.Context, string, string, io.Reader) *http.Request #59473
|
||||
pkg os, func CopyFS(string, fs.FS) error #62484
|
||||
pkg path/filepath, func Localize(string) (string, error) #57151
|
||||
pkg reflect, func SliceAt(Type, unsafe.Pointer, int) Value #61308
|
||||
pkg reflect, method (Value) Seq() iter.Seq[Value] #66056
|
||||
pkg reflect, method (Value) Seq2() iter.Seq2[Value, Value] #66056
|
||||
pkg reflect, type Type interface, CanSeq() bool #66056
|
||||
pkg reflect, type Type interface, CanSeq2() bool #66056
|
||||
pkg reflect, type Type interface, OverflowComplex(complex128) bool #60427
|
||||
pkg reflect, type Type interface, OverflowFloat(float64) bool #60427
|
||||
pkg reflect, type Type interface, OverflowInt(int64) bool #60427
|
||||
pkg reflect, type Type interface, OverflowUint(uint64) bool #60427
|
||||
pkg runtime/debug, func SetCrashOutput(*os.File, CrashOptions) error #42888
|
||||
pkg runtime/debug, type CrashOptions struct #67182
|
||||
pkg slices, func All[$0 interface{ ~[]$1 }, $1 interface{}]($0) iter.Seq2[int, $1] #61899
|
||||
pkg slices, func AppendSeq[$0 interface{ ~[]$1 }, $1 interface{}]($0, iter.Seq[$1]) $0 #61899
|
||||
pkg slices, func Backward[$0 interface{ ~[]$1 }, $1 interface{}]($0) iter.Seq2[int, $1] #61899
|
||||
pkg slices, func Chunk[$0 interface{ ~[]$1 }, $1 interface{}]($0, int) iter.Seq[$0] #53987
|
||||
pkg slices, func Collect[$0 interface{}](iter.Seq[$0]) []$0 #61899
|
||||
pkg slices, func Repeat[$0 interface{ ~[]$1 }, $1 interface{}]($0, int) $0 #65238
|
||||
pkg slices, func SortedFunc[$0 interface{}](iter.Seq[$0], func($0, $0) int) []$0 #61899
|
||||
pkg slices, func SortedStableFunc[$0 interface{}](iter.Seq[$0], func($0, $0) int) []$0 #61899
|
||||
pkg slices, func Sorted[$0 cmp.Ordered](iter.Seq[$0]) []$0 #61899
|
||||
pkg slices, func Values[$0 interface{ ~[]$1 }, $1 interface{}]($0) iter.Seq[$1] #61899
|
||||
pkg structs, type HostLayout struct #66408
|
||||
pkg sync, method (*Map) Clear() #61696
|
||||
pkg sync/atomic, func AndInt32(*int32, int32) int32 #61395
|
||||
pkg sync/atomic, func AndInt64(*int64, int64) int64 #61395
|
||||
pkg sync/atomic, func AndUint32(*uint32, uint32) uint32 #61395
|
||||
pkg sync/atomic, func AndUint64(*uint64, uint64) uint64 #61395
|
||||
pkg sync/atomic, func AndUintptr(*uintptr, uintptr) uintptr #61395
|
||||
pkg sync/atomic, func OrInt32(*int32, int32) int32 #61395
|
||||
pkg sync/atomic, func OrInt64(*int64, int64) int64 #61395
|
||||
pkg sync/atomic, func OrUint32(*uint32, uint32) uint32 #61395
|
||||
pkg sync/atomic, func OrUint64(*uint64, uint64) uint64 #61395
|
||||
pkg sync/atomic, func OrUintptr(*uintptr, uintptr) uintptr #61395
|
||||
pkg sync/atomic, method (*Int32) And(int32) int32 #61395
|
||||
pkg sync/atomic, method (*Int32) Or(int32) int32 #61395
|
||||
pkg sync/atomic, method (*Int64) And(int64) int64 #61395
|
||||
pkg sync/atomic, method (*Int64) Or(int64) int64 #61395
|
||||
pkg sync/atomic, method (*Uint32) And(uint32) uint32 #61395
|
||||
pkg sync/atomic, method (*Uint32) Or(uint32) uint32 #61395
|
||||
pkg sync/atomic, method (*Uint64) And(uint64) uint64 #61395
|
||||
pkg sync/atomic, method (*Uint64) Or(uint64) uint64 #61395
|
||||
pkg sync/atomic, method (*Uintptr) And(uintptr) uintptr #61395
|
||||
pkg sync/atomic, method (*Uintptr) Or(uintptr) uintptr #61395
|
||||
pkg syscall (openbsd-386), const EBADMSG = 92 #67998
|
||||
pkg syscall (openbsd-386), const ELAST = 95 #67998
|
||||
pkg syscall (openbsd-386), const ENOTRECOVERABLE = 93 #67998
|
||||
pkg syscall (openbsd-386), const ENOTRECOVERABLE Errno #67998
|
||||
pkg syscall (openbsd-386), const EOWNERDEAD = 94 #67998
|
||||
pkg syscall (openbsd-386), const EOWNERDEAD Errno #67998
|
||||
pkg syscall (openbsd-386), const EPROTO = 95 #67998
|
||||
pkg syscall (openbsd-386-cgo), const EBADMSG = 92 #67998
|
||||
pkg syscall (openbsd-386-cgo), const ELAST = 95 #67998
|
||||
pkg syscall (openbsd-386-cgo), const ENOTRECOVERABLE = 93 #67998
|
||||
pkg syscall (openbsd-386-cgo), const ENOTRECOVERABLE Errno #67998
|
||||
pkg syscall (openbsd-386-cgo), const EOWNERDEAD = 94 #67998
|
||||
pkg syscall (openbsd-386-cgo), const EOWNERDEAD Errno #67998
|
||||
pkg syscall (openbsd-386-cgo), const EPROTO = 95 #67998
|
||||
pkg syscall (openbsd-amd64), const EBADMSG = 92 #67998
|
||||
pkg syscall (openbsd-amd64), const ELAST = 95 #67998
|
||||
pkg syscall (openbsd-amd64), const ENOTRECOVERABLE = 93 #67998
|
||||
pkg syscall (openbsd-amd64), const ENOTRECOVERABLE Errno #67998
|
||||
pkg syscall (openbsd-amd64), const EOWNERDEAD = 94 #67998
|
||||
pkg syscall (openbsd-amd64), const EOWNERDEAD Errno #67998
|
||||
pkg syscall (openbsd-amd64), const EPROTO = 95 #67998
|
||||
pkg syscall (openbsd-amd64-cgo), const EBADMSG = 92 #67998
|
||||
pkg syscall (openbsd-amd64-cgo), const ELAST = 95 #67998
|
||||
pkg syscall (openbsd-amd64-cgo), const ENOTRECOVERABLE = 93 #67998
|
||||
pkg syscall (openbsd-amd64-cgo), const ENOTRECOVERABLE Errno #67998
|
||||
pkg syscall (openbsd-amd64-cgo), const EOWNERDEAD = 94 #67998
|
||||
pkg syscall (openbsd-amd64-cgo), const EOWNERDEAD Errno #67998
|
||||
pkg syscall (openbsd-amd64-cgo), const EPROTO = 95 #67998
|
||||
pkg syscall (windows-386), const WSAENOPROTOOPT = 10042 #62254
|
||||
pkg syscall (windows-386), const WSAENOPROTOOPT Errno #62254
|
||||
pkg syscall (windows-amd64), const WSAENOPROTOOPT = 10042 #62254
|
||||
pkg syscall (windows-amd64), const WSAENOPROTOOPT Errno #62254
|
||||
pkg syscall, const EBADMSG Errno #67998
|
||||
pkg syscall, const EPROTO Errno #67998
|
||||
pkg unicode/utf16, func RuneLen(int32) int #44940
|
||||
pkg unique, func Make[$0 comparable]($0) Handle[$0] #62483
|
||||
pkg unique, method (Handle[$0]) Value() $0 #62483
|
||||
pkg unique, type Handle[$0 comparable] struct #62483
|
||||
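As a quick illustration (not part of the API file), here is a minimal sketch exercising a few of the Go 1.23 additions listed above: the iterator helpers in slices, the new atomic Or method, and unique.Make.

```
package main

import (
	"fmt"
	"slices"
	"sync/atomic"
	"unique"
)

func main() {
	// slices.Values adapts a slice to an iter.Seq; slices.Sorted collects and sorts it.
	fmt.Println(slices.Sorted(slices.Values([]int{3, 1, 2}))) // [1 2 3]

	// The new Or/And methods return the previous value.
	var flags atomic.Uint32
	old := flags.Or(0b0101)
	fmt.Println(old, flags.Load()) // 0 5

	// unique.Make canonicalizes comparable values; equal values yield equal handles.
	h1, h2 := unique.Make("gopher"), unique.Make("gopher")
	fmt.Println(h1 == h2, h1.Value()) // true gopher
}
```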
223 api/go1.24.txt
@@ -1,223 +0,0 @@
|
||||
pkg bytes, func FieldsFuncSeq([]uint8, func(int32) bool) iter.Seq[[]uint8] #61901
|
||||
pkg bytes, func FieldsSeq([]uint8) iter.Seq[[]uint8] #61901
|
||||
pkg bytes, func Lines([]uint8) iter.Seq[[]uint8] #61901
|
||||
pkg bytes, func SplitAfterSeq([]uint8, []uint8) iter.Seq[[]uint8] #61901
|
||||
pkg bytes, func SplitSeq([]uint8, []uint8) iter.Seq[[]uint8] #61901
|
||||
pkg crypto/cipher, func NewCFBDecrypter //deprecated #69445
|
||||
pkg crypto/cipher, func NewCFBEncrypter //deprecated #69445
|
||||
pkg crypto/cipher, func NewGCMWithRandomNonce(Block) (AEAD, error) #69981
|
||||
pkg crypto/cipher, func NewOFB //deprecated #69445
|
||||
pkg crypto/fips140, func Enabled() bool #70123
|
||||
pkg crypto/hkdf, func Expand[$0 hash.Hash](func() $0, []uint8, string, int) ([]uint8, error) #61477
|
||||
pkg crypto/hkdf, func Extract[$0 hash.Hash](func() $0, []uint8, []uint8) ([]uint8, error) #61477
|
||||
pkg crypto/hkdf, func Key[$0 hash.Hash](func() $0, []uint8, []uint8, string, int) ([]uint8, error) #61477
|
||||
pkg crypto/mlkem, const CiphertextSize1024 = 1568 #70122
|
||||
pkg crypto/mlkem, const CiphertextSize1024 ideal-int #70122
|
||||
pkg crypto/mlkem, const CiphertextSize768 = 1088 #70122
|
||||
pkg crypto/mlkem, const CiphertextSize768 ideal-int #70122
|
||||
pkg crypto/mlkem, const EncapsulationKeySize1024 = 1568 #70122
|
||||
pkg crypto/mlkem, const EncapsulationKeySize1024 ideal-int #70122
|
||||
pkg crypto/mlkem, const EncapsulationKeySize768 = 1184 #70122
|
||||
pkg crypto/mlkem, const EncapsulationKeySize768 ideal-int #70122
|
||||
pkg crypto/mlkem, const SeedSize = 64 #70122
|
||||
pkg crypto/mlkem, const SeedSize ideal-int #70122
|
||||
pkg crypto/mlkem, const SharedKeySize = 32 #70122
|
||||
pkg crypto/mlkem, const SharedKeySize ideal-int #70122
|
||||
pkg crypto/mlkem, func GenerateKey1024() (*DecapsulationKey1024, error) #70122
|
||||
pkg crypto/mlkem, func GenerateKey768() (*DecapsulationKey768, error) #70122
|
||||
pkg crypto/mlkem, func NewDecapsulationKey1024([]uint8) (*DecapsulationKey1024, error) #70122
|
||||
pkg crypto/mlkem, func NewDecapsulationKey768([]uint8) (*DecapsulationKey768, error) #70122
|
||||
pkg crypto/mlkem, func NewEncapsulationKey1024([]uint8) (*EncapsulationKey1024, error) #70122
|
||||
pkg crypto/mlkem, func NewEncapsulationKey768([]uint8) (*EncapsulationKey768, error) #70122
|
||||
pkg crypto/mlkem, method (*DecapsulationKey1024) Bytes() []uint8 #70122
|
||||
pkg crypto/mlkem, method (*DecapsulationKey1024) Decapsulate([]uint8) ([]uint8, error) #70122
|
||||
pkg crypto/mlkem, method (*DecapsulationKey1024) EncapsulationKey() *EncapsulationKey1024 #70122
|
||||
pkg crypto/mlkem, method (*DecapsulationKey768) Bytes() []uint8 #70122
|
||||
pkg crypto/mlkem, method (*DecapsulationKey768) Decapsulate([]uint8) ([]uint8, error) #70122
|
||||
pkg crypto/mlkem, method (*DecapsulationKey768) EncapsulationKey() *EncapsulationKey768 #70122
|
||||
pkg crypto/mlkem, method (*EncapsulationKey1024) Bytes() []uint8 #70122
|
||||
pkg crypto/mlkem, method (*EncapsulationKey1024) Encapsulate() ([]uint8, []uint8) #70122
|
||||
pkg crypto/mlkem, method (*EncapsulationKey768) Bytes() []uint8 #70122
|
||||
pkg crypto/mlkem, method (*EncapsulationKey768) Encapsulate() ([]uint8, []uint8) #70122
|
||||
pkg crypto/mlkem, type DecapsulationKey1024 struct #70122
|
||||
pkg crypto/mlkem, type DecapsulationKey768 struct #70122
|
||||
pkg crypto/mlkem, type EncapsulationKey1024 struct #70122
|
||||
pkg crypto/mlkem, type EncapsulationKey768 struct #70122
|
||||
pkg crypto/pbkdf2, func Key[$0 hash.Hash](func() $0, string, []uint8, int, int) ([]uint8, error) #69488
|
||||
pkg crypto/rand, func Text() string #67057
|
||||
pkg crypto/sha3, func New224() *SHA3 #69982
|
||||
pkg crypto/sha3, func New256() *SHA3 #69982
|
||||
pkg crypto/sha3, func New384() *SHA3 #69982
|
||||
pkg crypto/sha3, func New512() *SHA3 #69982
|
||||
pkg crypto/sha3, func NewCSHAKE128([]uint8, []uint8) *SHAKE #69982
|
||||
pkg crypto/sha3, func NewCSHAKE256([]uint8, []uint8) *SHAKE #69982
|
||||
pkg crypto/sha3, func NewSHAKE128() *SHAKE #69982
|
||||
pkg crypto/sha3, func NewSHAKE256() *SHAKE #69982
|
||||
pkg crypto/sha3, func Sum224([]uint8) [28]uint8 #69982
|
||||
pkg crypto/sha3, func Sum256([]uint8) [32]uint8 #69982
|
||||
pkg crypto/sha3, func Sum384([]uint8) [48]uint8 #69982
|
||||
pkg crypto/sha3, func Sum512([]uint8) [64]uint8 #69982
|
||||
pkg crypto/sha3, func SumSHAKE128([]uint8, int) []uint8 #69982
|
||||
pkg crypto/sha3, func SumSHAKE256([]uint8, int) []uint8 #69982
|
||||
pkg crypto/sha3, method (*SHA3) AppendBinary([]uint8) ([]uint8, error) #69982
|
||||
pkg crypto/sha3, method (*SHA3) BlockSize() int #69982
|
||||
pkg crypto/sha3, method (*SHA3) MarshalBinary() ([]uint8, error) #69982
|
||||
pkg crypto/sha3, method (*SHA3) Reset() #69982
|
||||
pkg crypto/sha3, method (*SHA3) Size() int #69982
|
||||
pkg crypto/sha3, method (*SHA3) Sum([]uint8) []uint8 #69982
|
||||
pkg crypto/sha3, method (*SHA3) UnmarshalBinary([]uint8) error #69982
|
||||
pkg crypto/sha3, method (*SHA3) Write([]uint8) (int, error) #69982
|
||||
pkg crypto/sha3, method (*SHAKE) AppendBinary([]uint8) ([]uint8, error) #69982
|
||||
pkg crypto/sha3, method (*SHAKE) BlockSize() int #69982
|
||||
pkg crypto/sha3, method (*SHAKE) MarshalBinary() ([]uint8, error) #69982
|
||||
pkg crypto/sha3, method (*SHAKE) Read([]uint8) (int, error) #69982
|
||||
pkg crypto/sha3, method (*SHAKE) Reset() #69982
|
||||
pkg crypto/sha3, method (*SHAKE) UnmarshalBinary([]uint8) error #69982
|
||||
pkg crypto/sha3, method (*SHAKE) Write([]uint8) (int, error) #69982
|
||||
pkg crypto/sha3, type SHA3 struct #69982
|
||||
pkg crypto/sha3, type SHAKE struct #69982
|
||||
pkg crypto/subtle, func WithDataIndependentTiming(func()) #66450
|
||||
pkg crypto/tls, const X25519MLKEM768 = 4588 #69985
|
||||
pkg crypto/tls, const X25519MLKEM768 CurveID #69985
|
||||
pkg crypto/tls, type ClientHelloInfo struct, Extensions []uint16 #32936
|
||||
pkg crypto/tls, type Config struct, EncryptedClientHelloKeys []EncryptedClientHelloKey #68500
|
||||
pkg crypto/tls, type EncryptedClientHelloKey struct #68500
|
||||
pkg crypto/tls, type EncryptedClientHelloKey struct, Config []uint8 #68500
|
||||
pkg crypto/tls, type EncryptedClientHelloKey struct, PrivateKey []uint8 #68500
|
||||
pkg crypto/tls, type EncryptedClientHelloKey struct, SendAsRetry bool #68500
|
||||
pkg crypto/x509, const NoValidChains = 10 #68484
|
||||
pkg crypto/x509, const NoValidChains InvalidReason #68484
|
||||
pkg crypto/x509, method (OID) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg crypto/x509, method (OID) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg crypto/x509, type Certificate struct, InhibitAnyPolicy int #68484
|
||||
pkg crypto/x509, type Certificate struct, InhibitAnyPolicyZero bool #68484
|
||||
pkg crypto/x509, type Certificate struct, InhibitPolicyMapping int #68484
|
||||
pkg crypto/x509, type Certificate struct, InhibitPolicyMappingZero bool #68484
|
||||
pkg crypto/x509, type Certificate struct, PolicyMappings []PolicyMapping #68484
|
||||
pkg crypto/x509, type Certificate struct, RequireExplicitPolicy int #68484
|
||||
pkg crypto/x509, type Certificate struct, RequireExplicitPolicyZero bool #68484
|
||||
pkg crypto/x509, type PolicyMapping struct #68484
|
||||
pkg crypto/x509, type PolicyMapping struct, IssuerDomainPolicy OID #68484
|
||||
pkg crypto/x509, type PolicyMapping struct, SubjectDomainPolicy OID #68484
|
||||
pkg crypto/x509, type VerifyOptions struct, CertificatePolicies []OID #68484
|
||||
pkg debug/elf, const VER_FLG_BASE = 1 #63952
|
||||
pkg debug/elf, const VER_FLG_BASE DynamicVersionFlag #63952
|
||||
pkg debug/elf, const VER_FLG_INFO = 4 #63952
|
||||
pkg debug/elf, const VER_FLG_INFO DynamicVersionFlag #63952
|
||||
pkg debug/elf, const VER_FLG_WEAK = 2 #63952
|
||||
pkg debug/elf, const VER_FLG_WEAK DynamicVersionFlag #63952
|
||||
pkg debug/elf, method (*File) DynamicVersionNeeds() ([]DynamicVersionNeed, error) #63952
|
||||
pkg debug/elf, method (*File) DynamicVersions() ([]DynamicVersion, error) #63952
|
||||
pkg debug/elf, type DynamicVersion struct #63952
|
||||
pkg debug/elf, type DynamicVersion struct, Deps []string #63952
|
||||
pkg debug/elf, type DynamicVersion struct, Flags DynamicVersionFlag #63952
|
||||
pkg debug/elf, type DynamicVersion struct, Name string #63952
|
||||
pkg debug/elf, type DynamicVersion struct, Index uint16 #63952
|
||||
pkg debug/elf, type DynamicVersionDep struct #63952
|
||||
pkg debug/elf, type DynamicVersionDep struct, Dep string #63952
|
||||
pkg debug/elf, type DynamicVersionDep struct, Flags DynamicVersionFlag #63952
|
||||
pkg debug/elf, type DynamicVersionDep struct, Index uint16 #63952
|
||||
pkg debug/elf, type DynamicVersionFlag uint16 #63952
|
||||
pkg debug/elf, type DynamicVersionNeed struct #63952
|
||||
pkg debug/elf, type DynamicVersionNeed struct, Name string #63952
|
||||
pkg debug/elf, type DynamicVersionNeed struct, Needs []DynamicVersionDep #63952
|
||||
pkg debug/elf, type Symbol struct, HasVersion bool #63952
|
||||
pkg debug/elf, type Symbol struct, VersionIndex VersionIndex #63952
|
||||
pkg debug/elf, method (VersionIndex) Index() uint16 #63952
|
||||
pkg debug/elf, method (VersionIndex) IsHidden() bool #63952
|
||||
pkg debug/elf, type VersionIndex uint16 #63952
|
||||
pkg encoding, type BinaryAppender interface { AppendBinary } #62384
|
||||
pkg encoding, type BinaryAppender interface, AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg encoding, type TextAppender interface { AppendText } #62384
|
||||
pkg encoding, type TextAppender interface, AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg go/types, method (*Interface) EmbeddedTypes() iter.Seq[Type] #66626
|
||||
pkg go/types, method (*Interface) ExplicitMethods() iter.Seq[*Func] #66626
|
||||
pkg go/types, method (*Interface) Methods() iter.Seq[*Func] #66626
|
||||
pkg go/types, method (*MethodSet) Methods() iter.Seq[*Selection] #66626
|
||||
pkg go/types, method (*Named) Methods() iter.Seq[*Func] #66626
|
||||
pkg go/types, method (*Scope) Children() iter.Seq[*Scope] #66626
|
||||
pkg go/types, method (*Struct) Fields() iter.Seq[*Var] #66626
|
||||
pkg go/types, method (*Tuple) Variables() iter.Seq[*Var] #66626
|
||||
pkg go/types, method (*TypeList) Types() iter.Seq[Type] #66626
|
||||
pkg go/types, method (*TypeParamList) TypeParams() iter.Seq[*TypeParam] #66626
|
||||
pkg go/types, method (*Union) Terms() iter.Seq[*Term] #66626
|
||||
pkg hash/maphash, func Comparable[$0 comparable](Seed, $0) uint64 #54670
|
||||
pkg hash/maphash, func WriteComparable[$0 comparable](*Hash, $0) #54670
|
||||
pkg log/slog, method (*LevelVar) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg log/slog, method (Level) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg log/slog, var DiscardHandler Handler #62005
|
||||
pkg math/big, method (*Float) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg math/big, method (*Int) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg math/big, method (*Rat) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg math/rand/v2, method (*ChaCha8) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg math/rand/v2, method (*PCG) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg net, method (IP) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg net/http, method (*Protocols) SetHTTP1(bool) #67814
|
||||
pkg net/http, method (*Protocols) SetHTTP2(bool) #67814
|
||||
pkg net/http, method (*Protocols) SetUnencryptedHTTP2(bool) #67816
|
||||
pkg net/http, method (Protocols) HTTP1() bool #67814
|
||||
pkg net/http, method (Protocols) HTTP2() bool #67814
|
||||
pkg net/http, method (Protocols) String() string #67814
|
||||
pkg net/http, method (Protocols) UnencryptedHTTP2() bool #67816
|
||||
pkg net/http, type HTTP2Config struct #67813
|
||||
pkg net/http, type HTTP2Config struct, CountError func(string) #67813
|
||||
pkg net/http, type HTTP2Config struct, MaxConcurrentStreams int #67813
|
||||
pkg net/http, type HTTP2Config struct, MaxDecoderHeaderTableSize int #67813
|
||||
pkg net/http, type HTTP2Config struct, MaxEncoderHeaderTableSize int #67813
|
||||
pkg net/http, type HTTP2Config struct, MaxReadFrameSize int #67813
|
||||
pkg net/http, type HTTP2Config struct, MaxReceiveBufferPerConnection int #67813
|
||||
pkg net/http, type HTTP2Config struct, MaxReceiveBufferPerStream int #67813
|
||||
pkg net/http, type HTTP2Config struct, PermitProhibitedCipherSuites bool #67813
|
||||
pkg net/http, type HTTP2Config struct, PingTimeout time.Duration #67813
|
||||
pkg net/http, type HTTP2Config struct, SendPingTimeout time.Duration #67813
|
||||
pkg net/http, type HTTP2Config struct, WriteByteTimeout time.Duration #67813
|
||||
pkg net/http, type Protocols struct #67814
|
||||
pkg net/http, type Server struct, HTTP2 *HTTP2Config #67813
|
||||
pkg net/http, type Server struct, Protocols *Protocols #67814
|
||||
pkg net/http, type Transport struct, HTTP2 *HTTP2Config #67813
|
||||
pkg net/http, type Transport struct, Protocols *Protocols #67814
|
||||
pkg net/netip, method (Addr) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg net/netip, method (Addr) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg net/netip, method (AddrPort) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg net/netip, method (AddrPort) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg net/netip, method (Prefix) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg net/netip, method (Prefix) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg net/url, method (*URL) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg os, func OpenInRoot(string, string) (*File, error) #67002
|
||||
pkg os, func OpenRoot(string) (*Root, error) #67002
|
||||
pkg os, method (*Root) Close() error #67002
|
||||
pkg os, method (*Root) Create(string) (*File, error) #67002
|
||||
pkg os, method (*Root) FS() fs.FS #67002
|
||||
pkg os, method (*Root) Lstat(string) (fs.FileInfo, error) #67002
|
||||
pkg os, method (*Root) Mkdir(string, fs.FileMode) error #67002
|
||||
pkg os, method (*Root) Name() string #67002
|
||||
pkg os, method (*Root) Open(string) (*File, error) #67002
|
||||
pkg os, method (*Root) OpenFile(string, int, fs.FileMode) (*File, error) #67002
|
||||
pkg os, method (*Root) OpenRoot(string) (*Root, error) #67002
|
||||
pkg os, method (*Root) Remove(string) error #67002
|
||||
pkg os, method (*Root) Stat(string) (fs.FileInfo, error) #67002
|
||||
pkg os, type Root struct #67002
|
||||
pkg regexp, method (*Regexp) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg runtime, func AddCleanup[$0 interface{}, $1 interface{}](*$0, func($1), $1) Cleanup #67535
|
||||
pkg runtime, func GOROOT //deprecated #51473
|
||||
pkg runtime, method (Cleanup) Stop() #67535
|
||||
pkg runtime, type Cleanup struct #67535
|
||||
pkg strings, func FieldsFuncSeq(string, func(int32) bool) iter.Seq[string] #61901
|
||||
pkg strings, func FieldsSeq(string) iter.Seq[string] #61901
|
||||
pkg strings, func Lines(string) iter.Seq[string] #61901
|
||||
pkg strings, func SplitAfterSeq(string, string) iter.Seq[string] #61901
|
||||
pkg strings, func SplitSeq(string, string) iter.Seq[string] #61901
|
||||
pkg testing, method (*B) Chdir(string) #62516
|
||||
pkg testing, method (*B) Context() context.Context #36532
|
||||
pkg testing, method (*B) Loop() bool #61515
|
||||
pkg testing, method (*F) Chdir(string) #62516
|
||||
pkg testing, method (*F) Context() context.Context #36532
|
||||
pkg testing, method (*T) Chdir(string) #62516
|
||||
pkg testing, method (*T) Context() context.Context #36532
|
||||
pkg testing, type TB interface, Chdir(string) #62516
|
||||
pkg testing, type TB interface, Context() context.Context #36532
|
||||
pkg time, method (Time) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg time, method (Time) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg weak, func Make[$0 interface{}](*$0) Pointer[$0] #67552
|
||||
pkg weak, method (Pointer[$0]) Value() *$0 #67552
|
||||
pkg weak, type Pointer[$0 interface{}] struct #67552
|
||||
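Likewise, a minimal sketch (not part of the API file) exercising two of the Go 1.24 additions above: os.OpenRoot for traversal-safe file access and strings.SplitSeq for lazy splitting. The file path is illustrative.

```
package main

import (
	"fmt"
	"log"
	"os"
	"strings"
)

func main() {
	// os.Root confines all file operations to the given directory tree.
	root, err := os.OpenRoot(".")
	if err != nil {
		log.Fatal(err)
	}
	defer root.Close()
	if _, err := root.Open("../outside.txt"); err != nil {
		fmt.Println("escape blocked:", err) // paths may not leave the root
	}

	// strings.SplitSeq yields the substrings lazily instead of allocating a slice.
	for part := range strings.SplitSeq("a,b,c", ",") {
		fmt.Println(part)
	}
}
```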
@@ -1,2 +1,2 @@
|
||||
branch: release-branch.go1.24
|
||||
branch: dev.inline
|
||||
parent-branch: master
|
||||
|
||||
@@ -1,75 +0,0 @@
# Release Notes

The `initial` and `next` subdirectories of this directory are for release notes.

## For developers

Release notes should be added to `next` by editing existing files or creating
new files. **Do not add RELNOTE=yes comments in CLs.** Instead, add a file to
the CL (or ask the author to do so).

At the end of the development cycle, the files will be merged by being
concatenated in sorted order by pathname. Files in the directory matching the
glob "*stdlib/*minor" are treated specially. They should be in subdirectories
corresponding to standard library package paths, and headings for those package
paths will be generated automatically.

Files in this repo's `api/next` directory must have corresponding files in
`doc/next/*stdlib/*minor`.
The files should be in the subdirectory for the package with the new
API, and should be named after the issue number of the API proposal.
For example, if the directory `6-stdlib/99-minor` is present,
then an `api/next` file with the line

    pkg net/http, function F #12345

should have a corresponding file named `doc/next/6-stdlib/99-minor/net/http/12345.md`.
At a minimum, that file should contain either a full sentence or a TODO,
ideally referring to a person with the responsibility to complete the note.
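For illustration only (this is not a file in the repository), the body of that hypothetical `12345.md` fragment could be a single sentence using the link forms listed further below; `F` and issue 12345 are the placeholders from the example above:

```
The new [F] function does X; see [#12345](/issue/12345).
```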

If your CL addresses an accepted proposal, mention the proposal issue number in
your release note in the form `/issue/NUMBER`. A link to the issue in the text
will have this form (see below). If you don't want to mention the issue in the
text, add it as a comment:

```
<!-- go.dev/issue/12345 -->
```

If an accepted proposal is mentioned in a CL but not in the release notes, it will be
flagged as a TODO by the automated tooling. That is true even for proposals that add API.

Use the following forms in your markdown:

    [http.Request]          # symbol documentation; auto-linked as in Go doc strings
    [Request]               # short form, for symbols in the package being documented
    [net/http]              # package link
    [#12345](/issue/12345)  # GitHub issues
    [CL 6789](/cl/6789)     # Gerrit changelists

To preview `next` content in merged form using a local instance of the website, run:

```
go run golang.org/x/website/cmd/golangorg@latest -goroot=..
```

Then open http://localhost:6060/doc/next. Refresh the page to see your latest edits.

## For the release team

The `relnote` tool, at `golang.org/x/build/cmd/relnote`, operates on the files
in `doc/next`.

As a release cycle nears completion, run `relnote todo` to get a list of
unfinished release note work.

To prepare the release notes for a release, run `relnote generate`.
That will merge the `.md` files in `next` into a single file.
Atomically (as close to it as possible) add that file to the `_content/doc` directory
of the website repository and remove the `doc/next` directory in this repository.

To begin the next release development cycle, populate the contents of `next`
with those of `initial`. From the repo root:

> cd doc
> cp -R initial/ next

Then edit `next/1-intro.md` to refer to the next version.

17 doc/asm.html
@@ -464,23 +464,6 @@ Function is the outermost frame of the call stack. Traceback should stop at this
</li>
</ul>

<h3 id="special-instructions">Special instructions</h3>

<p>
The <code>PCALIGN</code> pseudo-instruction is used to indicate that the next instruction should be aligned
to a specified boundary by padding with no-op instructions.
</p>

<p>
It is currently supported on arm64, amd64, ppc64, loong64 and riscv64.

For example, the start of the <code>MOVD</code> instruction below is aligned to 32 bytes:
<pre>
PCALIGN $32
MOVD $2, R0
</pre>
</p>

<h3 id="data-offsets">Interacting with Go types and constants</h3>

<p>
@@ -1,17 +1,14 @@
|
||||
<!--{
|
||||
"Title": "The Go Programming Language Specification",
|
||||
"Subtitle": "Language version go1.17 (Oct 15, 2021)",
|
||||
"Subtitle": "Version of Oct 15, 2021",
|
||||
"Path": "/ref/spec"
|
||||
}-->
|
||||
|
||||
<h2 id="Introduction">Introduction</h2>
|
||||
|
||||
<p>
|
||||
This is the reference manual for the Go programming language as it was for
|
||||
language version 1.17, in October 2021, before the introduction of generics.
|
||||
It is provided for historical interest.
|
||||
The current reference manual can be found <a href="/doc/go_spec.html">here</a>.
|
||||
For more information and other documents, see <a href="/">go.dev</a>.
|
||||
This is a reference manual for the Go programming language. For
|
||||
more information and other documents, see <a href="/">golang.org</a>.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
@@ -656,7 +653,7 @@ and are discussed in that section.
|
||||
|
||||
<p>
|
||||
Numeric constants represent exact values of arbitrary precision and do not overflow.
|
||||
Consequently, there are no constants denoting the IEEE 754 negative zero, infinity,
|
||||
Consequently, there are no constants denoting the IEEE-754 negative zero, infinity,
|
||||
and not-a-number values.
|
||||
</p>
|
||||
|
||||
@@ -882,8 +879,8 @@ int16 the set of all signed 16-bit integers (-32768 to 32767)
|
||||
int32 the set of all signed 32-bit integers (-2147483648 to 2147483647)
|
||||
int64 the set of all signed 64-bit integers (-9223372036854775808 to 9223372036854775807)
|
||||
|
||||
float32 the set of all IEEE 754 32-bit floating-point numbers
|
||||
float64 the set of all IEEE 754 64-bit floating-point numbers
|
||||
float32 the set of all IEEE-754 32-bit floating-point numbers
|
||||
float64 the set of all IEEE-754 64-bit floating-point numbers
|
||||
|
||||
complex64 the set of all complex numbers with float32 real and imaginary parts
|
||||
complex128 the set of all complex numbers with float64 real and imaginary parts
|
||||
@@ -917,7 +914,7 @@ are required when different numeric types are mixed in an expression
|
||||
or assignment. For instance, <code>int32</code> and <code>int</code>
|
||||
are not the same type even though they may have the same size on a
|
||||
particular architecture.
|
||||
</p>
|
||||
|
||||
|
||||
<h3 id="String_types">String types</h3>
|
||||
|
||||
@@ -1454,7 +1451,6 @@ maps grow to accommodate the number of items
|
||||
stored in them, with the exception of <code>nil</code> maps.
|
||||
A <code>nil</code> map is equivalent to an empty map except that no elements
|
||||
may be added.
|
||||
</p>
|
||||
|
||||
<h3 id="Channel_types">Channel types</h3>
|
||||
|
||||
@@ -3645,8 +3641,6 @@ As the <code>++</code> and <code>--</code> operators form
|
||||
statements, not expressions, they fall
|
||||
outside the operator hierarchy.
|
||||
As a consequence, statement <code>*p++</code> is the same as <code>(*p)++</code>.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
There are five precedence levels for binary operators.
|
||||
Multiplication operators bind strongest, followed by addition
|
||||
@@ -3814,7 +3808,7 @@ For floating-point and complex numbers,
|
||||
<code>+x</code> is the same as <code>x</code>,
|
||||
while <code>-x</code> is the negation of <code>x</code>.
|
||||
The result of a floating-point or complex division by zero is not specified beyond the
|
||||
IEEE 754 standard; whether a <a href="#Run_time_panics">run-time panic</a>
|
||||
IEEE-754 standard; whether a <a href="#Run_time_panics">run-time panic</a>
|
||||
occurs is implementation-specific.
|
||||
</p>
|
||||
|
||||
@@ -3904,7 +3898,7 @@ These terms and the result of the comparisons are defined as follows:
|
||||
|
||||
<li>
|
||||
Floating-point values are comparable and ordered,
|
||||
as defined by the IEEE 754 standard.
|
||||
as defined by the IEEE-754 standard.
|
||||
</li>
|
||||
|
||||
<li>
|
||||
@@ -4252,7 +4246,7 @@ When converting an integer or floating-point number to a floating-point type,
or a complex number to another complex type, the result value is rounded
to the precision specified by the destination type.
For instance, the value of a variable <code>x</code> of type <code>float32</code>
may be stored using additional precision beyond that of an IEEE 754 32-bit number,
may be stored using additional precision beyond that of an IEEE-754 32-bit number,
but float32(x) represents the result of rounding <code>x</code>'s value to
32-bit precision. Similarly, <code>x + 0.1</code> may use more than 32 bits
of precision, but <code>float32(x + 0.1)</code> does not.
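As an aside (not part of the specification text), a small program illustrating this rounding, using the fact that 16777217 (2^24 + 1) has no exact float32 representation:

```
package main

import "fmt"

func main() {
	var x float32 = 1 << 24 // 16777216
	// x+1 may be computed with extra precision, but the explicit conversion
	// rounds the result to 32 bits, where 16777217 is not representable.
	fmt.Println(float32(x+1) == x)          // true
	fmt.Println(float64(x)+1 == float64(x)) // false: 64-bit precision keeps the difference
}
```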
1246 doc/go1.21.html Normal file
File diff suppressed because it is too large
@@ -82,7 +82,7 @@ while still insisting that races are errors and that tools can diagnose and repo
|
||||
<p>
|
||||
The following formal definition of Go's memory model closely follows
|
||||
the approach presented by Hans-J. Boehm and Sarita V. Adve in
|
||||
“<a href="https://dl.acm.org/doi/10.1145/1375581.1375591">Foundations of the C++ Concurrency Memory Model</a>”,
|
||||
“<a href="https://www.hpl.hp.com/techreports/2008/HPL-2008-56.pdf">Foundations of the C++ Concurrency Memory Model</a>”,
|
||||
published in PLDI 2008.
|
||||
The definition of data-race-free programs and the guarantee of sequential consistency
|
||||
for race-free programs are equivalent to the ones in that work.
|
||||
@@ -98,12 +98,12 @@ which in turn are made up of memory operations.
|
||||
A <i>memory operation</i> is modeled by four details:
|
||||
</p>
|
||||
<ul>
|
||||
<li>its kind, indicating whether it is an ordinary data read, an ordinary data write,
|
||||
or a <i>synchronizing operation</i> such as an atomic data access,
|
||||
a mutex operation, or a channel operation,</li>
|
||||
<li>its location in the program,</li>
|
||||
<li>the memory location or variable being accessed, and</li>
|
||||
<li>the values read or written by the operation.</li>
|
||||
<li>its kind, indicating whether it is an ordinary data read, an ordinary data write,
|
||||
or a <i>synchronizing operation</i> such as an atomic data access,
|
||||
a mutex operation, or a channel operation,
|
||||
<li>its location in the program,
|
||||
<li>the memory location or variable being accessed, and
|
||||
<li>the values read or written by the operation.
|
||||
</ul>
|
||||
<p>
|
||||
Some memory operations are <i>read-like</i>, including read, atomic read, mutex lock, and channel receive.
|
||||
@@ -159,11 +159,10 @@ union of the sequenced before and synchronized before relations.
For an ordinary (non-synchronizing) data read <i>r</i> on a memory location <i>x</i>,
<i>W</i>(<i>r</i>) must be a write <i>w</i> that is <i>visible</i> to <i>r</i>,
where visible means that both of the following hold:
</p>

<ol>
<li><i>w</i> happens before <i>r</i>.</li>
<li><i>w</i> does not happen before any other write <i>w'</i> (to <i>x</i>) that happens before <i>r</i>.</li>
<li><i>w</i> happens before <i>r</i>.
<li><i>w</i> does not happen before any other write <i>w'</i> (to <i>x</i>) that happens before <i>r</i>.
</ol>
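As an illustration (not part of the memory model's text), the channel synchronization below makes the write visible to the read in exactly this sense: the write happens before the read, and no later write to the same location intervenes.

```
package main

import "fmt"

var a string

func main() {
	done := make(chan struct{})
	go func() {
		a = "hello" // w: the write to a
		close(done) // synchronized before the receive below
	}()
	<-done         // the write to a now happens before the read of a
	fmt.Println(a) // r: guaranteed to observe "hello"
}
```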

<p>
@@ -222,7 +221,7 @@ for programs that do contain races.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Any implementation can, upon detecting a data race,
|
||||
First, any implementation can, upon detecting a data race,
|
||||
report the race and halt execution of the program.
|
||||
Implementations using ThreadSanitizer
|
||||
(accessed with “<code>go</code> <code>build</code> <code>-race</code>”)
|
||||
@@ -230,18 +229,7 @@ do exactly this.
</p>

<p>
A read of an array, struct, or complex number
may be implemented as a read of each individual sub-value
(array element, struct field, or real/imaginary component),
in any order.
Similarly, a write of an array, struct, or complex number
may be implemented as a write of each individual sub-value,
in any order.
</p>

<p>
A read <i>r</i> of a memory location <i>x</i>
holding a value
Otherwise, a read <i>r</i> of a memory location <i>x</i>
that is not larger than a machine word must observe
some write <i>w</i> such that <i>r</i> does not happen before <i>w</i>
and there is no write <i>w'</i> such that <i>w</i> happens before <i>w'</i>
1520 doc/go_spec.html
File diff suppressed because it is too large
248 doc/godebug.md
@@ -34,7 +34,6 @@ For example, if a Go program is running in an environment that contains
|
||||
|
||||
then that Go program will disable the use of HTTP/2 by default in both
|
||||
the HTTP client and the HTTP server.
|
||||
Unrecognized settings in the `GODEBUG` environment variable are ignored.
|
||||
It is also possible to set the default `GODEBUG` for a given program
|
||||
(discussed below).
|
||||
|
||||
@@ -89,38 +88,14 @@ Because this method of setting GODEBUG defaults was introduced only in Go 1.21,
programs listing versions of Go earlier than Go 1.20 are configured to match Go 1.20,
not the older version.

To override these defaults, starting in Go 1.23, the work module's `go.mod`
or the workspace's `go.work` can list one or more `godebug` lines:

    godebug (
        default=go1.21
        panicnil=1
        asynctimerchan=0
    )

The special key `default` indicates a Go version to take unspecified
settings from. This allows setting the GODEBUG defaults separately
from the Go language version in the module.
In this example, the program is asking for Go 1.21 semantics and
then asking for the old pre-Go 1.21 `panic(nil)` behavior and the
new Go 1.23 `asynctimerchan=0` behavior.

Only the work module's `go.mod` is consulted for `godebug` directives.
Any directives in required dependency modules are ignored.
It is an error to list a `godebug` with an unrecognized setting.
(Toolchains older than Go 1.23 reject all `godebug` lines, since they do not
understand `godebug` at all.)

The defaults from the `go` and `godebug` lines apply to all main
packages that are built. For more fine-grained control,
starting in Go 1.21, a main package's source files
To override these defaults, a main package's source files
can include one or more `//go:debug` directives at the top of the file
(preceding the `package` statement).
The `godebug` lines in the previous example would be written:
Continuing the `panicnil` example, if the module or workspace is updated
to say `go` `1.21`, the program can opt back into the old `panic(nil)`
behavior by including this directive:

    //go:debug default=go1.21
    //go:debug panicnil=1
    //go:debug asynctimerchan=0

Starting in Go 1.21, the Go toolchain treats a `//go:debug` directive
with an unrecognized GODEBUG setting as an invalid program.
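As a sketch (not taken from the document), a complete main-package file opting back into the pre-Go 1.21 `panic(nil)` behavior would place the directive before its `package` clause:

```
// Illustrative only: the file contents and output are hypothetical.

//go:debug panicnil=1

package main

import "fmt"

func main() {
	defer func() {
		// With panicnil=1, recover returns nil here; under the Go 1.21+
		// default it would return a *runtime.PanicNilError instead.
		fmt.Println("recovered:", recover())
	}()
	panic(nil)
}
```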
@@ -151,206 +126,6 @@ for example,
|
||||
see the [runtime documentation](/pkg/runtime#hdr-Environment_Variables)
|
||||
and the [go command documentation](/cmd/go#hdr-Build_and_test_caching).
|
||||
|
||||
### Go 1.24
|
||||
|
||||
Go 1.24 added a new `fips140` setting that controls whether the Go
|
||||
Cryptographic Module operates in FIPS 140-3 mode.
|
||||
The possible values are:
|
||||
- "off": no special support for FIPS 140-3 mode. This is the default.
|
||||
- "on": the Go Cryptographic Module operates in FIPS 140-3 mode.
|
||||
- "only": like "on", but cryptographic algorithms not approved by
|
||||
FIPS 140-3 return an error or panic.
|
||||
For more information, see [FIPS 140-3 Compliance](/doc/security/fips140).
|
||||
This setting is fixed at program startup time, and can't be modified
|
||||
by changing the `GODEBUG` environment variable after the program starts.
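A program can check the resulting mode at run time with `crypto/fips140.Enabled` (added in Go 1.24, see the API listing above); a minimal sketch, not taken from this document:

```
package main

import (
	"crypto/fips140"
	"fmt"
)

func main() {
	// Reports whether the Go Cryptographic Module is operating in FIPS 140-3
	// mode, i.e. the program was started with GODEBUG=fips140=on or =only.
	fmt.Println("FIPS 140-3 mode:", fips140.Enabled())
}
```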
|
||||
|
||||
Go 1.24 changed the global [`math/rand.Seed`](/pkg/math/rand/#Seed) to be a
|
||||
no-op. This behavior is controlled by the `randseednop` setting.
|
||||
For Go 1.24 it defaults to `randseednop=1`.
|
||||
Using `randseednop=0` reverts to the pre-Go 1.24 behavior.
|
||||
|
||||
Go 1.24 added new values for the `multipathtcp` setting.
|
||||
The possible values for `multipathtcp` are now:
|
||||
- "0": disable MPTCP on dialers and listeners by default
|
||||
- "1": enable MPTCP on dialers and listeners by default
|
||||
- "2": enable MPTCP on listeners only by default
|
||||
- "3": enable MPTCP on dialers only by default
|
||||
|
||||
For Go 1.24, it now defaults to multipathtcp="2", thus
|
||||
enabled by default on listeners. Using multipathtcp="0" reverts to the
|
||||
pre-Go 1.24 behavior.
|
||||
|
||||
Go 1.24 changed the behavior of `go test -json` to emit build errors as JSON
|
||||
instead of text.
|
||||
These new JSON events are distinguished by new `Action` values,
|
||||
but can still cause problems with CI systems that aren't robust to these events.
|
||||
This behavior can be controlled with the `gotestjsonbuildtext` setting.
|
||||
Using `gotestjsonbuildtext=1` restores the 1.23 behavior.
|
||||
This setting will be removed in a future release, Go 1.28 at the earliest.
|
||||
|
||||
Go 1.24 changed [`crypto/rsa`](/pkg/crypto/rsa) to require RSA keys to be at
|
||||
least 1024 bits. This behavior can be controlled with the `rsa1024min` setting.
|
||||
Using `rsa1024min=0` restores the Go 1.23 behavior.
|
||||
|
||||
Go 1.24 introduced a mechanism for enabling platform specific Data Independent
|
||||
Timing (DIT) modes in the [`crypto/subtle`](/pkg/crypto/subtle) package. This
|
||||
mode can be enabled for an entire program with the `dataindependenttiming` setting.
|
||||
For Go 1.24 it defaults to `dataindependenttiming=0`. There is no change in default
|
||||
behavior from Go 1.23 when `dataindependenttiming` is unset.
|
||||
Using `dataindependenttiming=1` enables the DIT mode for the entire Go program.
|
||||
When enabled, DIT will be enabled when calling into C from Go. When enabled,
|
||||
calling into Go code from C will enable DIT, and disable it before returning to
|
||||
C if it was not enabled when Go code was entered.
|
||||
This currently only affects arm64 programs. For all other platforms it is a no-op.
|
||||
|
||||
Go 1.24 removed the `x509sha1` setting. `crypto/x509` no longer supports verifying
|
||||
signatures on certificates that use SHA-1 based signature algorithms.
|
||||
|
||||
Go 1.24 changes the default value of the [`x509usepolicies`
|
||||
setting](/pkg/crypto/x509/#CreateCertificate) from `0` to `1`. When marshaling
|
||||
certificates, policies are now taken from the
|
||||
[`Certificate.Policies`](/pkg/crypto/x509/#Certificate.Policies) field rather
|
||||
than the
|
||||
[`Certificate.PolicyIdentifiers`](/pkg/crypto/x509/#Certificate.PolicyIdentifiers)
|
||||
field by default.
|
||||
|
||||
Go 1.24 enabled the post-quantum key exchange mechanism
|
||||
X25519MLKEM768 by default. The default can be reverted using the
|
||||
[`tlsmlkem` setting](/pkg/crypto/tls/#Config.CurvePreferences).
|
||||
Go 1.24 also removed X25519Kyber768Draft00 and the Go 1.23 `tlskyber` setting.
|
||||
|
||||
Go 1.24 made [`ParsePKCS1PrivateKey`](/pkg/crypto/x509/#ParsePKCS1PrivateKey)
|
||||
use and validate the CRT parameters in the encoded private key. This behavior
|
||||
can be controlled with the `x509rsacrt` setting. Using `x509rsacrt=0` restores
|
||||
the Go 1.23 behavior.
|
||||
|
||||
### Go 1.23
|
||||
|
||||
Go 1.23 changed the channels created by package time to be unbuffered
|
||||
(synchronous), which makes correct use of the [`Timer.Stop`](/pkg/time/#Timer.Stop)
|
||||
and [`Timer.Reset`](/pkg/time/#Timer.Reset) method results much easier.
|
||||
The [`asynctimerchan` setting](/pkg/time/#NewTimer) disables this change.
|
||||
There are no runtime metrics for this change.
This setting may be removed in a future release, Go 1.27 at the earliest.
|
||||
|
||||
Go 1.23 changed the mode bits reported by [`os.Lstat`](/pkg/os#Lstat) and [`os.Stat`](/pkg/os#Stat)
|
||||
for reparse points, which can be controlled with the `winsymlink` setting.
|
||||
As of Go 1.23 (`winsymlink=1`), mount points no longer have [`os.ModeSymlink`](/pkg/os#ModeSymlink)
|
||||
set, and reparse points that are not symlinks, Unix sockets, or dedup files now
|
||||
always have [`os.ModeIrregular`](/pkg/os#ModeIrregular) set. As a result of these changes,
|
||||
[`filepath.EvalSymlinks`](/pkg/path/filepath#EvalSymlinks) no longer evaluates
|
||||
mount points, which was a source of many inconsistencies and bugs.
|
||||
At previous versions (`winsymlink=0`), mount points are treated as symlinks,
|
||||
and other reparse points with non-default [`os.ModeType`](/pkg/os#ModeType) bits
|
||||
(such as [`os.ModeDir`](/pkg/os#ModeDir)) do not have the `ModeIrregular` bit set.
|
||||
|
||||
Go 1.23 changed [`os.Readlink`](/pkg/os#Readlink) and [`filepath.EvalSymlinks`](/pkg/path/filepath#EvalSymlinks)
|
||||
to avoid trying to normalize volumes to drive letters, which was not always even possible.
|
||||
This behavior is controlled by the `winreadlinkvolume` setting.
|
||||
For Go 1.23, it defaults to `winreadlinkvolume=1`.
|
||||
Previous versions default to `winreadlinkvolume=0`.
|
||||
|
||||
Go 1.23 enabled the experimental post-quantum key exchange mechanism
|
||||
X25519Kyber768Draft00 by default. The default can be reverted using the
|
||||
[`tlskyber` setting](/pkg/crypto/tls/#Config.CurvePreferences).
|
||||
|
||||
Go 1.23 changed the behavior of
|
||||
[crypto/x509.ParseCertificate](/pkg/crypto/x509/#ParseCertificate) to reject
|
||||
serial numbers that are negative. This change can be reverted with
|
||||
the [`x509negativeserial` setting](/pkg/crypto/x509/#ParseCertificate).
|
||||
|
||||
Go 1.23 re-enabled support in html/template for ECMAScript 6 template literals by default.
|
||||
The [`jstmpllitinterp` setting](/pkg/html/template#hdr-Security_Model) no longer has
|
||||
any effect.
|
||||
|
||||
Go 1.23 changed the default TLS cipher suites used by clients and servers when
|
||||
not explicitly configured, removing 3DES cipher suites. The default can be reverted
|
||||
using the [`tls3des` setting](/pkg/crypto/tls/#Config.CipherSuites).
|
||||
|
||||
Go 1.23 changed the behavior of [`tls.X509KeyPair`](/pkg/crypto/tls#X509KeyPair)
|
||||
and [`tls.LoadX509KeyPair`](/pkg/crypto/tls#LoadX509KeyPair) to populate the
|
||||
Leaf field of the returned [`tls.Certificate`](/pkg/crypto/tls#Certificate).
|
||||
This behavior is controlled by the `x509keypairleaf` setting. For Go 1.23, it
|
||||
defaults to `x509keypairleaf=1`. Previous versions default to
|
||||
`x509keypairleaf=0`.
|
||||
|
||||
Go 1.23 changed
|
||||
[`net/http.ServeContent`](/pkg/net/http#ServeContent),
|
||||
[`net/http.ServeFile`](/pkg/net/http#ServeFile), and
|
||||
[`net/http.ServeFS`](/pkg/net/http#ServeFS) to
|
||||
remove Cache-Control, Content-Encoding, Etag, and Last-Modified headers
|
||||
when serving an error. This behavior is controlled by
|
||||
the [`httpservecontentkeepheaders` setting](/pkg/net/http#ServeContent).
|
||||
Using `httpservecontentkeepheaders=1` restores the pre-Go 1.23 behavior.
|
||||
|
||||
### Go 1.22
|
||||
|
||||
Go 1.22 adds a configurable limit to control the maximum acceptable RSA key size
|
||||
that can be used in TLS handshakes, controlled by the [`tlsmaxrsasize` setting](/pkg/crypto/tls#Conn.Handshake).
|
||||
The default is tlsmaxrsasize=8192, limiting RSA to 8192-bit keys. To avoid
|
||||
denial of service attacks, this setting and default was backported to Go
|
||||
1.19.13, Go 1.20.8, and Go 1.21.1.
|
||||
|
||||
Go 1.22 made it an error for a request or response read by a net/http
|
||||
client or server to have an empty Content-Length header.
|
||||
This behavior is controlled by the `httplaxcontentlength` setting.
|
||||
|
||||
Go 1.22 changed the behavior of ServeMux to accept extended
|
||||
patterns and unescape both patterns and request paths by segment.
|
||||
This behavior can be controlled by the
|
||||
[`httpmuxgo121` setting](/pkg/net/http/#ServeMux).
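For readers unfamiliar with the extended patterns mentioned here, a minimal sketch (the route and listen address are illustrative, not from this document):

```
package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	mux := http.NewServeMux()
	// Go 1.22 patterns may include an HTTP method and path wildcards.
	mux.HandleFunc("GET /items/{id}", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "item", r.PathValue("id"))
	})
	log.Fatal(http.ListenAndServe("localhost:8080", mux))
}
```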
|
||||
|
||||
Go 1.22 added the [Alias type](/pkg/go/types#Alias) to [go/types](/pkg/go/types)
|
||||
for the explicit representation of [type aliases](/ref/spec#Type_declarations).
|
||||
Whether the type checker produces `Alias` types or not is controlled by the
|
||||
[`gotypesalias` setting](/pkg/go/types#Alias).
|
||||
For Go 1.22 it defaults to `gotypesalias=0`.
|
||||
For Go 1.23, `gotypesalias=1` will become the default.
|
||||
This setting will be removed in a future release, Go 1.27 at the earliest.
|
||||
|
||||
Go 1.22 changed the default minimum TLS version supported by both servers
|
||||
and clients to TLS 1.2. The default can be reverted to TLS 1.0 using the
|
||||
[`tls10server` setting](/pkg/crypto/tls/#Config).
|
||||
|
||||
Go 1.22 changed the default TLS cipher suites used by clients and servers when
|
||||
not explicitly configured, removing the cipher suites which used RSA based key
|
||||
exchange. The default can be reverted using the [`tlsrsakex` setting](/pkg/crypto/tls/#Config).
|
||||
|
||||
Go 1.22 disabled
|
||||
[`ConnectionState.ExportKeyingMaterial`](/pkg/crypto/tls/#ConnectionState.ExportKeyingMaterial)
|
||||
when the connection supports neither TLS 1.3 nor Extended Master Secret
|
||||
(implemented in Go 1.21). It can be reenabled with the [`tlsunsafeekm`
|
||||
setting](/pkg/crypto/tls/#ConnectionState.ExportKeyingMaterial).
|
||||
|
||||
Go 1.22 changed how the runtime interacts with transparent huge pages on Linux.
|
||||
In particular, a common default Linux kernel configuration can result in
|
||||
significant memory overheads, and Go 1.22 no longer works around this default.
|
||||
To work around this issue without adjusting kernel settings, transparent huge
|
||||
pages can be disabled for Go memory with the
|
||||
[`disablethp` setting](/pkg/runtime#hdr-Environment_Variable).
|
||||
This behavior was backported to Go 1.21.1, but the setting is only available
|
||||
starting with Go 1.21.6.
|
||||
This setting may be removed in a future release, and users impacted by this issue
|
||||
should adjust their Linux configuration according to the recommendations in the
|
||||
[GC guide](/doc/gc-guide#Linux_transparent_huge_pages), or switch to a Linux
|
||||
distribution that disables transparent huge pages altogether.
|
||||
|
||||
Go 1.22 added contention on runtime-internal locks to the [`mutex`
|
||||
profile](/pkg/runtime/pprof#Profile). Contention on these locks is always
|
||||
reported at `runtime._LostContendedRuntimeLock`. Complete stack traces of
|
||||
runtime locks can be enabled with the [`runtimecontentionstacks`
|
||||
setting](/pkg/runtime#hdr-Environment_Variable). These stack traces have
|
||||
non-standard semantics, see setting documentation for details.
|
||||
|
||||
Go 1.22 added a new [`crypto/x509.Certificate`](/pkg/crypto/x509/#Certificate)
|
||||
field, [`Policies`](/pkg/crypto/x509/#Certificate.Policies), which supports
|
||||
certificate policy OIDs with components larger than 31 bits. By default this
|
||||
field is only used during parsing, when it is populated with policy OIDs, but
|
||||
not used during marshaling. It can be used to marshal these larger OIDs, instead
|
||||
of the existing PolicyIdentifiers field, by using the
|
||||
[`x509usepolicies` setting](/pkg/crypto/x509/#CreateCertificate).
|
||||
|
||||
|
||||
### Go 1.21
|
||||
|
||||
Go 1.21 made it a run-time error to call `panic` with a nil interface value,
|
||||
@@ -367,10 +142,6 @@ forms, controlled by the
|
||||
respectively.
|
||||
This behavior was backported to Go 1.19.8+ and Go 1.20.3+.
|
||||
|
||||
Go 1.21 adds the support of Multipath TCP but it is only used if the application
|
||||
explicitly asked for it. This behavior can be controlled by the
|
||||
[`multipathtcp` setting](/pkg/net#Dialer.SetMultipathTCP).
|
||||
|
||||
There is no plan to remove any of these settings.
|
||||
|
||||
### Go 1.20
|
||||
@@ -405,18 +176,11 @@ Go 1.19 made it an error for path lookups to resolve to binaries in the current
|
||||
controlled by the [`execerrdot` setting](/pkg/os/exec#hdr-Executables_in_the_current_directory).
|
||||
There is no plan to remove this setting.
|
||||
|
||||
Go 1.19 started sending EDNS0 additional headers on DNS requests.
|
||||
This can reportedly break the DNS server provided on some routers,
|
||||
such as CenturyLink Zyxel C3000Z.
|
||||
This can be changed by the [`netedns0` setting](/pkg/net#hdr-Name_Resolution).
|
||||
This setting is available in Go 1.21.12, Go 1.22.5, Go 1.23, and later.
|
||||
There is no plan to remove this setting.
|
||||
|
||||
### Go 1.18
|
||||
|
||||
Go 1.18 removed support for SHA1 in most X.509 certificates,
|
||||
controlled by the [`x509sha1` setting](/pkg/crypto/x509#InsecureAlgorithmError).
|
||||
This setting was removed in Go 1.24.
|
||||
controlled by the [`x509sha1` setting](/crypto/x509#InsecureAlgorithmError).
|
||||
This setting will be removed in a future release, Go 1.22 at the earliest.
|
||||
|
||||
### Go 1.10
|
||||
|
||||
|
||||
@@ -1,8 +0,0 @@
|
||||
<style>
|
||||
main ul li { margin: 0.5em 0; }
|
||||
</style>
|
||||
|
||||
## DRAFT RELEASE NOTES — Introduction to Go 1.N {#introduction}
|
||||
|
||||
**Go 1.N is not yet released. These are work-in-progress release notes.
|
||||
Go 1.N is expected to be released in {Month} {Year}.**
|
||||
@@ -1,3 +0,0 @@
|
||||
## Changes to the language {#language}
|
||||
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
## Tools {#tools}
|
||||
|
||||
### Go command {#go-command}
|
||||
|
||||
### Cgo {#cgo}
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
## Runtime {#runtime}
|
||||
@@ -1,7 +0,0 @@
|
||||
## Compiler {#compiler}
|
||||
|
||||
## Assembler {#assembler}
|
||||
|
||||
## Linker {#linker}
|
||||
|
||||
|
||||
@@ -1,2 +0,0 @@
|
||||
## Standard library {#library}
|
||||
|
||||
@@ -1,3 +0,0 @@
|
||||
### Minor changes to the library {#minor_library_changes}
|
||||
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
API changes and other small changes to the standard library go here.
|
||||
@@ -1,2 +0,0 @@
|
||||
## Ports {#ports}
|
||||
|
||||
@@ -1,46 +0,0 @@
|
||||
# Copyright 2024 The Go Authors. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style
|
||||
# license that can be found in the LICENSE file.
|
||||
|
||||
# Rules for building and testing new FIPS snapshots.
|
||||
# For example:
|
||||
#
|
||||
# make v1.2.3.zip
|
||||
# make v1.2.3.test
|
||||
#
|
||||
# and then if changes are needed, check them into master
|
||||
# and run 'make v1.2.3.rm' and repeat.
|
||||
#
|
||||
# Note that once published a snapshot zip file should never
|
||||
# be modified. We record the sha256 hashes of the zip files
|
||||
# in fips140.sum, and the cmd/go/internal/fips140 test checks
|
||||
# that the zips match.
|
||||
#
|
||||
# When the zip file is finalized, run 'make updatesum' to update
|
||||
# fips140.sum.
|
||||
|
||||
default:
|
||||
@echo nothing to make
|
||||
|
||||
# make v1.2.3.zip builds a v1.2.3.zip file
|
||||
# from the current origin/master.
|
||||
# copy and edit the 'go run' command by hand to use a different branch.
|
||||
v%.zip:
|
||||
git fetch origin master
|
||||
go run ../../src/cmd/go/internal/fips140/mkzip.go v$*
|
||||
|
||||
# normally mkzip refuses to overwrite an existing zip file.
|
||||
# make v1.2.3.rm removes the zip file and an unpacked
|
||||
# copy from the module cache.
|
||||
v%.rm:
|
||||
rm -f v$*.zip
|
||||
chmod -R u+w $$(go env GOMODCACHE)/golang.org/fips140@v$* 2>/dev/null || true
|
||||
rm -rf $$(go env GOMODCACHE)/golang.org/fips140@v$*
|
||||
|
||||
# make v1.2.3.test runs the crypto tests using that snapshot.
|
||||
v%.test:
|
||||
GOFIPS140=v$* go test -short crypto...
|
||||
|
||||
# make updatesum updates the fips140.sum file.
|
||||
updatesum:
|
||||
go test cmd/go/internal/fips140 -update
|
||||
@@ -1,9 +0,0 @@
|
||||
This directory holds snapshots of the crypto/internal/fips140 tree
|
||||
that are being validated and certified for FIPS-140 use.
|
||||
The file x.txt (for example, inprocess.txt, certified.txt)
|
||||
defines the meaning of the FIPS version alias x, listing
|
||||
the exact version to use.
|
||||
|
||||
The zip files are created by cmd/go/internal/fips140/mkzip.go.
|
||||
The fips140.sum file lists checksums for the zip files.
|
||||
See the Makefile for recipes.
|
||||
@@ -1,12 +0,0 @@
|
||||
# SHA256 checksums of snapshot zip files in this directory.
|
||||
# These checksums are included in the FIPS security policy
|
||||
# (validation instructions sent to the lab) and MUST NOT CHANGE.
|
||||
# That is, the zip files themselves must not change.
|
||||
#
|
||||
# It is okay to add new zip files to the list, and it is okay to
|
||||
# remove zip files from the list when they are removed from
|
||||
# this directory. To update this file:
|
||||
#
|
||||
# go test cmd/go/internal/fips140 -update
|
||||
#
|
||||
v1.0.0.zip b50508feaeff05d22516b21e1fd210bbf5d6a1e422eaf2cfa23fe379342713b8
|
||||
Binary file not shown.
@@ -31,7 +31,7 @@ import (
|
||||
)
|
||||
|
||||
func usage() {
|
||||
fmt.Fprintf(os.Stderr, "usage: go run mkzip.go zoneinfo.zip\n")
|
||||
fmt.Fprintf(os.Stderr, "usage: go run mkzip.go ../../zoneinfo.zip\n")
|
||||
os.Exit(2)
|
||||
}
|
||||
|
||||
|
||||
@@ -24,8 +24,8 @@
|
||||
# in the CL match the update.bash in the CL.
|
||||
|
||||
# Versions to use.
|
||||
CODE=2025a
|
||||
DATA=2025a
|
||||
CODE=2023c
|
||||
DATA=2023c
|
||||
|
||||
set -e
|
||||
|
||||
|
||||
Binary file not shown.
@@ -3,4 +3,4 @@
|
||||
// tests and tools.
|
||||
module misc
|
||||
|
||||
go 1.22
|
||||
go 1.21
|
||||
|
||||
@@ -204,7 +204,6 @@ func runMain() (int, error) {
|
||||
`; export GOPROXY=` + os.Getenv("GOPROXY") +
|
||||
`; export GOCACHE="` + deviceRoot + `/gocache"` +
|
||||
`; export PATH="` + deviceGoroot + `/bin":$PATH` +
|
||||
`; export HOME="` + deviceRoot + `/home"` +
|
||||
`; cd "` + deviceCwd + `"` +
|
||||
"; '" + deviceBin + "' " + strings.Join(os.Args[2:], " ")
|
||||
code, err := adbRun(cmd)
|
||||
|
||||
@@ -1,22 +1,19 @@
|
||||
#!/bin/sh
|
||||
|
||||
# This script configures clang to target the iOS simulator. If you'd like to
|
||||
# build for real iOS devices, change SDK to "iphoneos" and PLATFORM to "ios".
|
||||
# This uses the latest available iOS SDK, which is recommended. To select a
|
||||
# specific SDK, run 'xcodebuild -showsdks' to see the available SDKs and replace
|
||||
# iphonesimulator with one of them.
|
||||
|
||||
SDK=iphonesimulator
|
||||
PLATFORM=ios-simulator
|
||||
|
||||
# This uses the latest available iOS SDK, which is recommended.
|
||||
# To select a specific SDK, run 'xcodebuild -showsdks'
|
||||
# to see the available SDKs and replace iphoneos with one of them.
|
||||
if [ "$GOARCH" == "arm64" ]; then
|
||||
SDK=iphoneos
|
||||
PLATFORM=ios
|
||||
CLANGARCH="arm64"
|
||||
else
|
||||
SDK=iphonesimulator
|
||||
PLATFORM=ios-simulator
|
||||
CLANGARCH="x86_64"
|
||||
fi
|
||||
|
||||
SDK_PATH=`xcrun --sdk $SDK --show-sdk-path`
|
||||
|
||||
export IPHONEOS_DEPLOYMENT_TARGET=5.1
|
||||
# cmd/cgo doesn't support llvm-gcc-4.2, so we have to use clang.
|
||||
CLANG=`xcrun --sdk $SDK --find clang`
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build ignore
|
||||
// +build ignore
|
||||
|
||||
// detect attempts to autodetect the correct
|
||||
// values of the environment variables
|
||||
|
||||
@@ -1,21 +1,44 @@
|
||||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This program can be used as go_ios_$GOARCH_exec by the Go tool. It executes
|
||||
// binaries on the iOS Simulator using the XCode toolchain.
|
||||
// This program can be used as go_ios_$GOARCH_exec by the Go tool.
|
||||
// It executes binaries on an iOS device using the XCode toolchain
|
||||
// and the ios-deploy program: https://github.com/phonegap/ios-deploy
|
||||
//
|
||||
// This script supports an extra flag, -lldb, that pauses execution
|
||||
// just before the main program begins and allows the user to control
|
||||
// the remote lldb session. This flag is appended to the end of the
|
||||
// script's arguments and is not passed through to the underlying
|
||||
// binary.
|
||||
//
|
||||
// This script requires that three environment variables be set:
|
||||
//
|
||||
// GOIOS_DEV_ID: The codesigning developer id or certificate identifier
|
||||
// GOIOS_APP_ID: The provisioning app id prefix. Must support wildcard app ids.
|
||||
// GOIOS_TEAM_ID: The team id that owns the app id prefix.
|
||||
//
|
||||
// $GOROOT/misc/ios contains a script, detect.go, that attempts to autodetect these.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
const debug = false
|
||||
@@ -87,8 +110,18 @@ func runMain() (int, error) {
|
||||
return 1, err
|
||||
}
|
||||
|
||||
err = runOnSimulator(appdir)
|
||||
if goarch := os.Getenv("GOARCH"); goarch == "arm64" {
|
||||
err = runOnDevice(appdir)
|
||||
} else {
|
||||
err = runOnSimulator(appdir)
|
||||
}
|
||||
if err != nil {
|
||||
// If the lldb driver completed with an exit code, use that.
|
||||
if err, ok := err.(*exec.ExitError); ok {
|
||||
if ws, ok := err.Sys().(interface{ ExitStatus() int }); ok {
|
||||
return ws.ExitStatus(), nil
|
||||
}
|
||||
}
|
||||
return 1, err
|
||||
}
|
||||
return 0, nil
|
||||
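The hunk above maps an *exec.ExitError from the lldb driver back to the child process's exit status. A minimal sketch of that pattern in isolation (the command run here is a stand-in, not part of the CL):

package main

import (
	"fmt"
	"os/exec"
)

// run executes a command and recovers its exit status the same way the
// exec wrapper does: an *exec.ExitError exposes the status via Sys().
func run(name string, args ...string) (int, error) {
	err := exec.Command(name, args...).Run()
	if err == nil {
		return 0, nil
	}
	if ee, ok := err.(*exec.ExitError); ok {
		if ws, ok := ee.Sys().(interface{ ExitStatus() int }); ok {
			return ws.ExitStatus(), nil
		}
	}
	return 1, err
}

func main() {
	code, err := run("go", "version")
	fmt.Println(code, err)
}
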
@@ -102,6 +135,61 @@ func runOnSimulator(appdir string) error {
|
||||
return runSimulator(appdir, bundleID, os.Args[2:])
|
||||
}
|
||||
|
||||
func runOnDevice(appdir string) error {
|
||||
// e.g. B393DDEB490947F5A463FD074299B6C0AXXXXXXX
|
||||
devID = getenv("GOIOS_DEV_ID")
|
||||
|
||||
// e.g. Z8B3JBXXXX.org.golang.sample, Z8B3JBXXXX prefix is available at
|
||||
// https://developer.apple.com/membercenter/index.action#accountSummary as Team ID.
|
||||
appID = getenv("GOIOS_APP_ID")
|
||||
|
||||
// e.g. Z8B3JBXXXX, available at
|
||||
// https://developer.apple.com/membercenter/index.action#accountSummary as Team ID.
|
||||
teamID = getenv("GOIOS_TEAM_ID")
|
||||
|
||||
// Device IDs as listed with ios-deploy -c.
|
||||
deviceID = os.Getenv("GOIOS_DEVICE_ID")
|
||||
|
||||
if _, id, ok := strings.Cut(appID, "."); ok {
|
||||
bundleID = id
|
||||
}
|
||||
|
||||
if err := signApp(appdir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := uninstallDevice(bundleID); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := installDevice(appdir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := mountDevImage(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Kill any hanging debug bridges that might take up port 3222.
|
||||
exec.Command("killall", "idevicedebugserverproxy").Run()
|
||||
|
||||
closer, err := startDebugBridge()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
return runDevice(appdir, bundleID, os.Args[2:])
|
||||
}
|
||||
|
||||
func getenv(envvar string) string {
|
||||
s := os.Getenv(envvar)
|
||||
if s == "" {
|
||||
log.Fatalf("%s not set\nrun $GOROOT/misc/ios/detect.go to attempt to autodetect", envvar)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func assembleApp(appdir, bin string) error {
|
||||
if err := os.MkdirAll(appdir, 0755); err != nil {
|
||||
return err
|
||||
@@ -129,6 +217,236 @@ func assembleApp(appdir, bin string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func signApp(appdir string) error {
|
||||
entitlementsPath := filepath.Join(tmpdir, "Entitlements.plist")
|
||||
cmd := exec.Command(
|
||||
"codesign",
|
||||
"-f",
|
||||
"-s", devID,
|
||||
"--entitlements", entitlementsPath,
|
||||
appdir,
|
||||
)
|
||||
if debug {
|
||||
log.Println(strings.Join(cmd.Args, " "))
|
||||
}
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("codesign: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// mountDevImage ensures a developer image is mounted on the device.
|
||||
// The image contains the device lldb server for idevicedebugserverproxy
|
||||
// to connect to.
|
||||
func mountDevImage() error {
|
||||
// Check for existing mount.
|
||||
cmd := idevCmd(exec.Command("ideviceimagemounter", "-l", "-x"))
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
os.Stderr.Write(out)
|
||||
return fmt.Errorf("ideviceimagemounter: %v", err)
|
||||
}
|
||||
var info struct {
|
||||
Dict struct {
|
||||
Data []byte `xml:",innerxml"`
|
||||
} `xml:"dict"`
|
||||
}
|
||||
if err := xml.Unmarshal(out, &info); err != nil {
|
||||
return fmt.Errorf("mountDevImage: failed to decode mount information: %v", err)
|
||||
}
|
||||
dict, err := parsePlistDict(info.Dict.Data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("mountDevImage: failed to parse mount information: %v", err)
|
||||
}
|
||||
if dict["ImagePresent"] == "true" && dict["Status"] == "Complete" {
|
||||
return nil
|
||||
}
|
||||
// Some devices only give us an ImageSignature key.
|
||||
if _, exists := dict["ImageSignature"]; exists {
|
||||
return nil
|
||||
}
|
||||
// No image is mounted. Find a suitable image.
|
||||
imgPath, err := findDevImage()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sigPath := imgPath + ".signature"
|
||||
cmd = idevCmd(exec.Command("ideviceimagemounter", imgPath, sigPath))
|
||||
if out, err := cmd.CombinedOutput(); err != nil {
|
||||
os.Stderr.Write(out)
|
||||
return fmt.Errorf("ideviceimagemounter: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// findDevImage uses the device iOS version and build to locate a suitable
|
||||
// developer image.
|
||||
func findDevImage() (string, error) {
|
||||
cmd := idevCmd(exec.Command("ideviceinfo"))
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("ideviceinfo: %v", err)
|
||||
}
|
||||
var iosVer, buildVer string
|
||||
lines := bytes.Split(out, []byte("\n"))
|
||||
for _, line := range lines {
|
||||
key, val, ok := strings.Cut(string(line), ": ")
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
switch key {
|
||||
case "ProductVersion":
|
||||
iosVer = val
|
||||
case "BuildVersion":
|
||||
buildVer = val
|
||||
}
|
||||
}
|
||||
if iosVer == "" || buildVer == "" {
|
||||
return "", errors.New("failed to parse ideviceinfo output")
|
||||
}
|
||||
verSplit := strings.Split(iosVer, ".")
|
||||
if len(verSplit) > 2 {
|
||||
// Developer images are specific to major.minor ios version.
|
||||
// Cut off the patch version.
|
||||
iosVer = strings.Join(verSplit[:2], ".")
|
||||
}
|
||||
sdkBase := "/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/DeviceSupport"
|
||||
patterns := []string{fmt.Sprintf("%s (%s)", iosVer, buildVer), fmt.Sprintf("%s (*)", iosVer), fmt.Sprintf("%s*", iosVer)}
|
||||
for _, pattern := range patterns {
|
||||
matches, err := filepath.Glob(filepath.Join(sdkBase, pattern, "DeveloperDiskImage.dmg"))
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("findDevImage: %v", err)
|
||||
}
|
||||
if len(matches) > 0 {
|
||||
return matches[0], nil
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("failed to find matching developer image for iOS version %s build %s", iosVer, buildVer)
|
||||
}
|
||||
|
||||
// startDebugBridge ensures that the idevicedebugserverproxy runs on
|
||||
// port 3222.
|
||||
func startDebugBridge() (func(), error) {
|
||||
errChan := make(chan error, 1)
|
||||
cmd := idevCmd(exec.Command("idevicedebugserverproxy", "3222"))
|
||||
var stderr bytes.Buffer
|
||||
cmd.Stderr = &stderr
|
||||
if err := cmd.Start(); err != nil {
|
||||
return nil, fmt.Errorf("idevicedebugserverproxy: %v", err)
|
||||
}
|
||||
go func() {
|
||||
if err := cmd.Wait(); err != nil {
|
||||
if _, ok := err.(*exec.ExitError); ok {
|
||||
errChan <- fmt.Errorf("idevicedebugserverproxy: %s", stderr.Bytes())
|
||||
} else {
|
||||
errChan <- fmt.Errorf("idevicedebugserverproxy: %v", err)
|
||||
}
|
||||
}
|
||||
errChan <- nil
|
||||
}()
|
||||
closer := func() {
|
||||
cmd.Process.Kill()
|
||||
<-errChan
|
||||
}
|
||||
// Dial localhost:3222 to ensure the proxy is ready.
|
||||
delay := time.Second / 4
|
||||
for attempt := 0; attempt < 5; attempt++ {
|
||||
conn, err := net.DialTimeout("tcp", "localhost:3222", 5*time.Second)
|
||||
if err == nil {
|
||||
conn.Close()
|
||||
return closer, nil
|
||||
}
|
||||
select {
|
||||
case <-time.After(delay):
|
||||
delay *= 2
|
||||
case err := <-errChan:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
closer()
|
||||
return nil, errors.New("failed to set up idevicedebugserverproxy")
|
||||
}
|
||||
|
||||
// findDeviceAppPath returns the device path to the app with the
|
||||
// given bundle ID. It parses the output of ideviceinstaller -l -o xml,
|
||||
// looking for the bundle ID and the corresponding Path value.
|
||||
func findDeviceAppPath(bundleID string) (string, error) {
|
||||
cmd := idevCmd(exec.Command("ideviceinstaller", "-l", "-o", "xml"))
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
os.Stderr.Write(out)
|
||||
return "", fmt.Errorf("ideviceinstaller: -l -o xml %v", err)
|
||||
}
|
||||
var list struct {
|
||||
Apps []struct {
|
||||
Data []byte `xml:",innerxml"`
|
||||
} `xml:"array>dict"`
|
||||
}
|
||||
if err := xml.Unmarshal(out, &list); err != nil {
|
||||
return "", fmt.Errorf("failed to parse ideviceinstaller output: %v", err)
|
||||
}
|
||||
for _, app := range list.Apps {
|
||||
values, err := parsePlistDict(app.Data)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("findDeviceAppPath: failed to parse app dict: %v", err)
|
||||
}
|
||||
if values["CFBundleIdentifier"] == bundleID {
|
||||
if path, ok := values["Path"]; ok {
|
||||
return path, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("failed to find device path for bundle: %s", bundleID)
|
||||
}
|
||||
|
||||
// Parse an xml encoded plist. Plist values are mapped to string.
|
||||
func parsePlistDict(dict []byte) (map[string]string, error) {
|
||||
d := xml.NewDecoder(bytes.NewReader(dict))
|
||||
values := make(map[string]string)
|
||||
var key string
|
||||
var hasKey bool
|
||||
for {
|
||||
tok, err := d.Token()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if tok, ok := tok.(xml.StartElement); ok {
|
||||
if tok.Name.Local == "key" {
|
||||
if err := d.DecodeElement(&key, &tok); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hasKey = true
|
||||
} else if hasKey {
|
||||
var val string
|
||||
var err error
|
||||
switch n := tok.Name.Local; n {
|
||||
case "true", "false":
|
||||
// Bools are represented as <true/> and <false/>.
|
||||
val = n
|
||||
err = d.Skip()
|
||||
default:
|
||||
err = d.DecodeElement(&val, &tok)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
values[key] = val
|
||||
hasKey = false
|
||||
} else {
|
||||
if err := d.Skip(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
func installSimulator(appdir string) error {
|
||||
cmd := exec.Command(
|
||||
"xcrun", "simctl", "install",
|
||||
@@ -142,20 +460,138 @@ func installSimulator(appdir string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func runSimulator(appdir, bundleID string, args []string) error {
|
||||
xcrunArgs := []string{"simctl", "spawn",
|
||||
"booted",
|
||||
appdir + "/gotest",
|
||||
func uninstallDevice(bundleID string) error {
|
||||
cmd := idevCmd(exec.Command(
|
||||
"ideviceinstaller",
|
||||
"-U", bundleID,
|
||||
))
|
||||
if out, err := cmd.CombinedOutput(); err != nil {
|
||||
os.Stderr.Write(out)
|
||||
return fmt.Errorf("ideviceinstaller -U %q: %s", bundleID, err)
|
||||
}
|
||||
xcrunArgs = append(xcrunArgs, args...)
|
||||
cmd := exec.Command("xcrun", xcrunArgs...)
|
||||
cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
|
||||
err := cmd.Run()
|
||||
return nil
|
||||
}
|
||||
|
||||
func installDevice(appdir string) error {
|
||||
attempt := 0
|
||||
for {
|
||||
cmd := idevCmd(exec.Command(
|
||||
"ideviceinstaller",
|
||||
"-i", appdir,
|
||||
))
|
||||
if out, err := cmd.CombinedOutput(); err != nil {
|
||||
// Sometimes, installing the app fails for some reason.
|
||||
// Give the device a few seconds and try again.
|
||||
if attempt < 5 {
|
||||
time.Sleep(5 * time.Second)
|
||||
attempt++
|
||||
continue
|
||||
}
|
||||
os.Stderr.Write(out)
|
||||
return fmt.Errorf("ideviceinstaller -i %q: %v (%d attempts)", appdir, err, attempt)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func idevCmd(cmd *exec.Cmd) *exec.Cmd {
|
||||
if deviceID != "" {
|
||||
// Inject -u device_id after the executable, but before the arguments.
|
||||
args := []string{cmd.Args[0], "-u", deviceID}
|
||||
cmd.Args = append(args, cmd.Args[1:]...)
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
|
||||
func runSimulator(appdir, bundleID string, args []string) error {
|
||||
cmd := exec.Command(
|
||||
"xcrun", "simctl", "launch",
|
||||
"--wait-for-debugger",
|
||||
"booted",
|
||||
bundleID,
|
||||
)
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
os.Stderr.Write(out)
|
||||
return fmt.Errorf("xcrun simctl launch booted %q: %v", bundleID, err)
|
||||
}
|
||||
var processID int
|
||||
var ignore string
|
||||
if _, err := fmt.Sscanf(string(out), "%s %d", &ignore, &processID); err != nil {
|
||||
return fmt.Errorf("runSimulator: couldn't find processID from `simctl launch`: %v (%q)", err, out)
|
||||
}
|
||||
_, err = runLLDB("ios-simulator", appdir, strconv.Itoa(processID), args)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
func runDevice(appdir, bundleID string, args []string) error {
|
||||
attempt := 0
|
||||
for {
|
||||
// The device app path reported by the device might be stale, so retry
|
||||
// the lookup of the device path along with the lldb launching below.
|
||||
deviceapp, err := findDeviceAppPath(bundleID)
|
||||
if err != nil {
|
||||
// The device app path might not yet exist for a newly installed app.
|
||||
if attempt == 5 {
|
||||
return err
|
||||
}
|
||||
attempt++
|
||||
time.Sleep(5 * time.Second)
|
||||
continue
|
||||
}
|
||||
out, err := runLLDB("remote-ios", appdir, deviceapp, args)
|
||||
// If the program was not started it can be retried without papering over
|
||||
// real test failures.
|
||||
started := bytes.HasPrefix(out, []byte("lldb: running program"))
|
||||
if started || err == nil || attempt == 5 {
|
||||
return err
|
||||
}
|
||||
// Sometimes, the app was not yet ready to launch or the device path was
|
||||
// stale. Retry.
|
||||
attempt++
|
||||
time.Sleep(5 * time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
func runLLDB(target, appdir, deviceapp string, args []string) ([]byte, error) {
|
||||
var env []string
|
||||
for _, e := range os.Environ() {
|
||||
// Don't override TMPDIR, HOME, GOCACHE on the device.
|
||||
if strings.HasPrefix(e, "TMPDIR=") || strings.HasPrefix(e, "HOME=") || strings.HasPrefix(e, "GOCACHE=") {
|
||||
continue
|
||||
}
|
||||
env = append(env, e)
|
||||
}
|
||||
lldb := exec.Command(
|
||||
"python",
|
||||
"-", // Read script from stdin.
|
||||
target,
|
||||
appdir,
|
||||
deviceapp,
|
||||
)
|
||||
lldb.Args = append(lldb.Args, args...)
|
||||
lldb.Env = env
|
||||
lldb.Stdin = strings.NewReader(lldbDriver)
|
||||
lldb.Stdout = os.Stdout
|
||||
var out bytes.Buffer
|
||||
lldb.Stderr = io.MultiWriter(&out, os.Stderr)
|
||||
err := lldb.Start()
|
||||
if err == nil {
|
||||
// Forward SIGQUIT to the lldb driver which in turn will forward
|
||||
// to the running program.
|
||||
sigs := make(chan os.Signal, 1)
|
||||
signal.Notify(sigs, syscall.SIGQUIT)
|
||||
proc := lldb.Process
|
||||
go func() {
|
||||
for sig := range sigs {
|
||||
proc.Signal(sig)
|
||||
}
|
||||
}()
|
||||
err = lldb.Wait()
|
||||
signal.Stop(sigs)
|
||||
close(sigs)
|
||||
}
|
||||
return out.Bytes(), err
|
||||
}
|
||||
|
||||
func copyLocalDir(dst, src string) error {
|
||||
@@ -364,3 +800,112 @@ const resourceRules = `<?xml version="1.0" encoding="UTF-8"?>
|
||||
</dict>
|
||||
</plist>
|
||||
`
|
||||
|
||||
const lldbDriver = `
|
||||
import sys
|
||||
import os
|
||||
import signal
|
||||
|
||||
platform, exe, device_exe_or_pid, args = sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4:]
|
||||
|
||||
env = []
|
||||
for k, v in os.environ.items():
|
||||
env.append(k + "=" + v)
|
||||
|
||||
sys.path.append('/Applications/Xcode.app/Contents/SharedFrameworks/LLDB.framework/Resources/Python')
|
||||
|
||||
import lldb
|
||||
|
||||
debugger = lldb.SBDebugger.Create()
|
||||
debugger.SetAsync(True)
|
||||
debugger.SkipLLDBInitFiles(True)
|
||||
|
||||
err = lldb.SBError()
|
||||
target = debugger.CreateTarget(exe, None, platform, True, err)
|
||||
if not target.IsValid() or not err.Success():
|
||||
sys.stderr.write("lldb: failed to setup up target: %s\n" % (err))
|
||||
sys.exit(1)
|
||||
|
||||
listener = debugger.GetListener()
|
||||
|
||||
if platform == 'remote-ios':
|
||||
target.modules[0].SetPlatformFileSpec(lldb.SBFileSpec(device_exe_or_pid))
|
||||
process = target.ConnectRemote(listener, 'connect://localhost:3222', None, err)
|
||||
else:
|
||||
process = target.AttachToProcessWithID(listener, int(device_exe_or_pid), err)
|
||||
|
||||
if not err.Success():
|
||||
sys.stderr.write("lldb: failed to connect to remote target %s: %s\n" % (device_exe_or_pid, err))
|
||||
sys.exit(1)
|
||||
|
||||
# Don't stop on signals.
|
||||
sigs = process.GetUnixSignals()
|
||||
for i in range(0, sigs.GetNumSignals()):
|
||||
sig = sigs.GetSignalAtIndex(i)
|
||||
sigs.SetShouldStop(sig, False)
|
||||
sigs.SetShouldNotify(sig, False)
|
||||
|
||||
event = lldb.SBEvent()
|
||||
running = False
|
||||
prev_handler = None
|
||||
|
||||
def signal_handler(signal, frame):
|
||||
process.Signal(signal)
|
||||
|
||||
def run_program():
|
||||
# Forward SIGQUIT to the program.
|
||||
prev_handler = signal.signal(signal.SIGQUIT, signal_handler)
|
||||
# Tell the Go driver that the program is running and should not be retried.
|
||||
sys.stderr.write("lldb: running program\n")
|
||||
running = True
|
||||
# Process is stopped at attach/launch. Let it run.
|
||||
process.Continue()
|
||||
|
||||
if platform != 'remote-ios':
|
||||
# For the local emulator the program is ready to run.
|
||||
# For remote device runs, we need to wait for eStateConnected,
|
||||
# below.
|
||||
run_program()
|
||||
|
||||
while True:
|
||||
if not listener.WaitForEvent(1, event):
|
||||
continue
|
||||
if not lldb.SBProcess.EventIsProcessEvent(event):
|
||||
continue
|
||||
if running:
|
||||
# Pass through stdout and stderr.
|
||||
while True:
|
||||
out = process.GetSTDOUT(8192)
|
||||
if not out:
|
||||
break
|
||||
sys.stdout.write(out)
|
||||
while True:
|
||||
out = process.GetSTDERR(8192)
|
||||
if not out:
|
||||
break
|
||||
sys.stderr.write(out)
|
||||
state = process.GetStateFromEvent(event)
|
||||
if state in [lldb.eStateCrashed, lldb.eStateDetached, lldb.eStateUnloaded, lldb.eStateExited]:
|
||||
if running:
|
||||
signal.signal(signal.SIGQUIT, prev_handler)
|
||||
break
|
||||
elif state == lldb.eStateConnected:
|
||||
if platform == 'remote-ios':
|
||||
process.RemoteLaunch(args, env, None, None, None, None, 0, False, err)
|
||||
if not err.Success():
|
||||
sys.stderr.write("lldb: failed to launch remote process: %s\n" % (err))
|
||||
process.Kill()
|
||||
debugger.Terminate()
|
||||
sys.exit(1)
|
||||
run_program()
|
||||
|
||||
exitStatus = process.GetExitStatus()
|
||||
exitDesc = process.GetExitDescription()
|
||||
process.Kill()
|
||||
debugger.Terminate()
|
||||
if exitStatus == 0 and exitDesc is not None:
|
||||
# Ensure tests fail when killed by a signal.
|
||||
exitStatus = 123
|
||||
|
||||
sys.exit(exitStatus)
|
||||
`
|
||||
|
||||
@@ -10,11 +10,11 @@ case "$GOWASIRUNTIME" in
|
||||
"wasmer")
|
||||
exec wasmer run --dir=/ --env PWD="$PWD" --env PATH="$PATH" ${GOWASIRUNTIMEARGS:-} "$1" -- "${@:2}"
|
||||
;;
|
||||
"wazero")
|
||||
exec wazero run -mount /:/ -env-inherit -cachedir "${TMPDIR:-/tmp}"/wazero ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}"
|
||||
"wasmtime")
|
||||
exec wasmtime run --dir=/ --env PWD="$PWD" --env PATH="$PATH" --max-wasm-stack 1048576 ${GOWASIRUNTIMEARGS:-} "$1" -- "${@:2}"
|
||||
;;
|
||||
"wasmtime" | "")
|
||||
exec wasmtime run --dir=/ --env PWD="$PWD" --env PATH="$PATH" -W max-wasm-stack=1048576 ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}"
|
||||
"wazero" | "")
|
||||
exec wazero run -mount /:/ -env-inherit -cachedir "${TMPDIR:-/tmp}"/wazero ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}"
|
||||
;;
|
||||
*)
|
||||
echo "Unknown Go WASI runtime specified: $GOWASIRUNTIME"
|
||||
@@ -17,7 +17,7 @@ license that can be found in the LICENSE file.
|
||||
<script src="https://cdn.jsdelivr.net/npm/text-encoding@0.7.0/lib/encoding.min.js"></script>
|
||||
(see https://caniuse.com/#feat=textencoder)
|
||||
-->
|
||||
<script src="../../lib/wasm/wasm_exec.js"></script>
|
||||
<script src="wasm_exec.js"></script>
|
||||
<script>
|
||||
if (!WebAssembly.instantiateStreaming) { // polyfill
|
||||
WebAssembly.instantiateStreaming = async (resp, importObject) => {
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
if (!globalThis.fs) {
|
||||
let outputBuf = "";
|
||||
globalThis.fs = {
|
||||
constants: { O_WRONLY: -1, O_RDWR: -1, O_CREAT: -1, O_TRUNC: -1, O_APPEND: -1, O_EXCL: -1, O_DIRECTORY: -1 }, // unused
|
||||
constants: { O_WRONLY: -1, O_RDWR: -1, O_CREAT: -1, O_TRUNC: -1, O_APPEND: -1, O_EXCL: -1 }, // unused
|
||||
writeSync(fd, buf) {
|
||||
outputBuf += decoder.decode(buf);
|
||||
const nl = outputBuf.lastIndexOf("\n");
|
||||
@@ -73,14 +73,6 @@
|
||||
}
|
||||
}
|
||||
|
||||
if (!globalThis.path) {
|
||||
globalThis.path = {
|
||||
resolve(...pathSegments) {
|
||||
return pathSegments.join("/");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!globalThis.crypto) {
|
||||
throw new Error("globalThis.crypto is not available, polyfill required (crypto.getRandomValues only)");
|
||||
}
|
||||
@@ -216,16 +208,10 @@
|
||||
return decoder.decode(new DataView(this._inst.exports.mem.buffer, saddr, len));
|
||||
}
|
||||
|
||||
const testCallExport = (a, b) => {
|
||||
this._inst.exports.testExport0();
|
||||
return this._inst.exports.testExport(a, b);
|
||||
}
|
||||
|
||||
const timeOrigin = Date.now() - performance.now();
|
||||
this.importObject = {
|
||||
_gotest: {
|
||||
add: (a, b) => a + b,
|
||||
callExport: testCallExport,
|
||||
},
|
||||
gojs: {
|
||||
// Go's SP does not change as long as no Go code is running. Some operations (e.g. calls, getters and setters)
|
||||
@@ -11,7 +11,6 @@ if (process.argv.length < 3) {
|
||||
|
||||
globalThis.require = require;
|
||||
globalThis.fs = require("fs");
|
||||
globalThis.path = require("path");
|
||||
globalThis.TextEncoder = require("util").TextEncoder;
|
||||
globalThis.TextDecoder = require("util").TextDecoder;
|
||||
|
||||
@@ -31,18 +31,14 @@ Maintaining vendor directories

Before updating vendor directories, ensure that module mode is enabled.
Make sure that GO111MODULE is not set in the environment, or that it is
set to 'on' or 'auto', and if you use a go.work file, set GOWORK=off.

Also, ensure that 'go env GOROOT' shows the root of this Go source
tree. Otherwise, the results are undefined. It's recommended to build
Go from source and use that 'go' binary to update its source tree.
set to 'on' or 'auto'.

Requirements may be added, updated, and removed with 'go get'.
The vendor directory may be updated with 'go mod vendor'.
A typical sequence might be:

cd src # or src/cmd
go get golang.org/x/net@master
cd src
go get golang.org/x/net@latest
go mod tidy
go mod vendor
|
||||
|
||||
@@ -10,4 +10,4 @@ if [ ! -f make.bash ]; then
fi
. ./make.bash "$@" --no-banner
bash run.bash --no-rebuild
"$GOTOOLDIR/dist" banner # print build info
$GOTOOLDIR/dist banner # print build info
|
||||
@@ -15,7 +15,6 @@ import (
|
||||
"fmt"
|
||||
"internal/godebug"
|
||||
"io/fs"
|
||||
"maps"
|
||||
"math"
|
||||
"path"
|
||||
"reflect"
|
||||
@@ -613,7 +612,7 @@ func (fi headerFileInfo) String() string {
|
||||
}
|
||||
|
||||
// sysStat, if non-nil, populates h from system-dependent fields of fi.
|
||||
var sysStat func(fi fs.FileInfo, h *Header, doNameLookups bool) error
|
||||
var sysStat func(fi fs.FileInfo, h *Header) error
|
||||
|
||||
const (
|
||||
// Mode constants from the USTAR spec:
|
||||
@@ -633,17 +632,13 @@ const (
|
||||
c_ISSOCK = 0140000 // Socket
|
||||
)
|
||||
|
||||
// FileInfoHeader creates a partially-populated [Header] from fi.
|
||||
// FileInfoHeader creates a partially-populated Header from fi.
|
||||
// If fi describes a symlink, FileInfoHeader records link as the link target.
|
||||
// If fi describes a directory, a slash is appended to the name.
|
||||
//
|
||||
// Since fs.FileInfo's Name method only returns the base name of
|
||||
// the file it describes, it may be necessary to modify Header.Name
|
||||
// to provide the full path name of the file.
|
||||
//
|
||||
// If fi implements [FileInfoNames]
|
||||
// Header.Gname and Header.Uname
|
||||
// are provided by the methods of the interface.
|
||||
func FileInfoHeader(fi fs.FileInfo, link string) (*Header, error) {
|
||||
if fi == nil {
|
||||
return nil, errors.New("archive/tar: FileInfo is nil")
|
||||
@@ -697,45 +692,31 @@ func FileInfoHeader(fi fs.FileInfo, link string) (*Header, error) {
|
||||
h.Gname = sys.Gname
|
||||
h.AccessTime = sys.AccessTime
|
||||
h.ChangeTime = sys.ChangeTime
|
||||
h.Xattrs = maps.Clone(sys.Xattrs)
|
||||
if sys.Xattrs != nil {
|
||||
h.Xattrs = make(map[string]string)
|
||||
for k, v := range sys.Xattrs {
|
||||
h.Xattrs[k] = v
|
||||
}
|
||||
}
|
||||
if sys.Typeflag == TypeLink {
|
||||
// hard link
|
||||
h.Typeflag = TypeLink
|
||||
h.Size = 0
|
||||
h.Linkname = sys.Linkname
|
||||
}
|
||||
h.PAXRecords = maps.Clone(sys.PAXRecords)
|
||||
}
|
||||
var doNameLookups = true
|
||||
if iface, ok := fi.(FileInfoNames); ok {
|
||||
doNameLookups = false
|
||||
var err error
|
||||
h.Gname, err = iface.Gname()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
h.Uname, err = iface.Uname()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if sys.PAXRecords != nil {
|
||||
h.PAXRecords = make(map[string]string)
|
||||
for k, v := range sys.PAXRecords {
|
||||
h.PAXRecords[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
if sysStat != nil {
|
||||
return h, sysStat(fi, h, doNameLookups)
|
||||
return h, sysStat(fi, h)
|
||||
}
|
||||
return h, nil
|
||||
}
|
||||
|
||||
// FileInfoNames extends [fs.FileInfo].
|
||||
// Passing an instance of this to [FileInfoHeader] permits the caller
|
||||
// to avoid a system-dependent name lookup by specifying the Uname and Gname directly.
|
||||
type FileInfoNames interface {
|
||||
fs.FileInfo
|
||||
// Uname should give a user name.
|
||||
Uname() (string, error)
|
||||
// Gname should give a group name.
|
||||
Gname() (string, error)
|
||||
}
|
||||
|
||||
// isHeaderOnlyType checks if the given type flag is of the type that has no
|
||||
// data section even if a size is specified.
|
||||
func isHeaderOnlyType(flag byte) bool {
|
||||
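The new FileInfoNames interface lets a caller supply Uname and Gname directly so FileInfoHeader can skip the system name lookup. A small sketch of a caller, assuming a Go release that includes FileInfoNames (the wrapper type below is illustrative, not from this CL):

package main

import (
	"archive/tar"
	"fmt"
	"io/fs"
	"os"
)

// namedInfo wraps an fs.FileInfo and supplies fixed user/group names,
// satisfying tar.FileInfoNames so FileInfoHeader skips name lookups.
type namedInfo struct {
	fs.FileInfo
}

func (namedInfo) Uname() (string, error) { return "builder", nil }
func (namedInfo) Gname() (string, error) { return "builders", nil }

func main() {
	fi, err := os.Stat("main.go")
	if err != nil {
		panic(err)
	}
	hdr, err := tar.FileInfoHeader(namedInfo{fi}, "")
	if err != nil {
		panic(err)
	}
	fmt.Println(hdr.Uname, hdr.Gname) // builder builders
}
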
@@ -746,3 +727,10 @@ func isHeaderOnlyType(flag byte) bool {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func min(a, b int64) int64 {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
@@ -33,7 +33,7 @@ import "strings"
|
||||
// sub-second times | no | yes | no
|
||||
// sparse files | no | yes | yes
|
||||
//
|
||||
// The table's upper portion shows the [Header] fields, where each format reports
|
||||
// The table's upper portion shows the Header fields, where each format reports
|
||||
// the maximum number of bytes allowed for each string field and
|
||||
// the integer type used to store each numeric field
|
||||
// (where timestamps are stored as the number of seconds since the Unix epoch).
|
||||
|
||||
@@ -35,7 +35,7 @@ type fileReader interface {
|
||||
WriteTo(io.Writer) (int64, error)
|
||||
}
|
||||
|
||||
// NewReader creates a new [Reader] reading from r.
|
||||
// NewReader creates a new Reader reading from r.
|
||||
func NewReader(r io.Reader) *Reader {
|
||||
return &Reader{r: r, curr: ®FileReader{r, 0}}
|
||||
}
|
||||
@@ -47,10 +47,10 @@ func NewReader(r io.Reader) *Reader {
|
||||
//
|
||||
// If Next encounters a non-local name (as defined by [filepath.IsLocal])
|
||||
// and the GODEBUG environment variable contains `tarinsecurepath=0`,
|
||||
// Next returns the header with an [ErrInsecurePath] error.
|
||||
// Next returns the header with an ErrInsecurePath error.
|
||||
// A future version of Go may introduce this behavior by default.
|
||||
// Programs that want to accept non-local names can ignore
|
||||
// the [ErrInsecurePath] error and use the returned header.
|
||||
// the ErrInsecurePath error and use the returned header.
|
||||
func (tr *Reader) Next() (*Header, error) {
|
||||
if tr.err != nil {
|
||||
return nil, tr.err
|
||||
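For context on the ErrInsecurePath wording above: with GODEBUG=tarinsecurepath=0, Next still returns a usable header alongside the error, and a program may deliberately accept it. A hedged sketch of such a caller (the archive path is a placeholder):

package main

import (
	"archive/tar"
	"errors"
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	f, err := os.Open("archive.tar") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	tr := tar.NewReader(f)
	for {
		hdr, err := tr.Next()
		if errors.Is(err, io.EOF) {
			break
		}
		if errors.Is(err, tar.ErrInsecurePath) {
			// The header is still valid; this program chooses to accept it.
			fmt.Println("accepting non-local name:", hdr.Name)
		} else if err != nil {
			log.Fatal(err)
		}
		fmt.Println(hdr.Name, hdr.Size)
	}
}
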
@@ -623,14 +623,14 @@ func readGNUSparseMap0x1(paxHdrs map[string]string) (sparseDatas, error) {
|
||||
|
||||
// Read reads from the current file in the tar archive.
|
||||
// It returns (0, io.EOF) when it reaches the end of that file,
|
||||
// until [Next] is called to advance to the next file.
|
||||
// until Next is called to advance to the next file.
|
||||
//
|
||||
// If the current file is sparse, then the regions marked as a hole
|
||||
// are read back as NUL-bytes.
|
||||
//
|
||||
// Calling Read on special types like [TypeLink], [TypeSymlink], [TypeChar],
|
||||
// [TypeBlock], [TypeDir], and [TypeFifo] returns (0, [io.EOF]) regardless of what
|
||||
// the [Header.Size] claims.
|
||||
// Calling Read on special types like TypeLink, TypeSymlink, TypeChar,
|
||||
// TypeBlock, TypeDir, and TypeFifo returns (0, io.EOF) regardless of what
|
||||
// the Header.Size claims.
|
||||
func (tr *Reader) Read(b []byte) (int, error) {
|
||||
if tr.err != nil {
|
||||
return 0, tr.err
|
||||
@@ -811,7 +811,9 @@ func (sr sparseFileReader) physicalRemaining() int64 {
|
||||
type zeroReader struct{}
|
||||
|
||||
func (zeroReader) Read(b []byte) (int, error) {
|
||||
clear(b)
|
||||
for i := range b {
|
||||
b[i] = 0
|
||||
}
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
|
||||
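The zeroReader change above swaps a manual zeroing loop for the clear builtin. A one-line illustration, assuming a Go 1.21+ toolchain:

package main

import "fmt"

func main() {
	b := []byte("payload")
	// clear zeroes every element of the slice; it is equivalent to the
	// removed loop "for i := range b { b[i] = 0 }" in the hunk above.
	clear(b)
	fmt.Println(b) // [0 0 0 0 0 0 0]
}
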
@@ -7,16 +7,14 @@ package tar
|
||||
import (
|
||||
"bytes"
|
||||
"compress/bzip2"
|
||||
"crypto/md5"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"maps"
|
||||
"math"
|
||||
"os"
|
||||
"path"
|
||||
"reflect"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
@@ -27,7 +25,7 @@ func TestReader(t *testing.T) {
|
||||
vectors := []struct {
|
||||
file string // Test input file
|
||||
headers []*Header // Expected output headers
|
||||
chksums []string // CRC32 checksum of files, leave as nil if not checked
|
||||
chksums []string // MD5 checksum of files, leave as nil if not checked
|
||||
err error // Expected error to occur
|
||||
}{{
|
||||
file: "testdata/gnu.tar",
|
||||
@@ -55,8 +53,8 @@ func TestReader(t *testing.T) {
|
||||
Format: FormatGNU,
|
||||
}},
|
||||
chksums: []string{
|
||||
"6cbd88fc",
|
||||
"ddac04b3",
|
||||
"e38b27eaccb4391bdec553a7f3ae6b2f",
|
||||
"c65bd2e50a56a2138bf1716f2fd56fe9",
|
||||
},
|
||||
}, {
|
||||
file: "testdata/sparse-formats.tar",
|
||||
@@ -149,11 +147,11 @@ func TestReader(t *testing.T) {
|
||||
Format: FormatGNU,
|
||||
}},
|
||||
chksums: []string{
|
||||
"5375e1d2",
|
||||
"5375e1d2",
|
||||
"5375e1d2",
|
||||
"5375e1d2",
|
||||
"8eb179ba",
|
||||
"6f53234398c2449fe67c1812d993012f",
|
||||
"6f53234398c2449fe67c1812d993012f",
|
||||
"6f53234398c2449fe67c1812d993012f",
|
||||
"6f53234398c2449fe67c1812d993012f",
|
||||
"b0061974914468de549a2af8ced10316",
|
||||
},
|
||||
}, {
|
||||
file: "testdata/star.tar",
|
||||
@@ -270,7 +268,7 @@ func TestReader(t *testing.T) {
|
||||
Format: FormatPAX,
|
||||
}},
|
||||
chksums: []string{
|
||||
"5fd7e86a",
|
||||
"0afb597b283fe61b5d4879669a350556",
|
||||
},
|
||||
}, {
|
||||
file: "testdata/pax-records.tar",
|
||||
@@ -657,7 +655,7 @@ func TestReader(t *testing.T) {
|
||||
if v.chksums == nil {
|
||||
continue
|
||||
}
|
||||
h := crc32.NewIEEE()
|
||||
h := md5.New()
|
||||
_, err = io.CopyBuffer(h, tr, rdbuf) // Effectively an incremental read
|
||||
if err != nil {
|
||||
break
|
||||
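The test above switches its content checks from MD5 to CRC-32. A small sketch of producing such a checksum by streaming data through the hash, which is what the io.CopyBuffer call amounts to:

package main

import (
	"fmt"
	"hash/crc32"
	"io"
	"strings"
)

func main() {
	h := crc32.NewIEEE()
	// Streaming through io.Copy is effectively an incremental read,
	// like the io.CopyBuffer call in the test above.
	if _, err := io.Copy(h, strings.NewReader("Kilts")); err != nil {
		panic(err)
	}
	fmt.Printf("%08x\n", h.Sum32())
}
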
@@ -1019,7 +1017,7 @@ func TestParsePAX(t *testing.T) {
|
||||
for i, v := range vectors {
|
||||
r := strings.NewReader(v.in)
|
||||
got, err := parsePAX(r)
|
||||
if !maps.Equal(got, v.want) && !(len(got) == 0 && len(v.want) == 0) {
|
||||
if !reflect.DeepEqual(got, v.want) && !(len(got) == 0 && len(v.want) == 0) {
|
||||
t.Errorf("test %d, parsePAX():\ngot %v\nwant %v", i, got, v.want)
|
||||
}
|
||||
if ok := err == nil; ok != v.ok {
|
||||
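maps.Equal, used in the updated TestParsePAX assertion, compares two maps by key/value pairs and replaces reflect.DeepEqual for this case. A minimal illustration:

package main

import (
	"fmt"
	"maps"
)

func main() {
	got := map[string]string{"path": "a/b", "size": "5"}
	want := map[string]string{"size": "5", "path": "a/b"}
	// maps.Equal ignores insertion order; it only compares keys and values.
	fmt.Println(maps.Equal(got, want)) // true
}
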
@@ -1136,7 +1134,7 @@ func TestReadOldGNUSparseMap(t *testing.T) {
|
||||
v.input = v.input[copy(blk[:], v.input):]
|
||||
tr := Reader{r: bytes.NewReader(v.input)}
|
||||
got, err := tr.readOldGNUSparseMap(&hdr, &blk)
|
||||
if !slices.Equal(got, v.wantMap) {
|
||||
if !equalSparseEntries(got, v.wantMap) {
|
||||
t.Errorf("test %d, readOldGNUSparseMap(): got %v, want %v", i, got, v.wantMap)
|
||||
}
|
||||
if err != v.wantErr {
|
||||
@@ -1327,7 +1325,7 @@ func TestReadGNUSparsePAXHeaders(t *testing.T) {
|
||||
r := strings.NewReader(v.inputData + "#") // Add canary byte
|
||||
tr := Reader{curr: ®FileReader{r, int64(r.Len())}}
|
||||
got, err := tr.readGNUSparsePAXHeaders(&hdr)
|
||||
if !slices.Equal(got, v.wantMap) {
|
||||
if !equalSparseEntries(got, v.wantMap) {
|
||||
t.Errorf("test %d, readGNUSparsePAXHeaders(): got %v, want %v", i, got, v.wantMap)
|
||||
}
|
||||
if err != v.wantErr {
|
||||
|
||||
@@ -23,30 +23,30 @@ func init() {
|
||||
// The downside is that renaming uname or gname by the OS never takes effect.
|
||||
var userMap, groupMap sync.Map // map[int]string
|
||||
|
||||
func statUnix(fi fs.FileInfo, h *Header, doNameLookups bool) error {
|
||||
func statUnix(fi fs.FileInfo, h *Header) error {
|
||||
sys, ok := fi.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
h.Uid = int(sys.Uid)
|
||||
h.Gid = int(sys.Gid)
|
||||
if doNameLookups {
|
||||
// Best effort at populating Uname and Gname.
|
||||
// The os/user functions may fail for any number of reasons
|
||||
// (not implemented on that platform, cgo not enabled, etc).
|
||||
if u, ok := userMap.Load(h.Uid); ok {
|
||||
h.Uname = u.(string)
|
||||
} else if u, err := user.LookupId(strconv.Itoa(h.Uid)); err == nil {
|
||||
h.Uname = u.Username
|
||||
userMap.Store(h.Uid, h.Uname)
|
||||
}
|
||||
if g, ok := groupMap.Load(h.Gid); ok {
|
||||
h.Gname = g.(string)
|
||||
} else if g, err := user.LookupGroupId(strconv.Itoa(h.Gid)); err == nil {
|
||||
h.Gname = g.Name
|
||||
groupMap.Store(h.Gid, h.Gname)
|
||||
}
|
||||
|
||||
// Best effort at populating Uname and Gname.
|
||||
// The os/user functions may fail for any number of reasons
|
||||
// (not implemented on that platform, cgo not enabled, etc).
|
||||
if u, ok := userMap.Load(h.Uid); ok {
|
||||
h.Uname = u.(string)
|
||||
} else if u, err := user.LookupId(strconv.Itoa(h.Uid)); err == nil {
|
||||
h.Uname = u.Username
|
||||
userMap.Store(h.Uid, h.Uname)
|
||||
}
|
||||
if g, ok := groupMap.Load(h.Gid); ok {
|
||||
h.Gname = g.(string)
|
||||
} else if g, err := user.LookupGroupId(strconv.Itoa(h.Gid)); err == nil {
|
||||
h.Gname = g.Name
|
||||
groupMap.Store(h.Gid, h.Gname)
|
||||
}
|
||||
|
||||
h.AccessTime = statAtime(sys)
|
||||
h.ChangeTime = statCtime(sys)
|
||||
|
||||
|
||||
@@ -73,7 +73,7 @@ func (f *formatter) formatString(b []byte, s string) {
|
||||
// in the V7 path field as a directory even though the full path
|
||||
// recorded elsewhere (e.g., via PAX record) contains no trailing slash.
|
||||
if len(s) > len(b) && b[len(b)-1] == '/' {
|
||||
n := len(strings.TrimRight(s[:len(b)-1], "/"))
|
||||
n := len(strings.TrimRight(s[:len(b)], "/"))
|
||||
b[n] = 0 // Replace trailing slash with NUL terminator
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,13 +11,11 @@ import (
|
||||
"internal/testenv"
|
||||
"io"
|
||||
"io/fs"
|
||||
"maps"
|
||||
"math"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"slices"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -100,6 +98,10 @@ func (f *testFile) Seek(pos int64, whence int) (int64, error) {
|
||||
return f.pos, nil
|
||||
}
|
||||
|
||||
func equalSparseEntries(x, y []sparseEntry) bool {
|
||||
return (len(x) == 0 && len(y) == 0) || reflect.DeepEqual(x, y)
|
||||
}
|
||||
|
||||
func TestSparseEntries(t *testing.T) {
|
||||
vectors := []struct {
|
||||
in []sparseEntry
|
||||
@@ -196,11 +198,11 @@ func TestSparseEntries(t *testing.T) {
|
||||
continue
|
||||
}
|
||||
gotAligned := alignSparseEntries(append([]sparseEntry{}, v.in...), v.size)
|
||||
if !slices.Equal(gotAligned, v.wantAligned) {
|
||||
if !equalSparseEntries(gotAligned, v.wantAligned) {
|
||||
t.Errorf("test %d, alignSparseEntries():\ngot %v\nwant %v", i, gotAligned, v.wantAligned)
|
||||
}
|
||||
gotInverted := invertSparseEntries(append([]sparseEntry{}, v.in...), v.size)
|
||||
if !slices.Equal(gotInverted, v.wantInverted) {
|
||||
if !equalSparseEntries(gotInverted, v.wantInverted) {
|
||||
t.Errorf("test %d, inverseSparseEntries():\ngot %v\nwant %v", i, gotInverted, v.wantInverted)
|
||||
}
|
||||
}
|
||||
@@ -742,7 +744,7 @@ func TestHeaderAllowedFormats(t *testing.T) {
|
||||
if formats != v.formats {
|
||||
t.Errorf("test %d, allowedFormats(): got %v, want %v", i, formats, v.formats)
|
||||
}
|
||||
if formats&FormatPAX > 0 && !maps.Equal(paxHdrs, v.paxHdrs) && !(len(paxHdrs) == 0 && len(v.paxHdrs) == 0) {
|
||||
if formats&FormatPAX > 0 && !reflect.DeepEqual(paxHdrs, v.paxHdrs) && !(len(paxHdrs) == 0 && len(v.paxHdrs) == 0) {
|
||||
t.Errorf("test %d, allowedFormats():\ngot %v\nwant %s", i, paxHdrs, v.paxHdrs)
|
||||
}
|
||||
if (formats != FormatUnknown) && (err != nil) {
|
||||
@@ -846,53 +848,3 @@ func Benchmark(b *testing.B) {
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
var _ fileInfoNames = fileInfoNames{}
|
||||
|
||||
type fileInfoNames struct{}
|
||||
|
||||
func (f *fileInfoNames) Name() string {
|
||||
return "tmp"
|
||||
}
|
||||
|
||||
func (f *fileInfoNames) Size() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (f *fileInfoNames) Mode() fs.FileMode {
|
||||
return 0777
|
||||
}
|
||||
|
||||
func (f *fileInfoNames) ModTime() time.Time {
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
func (f *fileInfoNames) IsDir() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (f *fileInfoNames) Sys() any {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *fileInfoNames) Uname() (string, error) {
|
||||
return "Uname", nil
|
||||
}
|
||||
|
||||
func (f *fileInfoNames) Gname() (string, error) {
|
||||
return "Gname", nil
|
||||
}
|
||||
|
||||
func TestFileInfoHeaderUseFileInfoNames(t *testing.T) {
|
||||
info := &fileInfoNames{}
|
||||
header, err := FileInfoHeader(info, "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if header.Uname != "Uname" {
|
||||
t.Fatalf("header.Uname: got %s, want %s", header.Uname, "Uname")
|
||||
}
|
||||
if header.Gname != "Gname" {
|
||||
t.Fatalf("header.Gname: got %s, want %s", header.Gname, "Gname")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,19 +5,16 @@
|
||||
package tar
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"maps"
|
||||
"path"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Writer provides sequential writing of a tar archive.
|
||||
// [Writer.WriteHeader] begins a new file with the provided [Header],
|
||||
// Write.WriteHeader begins a new file with the provided Header,
|
||||
// and then Writer can be treated as an io.Writer to supply that file's data.
|
||||
type Writer struct {
|
||||
w io.Writer
|
||||
@@ -47,7 +44,7 @@ type fileWriter interface {
|
||||
// Flush finishes writing the current file's block padding.
|
||||
// The current file must be fully written before Flush can be called.
|
||||
//
|
||||
// This is unnecessary as the next call to [Writer.WriteHeader] or [Writer.Close]
|
||||
// This is unnecessary as the next call to WriteHeader or Close
|
||||
// will implicitly flush out the file's padding.
|
||||
func (tw *Writer) Flush() error {
|
||||
if tw.err != nil {
|
||||
@@ -170,10 +167,16 @@ func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error {
|
||||
// Write PAX records to the output.
|
||||
isGlobal := hdr.Typeflag == TypeXGlobalHeader
|
||||
if len(paxHdrs) > 0 || isGlobal {
|
||||
// Sort keys for deterministic ordering.
|
||||
var keys []string
|
||||
for k := range paxHdrs {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
// Write each record to a buffer.
|
||||
var buf strings.Builder
|
||||
// Sort keys for deterministic ordering.
|
||||
for _, k := range slices.Sorted(maps.Keys(paxHdrs)) {
|
||||
for _, k := range keys {
|
||||
rec, err := formatPAXRecord(k, paxHdrs[k])
|
||||
if err != nil {
|
||||
return err
|
||||
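The writePAXHeader change collapses the collect-keys-then-sort.Strings step into a single expression. A small sketch of iterating a map in sorted key order with slices.Sorted(maps.Keys(m)), assuming a Go 1.23+ toolchain:

package main

import (
	"fmt"
	"maps"
	"slices"
)

func main() {
	paxHdrs := map[string]string{"path": "dir/file", "mtime": "1700000000.0", "size": "42"}
	// slices.Sorted(maps.Keys(m)) yields the keys in ascending order,
	// giving the same deterministic ordering as the removed sort.Strings call.
	for _, k := range slices.Sorted(maps.Keys(paxHdrs)) {
		fmt.Println(k, "=", paxHdrs[k])
	}
}
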
@@ -400,49 +403,6 @@ func (tw *Writer) writeRawHeader(blk *block, size int64, flag byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddFS adds the files from fs.FS to the archive.
|
||||
// It walks the directory tree starting at the root of the filesystem
|
||||
// adding each file to the tar archive while maintaining the directory structure.
|
||||
func (tw *Writer) AddFS(fsys fs.FS) error {
|
||||
return fs.WalkDir(fsys, ".", func(name string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if name == "." {
|
||||
return nil
|
||||
}
|
||||
info, err := d.Info()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// TODO(#49580): Handle symlinks when fs.ReadLinkFS is available.
|
||||
if !d.IsDir() && !info.Mode().IsRegular() {
|
||||
return errors.New("tar: cannot add non-regular file")
|
||||
}
|
||||
h, err := FileInfoHeader(info, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
h.Name = name
|
||||
if d.IsDir() {
|
||||
h.Name += "/"
|
||||
}
|
||||
if err := tw.WriteHeader(h); err != nil {
|
||||
return err
|
||||
}
|
||||
if d.IsDir() {
|
||||
return nil
|
||||
}
|
||||
f, err := fsys.Open(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
_, err = io.Copy(tw, f)
|
||||
return err
|
||||
})
|
||||
}
|
||||
|
||||
// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
|
||||
// If the path is not splittable, then it will return ("", "", false).
|
||||
func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
|
||||
@@ -465,12 +425,12 @@ func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
|
||||
}
|
||||
|
||||
// Write writes to the current file in the tar archive.
|
||||
// Write returns the error [ErrWriteTooLong] if more than
|
||||
// Header.Size bytes are written after [Writer.WriteHeader].
|
||||
// Write returns the error ErrWriteTooLong if more than
|
||||
// Header.Size bytes are written after WriteHeader.
|
||||
//
|
||||
// Calling Write on special types like [TypeLink], [TypeSymlink], [TypeChar],
|
||||
// [TypeBlock], [TypeDir], and [TypeFifo] returns (0, [ErrWriteTooLong]) regardless
|
||||
// of what the [Header.Size] claims.
|
||||
// Calling Write on special types like TypeLink, TypeSymlink, TypeChar,
|
||||
// TypeBlock, TypeDir, and TypeFifo returns (0, ErrWriteTooLong) regardless
|
||||
// of what the Header.Size claims.
|
||||
func (tw *Writer) Write(b []byte) (int, error) {
|
||||
if tw.err != nil {
|
||||
return 0, tw.err
|
||||
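As the Write documentation above says, writes are bounded by the Header.Size set in WriteHeader. A short, hedged usage sketch writing one file to an in-memory archive:

package main

import (
	"archive/tar"
	"bytes"
	"log"
)

func main() {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)

	data := []byte("hello, tar\n")
	hdr := &tar.Header{Name: "hello.txt", Mode: 0o644, Size: int64(len(data))}
	if err := tw.WriteHeader(hdr); err != nil {
		log.Fatal(err)
	}
	// Writing more than Header.Size bytes after WriteHeader would return
	// ErrWriteTooLong, per the documentation change above.
	if _, err := tw.Write(data); err != nil {
		log.Fatal(err)
	}
	if err := tw.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("archive is %d bytes", buf.Len())
}
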
@@ -504,7 +464,7 @@ func (tw *Writer) readFrom(r io.Reader) (int64, error) {
|
||||
}
|
||||
|
||||
// Close closes the tar archive by flushing the padding, and writing the footer.
|
||||
// If the current file (from a prior call to [Writer.WriteHeader]) is not fully written,
|
||||
// If the current file (from a prior call to WriteHeader) is not fully written,
|
||||
// then this returns an error.
|
||||
func (tw *Writer) Close() error {
|
||||
if tw.err == ErrWriteAfterClose {
|
||||
@@ -669,7 +629,6 @@ func (sw *sparseFileWriter) ReadFrom(r io.Reader) (n int64, err error) {
|
||||
func (sw sparseFileWriter) logicalRemaining() int64 {
|
||||
return sw.sp[len(sw.sp)-1].endOffset() - sw.pos
|
||||
}
|
||||
|
||||
func (sw sparseFileWriter) physicalRemaining() int64 {
|
||||
return sw.fw.physicalRemaining()
|
||||
}
|
||||
|
||||
@@ -9,15 +9,12 @@ import (
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"io"
|
||||
"io/fs"
|
||||
"maps"
|
||||
"os"
|
||||
"path"
|
||||
"slices"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
"testing/fstest"
|
||||
"testing/iotest"
|
||||
"time"
|
||||
)
|
||||
@@ -582,10 +579,10 @@ func TestPaxSymlink(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
hdr, err := FileInfoHeader(fileinfo, "")
|
||||
hdr.Typeflag = TypeSymlink
|
||||
if err != nil {
|
||||
t.Fatalf("os.Stat:1 %v", err)
|
||||
}
|
||||
hdr.Typeflag = TypeSymlink
|
||||
// Force a PAX long linkname to be written
|
||||
longLinkname := strings.Repeat("1234567890/1234567890", 10)
|
||||
hdr.Linkname = longLinkname
|
||||
@@ -703,7 +700,7 @@ func TestPaxXattrs(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !maps.Equal(hdr.Xattrs, xattrs) {
|
||||
if !reflect.DeepEqual(hdr.Xattrs, xattrs) {
|
||||
t.Fatalf("xattrs did not survive round trip: got %+v, want %+v",
|
||||
hdr.Xattrs, xattrs)
|
||||
}
|
||||
@@ -750,7 +747,7 @@ func TestPaxHeadersSorted(t *testing.T) {
|
||||
bytes.Index(buf.Bytes(), []byte("foo=foo")),
|
||||
bytes.Index(buf.Bytes(), []byte("qux=qux")),
|
||||
}
|
||||
if !slices.IsSorted(indices) {
|
||||
if !sort.IntsAreSorted(indices) {
|
||||
t.Fatal("PAX headers are not sorted")
|
||||
}
|
||||
}
|
||||
@@ -762,10 +759,10 @@ func TestUSTARLongName(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
hdr, err := FileInfoHeader(fileinfo, "")
|
||||
hdr.Typeflag = TypeDir
|
||||
if err != nil {
|
||||
t.Fatalf("os.Stat:1 %v", err)
|
||||
}
|
||||
hdr.Typeflag = TypeDir
|
||||
// Force a PAX long name to be written. The name was taken from a practical example
|
||||
// that fails and replaced ever char through numbers to anonymize the sample.
|
||||
longName := "/0000_0000000/00000-000000000/0000_0000000/00000-0000000000000/0000_0000000/00000-0000000-00000000/0000_0000000/00000000/0000_0000000/000/0000_0000000/00000000v00/0000_0000000/000000/0000_0000000/0000000/0000_0000000/00000y-00/0000/0000/00000000/0x000000/"
|
||||
@@ -1336,93 +1333,3 @@ func TestFileWriter(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriterAddFS(t *testing.T) {
|
||||
fsys := fstest.MapFS{
|
||||
"emptyfolder": {Mode: 0o755 | os.ModeDir},
|
||||
"file.go": {Data: []byte("hello")},
|
||||
"subfolder/another.go": {Data: []byte("world")},
|
||||
// Notably missing here is the "subfolder" directory. This makes sure even
|
||||
// if we don't have a subfolder directory listed.
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
tw := NewWriter(&buf)
|
||||
if err := tw.AddFS(fsys); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := tw.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Add subfolder into fsys to match what we'll read from the tar.
|
||||
fsys["subfolder"] = &fstest.MapFile{Mode: 0o555 | os.ModeDir}
|
||||
|
||||
// Test that we can get the files back from the archive
|
||||
tr := NewReader(&buf)
|
||||
|
||||
names := make([]string, 0, len(fsys))
|
||||
for name := range fsys {
|
||||
names = append(names, name)
|
||||
}
|
||||
sort.Strings(names)
|
||||
|
||||
entriesLeft := len(fsys)
|
||||
for _, name := range names {
|
||||
entriesLeft--
|
||||
|
||||
entryInfo, err := fsys.Stat(name)
|
||||
if err != nil {
|
||||
t.Fatalf("getting entry info error: %v", err)
|
||||
}
|
||||
hdr, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
break // End of archive
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tmpName := name
|
||||
if entryInfo.IsDir() {
|
||||
tmpName += "/"
|
||||
}
|
||||
if hdr.Name != tmpName {
|
||||
t.Errorf("test fs has filename %v; archive header has %v",
|
||||
name, hdr.Name)
|
||||
}
|
||||
|
||||
if entryInfo.Mode() != hdr.FileInfo().Mode() {
|
||||
t.Errorf("%s: test fs has mode %v; archive header has %v",
|
||||
name, entryInfo.Mode(), hdr.FileInfo().Mode())
|
||||
}
|
||||
|
||||
if entryInfo.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
data, err := io.ReadAll(tr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
origdata := fsys[name].Data
|
||||
if string(data) != string(origdata) {
|
||||
t.Fatalf("test fs has file content %v; archive header has %v",
|
||||
data, origdata)
|
||||
}
|
||||
}
|
||||
if entriesLeft > 0 {
|
||||
t.Fatalf("not all entries are in the archive")
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriterAddFSNonRegularFiles(t *testing.T) {
|
||||
fsys := fstest.MapFS{
|
||||
"device": {Data: []byte("hello"), Mode: 0755 | fs.ModeDevice},
|
||||
"symlink": {Data: []byte("world"), Mode: 0755 | fs.ModeSymlink},
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
tw := NewWriter(&buf)
|
||||
if err := tw.AddFS(fsys); err == nil {
|
||||
t.Fatal("expected error, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,7 +16,7 @@ import (
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -48,15 +48,15 @@ type Reader struct {
|
||||
fileList []fileListEntry
|
||||
}
|
||||
|
||||
// A ReadCloser is a [Reader] that must be closed when no longer needed.
|
||||
// A ReadCloser is a Reader that must be closed when no longer needed.
|
||||
type ReadCloser struct {
|
||||
f *os.File
|
||||
Reader
|
||||
}
|
||||
|
||||
// A File is a single file in a ZIP archive.
|
||||
// The file information is in the embedded [FileHeader].
|
||||
// The file content can be accessed by calling [File.Open].
|
||||
// The file information is in the embedded FileHeader.
|
||||
// The file content can be accessed by calling Open.
|
||||
type File struct {
|
||||
FileHeader
|
||||
zip *Reader
|
||||
@@ -93,16 +93,16 @@ func OpenReader(name string) (*ReadCloser, error) {
|
||||
return r, err
|
||||
}
|
||||
|
||||
// NewReader returns a new [Reader] reading from r, which is assumed to
|
||||
// NewReader returns a new Reader reading from r, which is assumed to
|
||||
// have the given size in bytes.
|
||||
//
|
||||
// If any file inside the archive uses a non-local name
|
||||
// (as defined by [filepath.IsLocal]) or a name containing backslashes
|
||||
// and the GODEBUG environment variable contains `zipinsecurepath=0`,
|
||||
// NewReader returns the reader with an [ErrInsecurePath] error.
|
||||
// NewReader returns the reader with an ErrInsecurePath error.
|
||||
// A future version of Go may introduce this behavior by default.
|
||||
// Programs that want to accept non-local names can ignore
|
||||
// the [ErrInsecurePath] error and use the returned reader.
|
||||
// the ErrInsecurePath error and use the returned reader.
|
||||
func NewReader(r io.ReaderAt, size int64) (*Reader, error) {
|
||||
if size < 0 {
|
||||
return nil, errors.New("zip: size cannot be negative")
|
||||
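NewReader needs the archive's total size because the central directory sits at the end of the file. A self-contained sketch that builds a tiny archive in memory and reads it back:

package main

import (
	"archive/zip"
	"bytes"
	"fmt"
	"log"
)

func main() {
	// Build a tiny archive in memory so NewReader has something to parse.
	var buf bytes.Buffer
	zw := zip.NewWriter(&buf)
	w, err := zw.Create("greeting.txt")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write([]byte("hello")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	// NewReader takes the full size up front so it can locate the
	// end-of-central-directory record.
	zr, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
	if err != nil {
		log.Fatal(err)
	}
	for _, f := range zr.File {
		fmt.Println(f.Name, f.UncompressedSize64)
	}
}
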
@@ -178,7 +178,7 @@ func (r *Reader) init(rdr io.ReaderAt, size int64) error {
|
||||
|
||||
// RegisterDecompressor registers or overrides a custom decompressor for a
|
||||
// specific method ID. If a decompressor for a given method is not found,
|
||||
// [Reader] will default to looking up the decompressor at the package level.
|
||||
// Reader will default to looking up the decompressor at the package level.
|
||||
func (r *Reader) RegisterDecompressor(method uint16, dcomp Decompressor) {
|
||||
if r.decompressors == nil {
|
||||
r.decompressors = make(map[uint16]Decompressor)
|
||||
@@ -202,7 +202,7 @@ func (rc *ReadCloser) Close() error {
|
||||
// DataOffset returns the offset of the file's possibly-compressed
|
||||
// data, relative to the beginning of the zip file.
|
||||
//
|
||||
// Most callers should instead use [File.Open], which transparently
|
||||
// Most callers should instead use Open, which transparently
|
||||
// decompresses data and verifies checksums.
|
||||
func (f *File) DataOffset() (offset int64, err error) {
|
||||
bodyOffset, err := f.findBodyOffset()
|
||||
@@ -212,7 +212,7 @@ func (f *File) DataOffset() (offset int64, err error) {
|
||||
return f.headerOffset + bodyOffset, nil
|
||||
}
|
||||
|
||||
// Open returns a [ReadCloser] that provides access to the [File]'s contents.
|
||||
// Open returns a ReadCloser that provides access to the File's contents.
|
||||
// Multiple files may be read concurrently.
|
||||
func (f *File) Open() (io.ReadCloser, error) {
|
||||
bodyOffset, err := f.findBodyOffset()
|
||||
@@ -255,7 +255,7 @@ func (f *File) Open() (io.ReadCloser, error) {
|
||||
return rc, nil
|
||||
}
|
||||
|
||||
// OpenRaw returns a [Reader] that provides access to the [File]'s contents without
|
||||
// OpenRaw returns a Reader that provides access to the File's contents without
|
||||
// decompression.
|
||||
func (f *File) OpenRaw() (io.Reader, error) {
|
||||
bodyOffset, err := f.findBodyOffset()
|
||||
@@ -469,8 +469,8 @@ parseExtras:
|
||||
|
||||
const ticksPerSecond = 1e7 // Windows timestamp resolution
|
||||
ts := int64(attrBuf.uint64()) // ModTime since Windows epoch
|
||||
secs := ts / ticksPerSecond
|
||||
nsecs := (1e9 / ticksPerSecond) * (ts % ticksPerSecond)
|
||||
secs := int64(ts / ticksPerSecond)
|
||||
nsecs := (1e9 / ticksPerSecond) * int64(ts%ticksPerSecond)
|
||||
epoch := time.Date(1601, time.January, 1, 0, 0, 0, 0, time.UTC)
|
||||
modified = time.Unix(epoch.Unix()+secs, nsecs)
|
||||
}
|
||||
@@ -699,13 +699,9 @@ func findSignatureInBlock(b []byte) int {
|
||||
if b[i] == 'P' && b[i+1] == 'K' && b[i+2] == 0x05 && b[i+3] == 0x06 {
|
||||
// n is length of comment
|
||||
n := int(b[i+directoryEndLen-2]) | int(b[i+directoryEndLen-1])<<8
|
||||
if n+directoryEndLen+i > len(b) {
|
||||
// Truncated comment.
|
||||
// Some parsers (such as Info-ZIP) ignore the truncated comment
|
||||
// rather than treating it as a hard error.
|
||||
return -1
|
||||
if n+directoryEndLen+i <= len(b) {
|
||||
return i
|
||||
}
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
@@ -862,19 +858,14 @@ func (r *Reader) initFileList() {
|
||||
}
|
||||
}
|
||||
|
||||
slices.SortFunc(r.fileList, func(a, b fileListEntry) int {
|
||||
return fileEntryCompare(a.name, b.name)
|
||||
})
|
||||
sort.Slice(r.fileList, func(i, j int) bool { return fileEntryLess(r.fileList[i].name, r.fileList[j].name) })
|
||||
})
|
||||
}
|
||||
|
||||
func fileEntryCompare(x, y string) int {
|
||||
func fileEntryLess(x, y string) bool {
|
||||
xdir, xelem, _ := split(x)
|
||||
ydir, yelem, _ := split(y)
|
||||
if xdir != ydir {
|
||||
return strings.Compare(xdir, ydir)
|
||||
}
|
||||
return strings.Compare(xelem, yelem)
|
||||
return xdir < ydir || xdir == ydir && xelem < yelem
|
||||
}
|
||||
|
||||
// Open opens the named file in the ZIP archive,
|
||||
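fileEntryCompare above expresses a two-key ordering as a three-way comparison for slices.SortFunc. The same shape in miniature (the entry type here is illustrative, not from the CL):

package main

import (
	"fmt"
	"slices"
	"strings"
)

type entry struct{ dir, name string }

func main() {
	files := []entry{
		{"b", "a.txt"},
		{"a", "z.txt"},
		{"a", "b.txt"},
	}
	// slices.SortFunc takes a three-way comparator, so the two-key ordering
	// can be written with strings.Compare, much like fileEntryCompare above.
	slices.SortFunc(files, func(x, y entry) int {
		if c := strings.Compare(x.dir, y.dir); c != 0 {
			return c
		}
		return strings.Compare(x.name, y.name)
	})
	fmt.Println(files)
}
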
@@ -902,8 +893,14 @@ func (r *Reader) Open(name string) (fs.File, error) {
|
||||
}
|
||||
|
||||
func split(name string) (dir, elem string, isDir bool) {
|
||||
name, isDir = strings.CutSuffix(name, "/")
|
||||
i := strings.LastIndexByte(name, '/')
|
||||
if len(name) > 0 && name[len(name)-1] == '/' {
|
||||
isDir = true
|
||||
name = name[:len(name)-1]
|
||||
}
|
||||
i := len(name) - 1
|
||||
for i >= 0 && name[i] != '/' {
|
||||
i--
|
||||
}
|
||||
if i < 0 {
|
||||
return ".", name, isDir
|
||||
}
|
||||
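The rewritten split uses strings.CutSuffix to both trim a trailing slash and report whether one was present, and strings.LastIndexByte instead of a manual backwards scan. A standalone mirror of that helper (the name splitPath is hypothetical, for illustration only):

package main

import (
	"fmt"
	"strings"
)

// splitPath mirrors the reshaped zip split helper: CutSuffix trims the "/"
// and reports it, LastIndexByte finds the final path separator.
func splitPath(name string) (dir, elem string, isDir bool) {
	name, isDir = strings.CutSuffix(name, "/")
	i := strings.LastIndexByte(name, '/')
	if i < 0 {
		return ".", name, isDir
	}
	return name[:i], name[i+1:], isDir
}

func main() {
	fmt.Println(splitPath("a/b/c/")) // a/b c true
	fmt.Println(splitPath("top.txt")) // . top.txt false
}
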

@@ -919,12 +916,9 @@ func (r *Reader) openLookup(name string) *fileListEntry {

dir, elem, _ := split(name)
files := r.fileList
i, _ := slices.BinarySearchFunc(files, dir, func(a fileListEntry, dir string) (ret int) {
idir, ielem, _ := split(a.name)
if dir != idir {
return strings.Compare(idir, dir)
}
return strings.Compare(ielem, elem)
i := sort.Search(len(files), func(i int) bool {
idir, ielem, _ := split(files[i].name)
return idir > dir || idir == dir && ielem >= elem
})
if i < len(files) {
fname := files[i].name

@@ -937,21 +931,13 @@ func (r *Reader) openLookup(name string) *fileListEntry {

func (r *Reader) openReadDir(dir string) []fileListEntry {
files := r.fileList
i, _ := slices.BinarySearchFunc(files, dir, func(a fileListEntry, dir string) int {
idir, _, _ := split(a.name)
if dir != idir {
return strings.Compare(idir, dir)
}
// find the first entry with dir
return +1
i := sort.Search(len(files), func(i int) bool {
idir, _, _ := split(files[i].name)
return idir >= dir
})
j, _ := slices.BinarySearchFunc(files, dir, func(a fileListEntry, dir string) int {
jdir, _, _ := split(a.name)
if dir != jdir {
return strings.Compare(jdir, dir)
}
// find the last entry with dir
return -1
j := sort.Search(len(files), func(j int) bool {
jdir, _, _ := split(files[j].name)
return jdir > dir
})
return files[i:j]
}

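openLookup and openReadDir move from sort.Search to slices.BinarySearchFunc, and openReadDir finds a directory's listing as the half-open range between two searches: one comparator treats every entry of the target directory as greater than the target (so the search lands on the first such entry), the other treats them as smaller (so it lands just past the last). A hedged sketch of that idea over plain strings, using path.Dir instead of the package's split:

package main

import (
	"fmt"
	"path"
	"slices"
	"strings"
)

// dirRange returns the half-open range [lo, hi) of names that live directly in dir,
// assuming names is sorted directory-first as in the hunk above.
func dirRange(names []string, dir string) (lo, hi int) {
	lo, _ = slices.BinarySearchFunc(names, dir, func(name, dir string) int {
		if d := path.Dir(name); d != dir {
			return strings.Compare(d, dir)
		}
		return +1 // entries of dir sort "after" the target: lo is the first of them
	})
	hi, _ = slices.BinarySearchFunc(names, dir, func(name, dir string) int {
		if d := path.Dir(name); d != dir {
			return strings.Compare(d, dir)
		}
		return -1 // entries of dir sort "before" the target: hi is just past the last
	})
	return lo, hi
}

func main() {
	names := []string{"a.txt", "b.txt", "sub/x.txt", "sub/y.txt", "zzz/q.txt"}
	lo, hi := dirRange(names, "sub")
	fmt.Println(names[lo:hi]) // [sub/x.txt sub/y.txt]
}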

@@ -13,8 +13,8 @@ import (
"io/fs"
"os"
"path/filepath"
"reflect"
"regexp"
"slices"
"strings"
"testing"
"testing/fstest"

@@ -570,14 +570,6 @@ var tests = []ZipTest{
},
},
},
// Issue 66869: Don't skip over an EOCDR with a truncated comment.
// The test file sneakily hides a second EOCDR before the first one;
// previously we would extract one file ("file") from this archive,
// while most other tools would reject the file or extract a different one ("FILE").
{
Name: "comment-truncated.zip",
Error: ErrFormat,
},
}

func TestReader(t *testing.T) {
@@ -912,7 +904,9 @@ func returnRecursiveZip() (r io.ReaderAt, size int64) {
|
||||
// type zeros struct{}
|
||||
//
|
||||
// func (zeros) Read(b []byte) (int, error) {
|
||||
// clear(b)
|
||||
// for i := range b {
|
||||
// b[i] = 0
|
||||
// }
|
||||
// return len(b), nil
|
||||
// }
|
||||
//
|
||||
@@ -1192,7 +1186,7 @@ func TestIssue12449(t *testing.T) {
|
||||
0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
}
|
||||
// Read in the archive.
|
||||
_, err := NewReader(bytes.NewReader(data), int64(len(data)))
|
||||
_, err := NewReader(bytes.NewReader([]byte(data)), int64(len(data)))
|
||||
if err != nil {
|
||||
t.Errorf("Error reading the archive: %v", err)
|
||||
}
|
||||
@@ -1274,7 +1268,7 @@ func TestFSWalk(t *testing.T) {
|
||||
} else if !test.wantErr && sawErr {
|
||||
t.Error("unexpected error")
|
||||
}
|
||||
if test.want != nil && !slices.Equal(files, test.want) {
|
||||
if test.want != nil && !reflect.DeepEqual(files, test.want) {
|
||||
t.Errorf("got %v want %v", files, test.want)
|
||||
}
|
||||
})
|
||||
@@ -1339,7 +1333,7 @@ func TestCVE202127919(t *testing.T) {
|
||||
0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x39, 0x00,
|
||||
0x00, 0x00, 0x59, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
}
|
||||
r, err := NewReader(bytes.NewReader(data), int64(len(data)))
|
||||
r, err := NewReader(bytes.NewReader([]byte(data)), int64(len(data)))
|
||||
if err != ErrInsecurePath {
|
||||
t.Fatalf("Error reading the archive: %v", err)
|
||||
}
|
||||
@@ -1565,7 +1559,7 @@ func TestCVE202141772(t *testing.T) {
|
||||
0x00, 0x04, 0x00, 0x04, 0x00, 0x31, 0x01, 0x00,
|
||||
0x00, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
}
|
||||
r, err := NewReader(bytes.NewReader(data), int64(len(data)))
|
||||
r, err := NewReader(bytes.NewReader([]byte(data)), int64(len(data)))
|
||||
if err != ErrInsecurePath {
|
||||
t.Fatalf("Error reading the archive: %v", err)
|
||||
}
|
||||
@@ -1580,7 +1574,7 @@ func TestCVE202141772(t *testing.T) {
|
||||
t.Errorf("Opening %q with fs.FS API succeeded", f.Name)
|
||||
}
|
||||
}
|
||||
if !slices.Equal(names, entryNames) {
|
||||
if !reflect.DeepEqual(names, entryNames) {
|
||||
t.Errorf("Unexpected file entries: %q", names)
|
||||
}
|
||||
if _, err := r.Open(""); err == nil {
|
||||
@@ -1693,7 +1687,7 @@ func TestInsecurePaths(t *testing.T) {
|
||||
for _, f := range zr.File {
|
||||
gotPaths = append(gotPaths, f.Name)
|
||||
}
|
||||
if !slices.Equal(gotPaths, []string{path}) {
|
||||
if !reflect.DeepEqual(gotPaths, []string{path}) {
|
||||
t.Errorf("NewReader for archive with file %q: got files %q", path, gotPaths)
|
||||
continue
|
||||
}
|
||||
@@ -1718,7 +1712,7 @@ func TestDisableInsecurePathCheck(t *testing.T) {
|
||||
for _, f := range zr.File {
|
||||
gotPaths = append(gotPaths, f.Name)
|
||||
}
|
||||
if want := []string{name}; !slices.Equal(gotPaths, want) {
|
||||
if want := []string{name}; !reflect.DeepEqual(gotPaths, want) {
|
||||
t.Errorf("NewReader with zipinsecurepath=1: got files %q, want %q", gotPaths, want)
|
||||
}
|
||||
}
|
||||
@@ -1828,7 +1822,7 @@ func TestBaseOffsetPlusOverflow(t *testing.T) {
|
||||
}
|
||||
}()
|
||||
// Previously, this would trigger a panic as we attempt to read from
|
||||
// an io.SectionReader which would access a slice at a negative offset
|
||||
// a io.SectionReader which would access a slice at a negative offset
|
||||
// as the section reader offset & size were < 0.
|
||||
NewReader(bytes.NewReader(data), int64(len(data))+1875)
|
||||
}
|
||||
|
||||
@@ -19,7 +19,7 @@ import (
|
||||
type Compressor func(w io.Writer) (io.WriteCloser, error)
|
||||
|
||||
// A Decompressor returns a new decompressing reader, reading from r.
|
||||
// The [io.ReadCloser]'s Close method must be used to release associated resources.
|
||||
// The ReadCloser's Close method must be used to release associated resources.
|
||||
// The Decompressor itself must be safe to invoke from multiple goroutines
|
||||
// simultaneously, but each returned reader will be used only by
|
||||
// one goroutine at a time.
|
||||
@@ -115,7 +115,7 @@ func init() {
|
||||
}
|
||||
|
||||
// RegisterDecompressor allows custom decompressors for a specified method ID.
|
||||
// The common methods [Store] and [Deflate] are built in.
|
||||
// The common methods Store and Deflate are built in.
|
||||
func RegisterDecompressor(method uint16, dcomp Decompressor) {
|
||||
if _, dup := decompressors.LoadOrStore(method, dcomp); dup {
|
||||
panic("decompressor already registered")
|
||||
@@ -123,7 +123,7 @@ func RegisterDecompressor(method uint16, dcomp Decompressor) {
|
||||
}
|
||||
|
||||
// RegisterCompressor registers custom compressors for a specified method ID.
|
||||
// The common methods [Store] and [Deflate] are built in.
|
||||
// The common methods Store and Deflate are built in.
|
||||
func RegisterCompressor(method uint16, comp Compressor) {
|
||||
if _, dup := compressors.LoadOrStore(method, comp); dup {
|
||||
panic("compressor already registered")
|
||||
|
||||
@@ -17,7 +17,7 @@ for normal archives both fields will be the same. For files requiring
|
||||
the ZIP64 format the 32 bit fields will be 0xffffffff and the 64 bit
|
||||
fields must be used instead.
|
||||
|
||||
[ZIP specification]: https://support.pkware.com/pkzip/appnote
|
||||
[ZIP specification]: https://www.pkware.com/appnote
|
||||
*/
|
||||
package zip
|
||||
|
||||
@@ -82,7 +82,7 @@ const (
|
||||
// FileHeader describes a file within a ZIP file.
|
||||
// See the [ZIP specification] for details.
|
||||
//
|
||||
// [ZIP specification]: https://support.pkware.com/pkzip/appnote
|
||||
// [ZIP specification]: https://www.pkware.com/appnote
|
||||
type FileHeader struct {
|
||||
// Name is the name of the file.
|
||||
//
|
||||
@@ -143,9 +143,9 @@ type FileHeader struct {
|
||||
// Deprecated: Use CompressedSize64 instead.
|
||||
CompressedSize uint32
|
||||
|
||||
// UncompressedSize is the uncompressed size of the file in bytes.
|
||||
// UncompressedSize is the compressed size of the file in bytes.
|
||||
// If either the uncompressed or compressed size of the file
|
||||
// does not fit in 32 bits, UncompressedSize is set to ^uint32(0).
|
||||
// does not fit in 32 bits, CompressedSize is set to ^uint32(0).
|
||||
//
|
||||
// Deprecated: Use UncompressedSize64 instead.
|
||||
UncompressedSize uint32
|
||||
@@ -160,12 +160,12 @@ type FileHeader struct {
|
||||
ExternalAttrs uint32 // Meaning depends on CreatorVersion
|
||||
}
|
||||
|
||||
// FileInfo returns an fs.FileInfo for the [FileHeader].
|
||||
// FileInfo returns an fs.FileInfo for the FileHeader.
|
||||
func (h *FileHeader) FileInfo() fs.FileInfo {
|
||||
return headerFileInfo{h}
|
||||
}
|
||||
|
||||
// headerFileInfo implements [fs.FileInfo].
|
||||
// headerFileInfo implements fs.FileInfo.
|
||||
type headerFileInfo struct {
|
||||
fh *FileHeader
|
||||
}
|
||||
@@ -194,7 +194,7 @@ func (fi headerFileInfo) String() string {
|
||||
return fs.FormatFileInfo(fi)
|
||||
}
|
||||
|
||||
// FileInfoHeader creates a partially-populated [FileHeader] from an
|
||||
// FileInfoHeader creates a partially-populated FileHeader from an
|
||||
// fs.FileInfo.
|
||||
// Because fs.FileInfo's Name method returns only the base name of
|
||||
// the file it describes, it may be necessary to modify the Name field
|
||||
@@ -245,7 +245,7 @@ func timeZone(offset time.Duration) *time.Location {

// msDosTimeToTime converts an MS-DOS date and time into a time.Time.
// The resolution is 2s.
// See: https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-dosdatetimetofiletime
// See: https://msdn.microsoft.com/en-us/library/ms724247(v=VS.85).aspx
func msDosTimeToTime(dosDate, dosTime uint16) time.Time {
return time.Date(
// date bits 0-4: day of month; 5-8: month; 9-15: years since 1980

@@ -265,7 +265,7 @@ func msDosTimeToTime(dosDate, dosTime uint16) time.Time {

// timeToMsDosTime converts a time.Time to an MS-DOS date and time.
// The resolution is 2s.
// See: https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-filetimetodosdatetime
// See: https://msdn.microsoft.com/en-us/library/ms724274(v=VS.85).aspx
func timeToMsDosTime(t time.Time) (fDate uint16, fTime uint16) {
fDate = uint16(t.Day() + int(t.Month())<<5 + (t.Year()-1980)<<9)
fTime = uint16(t.Second()/2 + t.Minute()<<5 + t.Hour()<<11)

@@ -273,17 +273,17 @@ func timeToMsDosTime(t time.Time) (fDate uint16, fTime uint16) {
}

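Both conversion routines above rely on the MS-DOS encoding: a 16-bit date word holding day (bits 0-4), month (bits 5-8) and years since 1980 (bits 9-15), and a 16-bit time word holding seconds/2, minutes and hours, which is why the stated resolution is 2s. A small self-contained sketch of the packing; the helper name is made up for the example:

package main

import (
	"fmt"
	"time"
)

// dosDateTime packs t into the two MS-DOS words described above.
func dosDateTime(t time.Time) (date, tm uint16) {
	date = uint16(t.Day() + int(t.Month())<<5 + (t.Year()-1980)<<9)
	tm = uint16(t.Second()/2 + t.Minute()<<5 + t.Hour()<<11)
	return
}

func main() {
	d, tm := dosDateTime(time.Date(2024, time.March, 7, 15, 4, 30, 0, time.UTC))
	fmt.Printf("date=%#04x time=%#04x\n", d, tm) // seconds are stored halved, so odd seconds are lost
}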
// ModTime returns the modification time in UTC using the legacy
|
||||
// [ModifiedDate] and [ModifiedTime] fields.
|
||||
// ModifiedDate and ModifiedTime fields.
|
||||
//
|
||||
// Deprecated: Use [Modified] instead.
|
||||
// Deprecated: Use Modified instead.
|
||||
func (h *FileHeader) ModTime() time.Time {
|
||||
return msDosTimeToTime(h.ModifiedDate, h.ModifiedTime)
|
||||
}
|
||||
|
||||
// SetModTime sets the [Modified], [ModifiedTime], and [ModifiedDate] fields
|
||||
// SetModTime sets the Modified, ModifiedTime, and ModifiedDate fields
|
||||
// to the given time in UTC.
|
||||
//
|
||||
// Deprecated: Use [Modified] instead.
|
||||
// Deprecated: Use Modified instead.
|
||||
func (h *FileHeader) SetModTime(t time.Time) {
|
||||
t = t.UTC() // Convert to UTC for compatibility
|
||||
h.Modified = t
|
||||
@@ -309,7 +309,7 @@ const (
|
||||
msdosReadOnly = 0x01
|
||||
)
|
||||
|
||||
// Mode returns the permission and mode bits for the [FileHeader].
|
||||
// Mode returns the permission and mode bits for the FileHeader.
|
||||
func (h *FileHeader) Mode() (mode fs.FileMode) {
|
||||
switch h.CreatorVersion >> 8 {
|
||||
case creatorUnix, creatorMacOSX:
|
||||
@@ -323,7 +323,7 @@ func (h *FileHeader) Mode() (mode fs.FileMode) {
|
||||
return mode
|
||||
}
|
||||
|
||||
// SetMode changes the permission and mode bits for the [FileHeader].
|
||||
// SetMode changes the permission and mode bits for the FileHeader.
|
||||
func (h *FileHeader) SetMode(mode fs.FileMode) {
|
||||
h.CreatorVersion = h.CreatorVersion&0xff | creatorUnix<<8
|
||||
h.ExternalAttrs = fileModeToUnixMode(mode) << 16
|
||||

BIN src/archive/zip/testdata/comment-truncated.zip (vendored; binary file not shown)

@@ -11,7 +11,6 @@ import (
|
||||
"hash"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"io/fs"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
@@ -41,7 +40,7 @@ type header struct {
|
||||
raw bool
|
||||
}
|
||||
|
||||
// NewWriter returns a new [Writer] writing a zip file to w.
|
||||
// NewWriter returns a new Writer writing a zip file to w.
|
||||
func NewWriter(w io.Writer) *Writer {
|
||||
return &Writer{cw: &countWriter{w: bufio.NewWriter(w)}}
|
||||
}
|
||||
@@ -64,7 +63,7 @@ func (w *Writer) Flush() error {
|
||||
}
|
||||
|
||||
// SetComment sets the end-of-central-directory comment field.
|
||||
// It can only be called before [Writer.Close].
|
||||
// It can only be called before Close.
|
||||
func (w *Writer) SetComment(comment string) error {
|
||||
if len(comment) > uint16max {
|
||||
return errors.New("zip: Writer.Comment too long")
|
||||
@@ -208,15 +207,14 @@ func (w *Writer) Close() error {
|
||||
}
|
||||
|
||||
// Create adds a file to the zip file using the provided name.
|
||||
// It returns a [Writer] to which the file contents should be written.
|
||||
// The file contents will be compressed using the [Deflate] method.
|
||||
// It returns a Writer to which the file contents should be written.
|
||||
// The file contents will be compressed using the Deflate method.
|
||||
// The name must be a relative path: it must not start with a drive
|
||||
// letter (e.g. C:) or leading slash, and only forward slashes are
|
||||
// allowed. To create a directory instead of a file, add a trailing
|
||||
// slash to the name. Duplicate names will not overwrite previous entries
|
||||
// and are appended to the zip file.
|
||||
// The file's contents must be written to the [io.Writer] before the next
|
||||
// call to [Writer.Create], [Writer.CreateHeader], or [Writer.Close].
|
||||
// slash to the name.
|
||||
// The file's contents must be written to the io.Writer before the next
|
||||
// call to Create, CreateHeader, or Close.
|
||||
func (w *Writer) Create(name string) (io.Writer, error) {
|
||||
header := &FileHeader{
|
||||
Name: name,
|
||||
@@ -263,13 +261,13 @@ func (w *Writer) prepare(fh *FileHeader) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateHeader adds a file to the zip archive using the provided [FileHeader]
|
||||
// for the file metadata. [Writer] takes ownership of fh and may mutate
|
||||
// its fields. The caller must not modify fh after calling [Writer.CreateHeader].
|
||||
// CreateHeader adds a file to the zip archive using the provided FileHeader
|
||||
// for the file metadata. Writer takes ownership of fh and may mutate
|
||||
// its fields. The caller must not modify fh after calling CreateHeader.
|
||||
//
|
||||
// This returns a [Writer] to which the file contents should be written.
|
||||
// This returns a Writer to which the file contents should be written.
|
||||
// The file's contents must be written to the io.Writer before the next
|
||||
// call to [Writer.Create], [Writer.CreateHeader], [Writer.CreateRaw], or [Writer.Close].
|
||||
// call to Create, CreateHeader, CreateRaw, or Close.
|
||||
func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) {
|
||||
if err := w.prepare(fh); err != nil {
|
||||
return nil, err
|
||||
@@ -407,8 +405,8 @@ func writeHeader(w io.Writer, h *header) error {
|
||||
// flags.
|
||||
if h.raw && !h.hasDataDescriptor() {
|
||||
b.uint32(h.CRC32)
|
||||
b.uint32(uint32(min(h.CompressedSize64, uint32max)))
|
||||
b.uint32(uint32(min(h.UncompressedSize64, uint32max)))
|
||||
b.uint32(uint32(min64(h.CompressedSize64, uint32max)))
|
||||
b.uint32(uint32(min64(h.UncompressedSize64, uint32max)))
|
||||
} else {
|
||||
// When this package handle the compression, these values are
|
||||
// always written to the trailing data descriptor.
|
||||
@@ -428,23 +426,26 @@ func writeHeader(w io.Writer, h *header) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// CreateRaw adds a file to the zip archive using the provided [FileHeader] and
|
||||
// returns a [Writer] to which the file contents should be written. The file's
|
||||
// contents must be written to the io.Writer before the next call to [Writer.Create],
|
||||
// [Writer.CreateHeader], [Writer.CreateRaw], or [Writer.Close].
|
||||
func min64(x, y uint64) uint64 {
|
||||
if x < y {
|
||||
return x
|
||||
}
|
||||
return y
|
||||
}
|
||||
|
||||
// CreateRaw adds a file to the zip archive using the provided FileHeader and
|
||||
// returns a Writer to which the file contents should be written. The file's
|
||||
// contents must be written to the io.Writer before the next call to Create,
|
||||
// CreateHeader, CreateRaw, or Close.
|
||||
//
|
||||
// In contrast to [Writer.CreateHeader], the bytes passed to Writer are not compressed.
|
||||
//
|
||||
// CreateRaw's argument is stored in w. If the argument is a pointer to the embedded
|
||||
// [FileHeader] in a [File] obtained from a [Reader] created from in-memory data,
|
||||
// then w will refer to all of that memory.
|
||||
// In contrast to CreateHeader, the bytes passed to Writer are not compressed.
|
||||
func (w *Writer) CreateRaw(fh *FileHeader) (io.Writer, error) {
|
||||
if err := w.prepare(fh); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fh.CompressedSize = uint32(min(fh.CompressedSize64, uint32max))
|
||||
fh.UncompressedSize = uint32(min(fh.UncompressedSize64, uint32max))
|
||||
fh.CompressedSize = uint32(min64(fh.CompressedSize64, uint32max))
|
||||
fh.UncompressedSize = uint32(min64(fh.UncompressedSize64, uint32max))
|
||||
|
||||
h := &header{
|
||||
FileHeader: fh,
|
||||
@@ -469,17 +470,14 @@ func (w *Writer) CreateRaw(fh *FileHeader) (io.Writer, error) {
|
||||
return fw, nil
|
||||
}
|
||||
|
||||
// Copy copies the file f (obtained from a [Reader]) into w. It copies the raw
|
||||
// Copy copies the file f (obtained from a Reader) into w. It copies the raw
|
||||
// form directly bypassing decompression, compression, and validation.
|
||||
func (w *Writer) Copy(f *File) error {
|
||||
r, err := f.OpenRaw()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Copy the FileHeader so w doesn't store a pointer to the data
|
||||
// of f's entire archive. See #65499.
|
||||
fh := f.FileHeader
|
||||
fw, err := w.CreateRaw(&fh)
|
||||
fw, err := w.CreateRaw(&f.FileHeader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -488,7 +486,7 @@ func (w *Writer) Copy(f *File) error {
|
||||
}
|
||||
|
||||
// RegisterCompressor registers or overrides a custom compressor for a specific
|
||||
// method ID. If a compressor for a given method is not found, [Writer] will
|
||||
// method ID. If a compressor for a given method is not found, Writer will
|
||||
// default to looking up the compressor at the package level.
|
||||
func (w *Writer) RegisterCompressor(method uint16, comp Compressor) {
|
||||
if w.compressors == nil {
|
||||
@@ -497,50 +495,6 @@ func (w *Writer) RegisterCompressor(method uint16, comp Compressor) {
w.compressors[method] = comp
}

// AddFS adds the files from fs.FS to the archive.
// It walks the directory tree starting at the root of the filesystem
// adding each file to the zip using deflate while maintaining the directory structure.
func (w *Writer) AddFS(fsys fs.FS) error {
return fs.WalkDir(fsys, ".", func(name string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
if name == "." {
return nil
}
info, err := d.Info()
if err != nil {
return err
}
if !d.IsDir() && !info.Mode().IsRegular() {
return errors.New("zip: cannot add non-regular file")
}
h, err := FileInfoHeader(info)
if err != nil {
return err
}
h.Name = name
if d.IsDir() {
h.Name += "/"
}
h.Method = Deflate
fw, err := w.CreateHeader(h)
if err != nil {
return err
}
if d.IsDir() {
return nil
}
f, err := fsys.Open(name)
if err != nil {
return err
}
defer f.Close()
_, err = io.Copy(fw, f)
return err
})
}

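AddFS, present on the go1.24.1 side of this comparison, turns any fs.FS into archive entries. A brief usage sketch with an in-memory tree, assuming a toolchain recent enough to have the method:

package main

import (
	"archive/zip"
	"bytes"
	"log"
	"testing/fstest"
)

func main() {
	// An in-memory file tree; any fs.FS works the same way.
	src := fstest.MapFS{
		"hello.txt":     {Data: []byte("hello")},
		"sub/world.txt": {Data: []byte("world")},
	}

	var buf bytes.Buffer
	zw := zip.NewWriter(&buf)
	if err := zw.AddFS(src); err != nil { // walks src, deflating each regular file
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("wrote a %d-byte archive", buf.Len())
}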
func (w *Writer) compressor(method uint16) Compressor {
|
||||
comp := w.compressors[method]
|
||||
if comp == nil {
|
||||
@@ -615,7 +569,7 @@ func (w *fileWriter) writeDataDescriptor() error {
|
||||
}
|
||||
// Write data descriptor. This is more complicated than one would
|
||||
// think, see e.g. comments in zipfile.c:putextended() and
|
||||
// https://bugs.openjdk.org/browse/JDK-7073588.
|
||||
// http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=7073588.
|
||||
// The approach here is to write 8 byte sizes if needed without
|
||||
// adding a zip64 extra in the local header (too late anyway).
|
||||
var buf []byte
|
||||
|
||||
@@ -16,7 +16,6 @@ import (
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"testing/fstest"
|
||||
"time"
|
||||
)
|
||||
|
||||
@@ -108,7 +107,7 @@ func TestWriter(t *testing.T) {
|
||||
|
||||
// TestWriterComment is test for EOCD comment read/write.
|
||||
func TestWriterComment(t *testing.T) {
|
||||
tests := []struct {
|
||||
var tests = []struct {
|
||||
comment string
|
||||
ok bool
|
||||
}{
|
||||
@@ -158,7 +157,7 @@ func TestWriterComment(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestWriterUTF8(t *testing.T) {
|
||||
utf8Tests := []struct {
|
||||
var utf8Tests = []struct {
|
||||
name string
|
||||
comment string
|
||||
nonUTF8 bool
|
||||
@@ -603,71 +602,3 @@ func BenchmarkCompressedZipGarbage(b *testing.B) {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func writeTestsToFS(tests []WriteTest) fs.FS {
|
||||
fsys := fstest.MapFS{}
|
||||
for _, wt := range tests {
|
||||
fsys[wt.Name] = &fstest.MapFile{
|
||||
Data: wt.Data,
|
||||
Mode: wt.Mode,
|
||||
}
|
||||
}
|
||||
return fsys
|
||||
}
|
||||
|
||||
func TestWriterAddFS(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
w := NewWriter(buf)
|
||||
tests := []WriteTest{
|
||||
{Name: "emptyfolder", Mode: 0o755 | os.ModeDir},
|
||||
{Name: "file.go", Data: []byte("hello"), Mode: 0644},
|
||||
{Name: "subfolder/another.go", Data: []byte("world"), Mode: 0644},
|
||||
// Notably missing here is the "subfolder" directory. This makes sure even
|
||||
// if we don't have a subfolder directory listed.
|
||||
}
|
||||
err := w.AddFS(writeTestsToFS(tests))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := w.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Add subfolder into fsys to match what we'll read from the zip.
|
||||
tests = append(tests[:2:2], WriteTest{Name: "subfolder", Mode: 0o555 | os.ModeDir}, tests[2])
|
||||
|
||||
// read it back
|
||||
r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for i, wt := range tests {
|
||||
if wt.Mode.IsDir() {
|
||||
wt.Name += "/"
|
||||
}
|
||||
testReadFile(t, r.File[i], &wt)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIssue61875(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
w := NewWriter(buf)
|
||||
tests := []WriteTest{
|
||||
{
|
||||
Name: "symlink",
|
||||
Data: []byte("../link/target"),
|
||||
Method: Deflate,
|
||||
Mode: 0755 | fs.ModeSymlink,
|
||||
},
|
||||
{
|
||||
Name: "device",
|
||||
Data: []byte(""),
|
||||
Method: Deflate,
|
||||
Mode: 0755 | fs.ModeDevice,
|
||||
},
|
||||
}
|
||||
err := w.AddFS(writeTestsToFS(tests))
|
||||
if err == nil {
|
||||
t.Errorf("expected error, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,14 +8,13 @@ package zip
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"cmp"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"internal/testenv"
|
||||
"io"
|
||||
"runtime"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -199,6 +198,13 @@ func (r *rleBuffer) Write(p []byte) (n int, err error) {
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func min(x, y int64) int64 {
|
||||
if x < y {
|
||||
return x
|
||||
}
|
||||
return y
|
||||
}
|
||||
|
||||
func memset(a []byte, b byte) {
|
||||
if len(a) == 0 {
|
||||
return
|
||||
@@ -215,8 +221,9 @@ func (r *rleBuffer) ReadAt(p []byte, off int64) (n int, err error) {
|
||||
if len(p) == 0 {
|
||||
return
|
||||
}
|
||||
skipParts, _ := slices.BinarySearchFunc(r.buf, off, func(rb repeatedByte, off int64) int {
|
||||
return cmp.Compare(rb.off+rb.n, off)
|
||||
skipParts := sort.Search(len(r.buf), func(i int) bool {
|
||||
part := &r.buf[i]
|
||||
return part.off+part.n > off
|
||||
})
|
||||
parts := r.buf[skipParts:]
|
||||
if len(parts) > 0 {
|
||||
@@ -590,7 +597,7 @@ func testZip64(t testing.TB, size int64) *rleBuffer {
|
||||
}
|
||||
|
||||
// read back zip file and check that we get to the end of it
|
||||
r, err := NewReader(buf, buf.Size())
|
||||
r, err := NewReader(buf, int64(buf.Size()))
|
||||
if err != nil {
|
||||
t.Fatal("reader:", err)
|
||||
}
|
||||
@@ -814,6 +821,8 @@ func TestSuffixSaver(t *testing.T) {
|
||||
type zeros struct{}
|
||||
|
||||
func (zeros) Read(p []byte) (int, error) {
|
||||
clear(p)
|
||||
for i := range p {
|
||||
p[i] = 0
|
||||
}
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
@@ -29,9 +29,6 @@ var (
|
||||
// Buffered input.
|
||||
|
||||
// Reader implements buffering for an io.Reader object.
|
||||
// A new Reader is created by calling [NewReader] or [NewReaderSize];
|
||||
// alternatively the zero value of a Reader may be used after calling [Reset]
|
||||
// on it.
|
||||
type Reader struct {
|
||||
buf []byte
|
||||
rd io.Reader // reader provided by the client
|
||||
@@ -44,21 +41,24 @@ type Reader struct {
const minReadBufferSize = 16
const maxConsecutiveEmptyReads = 100

// NewReaderSize returns a new [Reader] whose buffer has at least the specified
// size. If the argument io.Reader is already a [Reader] with large enough
// size, it returns the underlying [Reader].
// NewReaderSize returns a new Reader whose buffer has at least the specified
// size. If the argument io.Reader is already a Reader with large enough
// size, it returns the underlying Reader.
func NewReaderSize(rd io.Reader, size int) *Reader {
// Is it already a Reader?
b, ok := rd.(*Reader)
if ok && len(b.buf) >= size {
return b
}
if size < minReadBufferSize {
size = minReadBufferSize
}
r := new(Reader)
r.reset(make([]byte, max(size, minReadBufferSize)), rd)
r.reset(make([]byte, size), rd)
return r
}

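As the doc comment above says, NewReaderSize hands back its argument unchanged when that argument is already a *Reader with a big enough buffer; a quick illustration:

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	base := bufio.NewReaderSize(strings.NewReader("some data"), 8192)
	again := bufio.NewReaderSize(base, 4096)
	fmt.Println(again == base)                             // true: the existing buffer already satisfies the request
	fmt.Println(bufio.NewReaderSize(base, 16384) == base)  // false: a larger buffer forces a new Reader
}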
// NewReader returns a new [Reader] whose buffer has the default size.
|
||||
// NewReader returns a new Reader whose buffer has the default size.
|
||||
func NewReader(rd io.Reader) *Reader {
|
||||
return NewReaderSize(rd, defaultBufSize)
|
||||
}
|
||||
@@ -68,9 +68,9 @@ func (b *Reader) Size() int { return len(b.buf) }
|
||||
|
||||
// Reset discards any buffered data, resets all state, and switches
|
||||
// the buffered reader to read from r.
|
||||
// Calling Reset on the zero value of [Reader] initializes the internal buffer
|
||||
// Calling Reset on the zero value of Reader initializes the internal buffer
|
||||
// to the default size.
|
||||
// Calling b.Reset(b) (that is, resetting a [Reader] to itself) does nothing.
|
||||
// Calling b.Reset(b) (that is, resetting a Reader to itself) does nothing.
|
||||
func (b *Reader) Reset(r io.Reader) {
|
||||
// If a Reader r is passed to NewReader, NewReader will return r.
|
||||
// Different layers of code may do that, and then later pass r
|
||||
@@ -133,12 +133,11 @@ func (b *Reader) readErr() error {
|
||||
}
|
||||
|
||||
// Peek returns the next n bytes without advancing the reader. The bytes stop
|
||||
// being valid at the next read call. If necessary, Peek will read more bytes
|
||||
// into the buffer in order to make n bytes available. If Peek returns fewer
|
||||
// than n bytes, it also returns an error explaining why the read is short.
|
||||
// The error is [ErrBufferFull] if n is larger than b's buffer size.
|
||||
// being valid at the next read call. If Peek returns fewer than n bytes, it
|
||||
// also returns an error explaining why the read is short. The error is
|
||||
// ErrBufferFull if n is larger than b's buffer size.
|
||||
//
|
||||
// Calling Peek prevents a [Reader.UnreadByte] or [Reader.UnreadRune] call from succeeding
|
||||
// Calling Peek prevents a UnreadByte or UnreadRune call from succeeding
|
||||
// until the next read operation.
|
||||
func (b *Reader) Peek(n int) ([]byte, error) {
|
||||
if n < 0 {
|
||||
@@ -208,10 +207,10 @@ func (b *Reader) Discard(n int) (discarded int, err error) {
|
||||
|
||||
// Read reads data into p.
|
||||
// It returns the number of bytes read into p.
|
||||
// The bytes are taken from at most one Read on the underlying [Reader],
|
||||
// The bytes are taken from at most one Read on the underlying Reader,
|
||||
// hence n may be less than len(p).
|
||||
// To read exactly len(p) bytes, use io.ReadFull(b, p).
|
||||
// If the underlying [Reader] can return a non-zero count with io.EOF,
|
||||
// If the underlying Reader can return a non-zero count with io.EOF,
|
||||
// then this Read method can do so as well; see the [io.Reader] docs.
|
||||
func (b *Reader) Read(p []byte) (n int, err error) {
|
||||
n = len(p)
|
||||
@@ -281,7 +280,7 @@ func (b *Reader) ReadByte() (byte, error) {
|
||||
// UnreadByte unreads the last byte. Only the most recently read byte can be unread.
|
||||
//
|
||||
// UnreadByte returns an error if the most recent method called on the
|
||||
// [Reader] was not a read operation. Notably, [Reader.Peek], [Reader.Discard], and [Reader.WriteTo] are not
|
||||
// Reader was not a read operation. Notably, Peek, Discard, and WriteTo are not
|
||||
// considered read operations.
|
||||
func (b *Reader) UnreadByte() error {
|
||||
if b.lastByte < 0 || b.r == 0 && b.w > 0 {
|
||||
@@ -322,8 +321,8 @@ func (b *Reader) ReadRune() (r rune, size int, err error) {
|
||||
}
|
||||
|
||||
// UnreadRune unreads the last rune. If the most recent method called on
|
||||
// the [Reader] was not a [Reader.ReadRune], [Reader.UnreadRune] returns an error. (In this
|
||||
// regard it is stricter than [Reader.UnreadByte], which will unread the last byte
|
||||
// the Reader was not a ReadRune, UnreadRune returns an error. (In this
|
||||
// regard it is stricter than UnreadByte, which will unread the last byte
|
||||
// from any read operation.)
|
||||
func (b *Reader) UnreadRune() error {
|
||||
if b.lastRuneSize < 0 || b.r < b.lastRuneSize {
|
||||
@@ -343,10 +342,10 @@ func (b *Reader) Buffered() int { return b.w - b.r }
|
||||
// The bytes stop being valid at the next read.
|
||||
// If ReadSlice encounters an error before finding a delimiter,
|
||||
// it returns all the data in the buffer and the error itself (often io.EOF).
|
||||
// ReadSlice fails with error [ErrBufferFull] if the buffer fills without a delim.
|
||||
// ReadSlice fails with error ErrBufferFull if the buffer fills without a delim.
|
||||
// Because the data returned from ReadSlice will be overwritten
|
||||
// by the next I/O operation, most clients should use
|
||||
// [Reader.ReadBytes] or ReadString instead.
|
||||
// ReadBytes or ReadString instead.
|
||||
// ReadSlice returns err != nil if and only if line does not end in delim.
|
||||
func (b *Reader) ReadSlice(delim byte) (line []byte, err error) {
|
||||
s := 0 // search start index
|
||||
@@ -390,7 +389,7 @@ func (b *Reader) ReadSlice(delim byte) (line []byte, err error) {
|
||||
}
|
||||
|
||||
// ReadLine is a low-level line-reading primitive. Most callers should use
|
||||
// [Reader.ReadBytes]('\n') or [Reader.ReadString]('\n') instead or use a [Scanner].
|
||||
// ReadBytes('\n') or ReadString('\n') instead or use a Scanner.
|
||||
//
|
||||
// ReadLine tries to return a single line, not including the end-of-line bytes.
|
||||
// If the line was too long for the buffer then isPrefix is set and the
|
||||
@@ -402,7 +401,7 @@ func (b *Reader) ReadSlice(delim byte) (line []byte, err error) {
|
||||
//
|
||||
// The text returned from ReadLine does not include the line end ("\r\n" or "\n").
|
||||
// No indication or error is given if the input ends without a final line end.
|
||||
// Calling [Reader.UnreadByte] after ReadLine will always unread the last byte read
|
||||
// Calling UnreadByte after ReadLine will always unread the last byte read
|
||||
// (possibly a character belonging to the line end) even if that byte is not
|
||||
// part of the line returned by ReadLine.
|
||||
func (b *Reader) ReadLine() (line []byte, isPrefix bool, err error) {
|
||||
@@ -512,9 +511,9 @@ func (b *Reader) ReadString(delim byte) (string, error) {
|
||||
}
|
||||
|
||||
// WriteTo implements io.WriterTo.
|
||||
// This may make multiple calls to the [Reader.Read] method of the underlying [Reader].
|
||||
// If the underlying reader supports the [Reader.WriteTo] method,
|
||||
// this calls the underlying [Reader.WriteTo] without buffering.
|
||||
// This may make multiple calls to the Read method of the underlying Reader.
|
||||
// If the underlying reader supports the WriteTo method,
|
||||
// this calls the underlying WriteTo without buffering.
|
||||
func (b *Reader) WriteTo(w io.Writer) (n int64, err error) {
|
||||
b.lastByte = -1
|
||||
b.lastRuneSize = -1
|
||||
@@ -559,7 +558,7 @@ func (b *Reader) WriteTo(w io.Writer) (n int64, err error) {
|
||||
|
||||
var errNegativeWrite = errors.New("bufio: writer returned negative count from Write")
|
||||
|
||||
// writeBuf writes the [Reader]'s buffer to the writer.
|
||||
// writeBuf writes the Reader's buffer to the writer.
|
||||
func (b *Reader) writeBuf(w io.Writer) (int64, error) {
|
||||
n, err := w.Write(b.buf[b.r:b.w])
|
||||
if n < 0 {
|
||||
@@ -571,12 +570,12 @@ func (b *Reader) writeBuf(w io.Writer) (int64, error) {
|
||||
|
||||
// buffered output
|
||||
|
||||
// Writer implements buffering for an [io.Writer] object.
|
||||
// If an error occurs writing to a [Writer], no more data will be
|
||||
// accepted and all subsequent writes, and [Writer.Flush], will return the error.
|
||||
// Writer implements buffering for an io.Writer object.
|
||||
// If an error occurs writing to a Writer, no more data will be
|
||||
// accepted and all subsequent writes, and Flush, will return the error.
|
||||
// After all data has been written, the client should call the
|
||||
// [Writer.Flush] method to guarantee all data has been forwarded to
|
||||
// the underlying [io.Writer].
|
||||
// Flush method to guarantee all data has been forwarded to
|
||||
// the underlying io.Writer.
|
||||
type Writer struct {
|
||||
err error
|
||||
buf []byte
|
||||
@@ -584,9 +583,9 @@ type Writer struct {
|
||||
wr io.Writer
|
||||
}
|
||||
|
||||
// NewWriterSize returns a new [Writer] whose buffer has at least the specified
|
||||
// size. If the argument io.Writer is already a [Writer] with large enough
|
||||
// size, it returns the underlying [Writer].
|
||||
// NewWriterSize returns a new Writer whose buffer has at least the specified
|
||||
// size. If the argument io.Writer is already a Writer with large enough
|
||||
// size, it returns the underlying Writer.
|
||||
func NewWriterSize(w io.Writer, size int) *Writer {
|
||||
// Is it already a Writer?
|
||||
b, ok := w.(*Writer)
|
||||
@@ -602,9 +601,9 @@ func NewWriterSize(w io.Writer, size int) *Writer {
|
||||
}
|
||||
}
|
||||
|
||||
// NewWriter returns a new [Writer] whose buffer has the default size.
|
||||
// If the argument io.Writer is already a [Writer] with large enough buffer size,
|
||||
// it returns the underlying [Writer].
|
||||
// NewWriter returns a new Writer whose buffer has the default size.
|
||||
// If the argument io.Writer is already a Writer with large enough buffer size,
|
||||
// it returns the underlying Writer.
|
||||
func NewWriter(w io.Writer) *Writer {
|
||||
return NewWriterSize(w, defaultBufSize)
|
||||
}
|
||||
@@ -614,9 +613,9 @@ func (b *Writer) Size() int { return len(b.buf) }
|
||||
|
||||
// Reset discards any unflushed buffered data, clears any error, and
|
||||
// resets b to write its output to w.
|
||||
// Calling Reset on the zero value of [Writer] initializes the internal buffer
|
||||
// Calling Reset on the zero value of Writer initializes the internal buffer
|
||||
// to the default size.
|
||||
// Calling w.Reset(w) (that is, resetting a [Writer] to itself) does nothing.
|
||||
// Calling w.Reset(w) (that is, resetting a Writer to itself) does nothing.
|
||||
func (b *Writer) Reset(w io.Writer) {
|
||||
// If a Writer w is passed to NewWriter, NewWriter will return w.
|
||||
// Different layers of code may do that, and then later pass w
|
||||
@@ -632,7 +631,7 @@ func (b *Writer) Reset(w io.Writer) {
|
||||
b.wr = w
|
||||
}
|
||||
|
||||
// Flush writes any buffered data to the underlying [io.Writer].
|
||||
// Flush writes any buffered data to the underlying io.Writer.
|
||||
func (b *Writer) Flush() error {
|
||||
if b.err != nil {
|
||||
return b.err
|
||||
@@ -661,7 +660,7 @@ func (b *Writer) Available() int { return len(b.buf) - b.n }
|
||||
|
||||
// AvailableBuffer returns an empty buffer with b.Available() capacity.
|
||||
// This buffer is intended to be appended to and
|
||||
// passed to an immediately succeeding [Writer.Write] call.
|
||||
// passed to an immediately succeeding Write call.
|
||||
// The buffer is only valid until the next write operation on b.
|
||||
func (b *Writer) AvailableBuffer() []byte {
|
||||
return b.buf[b.n:][:0]
|
||||
@@ -778,7 +777,7 @@ func (b *Writer) WriteString(s string) (int, error) {
|
||||
return nn, nil
|
||||
}
|
||||
|
||||
// ReadFrom implements [io.ReaderFrom]. If the underlying writer
|
||||
// ReadFrom implements io.ReaderFrom. If the underlying writer
|
||||
// supports the ReadFrom method, this calls the underlying ReadFrom.
|
||||
// If there is buffered data and an underlying ReadFrom, this fills
|
||||
// the buffer and writes it before calling ReadFrom.
|
||||
@@ -830,14 +829,14 @@ func (b *Writer) ReadFrom(r io.Reader) (n int64, err error) {
|
||||
|
||||
// buffered input and output
|
||||
|
||||
// ReadWriter stores pointers to a [Reader] and a [Writer].
|
||||
// It implements [io.ReadWriter].
|
||||
// ReadWriter stores pointers to a Reader and a Writer.
|
||||
// It implements io.ReadWriter.
|
||||
type ReadWriter struct {
|
||||
*Reader
|
||||
*Writer
|
||||
}
|
||||
|
||||
// NewReadWriter allocates a new [ReadWriter] that dispatches to r and w.
|
||||
// NewReadWriter allocates a new ReadWriter that dispatches to r and w.
|
||||
func NewReadWriter(r *Reader, w *Writer) *ReadWriter {
|
||||
return &ReadWriter{r, w}
|
||||
}
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"internal/asan"
|
||||
"io"
|
||||
"math/rand"
|
||||
"strconv"
|
||||
@@ -586,9 +585,6 @@ func TestWriteInvalidRune(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestReadStringAllocs(t *testing.T) {
|
||||
if asan.Enabled {
|
||||
t.Skip("test allocates more with -asan; see #70079")
|
||||
}
|
||||
r := strings.NewReader(" foo foo 42 42 42 42 42 42 42 42 4.2 4.2 4.2 4.2\n")
|
||||
buf := NewReader(r)
|
||||
allocs := testing.AllocsPerRun(100, func() {
|
||||
@@ -640,7 +636,7 @@ func TestWriter(t *testing.T) {
|
||||
for l := 0; l < len(written); l++ {
|
||||
if written[l] != data[l] {
|
||||
t.Errorf("wrong bytes written")
|
||||
t.Errorf("want=%q", data[:len(written)])
|
||||
t.Errorf("want=%q", data[0:len(written)])
|
||||
t.Errorf("have=%q", written)
|
||||
}
|
||||
}
|
||||
@@ -939,6 +935,7 @@ func (t *testReader) Read(buf []byte) (n int, err error) {
|
||||
}
|
||||
|
||||
func testReadLine(t *testing.T, input []byte) {
|
||||
//for stride := 1; stride < len(input); stride++ {
|
||||
for stride := 1; stride < 2; stride++ {
|
||||
done := 0
|
||||
reader := testReader{input, stride}
|
||||
|
||||
@@ -6,7 +6,6 @@ package bufio_test
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
@@ -33,33 +32,6 @@ func ExampleWriter_AvailableBuffer() {
|
||||
// Output: 1 2 3 4
|
||||
}
|
||||
|
||||
// ExampleWriter_ReadFrom demonstrates how to use the ReadFrom method of Writer.
|
||||
func ExampleWriter_ReadFrom() {
|
||||
var buf bytes.Buffer
|
||||
writer := bufio.NewWriter(&buf)
|
||||
|
||||
data := "Hello, world!\nThis is a ReadFrom example."
|
||||
reader := strings.NewReader(data)
|
||||
|
||||
n, err := writer.ReadFrom(reader)
|
||||
if err != nil {
|
||||
fmt.Println("ReadFrom Error:", err)
|
||||
return
|
||||
}
|
||||
|
||||
if err = writer.Flush(); err != nil {
|
||||
fmt.Println("Flush Error:", err)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Println("Bytes written:", n)
|
||||
fmt.Println("Buffer contents:", buf.String())
|
||||
// Output:
|
||||
// Bytes written: 41
|
||||
// Buffer contents: Hello, world!
|
||||
// This is a ReadFrom example.
|
||||
}
|
||||
|
||||
// The simplest use of a Scanner, to read standard input as a set of lines.
|
||||
func ExampleScanner_lines() {
|
||||
scanner := bufio.NewScanner(os.Stdin)
|
||||
@@ -165,36 +137,3 @@ func ExampleScanner_emptyFinalToken() {
|
||||
}
|
||||
// Output: "1" "2" "3" "4" ""
|
||||
}
|
||||
|
||||
// Use a Scanner with a custom split function to parse a comma-separated
|
||||
// list with an empty final value but stops at the token "STOP".
|
||||
func ExampleScanner_earlyStop() {
|
||||
onComma := func(data []byte, atEOF bool) (advance int, token []byte, err error) {
|
||||
i := bytes.IndexByte(data, ',')
|
||||
if i == -1 {
|
||||
if !atEOF {
|
||||
return 0, nil, nil
|
||||
}
|
||||
// If we have reached the end, return the last token.
|
||||
return 0, data, bufio.ErrFinalToken
|
||||
}
|
||||
// If the token is "STOP", stop the scanning and ignore the rest.
|
||||
if string(data[:i]) == "STOP" {
|
||||
return i + 1, nil, bufio.ErrFinalToken
|
||||
}
|
||||
// Otherwise, return the token before the comma.
|
||||
return i + 1, data[:i], nil
|
||||
}
|
||||
const input = "1,2,STOP,4,"
|
||||
scanner := bufio.NewScanner(strings.NewReader(input))
|
||||
scanner.Split(onComma)
|
||||
for scanner.Scan() {
|
||||
fmt.Printf("Got a token %q\n", scanner.Text())
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
fmt.Fprintln(os.Stderr, "reading input:", err)
|
||||
}
|
||||
// Output:
|
||||
// Got a token "1"
|
||||
// Got a token "2"
|
||||
}
|
||||
|
||||
@@ -13,19 +13,19 @@ import (
|
||||
|
||||
// Scanner provides a convenient interface for reading data such as
|
||||
// a file of newline-delimited lines of text. Successive calls to
|
||||
// the [Scanner.Scan] method will step through the 'tokens' of a file, skipping
|
||||
// the Scan method will step through the 'tokens' of a file, skipping
|
||||
// the bytes between the tokens. The specification of a token is
|
||||
// defined by a split function of type [SplitFunc]; the default split
|
||||
// function breaks the input into lines with line termination stripped. [Scanner.Split]
|
||||
// defined by a split function of type SplitFunc; the default split
|
||||
// function breaks the input into lines with line termination stripped. Split
|
||||
// functions are defined in this package for scanning a file into
|
||||
// lines, bytes, UTF-8-encoded runes, and space-delimited words. The
|
||||
// client may instead provide a custom split function.
|
||||
//
|
||||
// Scanning stops unrecoverably at EOF, the first I/O error, or a token too
|
||||
// large to fit in the [Scanner.Buffer]. When a scan stops, the reader may have
|
||||
// large to fit in the buffer. When a scan stops, the reader may have
|
||||
// advanced arbitrarily far past the last token. Programs that need more
|
||||
// control over error handling or large tokens, or must run sequential scans
|
||||
// on a reader, should use [bufio.Reader] instead.
|
||||
// on a reader, should use bufio.Reader instead.
|
||||
type Scanner struct {
|
||||
r io.Reader // The reader provided by the client.
|
||||
split SplitFunc // The function to split the tokens.
|
||||
@@ -42,23 +42,21 @@ type Scanner struct {
|
||||
|
||||
// SplitFunc is the signature of the split function used to tokenize the
|
||||
// input. The arguments are an initial substring of the remaining unprocessed
|
||||
// data and a flag, atEOF, that reports whether the [Reader] has no more data
|
||||
// data and a flag, atEOF, that reports whether the Reader has no more data
|
||||
// to give. The return values are the number of bytes to advance the input
|
||||
// and the next token to return to the user, if any, plus an error, if any.
|
||||
//
|
||||
// Scanning stops if the function returns an error, in which case some of
|
||||
// the input may be discarded. If that error is [ErrFinalToken], scanning
|
||||
// stops with no error. A non-nil token delivered with [ErrFinalToken]
|
||||
// will be the last token, and a nil token with [ErrFinalToken]
|
||||
// immediately stops the scanning.
|
||||
// the input may be discarded. If that error is ErrFinalToken, scanning
|
||||
// stops with no error.
|
||||
//
|
||||
// Otherwise, the [Scanner] advances the input. If the token is not nil,
|
||||
// the [Scanner] returns it to the user. If the token is nil, the
|
||||
// Otherwise, the Scanner advances the input. If the token is not nil,
|
||||
// the Scanner returns it to the user. If the token is nil, the
|
||||
// Scanner reads more data and continues scanning; if there is no more
|
||||
// data--if atEOF was true--the [Scanner] returns. If the data does not
|
||||
// data--if atEOF was true--the Scanner returns. If the data does not
|
||||
// yet hold a complete token, for instance if it has no newline while
|
||||
// scanning lines, a [SplitFunc] can return (0, nil, nil) to signal the
|
||||
// [Scanner] to read more data into the slice and try again with a
|
||||
// scanning lines, a SplitFunc can return (0, nil, nil) to signal the
|
||||
// Scanner to read more data into the slice and try again with a
|
||||
// longer slice starting at the same point in the input.
|
||||
//
|
||||
// The function is never called with an empty data slice unless atEOF
|
||||
@@ -76,7 +74,7 @@ var (
|
||||
|
||||
const (
|
||||
// MaxScanTokenSize is the maximum size used to buffer a token
|
||||
// unless the user provides an explicit buffer with [Scanner.Buffer].
|
||||
// unless the user provides an explicit buffer with Scanner.Buffer.
|
||||
// The actual maximum token size may be smaller as the buffer
|
||||
// may need to include, for instance, a newline.
|
||||
MaxScanTokenSize = 64 * 1024
|
||||
@@ -84,8 +82,8 @@ const (
|
||||
startBufSize = 4096 // Size of initial allocation for buffer.
|
||||
)
|
||||
|
||||
// NewScanner returns a new [Scanner] to read from r.
|
||||
// The split function defaults to [ScanLines].
|
||||
// NewScanner returns a new Scanner to read from r.
|
||||
// The split function defaults to ScanLines.
|
||||
func NewScanner(r io.Reader) *Scanner {
|
||||
return &Scanner{
|
||||
r: r,
|
||||
@@ -94,7 +92,7 @@ func NewScanner(r io.Reader) *Scanner {
|
||||
}
|
||||
}
|
||||
|
||||
// Err returns the first non-EOF error that was encountered by the [Scanner].
|
||||
// Err returns the first non-EOF error that was encountered by the Scanner.
|
||||
func (s *Scanner) Err() error {
|
||||
if s.err == io.EOF {
|
||||
return nil
|
||||
@@ -102,36 +100,34 @@ func (s *Scanner) Err() error {
|
||||
return s.err
|
||||
}
|
||||
|
||||
// Bytes returns the most recent token generated by a call to [Scanner.Scan].
|
||||
// Bytes returns the most recent token generated by a call to Scan.
|
||||
// The underlying array may point to data that will be overwritten
|
||||
// by a subsequent call to Scan. It does no allocation.
|
||||
func (s *Scanner) Bytes() []byte {
|
||||
return s.token
|
||||
}
|
||||
|
||||
// Text returns the most recent token generated by a call to [Scanner.Scan]
|
||||
// Text returns the most recent token generated by a call to Scan
|
||||
// as a newly allocated string holding its bytes.
|
||||
func (s *Scanner) Text() string {
|
||||
return string(s.token)
|
||||
}
|
||||
|
||||
// ErrFinalToken is a special sentinel error value. It is intended to be
|
||||
// returned by a Split function to indicate that the scanning should stop
|
||||
// with no error. If the token being delivered with this error is not nil,
|
||||
// the token is the last token.
|
||||
//
|
||||
// returned by a Split function to indicate that the token being delivered
|
||||
// with the error is the last token and scanning should stop after this one.
|
||||
// After ErrFinalToken is received by Scan, scanning stops with no error.
|
||||
// The value is useful to stop processing early or when it is necessary to
|
||||
// deliver a final empty token (which is different from a nil token).
|
||||
// One could achieve the same behavior with a custom error value but
|
||||
// providing one here is tidier.
|
||||
// deliver a final empty token. One could achieve the same behavior
|
||||
// with a custom error value but providing one here is tidier.
|
||||
// See the emptyFinalToken example for a use of this value.
|
||||
var ErrFinalToken = errors.New("final token")
|
||||
|
||||
// Scan advances the [Scanner] to the next token, which will then be
|
||||
// available through the [Scanner.Bytes] or [Scanner.Text] method. It returns false when
|
||||
// there are no more tokens, either by reaching the end of the input or an error.
|
||||
// After Scan returns false, the [Scanner.Err] method will return any error that
|
||||
// occurred during scanning, except that if it was [io.EOF], [Scanner.Err]
|
||||
// Scan advances the Scanner to the next token, which will then be
|
||||
// available through the Bytes or Text method. It returns false when the
|
||||
// scan stops, either by reaching the end of the input or an error.
|
||||
// After Scan returns false, the Err method will return any error that
|
||||
// occurred during scanning, except that if it was io.EOF, Err
|
||||
// will return nil.
|
||||
// Scan panics if the split function returns too many empty
|
||||
// tokens without advancing the input. This is a common error mode for
|
||||
@@ -152,10 +148,7 @@ func (s *Scanner) Scan() bool {
|
||||
if err == ErrFinalToken {
|
||||
s.token = token
|
||||
s.done = true
|
||||
// When token is not nil, it means the scanning stops
|
||||
// with a trailing token, and thus the return value
|
||||
// should be true to indicate the existence of the token.
|
||||
return token != nil
|
||||
return true
|
||||
}
|
||||
s.setErr(err)
|
||||
return false
|
||||
@@ -205,7 +198,9 @@ func (s *Scanner) Scan() bool {
if newSize == 0 {
newSize = startBufSize
}
newSize = min(newSize, s.maxTokenSize)
if newSize > s.maxTokenSize {
newSize = s.maxTokenSize
}
newBuf := make([]byte, newSize)
copy(newBuf, s.buf[s.start:s.end])
s.buf = newBuf

@@ -260,13 +255,13 @@ func (s *Scanner) setErr(err error) {
}
}

// Buffer sets the initial buffer to use when scanning
// and the maximum size of buffer that may be allocated during scanning.
// The maximum token size must be less than the larger of max and cap(buf).
// If max <= cap(buf), [Scanner.Scan] will use this buffer only and do no allocation.
// Buffer sets the initial buffer to use when scanning and the maximum
// size of buffer that may be allocated during scanning. The maximum
// token size is the larger of max and cap(buf). If max <= cap(buf),
// Scan will use this buffer only and do no allocation.
//
// By default, [Scanner.Scan] uses an internal buffer and sets the
// maximum token size to [MaxScanTokenSize].
// By default, Scan uses an internal buffer and sets the
// maximum token size to MaxScanTokenSize.
//
// Buffer panics if it is called after scanning has started.
func (s *Scanner) Buffer(buf []byte, max int) {

@@ -277,8 +272,8 @@ func (s *Scanner) Buffer(buf []byte, max int) {
s.maxTokenSize = max
}

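Buffer is the knob the doc comment above keeps pointing at: it swaps in a caller-supplied initial buffer and raises the maximum token size beyond MaxScanTokenSize. A short sketch scanning a line longer than the 64 KiB default limit:

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	input := strings.Repeat("x", 200_000) + "\n" // one line, well over the 64 KiB default
	sc := bufio.NewScanner(strings.NewReader(input))
	sc.Buffer(make([]byte, 64*1024), 1<<20) // start at 64 KiB, allow tokens up to 1 MiB
	for sc.Scan() {
		fmt.Println(len(sc.Bytes())) // 200000
	}
	if err := sc.Err(); err != nil {
		fmt.Println("scan error:", err) // without Buffer this would be bufio.ErrTooLong
	}
}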
// Split sets the split function for the [Scanner].
|
||||
// The default split function is [ScanLines].
|
||||
// Split sets the split function for the Scanner.
|
||||
// The default split function is ScanLines.
|
||||
//
|
||||
// Split panics if it is called after scanning has started.
|
||||
func (s *Scanner) Split(split SplitFunc) {
|
||||
@@ -290,7 +285,7 @@ func (s *Scanner) Split(split SplitFunc) {
|
||||
|
||||
// Split functions
|
||||
|
||||
// ScanBytes is a split function for a [Scanner] that returns each byte as a token.
|
||||
// ScanBytes is a split function for a Scanner that returns each byte as a token.
|
||||
func ScanBytes(data []byte, atEOF bool) (advance int, token []byte, err error) {
|
||||
if atEOF && len(data) == 0 {
|
||||
return 0, nil, nil
|
||||
@@ -300,7 +295,7 @@ func ScanBytes(data []byte, atEOF bool) (advance int, token []byte, err error) {
|
||||
|
||||
var errorRune = []byte(string(utf8.RuneError))
|
||||
|
||||
// ScanRunes is a split function for a [Scanner] that returns each
|
||||
// ScanRunes is a split function for a Scanner that returns each
|
||||
// UTF-8-encoded rune as a token. The sequence of runes returned is
|
||||
// equivalent to that from a range loop over the input as a string, which
|
||||
// means that erroneous UTF-8 encodings translate to U+FFFD = "\xef\xbf\xbd".
|
||||
@@ -346,7 +341,7 @@ func dropCR(data []byte) []byte {
|
||||
return data
|
||||
}
|
||||
|
||||
// ScanLines is a split function for a [Scanner] that returns each line of
|
||||
// ScanLines is a split function for a Scanner that returns each line of
|
||||
// text, stripped of any trailing end-of-line marker. The returned line may
|
||||
// be empty. The end-of-line marker is one optional carriage return followed
|
||||
// by one mandatory newline. In regular expression notation, it is `\r?\n`.
|
||||
@@ -393,7 +388,7 @@ func isSpace(r rune) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// ScanWords is a split function for a [Scanner] that returns each
|
||||
// ScanWords is a split function for a Scanner that returns each
|
||||
// space-separated word of text, with surrounding spaces deleted. It will
|
||||
// never return an empty string. The definition of space is set by
|
||||
// unicode.IsSpace.
|
||||
|
||||
@@ -68,7 +68,7 @@ func TestScanRune(t *testing.T) {
|
||||
var i, runeCount int
|
||||
var expect rune
|
||||
// Use a string range loop to validate the sequence of runes.
|
||||
for i, expect = range test {
|
||||
for i, expect = range string(test) {
|
||||
if !s.Scan() {
|
||||
break
|
||||
}
|
||||
|
||||
@@ -6,12 +6,8 @@
|
||||
# Usage: buildall.bash [-e] [pattern]
|
||||
#
|
||||
# buildall.bash builds the standard library for all Go-supported
|
||||
# architectures.
|
||||
#
|
||||
# Originally the Go build system used it as a smoke test to quickly
|
||||
# flag portability issues in builders named "misc-compile" or "all-compile".
|
||||
# As of CL 464955, the build system uses make.bash -compile-only instead,
|
||||
# so this script no longer runs in any automated fashion.
|
||||
# architectures. It is used by the "misc-compile" trybot builders,
|
||||
# as a smoke test to quickly flag portability issues.
|
||||
#
|
||||
# Options:
|
||||
# -e: stop at first failure
|
||||
@@ -41,7 +37,7 @@ GOROOT="$(cd .. && pwd)"
|
||||
|
||||
gettargets() {
|
||||
../bin/go tool dist list | sed -e 's|/|-|' |
|
||||
grep -E -v '^(android|ios)' # need C toolchain even for cross-compiling
|
||||
egrep -v '^(android|ios)' # need C toolchain even for cross-compiling
|
||||
echo linux-arm-arm5
|
||||
}
|
||||
|
||||
|
||||
@@ -53,10 +53,10 @@ type int32 int32
|
||||
// Range: -9223372036854775808 through 9223372036854775807.
|
||||
type int64 int64
|
||||
|
||||
// float32 is the set of all IEEE 754 32-bit floating-point numbers.
|
||||
// float32 is the set of all IEEE-754 32-bit floating-point numbers.
|
||||
type float32 float32
|
||||
|
||||
// float64 is the set of all IEEE 754 64-bit floating-point numbers.
|
||||
// float64 is the set of all IEEE-754 64-bit floating-point numbers.
|
||||
type float64 float64
|
||||
|
||||
// complex64 is the set of all complex numbers with float32 real and
|
||||
@@ -162,12 +162,12 @@ func delete(m map[Type]Type1, key Type)
|
||||
|
||||
// The len built-in function returns the length of v, according to its type:
|
||||
//
|
||||
// - Array: the number of elements in v.
|
||||
// - Pointer to array: the number of elements in *v (even if v is nil).
|
||||
// - Slice, or map: the number of elements in v; if v is nil, len(v) is zero.
|
||||
// - String: the number of bytes in v.
|
||||
// - Channel: the number of elements queued (unread) in the channel buffer;
|
||||
// if v is nil, len(v) is zero.
|
||||
// Array: the number of elements in v.
|
||||
// Pointer to array: the number of elements in *v (even if v is nil).
|
||||
// Slice, or map: the number of elements in v; if v is nil, len(v) is zero.
|
||||
// String: the number of bytes in v.
|
||||
// Channel: the number of elements queued (unread) in the channel buffer;
|
||||
// if v is nil, len(v) is zero.
|
||||
//
|
||||
// For some arguments, such as a string literal or a simple array expression, the
|
||||
// result can be a constant. See the Go language specification's "Length and
|
||||
@@ -176,12 +176,12 @@ func len(v Type) int
|
||||
|
||||
// The cap built-in function returns the capacity of v, according to its type:
|
||||
//
|
||||
// - Array: the number of elements in v (same as len(v)).
|
||||
// - Pointer to array: the number of elements in *v (same as len(v)).
|
||||
// - Slice: the maximum length the slice can reach when resliced;
|
||||
// if v is nil, cap(v) is zero.
|
||||
// - Channel: the channel buffer capacity, in units of elements;
|
||||
// if v is nil, cap(v) is zero.
|
||||
// Array: the number of elements in v (same as len(v)).
|
||||
// Pointer to array: the number of elements in *v (same as len(v)).
|
||||
// Slice: the maximum length the slice can reach when resliced;
|
||||
// if v is nil, cap(v) is zero.
|
||||
// Channel: the channel buffer capacity, in units of elements;
|
||||
// if v is nil, cap(v) is zero.
|
||||
//
|
||||
// For some arguments, such as a simple array expression, the result can be a
|
||||
// constant. See the Go language specification's "Length and capacity" section for
|
||||
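The two doc comments above enumerate the per-type behavior of len and cap; a small standalone illustration (assuming behavior exactly as documented):

package main

import "fmt"

func main() {
	a := [4]int{1, 2, 3, 4}
	s := make([]int, 2, 8)
	m := map[string]int{"x": 1}
	ch := make(chan int, 3)
	ch <- 42

	fmt.Println(len(a), cap(a))   // 4 4: an array's capacity equals its length
	fmt.Println(len(s), cap(s))   // 2 8: slice length vs. reslice headroom
	fmt.Println(len(m))           // 1: number of map entries
	fmt.Println(len("héllo"))     // 6: bytes, not runes
	fmt.Println(len(ch), cap(ch)) // 1 3: queued elements vs. buffer capacity
}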
@@ -194,18 +194,18 @@ func cap(v Type) int
|
||||
// argument, not a pointer to it. The specification of the result depends on
|
||||
// the type:
|
||||
//
|
||||
// - Slice: The size specifies the length. The capacity of the slice is
|
||||
// equal to its length. A second integer argument may be provided to
|
||||
// specify a different capacity; it must be no smaller than the
|
||||
// length. For example, make([]int, 0, 10) allocates an underlying array
|
||||
// of size 10 and returns a slice of length 0 and capacity 10 that is
|
||||
// backed by this underlying array.
|
||||
// - Map: An empty map is allocated with enough space to hold the
|
||||
// specified number of elements. The size may be omitted, in which case
|
||||
// a small starting size is allocated.
|
||||
// - Channel: The channel's buffer is initialized with the specified
|
||||
// buffer capacity. If zero, or the size is omitted, the channel is
|
||||
// unbuffered.
|
||||
// Slice: The size specifies the length. The capacity of the slice is
|
||||
// equal to its length. A second integer argument may be provided to
|
||||
// specify a different capacity; it must be no smaller than the
|
||||
// length. For example, make([]int, 0, 10) allocates an underlying array
|
||||
// of size 10 and returns a slice of length 0 and capacity 10 that is
|
||||
// backed by this underlying array.
|
||||
// Map: An empty map is allocated with enough space to hold the
|
||||
// specified number of elements. The size may be omitted, in which case
|
||||
// a small starting size is allocated.
|
||||
// Channel: The channel's buffer is initialized with the specified
|
||||
// buffer capacity. If zero, or the size is omitted, the channel is
|
||||
// unbuffered.
|
||||
func make(t Type, size ...IntegerType) Type
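A standalone sketch exercising the three forms of make documented above (slice with separate capacity, map with a size hint, buffered channel):

package main

import "fmt"

func main() {
	s := make([]int, 0, 10)      // slice: length 0, capacity 10
	m := make(map[string]int, 8) // map: size hint only, still empty
	c := make(chan string, 2)    // channel: buffer capacity 2

	fmt.Println(len(s), cap(s)) // 0 10
	fmt.Println(len(m))         // 0

	c <- "a"
	c <- "b" // fills the buffer; a third unreceived send would block
	fmt.Println(len(c), cap(c)) // 2 2
}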
|
||||
|
||||
// The max built-in function returns the largest value of a fixed number of
|
||||
@@ -247,7 +247,7 @@ func imag(c ComplexType) FloatType
|
||||
// to the zero value of the respective element type. If the argument
|
||||
// type is a type parameter, the type parameter's type set must
|
||||
// contain only map or slice types, and clear performs the operation
|
||||
// implied by the type argument. If t is nil, clear is a no-op.
|
||||
// implied by the type argument.
|
||||
func clear[T ~[]Type | ~map[Type]Type1](t T)
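A short sketch of the map/slice asymmetry the comment describes: map entries are removed, while slice elements are set to their zero value and the length is unchanged (illustrative only):

package main

import "fmt"

func main() {
	m := map[string]int{"a": 1, "b": 2}
	s := []int{1, 2, 3}

	clear(m) // deletes all entries
	clear(s) // zeroes every element; len(s) stays 3

	fmt.Println(len(m)) // 0
	fmt.Println(s)      // [0 0 0]
}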
|
||||
|
||||
// The close built-in function closes a channel, which must be either
|
||||
@@ -284,10 +284,9 @@ func panic(v any)
|
||||
// by restoring normal execution and retrieves the error value passed to the
|
||||
// call of panic. If recover is called outside the deferred function it will
|
||||
// not stop a panicking sequence. In this case, or when the goroutine is not
|
||||
// panicking, recover returns nil.
|
||||
//
|
||||
// Prior to Go 1.21, recover would also return nil if panic is called with
|
||||
// a nil argument. See [panic] for details.
|
||||
// panicking, or if the argument supplied to panic was nil, recover returns
|
||||
// nil. Thus the return value from recover reports whether the goroutine is
|
||||
// panicking.
|
||||
func recover() any
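A conventional sketch of the deferred-recover pattern that both versions of the comment describe (illustrative only; the nil-panic behavior differs before Go 1.21 as noted above):

package main

import "fmt"

func safeDiv(a, b int) (q int, err error) {
	defer func() {
		// recover is only useful inside a deferred function: it stops the
		// panicking sequence and yields the value that was passed to panic.
		if r := recover(); r != nil {
			err = fmt.Errorf("recovered: %v", r)
		}
	}()
	return a / b, nil // panics when b == 0
}

func main() {
	fmt.Println(safeDiv(10, 2)) // 5 <nil>
	fmt.Println(safeDiv(1, 0))  // 0 recovered: runtime error: integer divide by zero
}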
|
||||
|
||||
// The print built-in function formats its arguments in an
|
||||
|
||||
@@ -98,18 +98,3 @@ func TestIndexNearPageBoundary(t *testing.T) {
|
||||
}
|
||||
q[len(q)-1] = 0
|
||||
}
|
||||
|
||||
func TestCountNearPageBoundary(t *testing.T) {
|
||||
t.Parallel()
|
||||
b := dangerousSlice(t)
|
||||
for i := range b {
|
||||
c := Count(b[i:], []byte{1})
|
||||
if c != 0 {
|
||||
t.Fatalf("Count(b[%d:], {1})=%d, want 0\n", i, c)
|
||||
}
|
||||
c = Count(b[:i], []byte{0})
|
||||
if c != i {
|
||||
t.Fatalf("Count(b[:%d], {0})=%d, want %d\n", i, c, i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
// smallBufferSize is an initial allocation minimal capacity.
|
||||
const smallBufferSize = 64
|
||||
|
||||
// A Buffer is a variable-sized buffer of bytes with [Buffer.Read] and [Buffer.Write] methods.
|
||||
// A Buffer is a variable-sized buffer of bytes with Read and Write methods.
|
||||
// The zero value for Buffer is an empty buffer ready to use.
|
||||
type Buffer struct {
|
||||
buf []byte // contents are the bytes buf[off : len(buf)]
|
||||
@@ -48,21 +48,21 @@ const maxInt = int(^uint(0) >> 1)
|
||||
|
||||
// Bytes returns a slice of length b.Len() holding the unread portion of the buffer.
|
||||
// The slice is valid for use only until the next buffer modification (that is,
|
||||
// only until the next call to a method like [Buffer.Read], [Buffer.Write], [Buffer.Reset], or [Buffer.Truncate]).
|
||||
// only until the next call to a method like Read, Write, Reset, or Truncate).
|
||||
// The slice aliases the buffer content at least until the next buffer modification,
|
||||
// so immediate changes to the slice will affect the result of future reads.
|
||||
func (b *Buffer) Bytes() []byte { return b.buf[b.off:] }
|
||||
|
||||
// AvailableBuffer returns an empty buffer with b.Available() capacity.
|
||||
// This buffer is intended to be appended to and
|
||||
// passed to an immediately succeeding [Buffer.Write] call.
|
||||
// passed to an immediately succeeding Write call.
|
||||
// The buffer is only valid until the next write operation on b.
|
||||
func (b *Buffer) AvailableBuffer() []byte { return b.buf[len(b.buf):] }
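A usage sketch of the append-then-Write pattern AvailableBuffer exists for (illustrative, following the documented intent: append into the spare capacity, then hand the result straight back to Write):

package main

import (
	"bytes"
	"fmt"
	"strconv"
)

func main() {
	var buf bytes.Buffer
	for i := 0; i < 4; i++ {
		b := buf.AvailableBuffer()             // empty slice sharing buf's spare capacity
		b = strconv.AppendInt(b, int64(i), 10) // append without a separate allocation
		b = append(b, ' ')
		buf.Write(b) // usually a no-copy append back into buf
	}
	fmt.Println(buf.String()) // "0 1 2 3 "
}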
|
||||
|
||||
// String returns the contents of the unread portion of the buffer
|
||||
// as a string. If the [Buffer] is a nil pointer, it returns "<nil>".
|
||||
// as a string. If the Buffer is a nil pointer, it returns "<nil>".
|
||||
//
|
||||
// To build strings more efficiently, see the [strings.Builder] type.
|
||||
// To build strings more efficiently, see the strings.Builder type.
|
||||
func (b *Buffer) String() string {
|
||||
if b == nil {
|
||||
// Special case, useful in debugging.
|
||||
@@ -102,7 +102,7 @@ func (b *Buffer) Truncate(n int) {
|
||||
|
||||
// Reset resets the buffer to be empty,
|
||||
// but it retains the underlying storage for use by future writes.
|
||||
// Reset is the same as [Buffer.Truncate](0).
|
||||
// Reset is the same as Truncate(0).
|
||||
func (b *Buffer) Reset() {
|
||||
b.buf = b.buf[:0]
|
||||
b.off = 0
|
||||
@@ -160,7 +160,7 @@ func (b *Buffer) grow(n int) int {
|
||||
// another n bytes. After Grow(n), at least n bytes can be written to the
|
||||
// buffer without another allocation.
|
||||
// If n is negative, Grow will panic.
|
||||
// If the buffer can't grow it will panic with [ErrTooLarge].
|
||||
// If the buffer can't grow it will panic with ErrTooLarge.
|
||||
func (b *Buffer) Grow(n int) {
|
||||
if n < 0 {
|
||||
panic("bytes.Buffer.Grow: negative count")
|
||||
@@ -171,7 +171,7 @@ func (b *Buffer) Grow(n int) {
|
||||
|
||||
// Write appends the contents of p to the buffer, growing the buffer as
|
||||
// needed. The return value n is the length of p; err is always nil. If the
|
||||
// buffer becomes too large, Write will panic with [ErrTooLarge].
|
||||
// buffer becomes too large, Write will panic with ErrTooLarge.
|
||||
func (b *Buffer) Write(p []byte) (n int, err error) {
|
||||
b.lastRead = opInvalid
|
||||
m, ok := b.tryGrowByReslice(len(p))
|
||||
@@ -183,7 +183,7 @@ func (b *Buffer) Write(p []byte) (n int, err error) {
|
||||
|
||||
// WriteString appends the contents of s to the buffer, growing the buffer as
|
||||
// needed. The return value n is the length of s; err is always nil. If the
|
||||
// buffer becomes too large, WriteString will panic with [ErrTooLarge].
|
||||
// buffer becomes too large, WriteString will panic with ErrTooLarge.
|
||||
func (b *Buffer) WriteString(s string) (n int, err error) {
|
||||
b.lastRead = opInvalid
|
||||
m, ok := b.tryGrowByReslice(len(s))
|
||||
@@ -193,16 +193,16 @@ func (b *Buffer) WriteString(s string) (n int, err error) {
|
||||
return copy(b.buf[m:], s), nil
|
||||
}
|
||||
|
||||
// MinRead is the minimum slice size passed to a [Buffer.Read] call by
|
||||
// [Buffer.ReadFrom]. As long as the [Buffer] has at least MinRead bytes beyond
|
||||
// what is required to hold the contents of r, [Buffer.ReadFrom] will not grow the
|
||||
// MinRead is the minimum slice size passed to a Read call by
|
||||
// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond
|
||||
// what is required to hold the contents of r, ReadFrom will not grow the
|
||||
// underlying buffer.
|
||||
const MinRead = 512
|
||||
|
||||
// ReadFrom reads data from r until EOF and appends it to the buffer, growing
|
||||
// the buffer as needed. The return value n is the number of bytes read. Any
|
||||
// error except io.EOF encountered during the read is also returned. If the
|
||||
// buffer becomes too large, ReadFrom will panic with [ErrTooLarge].
|
||||
// buffer becomes too large, ReadFrom will panic with ErrTooLarge.
|
||||
func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
|
||||
b.lastRead = opInvalid
|
||||
for {
|
||||
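A small sketch of filling a Buffer from an io.Reader, the scenario the MinRead comment above is about (illustrative only):

package main

import (
	"bytes"
	"fmt"
	"strings"
)

func main() {
	var buf bytes.Buffer
	// ReadFrom keeps reading until EOF, growing buf so that at least
	// MinRead bytes of slack are available for each Read call.
	n, err := buf.ReadFrom(strings.NewReader("hello, buffer"))
	if err != nil {
		panic(err)
	}
	fmt.Println(n, buf.String()) // 13 hello, buffer
}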
@@ -247,13 +247,13 @@ func growSlice(b []byte, n int) []byte {
|
||||
c = 2 * cap(b)
|
||||
}
|
||||
b2 := append([]byte(nil), make([]byte, c)...)
|
||||
i := copy(b2, b)
|
||||
return b2[:i]
|
||||
copy(b2, b)
|
||||
return b2[:len(b)]
|
||||
}
|
||||
|
||||
// WriteTo writes data to w until the buffer is drained or an error occurs.
|
||||
// The return value n is the number of bytes written; it always fits into an
|
||||
// int, but it is int64 to match the [io.WriterTo] interface. Any error
|
||||
// int, but it is int64 to match the io.WriterTo interface. Any error
|
||||
// encountered during the write is also returned.
|
||||
func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
|
||||
b.lastRead = opInvalid
|
||||
@@ -279,9 +279,9 @@ func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
|
||||
}
|
||||
|
||||
// WriteByte appends the byte c to the buffer, growing the buffer as needed.
|
||||
// The returned error is always nil, but is included to match [bufio.Writer]'s
|
||||
// The returned error is always nil, but is included to match bufio.Writer's
|
||||
// WriteByte. If the buffer becomes too large, WriteByte will panic with
|
||||
// [ErrTooLarge].
|
||||
// ErrTooLarge.
|
||||
func (b *Buffer) WriteByte(c byte) error {
|
||||
b.lastRead = opInvalid
|
||||
m, ok := b.tryGrowByReslice(1)
|
||||
@@ -294,8 +294,8 @@ func (b *Buffer) WriteByte(c byte) error {
|
||||
|
||||
// WriteRune appends the UTF-8 encoding of Unicode code point r to the
|
||||
// buffer, returning its length and an error, which is always nil but is
|
||||
// included to match [bufio.Writer]'s WriteRune. The buffer is grown as needed;
|
||||
// if it becomes too large, WriteRune will panic with [ErrTooLarge].
|
||||
// included to match bufio.Writer's WriteRune. The buffer is grown as needed;
|
||||
// if it becomes too large, WriteRune will panic with ErrTooLarge.
|
||||
func (b *Buffer) WriteRune(r rune) (n int, err error) {
|
||||
// Compare as uint32 to correctly handle negative runes.
|
||||
if uint32(r) < utf8.RuneSelf {
|
||||
@@ -313,7 +313,7 @@ func (b *Buffer) WriteRune(r rune) (n int, err error) {
|
||||
|
||||
// Read reads the next len(p) bytes from the buffer or until the buffer
|
||||
// is drained. The return value n is the number of bytes read. If the
|
||||
// buffer has no data to return, err is [io.EOF] (unless len(p) is zero);
|
||||
// buffer has no data to return, err is io.EOF (unless len(p) is zero);
|
||||
// otherwise it is nil.
|
||||
func (b *Buffer) Read(p []byte) (n int, err error) {
|
||||
b.lastRead = opInvalid
|
||||
@@ -334,7 +334,7 @@ func (b *Buffer) Read(p []byte) (n int, err error) {
|
||||
}
|
||||
|
||||
// Next returns a slice containing the next n bytes from the buffer,
|
||||
// advancing the buffer as if the bytes had been returned by [Buffer.Read].
|
||||
// advancing the buffer as if the bytes had been returned by Read.
|
||||
// If there are fewer than n bytes in the buffer, Next returns the entire buffer.
|
||||
// The slice is only valid until the next call to a read or write method.
|
||||
func (b *Buffer) Next(n int) []byte {
|
||||
@@ -352,7 +352,7 @@ func (b *Buffer) Next(n int) []byte {
|
||||
}
|
||||
|
||||
// ReadByte reads and returns the next byte from the buffer.
|
||||
// If no byte is available, it returns error [io.EOF].
|
||||
// If no byte is available, it returns error io.EOF.
|
||||
func (b *Buffer) ReadByte() (byte, error) {
|
||||
if b.empty() {
|
||||
// Buffer is empty, reset to recover space.
|
||||
@@ -388,10 +388,10 @@ func (b *Buffer) ReadRune() (r rune, size int, err error) {
|
||||
return r, n, nil
|
||||
}
|
||||
|
||||
// UnreadRune unreads the last rune returned by [Buffer.ReadRune].
|
||||
// UnreadRune unreads the last rune returned by ReadRune.
|
||||
// If the most recent read or write operation on the buffer was
|
||||
// not a successful [Buffer.ReadRune], UnreadRune returns an error. (In this regard
|
||||
// it is stricter than [Buffer.UnreadByte], which will unread the last byte
|
||||
// not a successful ReadRune, UnreadRune returns an error. (In this regard
|
||||
// it is stricter than UnreadByte, which will unread the last byte
|
||||
// from any read operation.)
|
||||
func (b *Buffer) UnreadRune() error {
|
||||
if b.lastRead <= opInvalid {
|
||||
@@ -424,7 +424,7 @@ func (b *Buffer) UnreadByte() error {
|
||||
// ReadBytes reads until the first occurrence of delim in the input,
|
||||
// returning a slice containing the data up to and including the delimiter.
|
||||
// If ReadBytes encounters an error before finding a delimiter,
|
||||
// it returns the data read before the error and the error itself (often [io.EOF]).
|
||||
// it returns the data read before the error and the error itself (often io.EOF).
|
||||
// ReadBytes returns err != nil if and only if the returned data does not end in
|
||||
// delim.
|
||||
func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) {
|
||||
@@ -452,7 +452,7 @@ func (b *Buffer) readSlice(delim byte) (line []byte, err error) {
|
||||
// ReadString reads until the first occurrence of delim in the input,
|
||||
// returning a string containing the data up to and including the delimiter.
|
||||
// If ReadString encounters an error before finding a delimiter,
|
||||
// it returns the data read before the error and the error itself (often [io.EOF]).
|
||||
// it returns the data read before the error and the error itself (often io.EOF).
|
||||
// ReadString returns err != nil if and only if the returned data does not end
|
||||
// in delim.
|
||||
func (b *Buffer) ReadString(delim byte) (line string, err error) {
|
||||
@@ -460,23 +460,23 @@ func (b *Buffer) ReadString(delim byte) (line string, err error) {
|
||||
return string(slice), err
|
||||
}
|
||||
|
||||
// NewBuffer creates and initializes a new [Buffer] using buf as its
|
||||
// initial contents. The new [Buffer] takes ownership of buf, and the
|
||||
// NewBuffer creates and initializes a new Buffer using buf as its
|
||||
// initial contents. The new Buffer takes ownership of buf, and the
|
||||
// caller should not use buf after this call. NewBuffer is intended to
|
||||
// prepare a [Buffer] to read existing data. It can also be used to set
|
||||
// prepare a Buffer to read existing data. It can also be used to set
|
||||
// the initial size of the internal buffer for writing. To do that,
|
||||
// buf should have the desired capacity but a length of zero.
|
||||
//
|
||||
// In most cases, new([Buffer]) (or just declaring a [Buffer] variable) is
|
||||
// sufficient to initialize a [Buffer].
|
||||
// In most cases, new(Buffer) (or just declaring a Buffer variable) is
|
||||
// sufficient to initialize a Buffer.
|
||||
func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} }
|
||||
|
||||
// NewBufferString creates and initializes a new [Buffer] using string s as its
|
||||
// NewBufferString creates and initializes a new Buffer using string s as its
|
||||
// initial contents. It is intended to prepare a buffer to read an existing
|
||||
// string.
|
||||
//
|
||||
// In most cases, new([Buffer]) (or just declaring a [Buffer] variable) is
|
||||
// sufficient to initialize a [Buffer].
|
||||
// In most cases, new(Buffer) (or just declaring a Buffer variable) is
|
||||
// sufficient to initialize a Buffer.
|
||||
func NewBufferString(s string) *Buffer {
|
||||
return &Buffer{buf: []byte(s)}
|
||||
}
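A sketch of the two initialization styles the comments above contrast: NewBuffer with a zero-length, pre-sized slice to set the initial write capacity, and the zero value for everything else (illustrative):

package main

import (
	"bytes"
	"fmt"
)

func main() {
	// Preallocate 64 bytes of write capacity: desired capacity, length 0.
	w := bytes.NewBuffer(make([]byte, 0, 64))
	w.WriteString("preallocated")

	// For most uses the zero value is sufficient.
	var b bytes.Buffer
	b.WriteString("zero value works too")

	fmt.Println(w.String())
	fmt.Println(b.String())
}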
|
||||
|
||||
@@ -7,7 +7,6 @@ package bytes_test
|
||||
import (
|
||||
. "bytes"
|
||||
"fmt"
|
||||
"internal/testenv"
|
||||
"io"
|
||||
"math/rand"
|
||||
"strconv"
|
||||
@@ -95,22 +94,6 @@ func TestNewBuffer(t *testing.T) {
|
||||
check(t, "NewBuffer", buf, testString)
|
||||
}
|
||||
|
||||
var buf Buffer
|
||||
|
||||
// Calling NewBuffer and immediately shallow copying the Buffer struct
|
||||
// should not result in any allocations.
|
||||
// This can be used to reset the underlying []byte of an existing Buffer.
|
||||
func TestNewBufferShallow(t *testing.T) {
|
||||
testenv.SkipIfOptimizationOff(t)
|
||||
n := testing.AllocsPerRun(1000, func() {
|
||||
buf = *NewBuffer(testBytes)
|
||||
})
|
||||
if n > 0 {
|
||||
t.Errorf("allocations occurred while shallow copying")
|
||||
}
|
||||
check(t, "NewBuffer", &buf, testString)
|
||||
}
|
||||
|
||||
func TestNewBufferString(t *testing.T) {
|
||||
buf := NewBufferString(testString)
|
||||
check(t, "NewBufferString", buf, testString)
|
||||
@@ -213,7 +196,7 @@ func TestLargeByteWrites(t *testing.T) {
|
||||
func TestLargeStringReads(t *testing.T) {
|
||||
var buf Buffer
|
||||
for i := 3; i < 30; i += 3 {
|
||||
s := fillString(t, "TestLargeReads (1)", &buf, "", 5, testString[:len(testString)/i])
|
||||
s := fillString(t, "TestLargeReads (1)", &buf, "", 5, testString[0:len(testString)/i])
|
||||
empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(testString)))
|
||||
}
|
||||
check(t, "TestLargeStringReads (3)", &buf, "")
|
||||
@@ -222,7 +205,7 @@ func TestLargeStringReads(t *testing.T) {
|
||||
func TestLargeByteReads(t *testing.T) {
|
||||
var buf Buffer
|
||||
for i := 3; i < 30; i += 3 {
|
||||
s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[:len(testBytes)/i])
|
||||
s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
|
||||
empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(testString)))
|
||||
}
|
||||
check(t, "TestLargeByteReads (3)", &buf, "")
|
||||
@@ -274,7 +257,7 @@ func TestNil(t *testing.T) {
|
||||
func TestReadFrom(t *testing.T) {
|
||||
var buf Buffer
|
||||
for i := 3; i < 30; i += 3 {
|
||||
s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[:len(testBytes)/i])
|
||||
s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
|
||||
var b Buffer
|
||||
b.ReadFrom(&buf)
|
||||
empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(testString)))
|
||||
@@ -337,7 +320,7 @@ func TestReadFromNegativeReader(t *testing.T) {
|
||||
func TestWriteTo(t *testing.T) {
|
||||
var buf Buffer
|
||||
for i := 3; i < 30; i += 3 {
|
||||
s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[:len(testBytes)/i])
|
||||
s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
|
||||
var b Buffer
|
||||
buf.WriteTo(&b)
|
||||
empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(testString)))
|
||||
|
||||
@@ -8,10 +8,8 @@ package bytes
|
||||
|
||||
import (
|
||||
"internal/bytealg"
|
||||
"math/bits"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
_ "unsafe" // for linkname
|
||||
)
|
||||
|
||||
// Equal reports whether a and b
|
||||
@@ -114,7 +112,7 @@ func LastIndex(s, sep []byte) int {
|
||||
case n == 0:
|
||||
return len(s)
|
||||
case n == 1:
|
||||
return bytealg.LastIndexByte(s, sep[0])
|
||||
return LastIndexByte(s, sep[0])
|
||||
case n == len(s):
|
||||
if Equal(s, sep) {
|
||||
return 0
|
||||
@@ -123,21 +121,43 @@ func LastIndex(s, sep []byte) int {
|
||||
case n > len(s):
|
||||
return -1
|
||||
}
|
||||
return bytealg.LastIndexRabinKarp(s, sep)
|
||||
// Rabin-Karp search from the end of the string
|
||||
hashss, pow := bytealg.HashStrRevBytes(sep)
|
||||
last := len(s) - n
|
||||
var h uint32
|
||||
for i := len(s) - 1; i >= last; i-- {
|
||||
h = h*bytealg.PrimeRK + uint32(s[i])
|
||||
}
|
||||
if h == hashss && Equal(s[last:], sep) {
|
||||
return last
|
||||
}
|
||||
for i := last - 1; i >= 0; i-- {
|
||||
h *= bytealg.PrimeRK
|
||||
h += uint32(s[i])
|
||||
h -= pow * uint32(s[i+n])
|
||||
if h == hashss && Equal(s[i:i+n], sep) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
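For readers following the removed loop above, here is a self-contained sketch of the same reverse Rabin-Karp idea: hash the pattern right to left, then slide a window from the end of s toward the front, updating the hash in O(1) per step. The function name and the prime constant are illustrative stand-ins for the internal bytealg ones.

package main

import (
	"bytes"
	"fmt"
)

const primeRK = 16777619 // rolling-hash multiplier (illustrative choice)

func lastIndexRK(s, sep []byte) int {
	n := len(sep)
	switch {
	case n == 0:
		return len(s)
	case n > len(s):
		return -1
	}
	// Hash of sep read right to left, plus primeRK**n for removing the
	// byte that leaves the window.
	var hashsep, pow uint32 = 0, 1
	for i := n - 1; i >= 0; i-- {
		hashsep = hashsep*primeRK + uint32(sep[i])
	}
	for i := 0; i < n; i++ {
		pow *= primeRK
	}
	last := len(s) - n
	var h uint32
	for i := len(s) - 1; i >= last; i-- {
		h = h*primeRK + uint32(s[i])
	}
	if h == hashsep && bytes.Equal(s[last:], sep) {
		return last
	}
	for i := last - 1; i >= 0; i-- {
		// Add the byte entering on the left, drop the byte leaving on the right.
		h = h*primeRK + uint32(s[i]) - pow*uint32(s[i+n])
		if h == hashsep && bytes.Equal(s[i:i+n], sep) {
			return i
		}
	}
	return -1
}

func main() {
	fmt.Println(lastIndexRK([]byte("go gopher go"), []byte("go"))) // 10
}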
|
||||
|
||||
// LastIndexByte returns the index of the last instance of c in s, or -1 if c is not present in s.
|
||||
func LastIndexByte(s []byte, c byte) int {
|
||||
return bytealg.LastIndexByte(s, c)
|
||||
for i := len(s) - 1; i >= 0; i-- {
|
||||
if s[i] == c {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// IndexRune interprets s as a sequence of UTF-8-encoded code points.
|
||||
// It returns the byte index of the first occurrence in s of the given rune.
|
||||
// It returns -1 if rune is not present in s.
|
||||
// If r is [utf8.RuneError], it returns the first instance of any
|
||||
// If r is utf8.RuneError, it returns the first instance of any
|
||||
// invalid UTF-8 byte sequence.
|
||||
func IndexRune(s []byte, r rune) int {
|
||||
const haveFastIndex = bytealg.MaxBruteForce > 0
|
||||
switch {
|
||||
case 0 <= r && r < utf8.RuneSelf:
|
||||
return IndexByte(s, byte(r))
|
||||
@@ -153,64 +173,9 @@ func IndexRune(s []byte, r rune) int {
|
||||
case !utf8.ValidRune(r):
|
||||
return -1
|
||||
default:
|
||||
// Search for rune r using the last byte of its UTF-8 encoded form.
|
||||
// The distribution of the last byte is more uniform compared to the
|
||||
// first byte which has a 78% chance of being [240, 243, 244].
|
||||
var b [utf8.UTFMax]byte
|
||||
n := utf8.EncodeRune(b[:], r)
|
||||
last := n - 1
|
||||
i := last
|
||||
fails := 0
|
||||
for i < len(s) {
|
||||
if s[i] != b[last] {
|
||||
o := IndexByte(s[i+1:], b[last])
|
||||
if o < 0 {
|
||||
return -1
|
||||
}
|
||||
i += o + 1
|
||||
}
|
||||
// Step backwards comparing bytes.
|
||||
for j := 1; j < n; j++ {
|
||||
if s[i-j] != b[last-j] {
|
||||
goto next
|
||||
}
|
||||
}
|
||||
return i - last
|
||||
next:
|
||||
fails++
|
||||
i++
|
||||
if (haveFastIndex && fails > bytealg.Cutover(i)) && i < len(s) ||
|
||||
(!haveFastIndex && fails >= 4+i>>4 && i < len(s)) {
|
||||
goto fallback
|
||||
}
|
||||
}
|
||||
return -1
|
||||
|
||||
fallback:
|
||||
// Switch to bytealg.Index, if available, or a brute force search when
|
||||
// IndexByte returns too many false positives.
|
||||
if haveFastIndex {
|
||||
if j := bytealg.Index(s[i-last:], b[:n]); j >= 0 {
|
||||
return i + j - last
|
||||
}
|
||||
} else {
|
||||
// If bytealg.Index is not available a brute force search is
|
||||
// ~1.5-3x faster than Rabin-Karp since n is small.
|
||||
c0 := b[last]
|
||||
c1 := b[last-1] // There are at least 2 chars to match
|
||||
loop:
|
||||
for ; i < len(s); i++ {
|
||||
if s[i] == c0 && s[i-1] == c1 {
|
||||
for k := 2; k < n; k++ {
|
||||
if s[i-k] != b[last-k] {
|
||||
continue loop
|
||||
}
|
||||
}
|
||||
return i - last
|
||||
}
|
||||
}
|
||||
}
|
||||
return -1
|
||||
return Index(s, b[:n])
|
||||
}
|
||||
}
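The removed fast path keys the search on the last byte of the rune's UTF-8 encoding because that byte is more uniformly distributed than the first. A stripped-down sketch of the idea (hypothetical helper name, omitting the RuneError special case and the bytealg cutover heuristics):

package main

import (
	"bytes"
	"fmt"
	"unicode/utf8"
)

// indexRuneByLastByte scans for the final byte of r's UTF-8 encoding and
// then compares the preceding bytes of the candidate match.
func indexRuneByLastByte(s []byte, r rune) int {
	if uint32(r) < utf8.RuneSelf {
		return bytes.IndexByte(s, byte(r))
	}
	if !utf8.ValidRune(r) {
		return -1
	}
	var b [utf8.UTFMax]byte
	n := utf8.EncodeRune(b[:], r)
	last := n - 1
	for i := last; i < len(s); i++ {
		if s[i] != b[last] {
			continue
		}
		if bytes.Equal(s[i-last:i+1], b[:n]) {
			return i - last
		}
	}
	return -1
}

func main() {
	fmt.Println(indexRuneByLastByte([]byte("oxoxoxoxox☺"), '☺')) // 10
}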
|
||||
|
||||
@@ -412,20 +377,22 @@ func genSplit(s, sep []byte, sepSave, n int) [][]byte {
|
||||
// the subslices between those separators.
|
||||
// If sep is empty, SplitN splits after each UTF-8 sequence.
|
||||
// The count determines the number of subslices to return:
|
||||
// - n > 0: at most n subslices; the last subslice will be the unsplit remainder;
|
||||
// - n == 0: the result is nil (zero subslices);
|
||||
// - n < 0: all subslices.
|
||||
//
|
||||
// To split around the first instance of a separator, see [Cut].
|
||||
// n > 0: at most n subslices; the last subslice will be the unsplit remainder.
|
||||
// n == 0: the result is nil (zero subslices)
|
||||
// n < 0: all subslices
|
||||
//
|
||||
// To split around the first instance of a separator, see Cut.
|
||||
func SplitN(s, sep []byte, n int) [][]byte { return genSplit(s, sep, 0, n) }
|
||||
|
||||
// SplitAfterN slices s into subslices after each instance of sep and
|
||||
// returns a slice of those subslices.
|
||||
// If sep is empty, SplitAfterN splits after each UTF-8 sequence.
|
||||
// The count determines the number of subslices to return:
|
||||
// - n > 0: at most n subslices; the last subslice will be the unsplit remainder;
|
||||
// - n == 0: the result is nil (zero subslices);
|
||||
// - n < 0: all subslices.
|
||||
//
|
||||
// n > 0: at most n subslices; the last subslice will be the unsplit remainder.
|
||||
// n == 0: the result is nil (zero subslices)
|
||||
// n < 0: all subslices
|
||||
func SplitAfterN(s, sep []byte, n int) [][]byte {
|
||||
return genSplit(s, sep, len(sep), n)
|
||||
}
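A short demonstration of the count semantics listed above (illustrative program, behavior as documented):

package main

import (
	"bytes"
	"fmt"
)

func main() {
	s := []byte("a,b,c,d")
	fmt.Printf("%q\n", bytes.SplitN(s, []byte(","), 2))      // ["a" "b,c,d"]: remainder left unsplit
	fmt.Printf("%q\n", bytes.SplitN(s, []byte(","), 0))      // []: n == 0 yields nil
	fmt.Printf("%q\n", bytes.SplitN(s, []byte(","), -1))     // ["a" "b" "c" "d"]: all subslices
	fmt.Printf("%q\n", bytes.SplitAfterN(s, []byte(","), 2)) // ["a," "b,c,d"]: separators retained
}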
|
||||
@@ -435,7 +402,7 @@ func SplitAfterN(s, sep []byte, n int) [][]byte {
|
||||
// If sep is empty, Split splits after each UTF-8 sequence.
|
||||
// It is equivalent to SplitN with a count of -1.
|
||||
//
|
||||
// To split around the first instance of a separator, see [Cut].
|
||||
// To split around the first instance of a separator, see Cut.
|
||||
func Split(s, sep []byte) [][]byte { return genSplit(s, sep, 0, -1) }
|
||||
|
||||
// SplitAfter slices s into all subslices after each instance of sep and
|
||||
@@ -450,7 +417,7 @@ var asciiSpace = [256]uint8{'\t': 1, '\n': 1, '\v': 1, '\f': 1, '\r': 1, ' ': 1}
|
||||
|
||||
// Fields interprets s as a sequence of UTF-8-encoded code points.
|
||||
// It splits the slice s around each instance of one or more consecutive white space
|
||||
// characters, as defined by [unicode.IsSpace], returning a slice of subslices of s or an
|
||||
// characters, as defined by unicode.IsSpace, returning a slice of subslices of s or an
|
||||
// empty slice if s contains only white space.
|
||||
func Fields(s []byte) [][]byte {
|
||||
// First count the fields.
|
||||
@@ -581,7 +548,7 @@ func Join(s [][]byte, sep []byte) []byte {
|
||||
n += len(v)
|
||||
}
|
||||
|
||||
b := bytealg.MakeNoZero(n)[:n:n]
|
||||
b := bytealg.MakeNoZero(n)
|
||||
bp := copy(b, s[0])
|
||||
for _, v := range s[1:] {
|
||||
bp += copy(b[bp:], sep)
|
||||
@@ -590,12 +557,12 @@ func Join(s [][]byte, sep []byte) []byte {
|
||||
return b
|
||||
}
|
||||
|
||||
// HasPrefix reports whether the byte slice s begins with prefix.
|
||||
// HasPrefix tests whether the byte slice s begins with prefix.
|
||||
func HasPrefix(s, prefix []byte) bool {
|
||||
return len(s) >= len(prefix) && Equal(s[:len(prefix)], prefix)
|
||||
return len(s) >= len(prefix) && Equal(s[0:len(prefix)], prefix)
|
||||
}
|
||||
|
||||
// HasSuffix reports whether the byte slice s ends with suffix.
|
||||
// HasSuffix tests whether the byte slice s ends with suffix.
|
||||
func HasSuffix(s, suffix []byte) bool {
|
||||
return len(s) >= len(suffix) && Equal(s[len(s)-len(suffix):], suffix)
|
||||
}
|
||||
@@ -624,18 +591,6 @@ func Map(mapping func(r rune) rune, s []byte) []byte {
|
||||
return b
|
||||
}
|
||||
|
||||
// Despite being an exported symbol,
|
||||
// Repeat is linknamed by widely used packages.
|
||||
// Notable members of the hall of shame include:
|
||||
// - gitee.com/quant1x/num
|
||||
//
|
||||
// Do not remove or change the type signature.
|
||||
// See go.dev/issue/67401.
|
||||
//
|
||||
// Note that this comment is not part of the doc comment.
|
||||
//
|
||||
//go:linkname Repeat
|
||||
|
||||
// Repeat returns a new byte slice consisting of count copies of b.
|
||||
//
|
||||
// It panics if count is negative or if the result of (len(b) * count)
|
||||
@@ -651,11 +606,10 @@ func Repeat(b []byte, count int) []byte {
|
||||
if count < 0 {
|
||||
panic("bytes: negative Repeat count")
|
||||
}
|
||||
hi, lo := bits.Mul(uint(len(b)), uint(count))
|
||||
if hi > 0 || lo > uint(maxInt) {
|
||||
if len(b) >= maxInt/count {
|
||||
panic("bytes: Repeat output length overflow")
|
||||
}
|
||||
n := int(lo) // lo = len(b) * count
|
||||
n := len(b) * count
|
||||
|
||||
if len(b) == 0 {
|
||||
return []byte{}
|
||||
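The replacement check above uses bits.Mul to detect overflow in full precision rather than dividing first. A standalone sketch of that test (mulFits is a hypothetical helper, not stdlib API):

package main

import (
	"fmt"
	"math/bits"
)

const maxInt = int(^uint(0) >> 1)

// mulFits reports whether n*count fits in a non-negative int: the product's
// high word must be zero and the low word must not exceed maxInt, the same
// condition the new Repeat code checks before allocating.
func mulFits(n, count int) bool {
	hi, lo := bits.Mul(uint(n), uint(count))
	return hi == 0 && lo <= uint(maxInt)
}

func main() {
	fmt.Println(mulFits(10, 3))             // true
	fmt.Println(mulFits(255, maxInt/255+1)) // false: product overflows int
}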
@@ -679,10 +633,13 @@ func Repeat(b []byte, count int) []byte {
|
||||
chunkMax = len(b)
|
||||
}
|
||||
}
|
||||
nb := bytealg.MakeNoZero(n)[:n:n]
|
||||
nb := bytealg.MakeNoZero(n)
|
||||
bp := copy(nb, b)
|
||||
for bp < n {
|
||||
chunk := min(bp, chunkMax)
|
||||
chunk := bp
|
||||
if chunk > chunkMax {
|
||||
chunk = chunkMax
|
||||
}
|
||||
bp += copy(nb[bp:], nb[:chunk])
|
||||
}
|
||||
return nb
|
||||
@@ -706,7 +663,7 @@ func ToUpper(s []byte) []byte {
|
||||
// Just return a copy.
|
||||
return append([]byte(""), s...)
|
||||
}
|
||||
b := bytealg.MakeNoZero(len(s))[:len(s):len(s)]
|
||||
b := bytealg.MakeNoZero(len(s))
|
||||
for i := 0; i < len(s); i++ {
|
||||
c := s[i]
|
||||
if 'a' <= c && c <= 'z' {
|
||||
@@ -736,7 +693,7 @@ func ToLower(s []byte) []byte {
|
||||
if !hasUpper {
|
||||
return append([]byte(""), s...)
|
||||
}
|
||||
b := bytealg.MakeNoZero(len(s))[:len(s):len(s)]
|
||||
b := bytealg.MakeNoZero(len(s))
|
||||
for i := 0; i < len(s); i++ {
|
||||
c := s[i]
|
||||
if 'A' <= c && c <= 'Z' {
|
||||
@@ -1379,7 +1336,7 @@ func Index(s, sep []byte) int {
|
||||
// we should cutover at even larger average skips,
|
||||
// because Equal becomes that much more expensive.
|
||||
// This code does not take that effect into account.
|
||||
j := bytealg.IndexRabinKarp(s[i:], sep)
|
||||
j := bytealg.IndexRabinKarpBytes(s[i:], sep)
|
||||
if j < 0 {
|
||||
return -1
|
||||
}
|
||||
|
||||
@@ -1,21 +0,0 @@
|
||||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build js && wasm
|
||||
|
||||
package bytes_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestIssue65571(t *testing.T) {
|
||||
b := make([]byte, 1<<31+1)
|
||||
b[1<<31] = 1
|
||||
i := bytes.IndexByte(b, 1)
|
||||
if i != 1<<31 {
|
||||
t.Errorf("IndexByte(b, 1) = %d; want %d", i, 1<<31)
|
||||
}
|
||||
}
|
||||
@@ -8,10 +8,9 @@ import (
|
||||
. "bytes"
|
||||
"fmt"
|
||||
"internal/testenv"
|
||||
"iter"
|
||||
"math"
|
||||
"math/rand"
|
||||
"slices"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"unicode"
|
||||
@@ -19,6 +18,18 @@ import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func eq(a, b []string) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < len(a); i++ {
|
||||
if a[i] != b[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func sliceOfString(s [][]byte) []string {
|
||||
result := make([]string, len(s))
|
||||
for i, v := range s {
|
||||
@@ -27,37 +38,6 @@ func sliceOfString(s [][]byte) []string {
|
||||
return result
|
||||
}
|
||||
|
||||
func collect(t *testing.T, seq iter.Seq[[]byte]) [][]byte {
|
||||
out := slices.Collect(seq)
|
||||
out1 := slices.Collect(seq)
|
||||
if !slices.Equal(sliceOfString(out), sliceOfString(out1)) {
|
||||
t.Fatalf("inconsistent seq:\n%s\n%s", out, out1)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
type LinesTest struct {
|
||||
a string
|
||||
b []string
|
||||
}
|
||||
|
||||
var linesTests = []LinesTest{
|
||||
{a: "abc\nabc\n", b: []string{"abc\n", "abc\n"}},
|
||||
{a: "abc\r\nabc", b: []string{"abc\r\n", "abc"}},
|
||||
{a: "abc\r\n", b: []string{"abc\r\n"}},
|
||||
{a: "\nabc", b: []string{"\n", "abc"}},
|
||||
{a: "\nabc\n\n", b: []string{"\n", "abc\n", "\n"}},
|
||||
}
|
||||
|
||||
func TestLines(t *testing.T) {
|
||||
for _, s := range linesTests {
|
||||
result := sliceOfString(slices.Collect(Lines([]byte(s.a))))
|
||||
if !slices.Equal(result, s.b) {
|
||||
t.Errorf(`slices.Collect(Lines(%q)) = %q; want %q`, s.a, result, s.b)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// For ease of reading, the test cases use strings that are converted to byte
|
||||
// slices before invoking the functions.
|
||||
|
||||
@@ -197,11 +177,6 @@ var indexTests = []BinOpTest{
|
||||
{"oxoxoxoxoxoxoxoxoxoxoxox", "oy", -1},
|
||||
// test fallback to Rabin-Karp.
|
||||
{"000000000000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000000001", 5},
|
||||
// test fallback to IndexRune
|
||||
{"oxoxoxoxoxoxoxoxoxoxox☺", "☺", 22},
|
||||
// invalid UTF-8 byte sequence (must be longer than bytealg.MaxBruteForce to
|
||||
// test that we don't use IndexRune)
|
||||
{"xx0123456789012345678901234567890123456789012345678901234567890120123456789012345678901234567890123456xxx\xed\x9f\xc0", "\xed\x9f\xc0", 105},
|
||||
}
|
||||
|
||||
var lastIndexTests = []BinOpTest{
|
||||
@@ -450,31 +425,6 @@ func TestIndexRune(t *testing.T) {
|
||||
{"some_text=some_value", '=', 9},
|
||||
{"☺a", 'a', 3},
|
||||
{"a☻☺b", '☺', 4},
|
||||
{"𠀳𠀗𠀾𠁄𠀧𠁆𠁂𠀫𠀖𠀪𠀲𠀴𠁀𠀨𠀿", '𠀿', 56},
|
||||
|
||||
// 2 bytes
|
||||
{"ӆ", 'ӆ', 0},
|
||||
{"a", 'ӆ', -1},
|
||||
{" ӆ", 'ӆ', 2},
|
||||
{" a", 'ӆ', -1},
|
||||
{strings.Repeat("ц", 64) + "ӆ", 'ӆ', 128}, // test cutover
|
||||
{strings.Repeat("ц", 64), 'ӆ', -1},
|
||||
|
||||
// 3 bytes
|
||||
{"Ꚁ", 'Ꚁ', 0},
|
||||
{"a", 'Ꚁ', -1},
|
||||
{" Ꚁ", 'Ꚁ', 2},
|
||||
{" a", 'Ꚁ', -1},
|
||||
{strings.Repeat("Ꙁ", 64) + "Ꚁ", 'Ꚁ', 192}, // test cutover
|
||||
{strings.Repeat("Ꙁ", 64) + "Ꚁ", '䚀', -1}, // 'Ꚁ' and '䚀' share the same last two bytes
|
||||
|
||||
// 4 bytes
|
||||
{"𡌀", '𡌀', 0},
|
||||
{"a", '𡌀', -1},
|
||||
{" 𡌀", '𡌀', 2},
|
||||
{" a", '𡌀', -1},
|
||||
{strings.Repeat("𡋀", 64) + "𡌀", '𡌀', 256}, // test cutover
|
||||
{strings.Repeat("𡋀", 64) + "𡌀", '𣌀', -1}, // '𡌀' and '𣌀' share the same last two bytes
|
||||
|
||||
// RuneError should match any invalid UTF-8 byte sequence.
|
||||
{"<22>", '<27>', 0},
|
||||
@@ -488,13 +438,6 @@ func TestIndexRune(t *testing.T) {
|
||||
{"a☺b☻c☹d\xe2\x98<39>\xff<66>\xed\xa0\x80", -1, -1},
|
||||
{"a☺b☻c☹d\xe2\x98<39>\xff<66>\xed\xa0\x80", 0xD800, -1}, // Surrogate pair
|
||||
{"a☺b☻c☹d\xe2\x98<39>\xff<66>\xed\xa0\x80", utf8.MaxRune + 1, -1},
|
||||
|
||||
// Test the cutover to bytealg.Index when it is triggered in
|
||||
// the middle of rune that contains consecutive runs of equal bytes.
|
||||
{"aaaaaKKKK\U000bc104", '\U000bc104', 17}, // cutover: (n + 16) / 8
|
||||
{"aaaaaKKKK鄄", '鄄', 17},
|
||||
{"aaKKKKKa\U000bc104", '\U000bc104', 18}, // cutover: 4 + n>>4
|
||||
{"aaKKKKKa鄄", '鄄', 18},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
if got := IndexRune([]byte(tt.in), tt.rune); got != tt.want {
|
||||
@@ -642,21 +585,6 @@ func BenchmarkIndexRuneASCII(b *testing.B) {
|
||||
benchBytes(b, indexSizes, bmIndexRuneASCII(IndexRune))
|
||||
}
|
||||
|
||||
func BenchmarkIndexRuneUnicode(b *testing.B) {
|
||||
b.Run("Latin", func(b *testing.B) {
|
||||
// Latin is mostly 1, 2, 3 byte runes.
|
||||
benchBytes(b, indexSizes, bmIndexRuneUnicode(unicode.Latin, 'é'))
|
||||
})
|
||||
b.Run("Cyrillic", func(b *testing.B) {
|
||||
// Cyrillic is mostly 2 and 3 byte runes.
|
||||
benchBytes(b, indexSizes, bmIndexRuneUnicode(unicode.Cyrillic, 'Ꙁ'))
|
||||
})
|
||||
b.Run("Han", func(b *testing.B) {
|
||||
// Han consists only of 3 and 4 byte runes.
|
||||
benchBytes(b, indexSizes, bmIndexRuneUnicode(unicode.Han, '𠀿'))
|
||||
})
|
||||
}
|
||||
|
||||
func bmIndexRuneASCII(index func([]byte, rune) int) func(b *testing.B, n int) {
|
||||
return func(b *testing.B, n int) {
|
||||
buf := bmbuf[0:n]
|
||||
@@ -687,61 +615,6 @@ func bmIndexRune(index func([]byte, rune) int) func(b *testing.B, n int) {
|
||||
}
|
||||
}
|
||||
|
||||
func bmIndexRuneUnicode(rt *unicode.RangeTable, needle rune) func(b *testing.B, n int) {
|
||||
var rs []rune
|
||||
for _, r16 := range rt.R16 {
|
||||
for r := rune(r16.Lo); r <= rune(r16.Hi); r += rune(r16.Stride) {
|
||||
if r != needle {
|
||||
rs = append(rs, rune(r))
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, r32 := range rt.R32 {
|
||||
for r := rune(r32.Lo); r <= rune(r32.Hi); r += rune(r32.Stride) {
|
||||
if r != needle {
|
||||
rs = append(rs, rune(r))
|
||||
}
|
||||
}
|
||||
}
|
||||
// Shuffle the runes so that they are not in descending order.
|
||||
// The sort is deterministic since this is used for benchmarks,
|
||||
// which need to be repeatable.
|
||||
rr := rand.New(rand.NewSource(1))
|
||||
rr.Shuffle(len(rs), func(i, j int) {
|
||||
rs[i], rs[j] = rs[j], rs[i]
|
||||
})
|
||||
uchars := string(rs)
|
||||
|
||||
return func(b *testing.B, n int) {
|
||||
buf := bmbuf[0:n]
|
||||
o := copy(buf, uchars)
|
||||
for o < len(buf) {
|
||||
o += copy(buf[o:], uchars)
|
||||
}
|
||||
|
||||
// Make space for the needle rune at the end of buf.
|
||||
m := utf8.RuneLen(needle)
|
||||
for o := m; o > 0; {
|
||||
_, sz := utf8.DecodeLastRune(buf)
|
||||
copy(buf[len(buf)-sz:], "\x00\x00\x00\x00")
|
||||
buf = buf[:len(buf)-sz]
|
||||
o -= sz
|
||||
}
|
||||
buf = utf8.AppendRune(buf[:n-m], needle)
|
||||
|
||||
n -= m // adjust for rune len
|
||||
for i := 0; i < b.N; i++ {
|
||||
j := IndexRune(buf, needle)
|
||||
if j != n {
|
||||
b.Fatal("bad index", j)
|
||||
}
|
||||
}
|
||||
for i := range buf {
|
||||
buf[i] = '\x00'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkEqual(b *testing.B) {
|
||||
b.Run("0", func(b *testing.B) {
|
||||
var buf [4]byte
|
||||
@@ -756,11 +629,6 @@ func BenchmarkEqual(b *testing.B) {
|
||||
})
|
||||
|
||||
sizes := []int{1, 6, 9, 15, 16, 20, 32, 4 << 10, 4 << 20, 64 << 20}
|
||||
|
||||
b.Run("same", func(b *testing.B) {
|
||||
benchBytes(b, sizes, bmEqual(func(a, b []byte) bool { return Equal(a, a) }))
|
||||
})
|
||||
|
||||
benchBytes(b, sizes, bmEqual(Equal))
|
||||
}
|
||||
|
||||
@@ -935,18 +803,10 @@ func TestSplit(t *testing.T) {
|
||||
}
|
||||
|
||||
result := sliceOfString(a)
|
||||
if !slices.Equal(result, tt.a) {
|
||||
if !eq(result, tt.a) {
|
||||
t.Errorf(`Split(%q, %q, %d) = %v; want %v`, tt.s, tt.sep, tt.n, result, tt.a)
|
||||
continue
|
||||
}
|
||||
|
||||
if tt.n < 0 {
|
||||
b := sliceOfString(slices.Collect(SplitSeq([]byte(tt.s), []byte(tt.sep))))
|
||||
if !slices.Equal(b, tt.a) {
|
||||
t.Errorf(`collect(SplitSeq(%q, %q)) = %v; want %v`, tt.s, tt.sep, b, tt.a)
|
||||
}
|
||||
}
|
||||
|
||||
if tt.n == 0 || len(a) == 0 {
|
||||
continue
|
||||
}
|
||||
@@ -960,8 +820,8 @@ func TestSplit(t *testing.T) {
|
||||
t.Errorf(`Join(Split(%q, %q, %d), %q) = %q`, tt.s, tt.sep, tt.n, tt.sep, s)
|
||||
}
|
||||
if tt.n < 0 {
|
||||
b := sliceOfString(Split([]byte(tt.s), []byte(tt.sep)))
|
||||
if !slices.Equal(result, b) {
|
||||
b := Split([]byte(tt.s), []byte(tt.sep))
|
||||
if !reflect.DeepEqual(a, b) {
|
||||
t.Errorf("Split disagrees withSplitN(%q, %q, %d) = %v; want %v", tt.s, tt.sep, tt.n, b, a)
|
||||
}
|
||||
}
|
||||
@@ -1001,18 +861,11 @@ func TestSplitAfter(t *testing.T) {
|
||||
}
|
||||
|
||||
result := sliceOfString(a)
|
||||
if !slices.Equal(result, tt.a) {
|
||||
if !eq(result, tt.a) {
|
||||
t.Errorf(`Split(%q, %q, %d) = %v; want %v`, tt.s, tt.sep, tt.n, result, tt.a)
|
||||
continue
|
||||
}
|
||||
|
||||
if tt.n < 0 {
|
||||
b := sliceOfString(slices.Collect(SplitAfterSeq([]byte(tt.s), []byte(tt.sep))))
|
||||
if !slices.Equal(b, tt.a) {
|
||||
t.Errorf(`collect(SplitAfterSeq(%q, %q)) = %v; want %v`, tt.s, tt.sep, b, tt.a)
|
||||
}
|
||||
}
|
||||
|
||||
if want := tt.a[len(tt.a)-1] + "z"; string(x) != want {
|
||||
t.Errorf("last appended result was %s; want %s", x, want)
|
||||
}
|
||||
@@ -1022,8 +875,8 @@ func TestSplitAfter(t *testing.T) {
|
||||
t.Errorf(`Join(Split(%q, %q, %d), %q) = %q`, tt.s, tt.sep, tt.n, tt.sep, s)
|
||||
}
|
||||
if tt.n < 0 {
|
||||
b := sliceOfString(SplitAfter([]byte(tt.s), []byte(tt.sep)))
|
||||
if !slices.Equal(result, b) {
|
||||
b := SplitAfter([]byte(tt.s), []byte(tt.sep))
|
||||
if !reflect.DeepEqual(a, b) {
|
||||
t.Errorf("SplitAfter disagrees withSplitAfterN(%q, %q, %d) = %v; want %v", tt.s, tt.sep, tt.n, b, a)
|
||||
}
|
||||
}
|
||||
@@ -1061,16 +914,11 @@ func TestFields(t *testing.T) {
|
||||
}
|
||||
|
||||
result := sliceOfString(a)
|
||||
if !slices.Equal(result, tt.a) {
|
||||
if !eq(result, tt.a) {
|
||||
t.Errorf("Fields(%q) = %v; want %v", tt.s, a, tt.a)
|
||||
continue
|
||||
}
|
||||
|
||||
result2 := sliceOfString(collect(t, FieldsSeq([]byte(tt.s))))
|
||||
if !slices.Equal(result2, tt.a) {
|
||||
t.Errorf(`collect(FieldsSeq(%q)) = %v; want %v`, tt.s, result2, tt.a)
|
||||
}
|
||||
|
||||
if string(b) != tt.s {
|
||||
t.Errorf("slice changed to %s; want %s", string(b), tt.s)
|
||||
}
|
||||
@@ -1086,7 +934,7 @@ func TestFieldsFunc(t *testing.T) {
|
||||
for _, tt := range fieldstests {
|
||||
a := FieldsFunc([]byte(tt.s), unicode.IsSpace)
|
||||
result := sliceOfString(a)
|
||||
if !slices.Equal(result, tt.a) {
|
||||
if !eq(result, tt.a) {
|
||||
t.Errorf("FieldsFunc(%q, unicode.IsSpace) = %v; want %v", tt.s, a, tt.a)
|
||||
continue
|
||||
}
|
||||
@@ -1109,15 +957,10 @@ func TestFieldsFunc(t *testing.T) {
|
||||
}
|
||||
|
||||
result := sliceOfString(a)
|
||||
if !slices.Equal(result, tt.a) {
|
||||
if !eq(result, tt.a) {
|
||||
t.Errorf("FieldsFunc(%q) = %v, want %v", tt.s, a, tt.a)
|
||||
}
|
||||
|
||||
result2 := sliceOfString(collect(t, FieldsFuncSeq([]byte(tt.s), pred)))
|
||||
if !slices.Equal(result2, tt.a) {
|
||||
t.Errorf(`collect(FieldsFuncSeq(%q)) = %v; want %v`, tt.s, result2, tt.a)
|
||||
}
|
||||
|
||||
if string(b) != tt.s {
|
||||
t.Errorf("slice changed to %s; want %s", b, tt.s)
|
||||
}
|
||||
@@ -1394,48 +1237,45 @@ func repeat(b []byte, count int) (err error) {
|
||||
|
||||
// See Issue golang.org/issue/16237
|
||||
func TestRepeatCatchesOverflow(t *testing.T) {
|
||||
type testCase struct {
|
||||
tests := [...]struct {
|
||||
s string
|
||||
count int
|
||||
errStr string
|
||||
}
|
||||
|
||||
runTestCases := func(prefix string, tests []testCase) {
|
||||
for i, tt := range tests {
|
||||
err := repeat([]byte(tt.s), tt.count)
|
||||
if tt.errStr == "" {
|
||||
if err != nil {
|
||||
t.Errorf("#%d panicked %v", i, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if err == nil || !strings.Contains(err.Error(), tt.errStr) {
|
||||
t.Errorf("%s#%d got %q want %q", prefix, i, err, tt.errStr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const maxInt = int(^uint(0) >> 1)
|
||||
|
||||
runTestCases("", []testCase{
|
||||
}{
|
||||
0: {"--", -2147483647, "negative"},
|
||||
1: {"", maxInt, ""},
|
||||
1: {"", int(^uint(0) >> 1), ""},
|
||||
2: {"-", 10, ""},
|
||||
3: {"gopher", 0, ""},
|
||||
4: {"-", -1, "negative"},
|
||||
5: {"--", -102, "negative"},
|
||||
6: {string(make([]byte, 255)), int((^uint(0))/255 + 1), "overflow"},
|
||||
})
|
||||
|
||||
const is64Bit = 1<<(^uintptr(0)>>63)/2 != 0
|
||||
if !is64Bit {
|
||||
return
|
||||
}
|
||||
|
||||
runTestCases("64-bit", []testCase{
|
||||
0: {"-", maxInt, "out of range"},
|
||||
})
|
||||
for i, tt := range tests {
|
||||
err := repeat([]byte(tt.s), tt.count)
|
||||
if tt.errStr == "" {
|
||||
if err != nil {
|
||||
t.Errorf("#%d panicked %v", i, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if err == nil || !strings.Contains(err.Error(), tt.errStr) {
|
||||
t.Errorf("#%d expected %q got %q", i, tt.errStr, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func runesEqual(a, b []rune) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
for i, r := range a {
|
||||
if r != b[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
type RunesTest struct {
|
||||
@@ -1458,7 +1298,7 @@ func TestRunes(t *testing.T) {
|
||||
for _, tt := range RunesTests {
|
||||
tin := []byte(tt.in)
|
||||
a := Runes(tin)
|
||||
if !slices.Equal(a, tt.out) {
|
||||
if !runesEqual(a, tt.out) {
|
||||
t.Errorf("Runes(%q) = %v; want %v", tin, a, tt.out)
|
||||
continue
|
||||
}
|
||||
@@ -2184,11 +2024,6 @@ func makeBenchInputHard() []byte {
|
||||
var benchInputHard = makeBenchInputHard()
|
||||
|
||||
func benchmarkIndexHard(b *testing.B, sep []byte) {
|
||||
n := Index(benchInputHard, sep)
|
||||
if n < 0 {
|
||||
n = len(benchInputHard)
|
||||
}
|
||||
b.SetBytes(int64(n))
|
||||
for i := 0; i < b.N; i++ {
|
||||
Index(benchInputHard, sep)
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ package bytes_test
|
||||
import (
|
||||
. "bytes"
|
||||
"fmt"
|
||||
"internal/testenv"
|
||||
"testing"
|
||||
)
|
||||
|
||||
@@ -72,7 +73,7 @@ func TestCompareBytes(t *testing.T) {
|
||||
}
|
||||
lengths = append(lengths, 256, 512, 1024, 1333, 4095, 4096, 4097)
|
||||
|
||||
if !testing.Short() {
|
||||
if !testing.Short() || testenv.Builder() != "" {
|
||||
lengths = append(lengths, 65535, 65536, 65537, 99999)
|
||||
}
|
||||
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"slices"
|
||||
"sort"
|
||||
"strconv"
|
||||
"unicode"
|
||||
)
|
||||
@@ -81,9 +81,9 @@ func ExampleBuffer_Next() {
|
||||
var b bytes.Buffer
|
||||
b.Grow(64)
|
||||
b.Write([]byte("abcde"))
|
||||
fmt.Printf("%s\n", b.Next(2))
|
||||
fmt.Printf("%s\n", b.Next(2))
|
||||
fmt.Printf("%s", b.Next(2))
|
||||
fmt.Printf("%s\n", string(b.Next(2)))
|
||||
fmt.Printf("%s\n", string(b.Next(2)))
|
||||
fmt.Printf("%s", string(b.Next(2)))
|
||||
// Output:
|
||||
// ab
|
||||
// cd
|
||||
@@ -102,7 +102,7 @@ func ExampleBuffer_Read() {
|
||||
fmt.Println(n)
|
||||
fmt.Println(b.String())
|
||||
fmt.Println(string(rdbuf))
|
||||
// Output:
|
||||
// Output
|
||||
// 1
|
||||
// bcde
|
||||
// a
|
||||
@@ -118,7 +118,7 @@ func ExampleBuffer_ReadByte() {
|
||||
}
|
||||
fmt.Println(c)
|
||||
fmt.Println(b.String())
|
||||
// Output:
|
||||
// Output
|
||||
// 97
|
||||
// bcde
|
||||
}
|
||||
@@ -165,8 +165,11 @@ func ExampleCompare_search() {
|
||||
// Binary search to find a matching byte slice.
|
||||
var needle []byte
|
||||
var haystack [][]byte // Assume sorted
|
||||
_, found := slices.BinarySearchFunc(haystack, needle, bytes.Compare)
|
||||
if found {
|
||||
i := sort.Search(len(haystack), func(i int) bool {
|
||||
// Return haystack[i] >= needle.
|
||||
return bytes.Compare(haystack[i], needle) >= 0
|
||||
})
|
||||
if i < len(haystack) && bytes.Equal(haystack[i], needle) {
|
||||
// Found it!
|
||||
}
|
||||
}
|
||||
@@ -209,17 +212,6 @@ func ExampleContainsRune() {
|
||||
// false
|
||||
}
|
||||
|
||||
func ExampleContainsFunc() {
|
||||
f := func(r rune) bool {
|
||||
return r >= 'a' && r <= 'z'
|
||||
}
|
||||
fmt.Println(bytes.ContainsFunc([]byte("HELLO"), f))
|
||||
fmt.Println(bytes.ContainsFunc([]byte("World"), f))
|
||||
// Output:
|
||||
// false
|
||||
// true
|
||||
}
|
||||
|
||||
func ExampleCount() {
|
||||
fmt.Println(bytes.Count([]byte("cheese"), []byte("e")))
|
||||
fmt.Println(bytes.Count([]byte("five"), []byte(""))) // before & after each rune
|
||||
@@ -502,10 +494,10 @@ func ExampleTitle() {
|
||||
|
||||
func ExampleToTitle() {
|
||||
fmt.Printf("%s\n", bytes.ToTitle([]byte("loud noises")))
|
||||
fmt.Printf("%s\n", bytes.ToTitle([]byte("брат")))
|
||||
fmt.Printf("%s\n", bytes.ToTitle([]byte("хлеб")))
|
||||
// Output:
|
||||
// LOUD NOISES
|
||||
// БРАТ
|
||||
// ХЛЕБ
|
||||
}
|
||||
|
||||
func ExampleToTitleSpecial() {
|
||||
|
||||
@@ -1,148 +0,0 @@
|
||||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bytes
|
||||
|
||||
import (
|
||||
"iter"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// Lines returns an iterator over the newline-terminated lines in the byte slice s.
|
||||
// The lines yielded by the iterator include their terminating newlines.
|
||||
// If s is empty, the iterator yields no lines at all.
|
||||
// If s does not end in a newline, the final yielded line will not end in a newline.
|
||||
// It returns a single-use iterator.
|
||||
func Lines(s []byte) iter.Seq[[]byte] {
|
||||
return func(yield func([]byte) bool) {
|
||||
for len(s) > 0 {
|
||||
var line []byte
|
||||
if i := IndexByte(s, '\n'); i >= 0 {
|
||||
line, s = s[:i+1], s[i+1:]
|
||||
} else {
|
||||
line, s = s, nil
|
||||
}
|
||||
if !yield(line[:len(line):len(line)]) {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
}
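A usage sketch for the single-use iterator returned by Lines (illustrative only):

package main

import (
	"bytes"
	"fmt"
)

func main() {
	data := []byte("first\nsecond\nno trailing newline")
	// Each yielded line keeps its terminating '\n'; the final line has none.
	for line := range bytes.Lines(data) {
		fmt.Printf("%q\n", line)
	}
	// "first\n"
	// "second\n"
	// "no trailing newline"
}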
|
||||
|
||||
// explodeSeq returns an iterator over the runes in s.
|
||||
func explodeSeq(s []byte) iter.Seq[[]byte] {
|
||||
return func(yield func([]byte) bool) {
|
||||
for len(s) > 0 {
|
||||
_, size := utf8.DecodeRune(s)
|
||||
if !yield(s[:size:size]) {
|
||||
return
|
||||
}
|
||||
s = s[size:]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// splitSeq is SplitSeq or SplitAfterSeq, configured by how many
|
||||
// bytes of sep to include in the results (none or all).
|
||||
func splitSeq(s, sep []byte, sepSave int) iter.Seq[[]byte] {
|
||||
if len(sep) == 0 {
|
||||
return explodeSeq(s)
|
||||
}
|
||||
return func(yield func([]byte) bool) {
|
||||
for {
|
||||
i := Index(s, sep)
|
||||
if i < 0 {
|
||||
break
|
||||
}
|
||||
frag := s[:i+sepSave]
|
||||
if !yield(frag[:len(frag):len(frag)]) {
|
||||
return
|
||||
}
|
||||
s = s[i+len(sep):]
|
||||
}
|
||||
yield(s[:len(s):len(s)])
|
||||
}
|
||||
}
|
||||
|
||||
// SplitSeq returns an iterator over all subslices of s separated by sep.
|
||||
// The iterator yields the same subslices that would be returned by [Split](s, sep),
|
||||
// but without constructing a new slice containing the subslices.
|
||||
// It returns a single-use iterator.
|
||||
func SplitSeq(s, sep []byte) iter.Seq[[]byte] {
|
||||
return splitSeq(s, sep, 0)
|
||||
}
|
||||
|
||||
// SplitAfterSeq returns an iterator over subslices of s split after each instance of sep.
|
||||
// The iterator yields the same subslices that would be returned by [SplitAfter](s, sep),
|
||||
// but without constructing a new slice containing the subslices.
|
||||
// It returns a single-use iterator.
|
||||
func SplitAfterSeq(s, sep []byte) iter.Seq[[]byte] {
|
||||
return splitSeq(s, sep, len(sep))
|
||||
}
|
||||
|
||||
// FieldsSeq returns an iterator over subslices of s split around runs of
|
||||
// whitespace characters, as defined by [unicode.IsSpace].
|
||||
// The iterator yields the same subslices that would be returned by [Fields](s),
|
||||
// but without constructing a new slice containing the subslices.
|
||||
func FieldsSeq(s []byte) iter.Seq[[]byte] {
|
||||
return func(yield func([]byte) bool) {
|
||||
start := -1
|
||||
for i := 0; i < len(s); {
|
||||
size := 1
|
||||
r := rune(s[i])
|
||||
isSpace := asciiSpace[s[i]] != 0
|
||||
if r >= utf8.RuneSelf {
|
||||
r, size = utf8.DecodeRune(s[i:])
|
||||
isSpace = unicode.IsSpace(r)
|
||||
}
|
||||
if isSpace {
|
||||
if start >= 0 {
|
||||
if !yield(s[start:i:i]) {
|
||||
return
|
||||
}
|
||||
start = -1
|
||||
}
|
||||
} else if start < 0 {
|
||||
start = i
|
||||
}
|
||||
i += size
|
||||
}
|
||||
if start >= 0 {
|
||||
yield(s[start:len(s):len(s)])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// FieldsFuncSeq returns an iterator over subslices of s split around runs of
|
||||
// Unicode code points satisfying f(c).
|
||||
// The iterator yields the same subslices that would be returned by [FieldsFunc](s),
|
||||
// but without constructing a new slice containing the subslices.
|
||||
func FieldsFuncSeq(s []byte, f func(rune) bool) iter.Seq[[]byte] {
|
||||
return func(yield func([]byte) bool) {
|
||||
start := -1
|
||||
for i := 0; i < len(s); {
|
||||
size := 1
|
||||
r := rune(s[i])
|
||||
if r >= utf8.RuneSelf {
|
||||
r, size = utf8.DecodeRune(s[i:])
|
||||
}
|
||||
if f(r) {
|
||||
if start >= 0 {
|
||||
if !yield(s[start:i:i]) {
|
||||
return
|
||||
}
|
||||
start = -1
|
||||
}
|
||||
} else if start < 0 {
|
||||
start = i
|
||||
}
|
||||
i += size
|
||||
}
|
||||
if start >= 0 {
|
||||
yield(s[start:len(s):len(s)])
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -10,10 +10,10 @@ import (
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// A Reader implements the [io.Reader], [io.ReaderAt], [io.WriterTo], [io.Seeker],
|
||||
// [io.ByteScanner], and [io.RuneScanner] interfaces by reading from
|
||||
// A Reader implements the io.Reader, io.ReaderAt, io.WriterTo, io.Seeker,
|
||||
// io.ByteScanner, and io.RuneScanner interfaces by reading from
|
||||
// a byte slice.
|
||||
// Unlike a [Buffer], a Reader is read-only and supports seeking.
|
||||
// Unlike a Buffer, a Reader is read-only and supports seeking.
|
||||
// The zero value for Reader operates like a Reader of an empty slice.
type Reader struct {
	s []byte
@@ -31,11 +31,11 @@ func (r *Reader) Len() int {
}

// Size returns the original length of the underlying byte slice.
// Size is the number of bytes available for reading via [Reader.ReadAt].
// The result is unaffected by any method calls except [Reader.Reset].
// Size is the number of bytes available for reading via ReadAt.
// The result is unaffected by any method calls except Reset.
func (r *Reader) Size() int64 { return int64(len(r.s)) }

// Read implements the [io.Reader] interface.
// Read implements the io.Reader interface.
func (r *Reader) Read(b []byte) (n int, err error) {
	if r.i >= int64(len(r.s)) {
		return 0, io.EOF
@@ -46,7 +46,7 @@ func (r *Reader) Read(b []byte) (n int, err error) {
	return
}

// ReadAt implements the [io.ReaderAt] interface.
// ReadAt implements the io.ReaderAt interface.
func (r *Reader) ReadAt(b []byte, off int64) (n int, err error) {
	// cannot modify state - see io.ReaderAt
	if off < 0 {
@@ -62,7 +62,7 @@ func (r *Reader) ReadAt(b []byte, off int64) (n int, err error) {
	return
}

// ReadByte implements the [io.ByteReader] interface.
// ReadByte implements the io.ByteReader interface.
func (r *Reader) ReadByte() (byte, error) {
	r.prevRune = -1
	if r.i >= int64(len(r.s)) {
@@ -73,7 +73,7 @@ func (r *Reader) ReadByte() (byte, error) {
	return b, nil
}

// UnreadByte complements [Reader.ReadByte] in implementing the [io.ByteScanner] interface.
// UnreadByte complements ReadByte in implementing the io.ByteScanner interface.
func (r *Reader) UnreadByte() error {
	if r.i <= 0 {
		return errors.New("bytes.Reader.UnreadByte: at beginning of slice")
@@ -83,7 +83,7 @@ func (r *Reader) UnreadByte() error {
	return nil
}

// ReadRune implements the [io.RuneReader] interface.
// ReadRune implements the io.RuneReader interface.
func (r *Reader) ReadRune() (ch rune, size int, err error) {
	if r.i >= int64(len(r.s)) {
		r.prevRune = -1
@@ -99,7 +99,7 @@ func (r *Reader) ReadRune() (ch rune, size int, err error) {
	return
}

// UnreadRune complements [Reader.ReadRune] in implementing the [io.RuneScanner] interface.
// UnreadRune complements ReadRune in implementing the io.RuneScanner interface.
func (r *Reader) UnreadRune() error {
	if r.i <= 0 {
		return errors.New("bytes.Reader.UnreadRune: at beginning of slice")
@@ -112,7 +112,7 @@ func (r *Reader) UnreadRune() error {
	return nil
}

// Seek implements the [io.Seeker] interface.
// Seek implements the io.Seeker interface.
func (r *Reader) Seek(offset int64, whence int) (int64, error) {
	r.prevRune = -1
	var abs int64
@@ -133,7 +133,7 @@ func (r *Reader) Seek(offset int64, whence int) (int64, error) {
	return abs, nil
}

// WriteTo implements the [io.WriterTo] interface.
// WriteTo implements the io.WriterTo interface.
func (r *Reader) WriteTo(w io.Writer) (n int64, err error) {
	r.prevRune = -1
	if r.i >= int64(len(r.s)) {
@@ -152,8 +152,8 @@ func (r *Reader) WriteTo(w io.Writer) (n int64, err error) {
	return
}

// Reset resets the [Reader] to be reading from b.
// Reset resets the Reader to be reading from b.
func (r *Reader) Reset(b []byte) { *r = Reader{b, 0, -1} }

// NewReader returns a new [Reader] reading from b.
// NewReader returns a new Reader reading from b.
func NewReader(b []byte) *Reader { return &Reader{b, 0, -1} }
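The bytes/reader.go hunks above only reword doc comments (one side uses the [bracketed] doc links, the other the plain names); the methods themselves are unchanged. For readers less familiar with this type, a minimal, self-contained sketch of how the documented methods behave follows. This is illustrative usage, not code from the diff.

package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	r := bytes.NewReader([]byte("gopher"))

	// Size reports the length of the underlying slice and is unaffected
	// by reads or seeks, matching the Size doc comment above.
	fmt.Println(r.Size()) // 6

	// ReadAt reads at an absolute offset and does not move the read position.
	buf := make([]byte, 3)
	if _, err := r.ReadAt(buf, 2); err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // "phe"

	// Read/ReadByte consume from the current offset; Seek repositions it.
	b, _ := r.ReadByte()
	fmt.Println(string(b)) // "g": ReadAt above did not advance the offset
	r.Seek(0, io.SeekEnd)
	fmt.Println(r.Len()) // 0 bytes left to read
}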
@@ -12,6 +12,7 @@ import (
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"testing"
)

@@ -27,6 +28,26 @@ func TestMain(m *testing.M) {
	os.Exit(m.Run())
}

// addr2linePath returns the path to the "addr2line" binary to run.
func addr2linePath(t testing.TB) string {
	t.Helper()
	testenv.MustHaveExec(t)

	addr2linePathOnce.Do(func() {
		addr2lineExePath, addr2linePathErr = os.Executable()
	})
	if addr2linePathErr != nil {
		t.Fatal(addr2linePathErr)
	}
	return addr2lineExePath
}

var (
	addr2linePathOnce sync.Once
	addr2lineExePath  string
	addr2linePathErr  error
)

func loadSyms(t *testing.T, dbgExePath string) map[string]string {
	cmd := testenv.Command(t, testenv.GoToolPath(t), "tool", "nm", dbgExePath)
	out, err := cmd.CombinedOutput()
@@ -49,7 +70,7 @@ func loadSyms(t *testing.T, dbgExePath string) map[string]string {
}

func runAddr2Line(t *testing.T, dbgExePath, addr string) (funcname, path, lineno string) {
	cmd := testenv.Command(t, testenv.Executable(t), dbgExePath)
	cmd := testenv.Command(t, addr2linePath(t), dbgExePath)
	cmd.Stdin = strings.NewReader(addr)
	out, err := cmd.CombinedOutput()
	if err != nil {
@@ -87,22 +108,41 @@ func testAddr2Line(t *testing.T, dbgExePath, addr string) {
	// Debug paths are stored slash-separated, so convert to system-native.
	srcPath = filepath.FromSlash(srcPath)
	fi2, err := os.Stat(srcPath)

	// If GOROOT_FINAL is set and srcPath is not the file we expect, perhaps
	// srcPath has had GOROOT_FINAL substituted for GOROOT and GOROOT hasn't been
	// moved to its final location yet. If so, try the original location instead.
	if gorootFinal := os.Getenv("GOROOT_FINAL"); gorootFinal != "" &&
		(os.IsNotExist(err) || (err == nil && !os.SameFile(fi1, fi2))) {
		// srcPath is clean, but GOROOT_FINAL itself might not be.
		// (See https://golang.org/issue/41447.)
		gorootFinal = filepath.Clean(gorootFinal)

		if strings.HasPrefix(srcPath, gorootFinal) {
			fi2, err = os.Stat(runtime.GOROOT() + strings.TrimPrefix(srcPath, gorootFinal))
		}
	}

	if err != nil {
		t.Fatalf("Stat failed: %v", err)
	}
	if !os.SameFile(fi1, fi2) {
		t.Fatalf("addr2line_test.go and %s are not same file", srcPath)
	}
	if want := "102"; srcLineNo != want {
		t.Fatalf("line number = %v; want %s", srcLineNo, want)
	if srcLineNo != "138" {
		t.Fatalf("line number = %v; want 138", srcLineNo)
	}
}

// This is line 101. The test depends on that.
// This is line 137. The test depends on that.
func TestAddr2Line(t *testing.T) {
	testenv.MustHaveGoBuild(t)

	tmpDir := t.TempDir()
	tmpDir, err := os.MkdirTemp("", "TestAddr2Line")
	if err != nil {
		t.Fatal("TempDir failed: ", err)
	}
	defer os.RemoveAll(tmpDir)

	// Build copy of test binary with debug symbols,
	// since the one running now may not have them.
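The addr2linePath helper added in this hunk caches the result of os.Executable behind a package-level sync.Once plus two result variables. On Go 1.21 and later the same memoization can be written with sync.OnceValues; the sketch below is an illustrative alternative under that assumption, not what the test actually uses.

package main

import (
	"fmt"
	"os"
	"sync"
)

// exePath runs os.Executable at most once and replays the cached
// path/error pair to every subsequent caller.
var exePath = sync.OnceValues(os.Executable)

func main() {
	path, err := exePath()
	if err != nil {
		fmt.Fprintln(os.Stderr, "cannot locate executable:", err)
		os.Exit(1)
	}
	fmt.Println(path)
}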
@@ -28,7 +28,6 @@ import (
	"strings"

	"cmd/internal/objfile"
	"cmd/internal/telemetry/counter"
)

func printUsage(w *os.File) {
@@ -46,7 +45,6 @@ func usage() {
func main() {
	log.SetFlags(0)
	log.SetPrefix("addr2line: ")
	counter.Open()

	// pprof expects this behavior when checking for addr2line
	if len(os.Args) > 1 && os.Args[1] == "--help" {
@@ -56,8 +54,6 @@ func main() {

	flag.Usage = usage
	flag.Parse()
	counter.Inc("addr2line/invocations")
	counter.CountFlags("addr2line/flag:", *flag.CommandLine)
	if flag.NArg() != 1 {
		usage()
	}
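These main.go hunks drop the telemetry counter calls, presumably because cmd/internal/telemetry/counter does not exist on the older dev.inline branch. The sequence being removed is: open the counter file, count the invocation, count the parsed flags. The sketch below reproduces that sequence for a hypothetical tool using golang.org/x/telemetry/counter, which I take to be the public package the internal one wraps; treat the import path and exact signatures as assumptions to verify.

// Hypothetical tool "mytool"; not part of this diff.
package main

import (
	"flag"
	"log"

	"golang.org/x/telemetry/counter" // assumed public counterpart of cmd/internal/telemetry/counter
)

func main() {
	log.SetFlags(0)
	log.SetPrefix("mytool: ")
	counter.Open() // map the counter file before doing real work

	flag.Parse()
	counter.Inc("mytool/invocations")                     // one increment per run
	counter.CountFlags("mytool/flag:", *flag.CommandLine) // record which flags were set

	// ... tool logic ...
}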
@@ -2,10 +2,9 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This package computes the exported API of a set of Go packages.
// Package api computes the exported API of a set of Go packages.
// It is only a test, not a command, nor a usefully importable package.

package main
package api

import (
	"bufio"
@@ -25,7 +24,7 @@ import (
	"path/filepath"
	"regexp"
	"runtime"
	"slices"
	"sort"
	"strconv"
	"strings"
	"sync"
@@ -106,7 +105,7 @@ func Check(t *testing.T) {
	}

	var nextFiles []string
	if v := runtime.Version(); strings.Contains(v, "devel") || strings.Contains(v, "beta") {
	if strings.Contains(runtime.Version(), "devel") {
		next, err := filepath.Glob(filepath.Join(testenv.GOROOT(t), "api/next/*.txt"))
		if err != nil {
			t.Fatal(err)
@@ -232,8 +231,8 @@ func compareAPI(w io.Writer, features, required, exception []string) (ok bool) {
	featureSet := set(features)
	exceptionSet := set(exception)

	slices.Sort(features)
	slices.Sort(required)
	sort.Strings(features)
	sort.Strings(required)

	take := func(sl *[]string) string {
		s := (*sl)[0]
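The compareAPI hunk (and several later hunks in this file) swaps between slices.Sort and sort.Strings. For a []string the two produce the same ascending order; slices.Sort is the generic Go 1.21+ form, sort.Strings the pre-generics form that also compiles on older trees. A quick illustration of the equivalence, separate from the diff:

package main

import (
	"fmt"
	"slices"
	"sort"
)

func main() {
	a := []string{"net/http", "bytes", "fmt"}
	b := slices.Clone(a)

	slices.Sort(a)  // generic form, Go 1.21+
	sort.Strings(b) // classic form, works on older releases too

	fmt.Println(slices.Equal(a, b)) // true: identical ordering
}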
@@ -378,7 +377,7 @@ func (w *Walker) Features() (fs []string) {
	for f := range w.features {
		fs = append(fs, f)
	}
	slices.Sort(fs)
	sort.Strings(fs)
	return
}

@@ -431,7 +430,7 @@ func tagKey(dir string, context *build.Context, tags []string) string {
	// an indirect imported package. See https://github.com/golang/go/issues/21181
	// for more detail.
	tags = append(tags, context.GOOS, context.GOARCH)
	slices.Sort(tags)
	sort.Strings(tags)

	for _, tag := range tags {
		if ctags[tag] {
@@ -490,8 +489,7 @@ func (w *Walker) loadImports() {
	if w.context.Dir != "" {
		cmd.Dir = w.context.Dir
	}
	cmd.Stderr = os.Stderr
	out, err := cmd.Output()
	out, err := cmd.CombinedOutput()
	if err != nil {
		log.Fatalf("loading imports: %v\n%s", err, out)
	}
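The loadImports hunk above switches between cmd.Output() (with cmd.Stderr wired to os.Stderr) and cmd.CombinedOutput(). The practical difference is what ends up in the parsed buffer: Output returns stdout only, while CombinedOutput mixes stderr into the same slice. A small stand-alone sketch of the two variants, using "go list std" purely as an example command:

package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// Output with Stderr set: stdout stays clean for parsing,
	// diagnostics stream straight to our own stderr.
	cmd := exec.Command("go", "list", "std")
	cmd.Stderr = os.Stderr
	out, err := cmd.Output()
	if err != nil {
		fmt.Fprintln(os.Stderr, "go list:", err)
		os.Exit(1)
	}

	// CombinedOutput: stdout and stderr are interleaved, which is fine
	// for logging but riskier when the result is parsed line by line.
	mixed, _ := exec.Command("go", "list", "std").CombinedOutput()

	fmt.Println(len(out), len(mixed))
}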
@@ -535,7 +533,7 @@ func (w *Walker) loadImports() {
		}
	}

	slices.Sort(stdPackages)
	sort.Strings(stdPackages)
	imports = listImports{
		stdPackages: stdPackages,
		importMap:   importMap,
@@ -717,7 +715,7 @@ func sortedMethodNames(typ *types.Interface) []string {
	for i := range list {
		list[i] = typ.Method(i).Name()
	}
	slices.Sort(list)
	sort.Strings(list)
	return list
}

@@ -747,7 +745,7 @@ func (w *Walker) sortedEmbeddeds(typ *types.Interface) []string {
			list = append(list, buf.String())
		}
	}
	slices.Sort(list)
	sort.Strings(list)
	return list
}

@@ -843,9 +841,6 @@ func (w *Walker) writeType(buf *bytes.Buffer, typ types.Type) {
		buf.WriteString(s)
		w.writeType(buf, typ.Elem())

	case *types.Alias:
		w.writeType(buf, types.Unalias(typ))

	case *types.Named:
		obj := typ.Obj()
		pkg := obj.Pkg()
@@ -854,16 +849,6 @@ func (w *Walker) writeType(buf *bytes.Buffer, typ types.Type) {
			buf.WriteByte('.')
		}
		buf.WriteString(typ.Obj().Name())
		if targs := typ.TypeArgs(); targs.Len() > 0 {
			buf.WriteByte('[')
			for i := 0; i < targs.Len(); i++ {
				if i > 0 {
					buf.WriteString(", ")
				}
				w.writeType(buf, targs.At(i))
			}
			buf.WriteByte(']')
		}

	case *types.TypeParam:
		// Type parameter names may change, so use a placeholder instead.
@@ -970,17 +955,17 @@ func (w *Walker) emitType(obj *types.TypeName) {
	if w.isDeprecated(obj) {
		w.emitf("type %s //deprecated", name)
	}
	typ := obj.Type()
	if obj.IsAlias() {
		w.emitf("type %s = %s", name, w.typeString(typ))
		return
	}
	if tparams := obj.Type().(*types.Named).TypeParams(); tparams != nil {
		var buf bytes.Buffer
		buf.WriteString(name)
		w.writeTypeParams(&buf, tparams, true)
		name = buf.String()
	}
	typ := obj.Type()
	if obj.IsAlias() {
		w.emitf("type %s = %s", name, w.typeString(typ))
		return
	}
	switch typ := typ.Underlying().(type) {
	case *types.Struct:
		w.emitStructType(name, typ)
@@ -1019,7 +1004,7 @@ func (w *Walker) emitType(obj *types.TypeName) {

func (w *Walker) emitStructType(name string, typ *types.Struct) {
	typeStruct := fmt.Sprintf("type %s struct", name)
	w.emitf("%s", typeStruct)
	w.emitf(typeStruct)
	defer w.pushScope(typeStruct)()

	for i := 0; i < typ.NumFields(); i++ {
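The emitStructType hunk changes between w.emitf("%s", typeStruct) and w.emitf(typeStruct). Both print the same text here, but passing a computed string as the format argument trips vet's printf check and misbehaves if the value ever contains a '%'. A stand-alone illustration with a hypothetical printf-style helper (the emitf below is not the Walker method):

package main

import "fmt"

// emitf mimics a printf-style helper such as Walker.emitf.
func emitf(format string, args ...any) {
	fmt.Printf(format+"\n", args...)
}

func main() {
	typeStruct := "type T struct // covers 100% of cases"

	emitf("%s", typeStruct) // safe: the value is data, never a format string
	emitf(typeStruct)       // vet flags the non-constant format string; the '%'
	                        // is misparsed as a verb and the output is garbled
}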
@@ -1083,7 +1068,7 @@ func (w *Walker) emitIfaceType(name string, typ *types.Interface) {
		return
	}

	slices.Sort(methodNames)
	sort.Strings(methodNames)
	w.emitf("type %s interface { %s }", name, strings.Join(methodNames, ", "))
}
Some files were not shown because too many files have changed in this diff.