Mirror of https://github.com/golang/go.git, synced 2026-02-03 09:25:06 +03:00
Compare commits
108 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | cb4eee693c |  |
|  | 8c8adffd53 |  |
|  | 70a1aae67f |  |
|  | 2c88c1d599 |  |
|  | 4c50f9162c |  |
|  | 9e148a4150 |  |
|  | 4b27560db9 |  |
|  | 4e548f2c8e |  |
|  | 45f9ded1df |  |
|  | 49906f9575 |  |
|  | ea96074191 |  |
|  | 8e1fdea831 |  |
|  | c2d4f852ce |  |
|  | 3222951439 |  |
|  | ceaf26ecce |  |
|  | dfe4dbf8c0 |  |
|  | 3560cf0afb |  |
|  | 5159a7193a |  |
|  | 11b861e459 |  |
|  | 81fc616267 |  |
|  | 14f0251867 |  |
|  | ab60a7bc18 |  |
|  | 4c97e883b5 |  |
|  | 179ccb7042 |  |
|  | fe9b3c3399 |  |
|  | b515c5208b |  |
|  | ace5bb40d0 |  |
|  | 12d5810cdb |  |
|  | 745657509e |  |
|  | 95389d3d9d |  |
|  | cf501ac0c5 |  |
|  | cb55d1a0c8 |  |
|  | 3c96ae0870 |  |
|  | 6b89e7dc5a |  |
|  | 185457da9b |  |
|  | 3a84293118 |  |
|  | 362dcedfdb |  |
|  | d4a81ec7ee |  |
|  | dc8976dd0b |  |
|  | 00e6815208 |  |
|  | adbfb672ba |  |
|  | fa0292d252 |  |
|  | 947e43e371 |  |
|  | 9d2e28501c |  |
|  | 93d8777d24 |  |
|  | 3f4af1ff0e |  |
|  | a7ff78d585 |  |
|  | 12c1177045 |  |
|  | d6c972ad41 |  |
|  | a65a2bbd8e |  |
|  | dddf0ae40f |  |
|  | e55d7cf843 |  |
|  | 4edf4bb2c6 |  |
|  | 2c6d106541 |  |
|  | 46587483e3 |  |
|  | 0a5b33a883 |  |
|  | 0c53f93faa |  |
|  | abfd578156 |  |
|  | 7fb7acb82d |  |
|  | e23707b59c |  |
|  | 3826650c99 |  |
|  | e71b0b1fee |  |
|  | 9508eae5d1 |  |
|  | 35b1a146d9 |  |
|  | db6097f8cb |  |
|  | 041a47712e |  |
|  | 3a855208e3 |  |
|  | 337b8e9cbf |  |
|  | 16830ab48a |  |
|  | 056b0edcb8 |  |
|  | f73eba76a0 |  |
|  | 5330cd225b |  |
|  | d8c4239f08 |  |
|  | c33adf44ff |  |
|  | 3b71998078 |  |
|  | 8fe2ad6494 |  |
|  | 686662f3a4 |  |
|  | 6cbe522fe1 |  |
|  | fb86598cd3 |  |
|  | 6fbd01a711 |  |
|  | d6a271939f |  |
|  | 20107e05a6 |  |
|  | 53d1b73dff |  |
|  | dd31ad7e9f |  |
|  | a10e42f219 |  |
|  | b0957cfcf9 |  |
|  | 58a35fe55b |  |
|  | 4c5517913c |  |
|  | 5d647ed9fc |  |
|  | e34f6a9928 |  |
|  | 7b3786bbb1 |  |
|  | 9289b9c336 |  |
|  | aa721d1e7d |  |
|  | 117d7b107e |  |
|  | 333ecd4b40 |  |
|  | 1e1da49105 |  |
|  | 817009da40 |  |
|  | b29ec30780 |  |
|  | 10e9ab55c8 |  |
|  | ba8e9e14f4 |  |
|  | 77e9c26960 |  |
|  | fe55bbcfd1 |  |
|  | 9a70e17e0f |  |
|  | 66f8e1e817 |  |
|  | fa72f3e034 |  |
|  | fb23428a85 |  |
|  | f06eaf0c4f |  |
|  | 796f59df92 |  |
2 .github/ISSUE_TEMPLATE/03-gopls.yml vendored

@@ -6,7 +6,7 @@ body:
- type: markdown
attributes:
value: "Please answer these questions before submitting your issue. Thanks!"
- type: textarea
- type: input
id: gopls-version
attributes:
label: "gopls version"
@@ -1,7 +1,7 @@
name: Language Change Proposals
description: Changes to the language
labels: ["Proposal", "LanguageChange", "LanguageChangeReview"]
title: "proposal: spec: proposal title"
labels: ["Proposal", "v2", "LanguageChange"]
title: "proposal: Go 2: proposal title"

body:
74 .github/ISSUE_TEMPLATE/12-telemetry.yml vendored

@@ -1,30 +1,68 @@
name: Go Telemetry Proposals
description: Changes to the telemetry upload configuration
description: New telemetry counter or update on an existing one
title: "x/telemetry/config: proposal title"
labels: ["Telemetry-Proposal"]
projects: ["golang/29"]
body:
- type: textarea
attributes:
label: Summary
description: >
What change are you proposing to the upload configuration, and why?
For new upload configuration, which new counters will be collected, what
do they measure, and why is it important to collect them?
Note that uploaded data must not carry sensitive user information.
See [go.dev/doc/telemetry#proposals](https://go.dev/doc/telemetry#proposals)
for more details on telemetry proposals.
label: Counter names
description: Names of counters to add or update.
validations:
required: true
- type: input
- type: textarea
attributes:
label: Proposed Config Change
description: >
A CL containing proposed changes to the
[config.txt](https://go.googlesource.com/telemetry/+/master/internal/chartconfig/config.txt)
chart configuration.
See the [chartconfig](https://pkg.go.dev/golang.org/x/telemetry/internal/chartconfig)
package for an explanation of the chart config format.
For an example change, see [CL 564619](https://go.dev/cl/564619).
label: Description
description: What do these counters measure?
validations:
required: true
- type: textarea
attributes:
label: Rationale
description: |
Why is the counter important?
For example, what new insights will it provide, and how will that information be used?
If this is about updating existing counters, why is the change necessary?
validations:
required: true
- type: textarea
attributes:
label: Do the counters carry sensitive user information?
validations:
required: true
- type: textarea
attributes:
label: How?
description: |
How do we plan to compute the info?
If available, include the code location or cl that uses the golang.org/x/telemetry/counter API.
validations:
required: true
- type: textarea
attributes:
label: Proposed Graph Config
description: |
Approved telemetry counters are maintained as [Go Telemetry Graph Config](https://golang.org/x/telemetry/internal/graphconfig) records.
Please draft the record entry for your proposal here.
If multiple records need to be included, separate them with `---` lines.
You can check the list of the approved counters and their current configuration in [config.txt](https://go.googlesource.com/telemetry/+/master/internal/configgen/config.txt).
render: Text
value: |
counter: gopls/bug
title: Gopls bug reports
description: Stacks of bugs encountered on the gopls server.
type: partition, histogram, stack # choose only one.
program: golang.org/x/tools/gopls
counter: gopls/bug
depth: 16 # only if type is stack.
version: v0.13.0 # the first binary version containing this counter.
validations:
required: true
- type: dropdown
attributes:
label: New or Update
description: Is this a new counter? See [config.txt](https://go.googlesource.com/telemetry/+/master/internal/configgen/config.txt) for the list of approved counters.
options:
- New
- Update
default: 0
3 .gitignore vendored

@@ -30,7 +30,6 @@ _testmain.go
/misc/cgo/testso/main
/pkg/
/src/*.*/
/src/_artifacts/
/src/cmd/cgo/zdefaultcc.go
/src/cmd/dist/dist
/src/cmd/go/internal/cfg/zdefaultcc.go
@@ -38,7 +37,7 @@ _testmain.go
/src/go/build/zcgo.go
/src/go/doc/headscan
/src/internal/buildcfg/zbootstrap.go
/src/internal/runtime/sys/zversion.go
/src/runtime/internal/sys/zversion.go
/src/unicode/maketables
/src/time/tzdata/zzipdata.go
/test.out
4 LICENSE

@@ -1,4 +1,4 @@
Copyright 2009 The Go Authors.
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google LLC nor the names of its
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
@@ -4,7 +4,7 @@ Go is an open source programming language that makes it easy to build simple,
reliable, and efficient software.


*Gopher image by [Renee French][rf], licensed under [Creative Commons 4.0 Attribution license][cc4-by].*
*Gopher image by [Renee French][rf], licensed under [Creative Commons 4.0 Attributions license][cc4-by].*

Our canonical Git repository is located at https://go.googlesource.com/go.
There is a mirror of the repository at https://github.com/golang/go.
@@ -21,6 +21,3 @@ warning output from the go api tool. Each file should be named
nnnnn.txt, after the issue number for the accepted proposal.
(The #nnnnn suffix must also appear at the end of each line in the file;
that will be preserved when next/*.txt is concatenated into go1.XX.txt.)

When you add a file to the api/next directory, you must add at least one file
under doc/next. See doc/README.md for details.
@@ -503,7 +503,6 @@ pkg unicode, const Version = "10.0.0"
pkg unicode, const Version = "11.0.0"
pkg unicode, const Version = "12.0.0"
pkg unicode, const Version = "13.0.0"
pkg unicode, const Version = "15.0.0"
pkg unicode, const Version = "6.2.0"
pkg unicode, const Version = "6.3.0"
pkg unicode, const Version = "7.0.0"
@@ -599,7 +598,3 @@ pkg syscall (freebsd-arm64-cgo), const SYS_MKNODAT = 498
pkg syscall (freebsd-arm64-cgo), const SYS_STAT = 188
pkg syscall (freebsd-arm64-cgo), const SYS_STAT ideal-int
pkg syscall (freebsd-arm64-cgo), const SYS_STATFS = 396
pkg syscall (openbsd-386), const ELAST = 91
pkg syscall (openbsd-386-cgo), const ELAST = 91
pkg syscall (openbsd-amd64), const ELAST = 91
pkg syscall (openbsd-amd64-cgo), const ELAST = 91
158 api/go1.23.txt
@@ -1,158 +0,0 @@
|
||||
pkg archive/tar, type FileInfoNames interface { Gname, IsDir, ModTime, Mode, Name, Size, Sys, Uname } #50102
|
||||
pkg archive/tar, type FileInfoNames interface, Gname() (string, error) #50102
|
||||
pkg archive/tar, type FileInfoNames interface, IsDir() bool #50102
|
||||
pkg archive/tar, type FileInfoNames interface, ModTime() time.Time #50102
|
||||
pkg archive/tar, type FileInfoNames interface, Mode() fs.FileMode #50102
|
||||
pkg archive/tar, type FileInfoNames interface, Name() string #50102
|
||||
pkg archive/tar, type FileInfoNames interface, Size() int64 #50102
|
||||
pkg archive/tar, type FileInfoNames interface, Sys() interface{} #50102
|
||||
pkg archive/tar, type FileInfoNames interface, Uname() (string, error) #50102
|
||||
pkg crypto/tls, const QUICResumeSession = 8 #63691
|
||||
pkg crypto/tls, const QUICResumeSession QUICEventKind #63691
|
||||
pkg crypto/tls, const QUICStoreSession = 9 #63691
|
||||
pkg crypto/tls, const QUICStoreSession QUICEventKind #63691
|
||||
pkg crypto/tls, method (*ECHRejectionError) Error() string #63369
|
||||
pkg crypto/tls, method (*QUICConn) StoreSession(*SessionState) error #63691
|
||||
pkg crypto/tls, type Config struct, EncryptedClientHelloConfigList []uint8 #63369
|
||||
pkg crypto/tls, type Config struct, EncryptedClientHelloRejectionVerify func(ConnectionState) error #63369
|
||||
pkg crypto/tls, type ConnectionState struct, ECHAccepted bool #63369
|
||||
pkg crypto/tls, type ECHRejectionError struct #63369
|
||||
pkg crypto/tls, type ECHRejectionError struct, RetryConfigList []uint8 #63369
|
||||
pkg crypto/tls, type QUICConfig struct, EnableSessionEvents bool #63691
|
||||
pkg crypto/tls, type QUICEvent struct, SessionState *SessionState #63691
|
||||
pkg crypto/tls, type QUICSessionTicketOptions struct, Extra [][]uint8 #63691
|
||||
pkg crypto/x509, func ParseOID(string) (OID, error) #66249
|
||||
pkg crypto/x509, method (*OID) UnmarshalBinary([]uint8) error #66249
|
||||
pkg crypto/x509, method (*OID) UnmarshalText([]uint8) error #66249
|
||||
pkg crypto/x509, method (OID) MarshalBinary() ([]uint8, error) #66249
|
||||
pkg crypto/x509, method (OID) MarshalText() ([]uint8, error) #66249
|
||||
pkg debug/elf, const PT_OPENBSD_NOBTCFI = 1705237480 #66054
|
||||
pkg debug/elf, const PT_OPENBSD_NOBTCFI ProgType #66054
|
||||
pkg debug/elf, const STT_GNU_IFUNC = 10 #66836
|
||||
pkg debug/elf, const STT_GNU_IFUNC SymType #66836
|
||||
pkg debug/elf, const STT_RELC = 8 #66836
|
||||
pkg debug/elf, const STT_RELC SymType #66836
|
||||
pkg debug/elf, const STT_SRELC = 9 #66836
|
||||
pkg debug/elf, const STT_SRELC SymType #66836
|
||||
pkg encoding/binary, func Append([]uint8, ByteOrder, interface{}) ([]uint8, error) #60023
|
||||
pkg encoding/binary, func Decode([]uint8, ByteOrder, interface{}) (int, error) #60023
|
||||
pkg encoding/binary, func Encode([]uint8, ByteOrder, interface{}) (int, error) #60023
|
||||
pkg go/ast, func Preorder(Node) iter.Seq[Node] #66339
|
||||
pkg go/types, method (*Alias) Origin() *Alias #67143
|
||||
pkg go/types, method (*Alias) Rhs() Type #66559
|
||||
pkg go/types, method (*Alias) SetTypeParams([]*TypeParam) #67143
|
||||
pkg go/types, method (*Alias) TypeArgs() *TypeList #67143
|
||||
pkg go/types, method (*Alias) TypeParams() *TypeParamList #67143
|
||||
pkg go/types, method (*Func) Signature() *Signature #65772
|
||||
pkg iter, func Pull2[$0 interface{}, $1 interface{}](Seq2[$0, $1]) (func() ($0, $1, bool), func()) #61897
|
||||
pkg iter, func Pull[$0 interface{}](Seq[$0]) (func() ($0, bool), func()) #61897
|
||||
pkg iter, type Seq2[$0 interface{}, $1 interface{}] func(func($0, $1) bool) #61897
|
||||
pkg iter, type Seq[$0 interface{}] func(func($0) bool) #61897
|
||||
pkg maps, func All[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0) iter.Seq2[$1, $2] #61900
|
||||
pkg maps, func Collect[$0 comparable, $1 interface{}](iter.Seq2[$0, $1]) map[$0]$1 #61900
|
||||
pkg maps, func Insert[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0, iter.Seq2[$1, $2]) #61900
|
||||
pkg maps, func Keys[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0) iter.Seq[$1] #61900
|
||||
pkg maps, func Values[$0 interface{ ~map[$1]$2 }, $1 comparable, $2 interface{}]($0) iter.Seq[$2] #61900
|
||||
pkg math/rand/v2, func Uint() uint #61716
|
||||
pkg math/rand/v2, method (*ChaCha8) Read([]uint8) (int, error) #67059
|
||||
pkg math/rand/v2, method (*Rand) Uint() uint #61716
|
||||
pkg net, method (*DNSError) Unwrap() error #63116
|
||||
pkg net, method (*TCPConn) SetKeepAliveConfig(KeepAliveConfig) error #62254
|
||||
pkg net, type DNSError struct, UnwrapErr error #63116
|
||||
pkg net, type Dialer struct, KeepAliveConfig KeepAliveConfig #62254
|
||||
pkg net, type KeepAliveConfig struct #62254
|
||||
pkg net, type KeepAliveConfig struct, Count int #62254
|
||||
pkg net, type KeepAliveConfig struct, Enable bool #62254
|
||||
pkg net, type KeepAliveConfig struct, Idle time.Duration #62254
|
||||
pkg net, type KeepAliveConfig struct, Interval time.Duration #62254
|
||||
pkg net, type ListenConfig struct, KeepAliveConfig KeepAliveConfig #62254
|
||||
pkg net/http, func ParseCookie(string) ([]*Cookie, error) #66008
|
||||
pkg net/http, func ParseSetCookie(string) (*Cookie, error) #66008
|
||||
pkg net/http, method (*Request) CookiesNamed(string) []*Cookie #61472
|
||||
pkg net/http, type Cookie struct, Partitioned bool #62490
|
||||
pkg net/http, type Cookie struct, Quoted bool #46443
|
||||
pkg net/http, type Request struct, Pattern string #66405
|
||||
pkg net/http/httptest, func NewRequestWithContext(context.Context, string, string, io.Reader) *http.Request #59473
|
||||
pkg os, func CopyFS(string, fs.FS) error #62484
|
||||
pkg path/filepath, func Localize(string) (string, error) #57151
|
||||
pkg reflect, func SliceAt(Type, unsafe.Pointer, int) Value #61308
|
||||
pkg reflect, method (Value) Seq() iter.Seq[Value] #66056
|
||||
pkg reflect, method (Value) Seq2() iter.Seq2[Value, Value] #66056
|
||||
pkg reflect, type Type interface, CanSeq() bool #66056
|
||||
pkg reflect, type Type interface, CanSeq2() bool #66056
|
||||
pkg reflect, type Type interface, OverflowComplex(complex128) bool #60427
|
||||
pkg reflect, type Type interface, OverflowFloat(float64) bool #60427
|
||||
pkg reflect, type Type interface, OverflowInt(int64) bool #60427
|
||||
pkg reflect, type Type interface, OverflowUint(uint64) bool #60427
|
||||
pkg runtime/debug, func SetCrashOutput(*os.File, CrashOptions) error #42888
|
||||
pkg runtime/debug, type CrashOptions struct #67182
|
||||
pkg slices, func All[$0 interface{ ~[]$1 }, $1 interface{}]($0) iter.Seq2[int, $1] #61899
|
||||
pkg slices, func AppendSeq[$0 interface{ ~[]$1 }, $1 interface{}]($0, iter.Seq[$1]) $0 #61899
|
||||
pkg slices, func Backward[$0 interface{ ~[]$1 }, $1 interface{}]($0) iter.Seq2[int, $1] #61899
|
||||
pkg slices, func Chunk[$0 interface{ ~[]$1 }, $1 interface{}]($0, int) iter.Seq[$0] #53987
|
||||
pkg slices, func Collect[$0 interface{}](iter.Seq[$0]) []$0 #61899
|
||||
pkg slices, func Repeat[$0 interface{ ~[]$1 }, $1 interface{}]($0, int) $0 #65238
|
||||
pkg slices, func SortedFunc[$0 interface{}](iter.Seq[$0], func($0, $0) int) []$0 #61899
|
||||
pkg slices, func SortedStableFunc[$0 interface{}](iter.Seq[$0], func($0, $0) int) []$0 #61899
|
||||
pkg slices, func Sorted[$0 cmp.Ordered](iter.Seq[$0]) []$0 #61899
|
||||
pkg slices, func Values[$0 interface{ ~[]$1 }, $1 interface{}]($0) iter.Seq[$1] #61899
|
||||
pkg structs, type HostLayout struct #66408
|
||||
pkg sync, method (*Map) Clear() #61696
|
||||
pkg sync/atomic, func AndInt32(*int32, int32) int32 #61395
|
||||
pkg sync/atomic, func AndInt64(*int64, int64) int64 #61395
|
||||
pkg sync/atomic, func AndUint32(*uint32, uint32) uint32 #61395
|
||||
pkg sync/atomic, func AndUint64(*uint64, uint64) uint64 #61395
|
||||
pkg sync/atomic, func AndUintptr(*uintptr, uintptr) uintptr #61395
|
||||
pkg sync/atomic, func OrInt32(*int32, int32) int32 #61395
|
||||
pkg sync/atomic, func OrInt64(*int64, int64) int64 #61395
|
||||
pkg sync/atomic, func OrUint32(*uint32, uint32) uint32 #61395
|
||||
pkg sync/atomic, func OrUint64(*uint64, uint64) uint64 #61395
|
||||
pkg sync/atomic, func OrUintptr(*uintptr, uintptr) uintptr #61395
|
||||
pkg sync/atomic, method (*Int32) And(int32) int32 #61395
|
||||
pkg sync/atomic, method (*Int32) Or(int32) int32 #61395
|
||||
pkg sync/atomic, method (*Int64) And(int64) int64 #61395
|
||||
pkg sync/atomic, method (*Int64) Or(int64) int64 #61395
|
||||
pkg sync/atomic, method (*Uint32) And(uint32) uint32 #61395
|
||||
pkg sync/atomic, method (*Uint32) Or(uint32) uint32 #61395
|
||||
pkg sync/atomic, method (*Uint64) And(uint64) uint64 #61395
|
||||
pkg sync/atomic, method (*Uint64) Or(uint64) uint64 #61395
|
||||
pkg sync/atomic, method (*Uintptr) And(uintptr) uintptr #61395
|
||||
pkg sync/atomic, method (*Uintptr) Or(uintptr) uintptr #61395
|
||||
pkg syscall (openbsd-386), const EBADMSG = 92 #67998
|
||||
pkg syscall (openbsd-386), const ELAST = 95 #67998
|
||||
pkg syscall (openbsd-386), const ENOTRECOVERABLE = 93 #67998
|
||||
pkg syscall (openbsd-386), const ENOTRECOVERABLE Errno #67998
|
||||
pkg syscall (openbsd-386), const EOWNERDEAD = 94 #67998
|
||||
pkg syscall (openbsd-386), const EOWNERDEAD Errno #67998
|
||||
pkg syscall (openbsd-386), const EPROTO = 95 #67998
|
||||
pkg syscall (openbsd-386-cgo), const EBADMSG = 92 #67998
|
||||
pkg syscall (openbsd-386-cgo), const ELAST = 95 #67998
|
||||
pkg syscall (openbsd-386-cgo), const ENOTRECOVERABLE = 93 #67998
|
||||
pkg syscall (openbsd-386-cgo), const ENOTRECOVERABLE Errno #67998
|
||||
pkg syscall (openbsd-386-cgo), const EOWNERDEAD = 94 #67998
|
||||
pkg syscall (openbsd-386-cgo), const EOWNERDEAD Errno #67998
|
||||
pkg syscall (openbsd-386-cgo), const EPROTO = 95 #67998
|
||||
pkg syscall (openbsd-amd64), const EBADMSG = 92 #67998
|
||||
pkg syscall (openbsd-amd64), const ELAST = 95 #67998
|
||||
pkg syscall (openbsd-amd64), const ENOTRECOVERABLE = 93 #67998
|
||||
pkg syscall (openbsd-amd64), const ENOTRECOVERABLE Errno #67998
|
||||
pkg syscall (openbsd-amd64), const EOWNERDEAD = 94 #67998
|
||||
pkg syscall (openbsd-amd64), const EOWNERDEAD Errno #67998
|
||||
pkg syscall (openbsd-amd64), const EPROTO = 95 #67998
|
||||
pkg syscall (openbsd-amd64-cgo), const EBADMSG = 92 #67998
|
||||
pkg syscall (openbsd-amd64-cgo), const ELAST = 95 #67998
|
||||
pkg syscall (openbsd-amd64-cgo), const ENOTRECOVERABLE = 93 #67998
|
||||
pkg syscall (openbsd-amd64-cgo), const ENOTRECOVERABLE Errno #67998
|
||||
pkg syscall (openbsd-amd64-cgo), const EOWNERDEAD = 94 #67998
|
||||
pkg syscall (openbsd-amd64-cgo), const EOWNERDEAD Errno #67998
|
||||
pkg syscall (openbsd-amd64-cgo), const EPROTO = 95 #67998
|
||||
pkg syscall (windows-386), const WSAENOPROTOOPT = 10042 #62254
|
||||
pkg syscall (windows-386), const WSAENOPROTOOPT Errno #62254
|
||||
pkg syscall (windows-amd64), const WSAENOPROTOOPT = 10042 #62254
|
||||
pkg syscall (windows-amd64), const WSAENOPROTOOPT Errno #62254
|
||||
pkg syscall, const EBADMSG Errno #67998
|
||||
pkg syscall, const EPROTO Errno #67998
|
||||
pkg unicode/utf16, func RuneLen(int32) int #44940
|
||||
pkg unique, func Make[$0 comparable]($0) Handle[$0] #62483
|
||||
pkg unique, method (Handle[$0]) Value() $0 #62483
|
||||
pkg unique, type Handle[$0 comparable] struct #62483
|
||||
223 api/go1.24.txt
@@ -1,223 +0,0 @@
|
||||
pkg bytes, func FieldsFuncSeq([]uint8, func(int32) bool) iter.Seq[[]uint8] #61901
|
||||
pkg bytes, func FieldsSeq([]uint8) iter.Seq[[]uint8] #61901
|
||||
pkg bytes, func Lines([]uint8) iter.Seq[[]uint8] #61901
|
||||
pkg bytes, func SplitAfterSeq([]uint8, []uint8) iter.Seq[[]uint8] #61901
|
||||
pkg bytes, func SplitSeq([]uint8, []uint8) iter.Seq[[]uint8] #61901
|
||||
pkg crypto/cipher, func NewCFBDecrypter //deprecated #69445
|
||||
pkg crypto/cipher, func NewCFBEncrypter //deprecated #69445
|
||||
pkg crypto/cipher, func NewGCMWithRandomNonce(Block) (AEAD, error) #69981
|
||||
pkg crypto/cipher, func NewOFB //deprecated #69445
|
||||
pkg crypto/fips140, func Enabled() bool #70123
|
||||
pkg crypto/hkdf, func Expand[$0 hash.Hash](func() $0, []uint8, string, int) ([]uint8, error) #61477
|
||||
pkg crypto/hkdf, func Extract[$0 hash.Hash](func() $0, []uint8, []uint8) ([]uint8, error) #61477
|
||||
pkg crypto/hkdf, func Key[$0 hash.Hash](func() $0, []uint8, []uint8, string, int) ([]uint8, error) #61477
|
||||
pkg crypto/mlkem, const CiphertextSize1024 = 1568 #70122
|
||||
pkg crypto/mlkem, const CiphertextSize1024 ideal-int #70122
|
||||
pkg crypto/mlkem, const CiphertextSize768 = 1088 #70122
|
||||
pkg crypto/mlkem, const CiphertextSize768 ideal-int #70122
|
||||
pkg crypto/mlkem, const EncapsulationKeySize1024 = 1568 #70122
|
||||
pkg crypto/mlkem, const EncapsulationKeySize1024 ideal-int #70122
|
||||
pkg crypto/mlkem, const EncapsulationKeySize768 = 1184 #70122
|
||||
pkg crypto/mlkem, const EncapsulationKeySize768 ideal-int #70122
|
||||
pkg crypto/mlkem, const SeedSize = 64 #70122
|
||||
pkg crypto/mlkem, const SeedSize ideal-int #70122
|
||||
pkg crypto/mlkem, const SharedKeySize = 32 #70122
|
||||
pkg crypto/mlkem, const SharedKeySize ideal-int #70122
|
||||
pkg crypto/mlkem, func GenerateKey1024() (*DecapsulationKey1024, error) #70122
|
||||
pkg crypto/mlkem, func GenerateKey768() (*DecapsulationKey768, error) #70122
|
||||
pkg crypto/mlkem, func NewDecapsulationKey1024([]uint8) (*DecapsulationKey1024, error) #70122
|
||||
pkg crypto/mlkem, func NewDecapsulationKey768([]uint8) (*DecapsulationKey768, error) #70122
|
||||
pkg crypto/mlkem, func NewEncapsulationKey1024([]uint8) (*EncapsulationKey1024, error) #70122
|
||||
pkg crypto/mlkem, func NewEncapsulationKey768([]uint8) (*EncapsulationKey768, error) #70122
|
||||
pkg crypto/mlkem, method (*DecapsulationKey1024) Bytes() []uint8 #70122
|
||||
pkg crypto/mlkem, method (*DecapsulationKey1024) Decapsulate([]uint8) ([]uint8, error) #70122
|
||||
pkg crypto/mlkem, method (*DecapsulationKey1024) EncapsulationKey() *EncapsulationKey1024 #70122
|
||||
pkg crypto/mlkem, method (*DecapsulationKey768) Bytes() []uint8 #70122
|
||||
pkg crypto/mlkem, method (*DecapsulationKey768) Decapsulate([]uint8) ([]uint8, error) #70122
|
||||
pkg crypto/mlkem, method (*DecapsulationKey768) EncapsulationKey() *EncapsulationKey768 #70122
|
||||
pkg crypto/mlkem, method (*EncapsulationKey1024) Bytes() []uint8 #70122
|
||||
pkg crypto/mlkem, method (*EncapsulationKey1024) Encapsulate() ([]uint8, []uint8) #70122
|
||||
pkg crypto/mlkem, method (*EncapsulationKey768) Bytes() []uint8 #70122
|
||||
pkg crypto/mlkem, method (*EncapsulationKey768) Encapsulate() ([]uint8, []uint8) #70122
|
||||
pkg crypto/mlkem, type DecapsulationKey1024 struct #70122
|
||||
pkg crypto/mlkem, type DecapsulationKey768 struct #70122
|
||||
pkg crypto/mlkem, type EncapsulationKey1024 struct #70122
|
||||
pkg crypto/mlkem, type EncapsulationKey768 struct #70122
|
||||
pkg crypto/pbkdf2, func Key[$0 hash.Hash](func() $0, string, []uint8, int, int) ([]uint8, error) #69488
|
||||
pkg crypto/rand, func Text() string #67057
|
||||
pkg crypto/sha3, func New224() *SHA3 #69982
|
||||
pkg crypto/sha3, func New256() *SHA3 #69982
|
||||
pkg crypto/sha3, func New384() *SHA3 #69982
|
||||
pkg crypto/sha3, func New512() *SHA3 #69982
|
||||
pkg crypto/sha3, func NewCSHAKE128([]uint8, []uint8) *SHAKE #69982
|
||||
pkg crypto/sha3, func NewCSHAKE256([]uint8, []uint8) *SHAKE #69982
|
||||
pkg crypto/sha3, func NewSHAKE128() *SHAKE #69982
|
||||
pkg crypto/sha3, func NewSHAKE256() *SHAKE #69982
|
||||
pkg crypto/sha3, func Sum224([]uint8) [28]uint8 #69982
|
||||
pkg crypto/sha3, func Sum256([]uint8) [32]uint8 #69982
|
||||
pkg crypto/sha3, func Sum384([]uint8) [48]uint8 #69982
|
||||
pkg crypto/sha3, func Sum512([]uint8) [64]uint8 #69982
|
||||
pkg crypto/sha3, func SumSHAKE128([]uint8, int) []uint8 #69982
|
||||
pkg crypto/sha3, func SumSHAKE256([]uint8, int) []uint8 #69982
|
||||
pkg crypto/sha3, method (*SHA3) AppendBinary([]uint8) ([]uint8, error) #69982
|
||||
pkg crypto/sha3, method (*SHA3) BlockSize() int #69982
|
||||
pkg crypto/sha3, method (*SHA3) MarshalBinary() ([]uint8, error) #69982
|
||||
pkg crypto/sha3, method (*SHA3) Reset() #69982
|
||||
pkg crypto/sha3, method (*SHA3) Size() int #69982
|
||||
pkg crypto/sha3, method (*SHA3) Sum([]uint8) []uint8 #69982
|
||||
pkg crypto/sha3, method (*SHA3) UnmarshalBinary([]uint8) error #69982
|
||||
pkg crypto/sha3, method (*SHA3) Write([]uint8) (int, error) #69982
|
||||
pkg crypto/sha3, method (*SHAKE) AppendBinary([]uint8) ([]uint8, error) #69982
|
||||
pkg crypto/sha3, method (*SHAKE) BlockSize() int #69982
|
||||
pkg crypto/sha3, method (*SHAKE) MarshalBinary() ([]uint8, error) #69982
|
||||
pkg crypto/sha3, method (*SHAKE) Read([]uint8) (int, error) #69982
|
||||
pkg crypto/sha3, method (*SHAKE) Reset() #69982
|
||||
pkg crypto/sha3, method (*SHAKE) UnmarshalBinary([]uint8) error #69982
|
||||
pkg crypto/sha3, method (*SHAKE) Write([]uint8) (int, error) #69982
|
||||
pkg crypto/sha3, type SHA3 struct #69982
|
||||
pkg crypto/sha3, type SHAKE struct #69982
|
||||
pkg crypto/subtle, func WithDataIndependentTiming(func()) #66450
|
||||
pkg crypto/tls, const X25519MLKEM768 = 4588 #69985
|
||||
pkg crypto/tls, const X25519MLKEM768 CurveID #69985
|
||||
pkg crypto/tls, type ClientHelloInfo struct, Extensions []uint16 #32936
|
||||
pkg crypto/tls, type Config struct, EncryptedClientHelloKeys []EncryptedClientHelloKey #68500
|
||||
pkg crypto/tls, type EncryptedClientHelloKey struct #68500
|
||||
pkg crypto/tls, type EncryptedClientHelloKey struct, Config []uint8 #68500
|
||||
pkg crypto/tls, type EncryptedClientHelloKey struct, PrivateKey []uint8 #68500
|
||||
pkg crypto/tls, type EncryptedClientHelloKey struct, SendAsRetry bool #68500
|
||||
pkg crypto/x509, const NoValidChains = 10 #68484
|
||||
pkg crypto/x509, const NoValidChains InvalidReason #68484
|
||||
pkg crypto/x509, method (OID) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg crypto/x509, method (OID) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg crypto/x509, type Certificate struct, InhibitAnyPolicy int #68484
|
||||
pkg crypto/x509, type Certificate struct, InhibitAnyPolicyZero bool #68484
|
||||
pkg crypto/x509, type Certificate struct, InhibitPolicyMapping int #68484
|
||||
pkg crypto/x509, type Certificate struct, InhibitPolicyMappingZero bool #68484
|
||||
pkg crypto/x509, type Certificate struct, PolicyMappings []PolicyMapping #68484
|
||||
pkg crypto/x509, type Certificate struct, RequireExplicitPolicy int #68484
|
||||
pkg crypto/x509, type Certificate struct, RequireExplicitPolicyZero bool #68484
|
||||
pkg crypto/x509, type PolicyMapping struct #68484
|
||||
pkg crypto/x509, type PolicyMapping struct, IssuerDomainPolicy OID #68484
|
||||
pkg crypto/x509, type PolicyMapping struct, SubjectDomainPolicy OID #68484
|
||||
pkg crypto/x509, type VerifyOptions struct, CertificatePolicies []OID #68484
|
||||
pkg debug/elf, const VER_FLG_BASE = 1 #63952
|
||||
pkg debug/elf, const VER_FLG_BASE DynamicVersionFlag #63952
|
||||
pkg debug/elf, const VER_FLG_INFO = 4 #63952
|
||||
pkg debug/elf, const VER_FLG_INFO DynamicVersionFlag #63952
|
||||
pkg debug/elf, const VER_FLG_WEAK = 2 #63952
|
||||
pkg debug/elf, const VER_FLG_WEAK DynamicVersionFlag #63952
|
||||
pkg debug/elf, method (*File) DynamicVersionNeeds() ([]DynamicVersionNeed, error) #63952
|
||||
pkg debug/elf, method (*File) DynamicVersions() ([]DynamicVersion, error) #63952
|
||||
pkg debug/elf, type DynamicVersion struct #63952
|
||||
pkg debug/elf, type DynamicVersion struct, Deps []string #63952
|
||||
pkg debug/elf, type DynamicVersion struct, Flags DynamicVersionFlag #63952
|
||||
pkg debug/elf, type DynamicVersion struct, Name string #63952
|
||||
pkg debug/elf, type DynamicVersion struct, Index uint16 #63952
|
||||
pkg debug/elf, type DynamicVersionDep struct #63952
|
||||
pkg debug/elf, type DynamicVersionDep struct, Dep string #63952
|
||||
pkg debug/elf, type DynamicVersionDep struct, Flags DynamicVersionFlag #63952
|
||||
pkg debug/elf, type DynamicVersionDep struct, Index uint16 #63952
|
||||
pkg debug/elf, type DynamicVersionFlag uint16 #63952
|
||||
pkg debug/elf, type DynamicVersionNeed struct #63952
|
||||
pkg debug/elf, type DynamicVersionNeed struct, Name string #63952
|
||||
pkg debug/elf, type DynamicVersionNeed struct, Needs []DynamicVersionDep #63952
|
||||
pkg debug/elf, type Symbol struct, HasVersion bool #63952
|
||||
pkg debug/elf, type Symbol struct, VersionIndex VersionIndex #63952
|
||||
pkg debug/elf, method (VersionIndex) Index() uint16 #63952
|
||||
pkg debug/elf, method (VersionIndex) IsHidden() bool #63952
|
||||
pkg debug/elf, type VersionIndex uint16 #63952
|
||||
pkg encoding, type BinaryAppender interface { AppendBinary } #62384
|
||||
pkg encoding, type BinaryAppender interface, AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg encoding, type TextAppender interface { AppendText } #62384
|
||||
pkg encoding, type TextAppender interface, AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg go/types, method (*Interface) EmbeddedTypes() iter.Seq[Type] #66626
|
||||
pkg go/types, method (*Interface) ExplicitMethods() iter.Seq[*Func] #66626
|
||||
pkg go/types, method (*Interface) Methods() iter.Seq[*Func] #66626
|
||||
pkg go/types, method (*MethodSet) Methods() iter.Seq[*Selection] #66626
|
||||
pkg go/types, method (*Named) Methods() iter.Seq[*Func] #66626
|
||||
pkg go/types, method (*Scope) Children() iter.Seq[*Scope] #66626
|
||||
pkg go/types, method (*Struct) Fields() iter.Seq[*Var] #66626
|
||||
pkg go/types, method (*Tuple) Variables() iter.Seq[*Var] #66626
|
||||
pkg go/types, method (*TypeList) Types() iter.Seq[Type] #66626
|
||||
pkg go/types, method (*TypeParamList) TypeParams() iter.Seq[*TypeParam] #66626
|
||||
pkg go/types, method (*Union) Terms() iter.Seq[*Term] #66626
|
||||
pkg hash/maphash, func Comparable[$0 comparable](Seed, $0) uint64 #54670
|
||||
pkg hash/maphash, func WriteComparable[$0 comparable](*Hash, $0) #54670
|
||||
pkg log/slog, method (*LevelVar) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg log/slog, method (Level) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg log/slog, var DiscardHandler Handler #62005
|
||||
pkg math/big, method (*Float) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg math/big, method (*Int) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg math/big, method (*Rat) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg math/rand/v2, method (*ChaCha8) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg math/rand/v2, method (*PCG) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg net, method (IP) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg net/http, method (*Protocols) SetHTTP1(bool) #67814
|
||||
pkg net/http, method (*Protocols) SetHTTP2(bool) #67814
|
||||
pkg net/http, method (*Protocols) SetUnencryptedHTTP2(bool) #67816
|
||||
pkg net/http, method (Protocols) HTTP1() bool #67814
|
||||
pkg net/http, method (Protocols) HTTP2() bool #67814
|
||||
pkg net/http, method (Protocols) String() string #67814
|
||||
pkg net/http, method (Protocols) UnencryptedHTTP2() bool #67816
|
||||
pkg net/http, type HTTP2Config struct #67813
|
||||
pkg net/http, type HTTP2Config struct, CountError func(string) #67813
|
||||
pkg net/http, type HTTP2Config struct, MaxConcurrentStreams int #67813
|
||||
pkg net/http, type HTTP2Config struct, MaxDecoderHeaderTableSize int #67813
|
||||
pkg net/http, type HTTP2Config struct, MaxEncoderHeaderTableSize int #67813
|
||||
pkg net/http, type HTTP2Config struct, MaxReadFrameSize int #67813
|
||||
pkg net/http, type HTTP2Config struct, MaxReceiveBufferPerConnection int #67813
|
||||
pkg net/http, type HTTP2Config struct, MaxReceiveBufferPerStream int #67813
|
||||
pkg net/http, type HTTP2Config struct, PermitProhibitedCipherSuites bool #67813
|
||||
pkg net/http, type HTTP2Config struct, PingTimeout time.Duration #67813
|
||||
pkg net/http, type HTTP2Config struct, SendPingTimeout time.Duration #67813
|
||||
pkg net/http, type HTTP2Config struct, WriteByteTimeout time.Duration #67813
|
||||
pkg net/http, type Protocols struct #67814
|
||||
pkg net/http, type Server struct, HTTP2 *HTTP2Config #67813
|
||||
pkg net/http, type Server struct, Protocols *Protocols #67814
|
||||
pkg net/http, type Transport struct, HTTP2 *HTTP2Config #67813
|
||||
pkg net/http, type Transport struct, Protocols *Protocols #67814
|
||||
pkg net/netip, method (Addr) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg net/netip, method (Addr) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg net/netip, method (AddrPort) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg net/netip, method (AddrPort) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg net/netip, method (Prefix) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg net/netip, method (Prefix) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg net/url, method (*URL) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg os, func OpenInRoot(string, string) (*File, error) #67002
|
||||
pkg os, func OpenRoot(string) (*Root, error) #67002
|
||||
pkg os, method (*Root) Close() error #67002
|
||||
pkg os, method (*Root) Create(string) (*File, error) #67002
|
||||
pkg os, method (*Root) FS() fs.FS #67002
|
||||
pkg os, method (*Root) Lstat(string) (fs.FileInfo, error) #67002
|
||||
pkg os, method (*Root) Mkdir(string, fs.FileMode) error #67002
|
||||
pkg os, method (*Root) Name() string #67002
|
||||
pkg os, method (*Root) Open(string) (*File, error) #67002
|
||||
pkg os, method (*Root) OpenFile(string, int, fs.FileMode) (*File, error) #67002
|
||||
pkg os, method (*Root) OpenRoot(string) (*Root, error) #67002
|
||||
pkg os, method (*Root) Remove(string) error #67002
|
||||
pkg os, method (*Root) Stat(string) (fs.FileInfo, error) #67002
|
||||
pkg os, type Root struct #67002
|
||||
pkg regexp, method (*Regexp) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg runtime, func AddCleanup[$0 interface{}, $1 interface{}](*$0, func($1), $1) Cleanup #67535
|
||||
pkg runtime, func GOROOT //deprecated #51473
|
||||
pkg runtime, method (Cleanup) Stop() #67535
|
||||
pkg runtime, type Cleanup struct #67535
|
||||
pkg strings, func FieldsFuncSeq(string, func(int32) bool) iter.Seq[string] #61901
|
||||
pkg strings, func FieldsSeq(string) iter.Seq[string] #61901
|
||||
pkg strings, func Lines(string) iter.Seq[string] #61901
|
||||
pkg strings, func SplitAfterSeq(string, string) iter.Seq[string] #61901
|
||||
pkg strings, func SplitSeq(string, string) iter.Seq[string] #61901
|
||||
pkg testing, method (*B) Chdir(string) #62516
|
||||
pkg testing, method (*B) Context() context.Context #36532
|
||||
pkg testing, method (*B) Loop() bool #61515
|
||||
pkg testing, method (*F) Chdir(string) #62516
|
||||
pkg testing, method (*F) Context() context.Context #36532
|
||||
pkg testing, method (*T) Chdir(string) #62516
|
||||
pkg testing, method (*T) Context() context.Context #36532
|
||||
pkg testing, type TB interface, Chdir(string) #62516
|
||||
pkg testing, type TB interface, Context() context.Context #36532
|
||||
pkg time, method (Time) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg time, method (Time) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg weak, func Make[$0 interface{}](*$0) Pointer[$0] #67552
|
||||
pkg weak, method (Pointer[$0]) Value() *$0 #67552
|
||||
pkg weak, type Pointer[$0 interface{}] struct #67552
|
||||
111 api/go1.25.txt
@@ -1,111 +0,0 @@
|
||||
pkg crypto, func SignMessage(Signer, io.Reader, []uint8, SignerOpts) ([]uint8, error) #63405
|
||||
pkg crypto, type MessageSigner interface { Public, Sign, SignMessage } #63405
|
||||
pkg crypto, type MessageSigner interface, Public() PublicKey #63405
|
||||
pkg crypto, type MessageSigner interface, Sign(io.Reader, []uint8, SignerOpts) ([]uint8, error) #63405
|
||||
pkg crypto, type MessageSigner interface, SignMessage(io.Reader, []uint8, SignerOpts) ([]uint8, error) #63405
|
||||
pkg crypto/ecdsa, func ParseRawPrivateKey(elliptic.Curve, []uint8) (*PrivateKey, error) #63963
|
||||
pkg crypto/ecdsa, func ParseUncompressedPublicKey(elliptic.Curve, []uint8) (*PublicKey, error) #63963
|
||||
pkg crypto/ecdsa, method (*PrivateKey) Bytes() ([]uint8, error) #63963
|
||||
pkg crypto/ecdsa, method (*PublicKey) Bytes() ([]uint8, error) #63963
|
||||
pkg crypto/sha3, method (*SHA3) Clone() (hash.Cloner, error) #69521
|
||||
pkg crypto/tls, type Config struct, GetEncryptedClientHelloKeys func(*ClientHelloInfo) ([]EncryptedClientHelloKey, error) #71920
|
||||
pkg crypto/tls, type ConnectionState struct, CurveID CurveID #67516
|
||||
pkg debug/elf, const PT_RISCV_ATTRIBUTES = 1879048195 #72843
|
||||
pkg debug/elf, const PT_RISCV_ATTRIBUTES ProgType #72843
|
||||
pkg debug/elf, const SHT_RISCV_ATTRIBUTES = 1879048195 #72843
|
||||
pkg debug/elf, const SHT_RISCV_ATTRIBUTES SectionType #72843
|
||||
pkg go/ast, const FilterFuncDuplicates //deprecated #73088
|
||||
pkg go/ast, const FilterImportDuplicates //deprecated #73088
|
||||
pkg go/ast, const FilterUnassociatedComments //deprecated #73088
|
||||
pkg go/ast, func FilterPackage //deprecated #73088
|
||||
pkg go/ast, func MergePackageFiles //deprecated #73088
|
||||
pkg go/ast, func PackageExports //deprecated #73088
|
||||
pkg go/ast, func PreorderStack(Node, []Node, func(Node, []Node) bool) #73319
|
||||
pkg go/ast, type MergeMode //deprecated #73088
|
||||
pkg go/parser, func ParseDir //deprecated #71122
|
||||
pkg go/token, method (*FileSet) AddExistingFiles(...*File) #73205
|
||||
pkg go/types, const FieldVar = 6 #70250
|
||||
pkg go/types, const FieldVar VarKind #70250
|
||||
pkg go/types, const LocalVar = 2 #70250
|
||||
pkg go/types, const LocalVar VarKind #70250
|
||||
pkg go/types, const PackageVar = 1 #70250
|
||||
pkg go/types, const PackageVar VarKind #70250
|
||||
pkg go/types, const ParamVar = 4 #70250
|
||||
pkg go/types, const ParamVar VarKind #70250
|
||||
pkg go/types, const RecvVar = 3 #70250
|
||||
pkg go/types, const RecvVar VarKind #70250
|
||||
pkg go/types, const ResultVar = 5 #70250
|
||||
pkg go/types, const ResultVar VarKind #70250
|
||||
pkg go/types, func LookupSelection(Type, bool, *Package, string) (Selection, bool) #70737
|
||||
pkg go/types, method (*Var) Kind() VarKind #70250
|
||||
pkg go/types, method (*Var) SetKind(VarKind) #70250
|
||||
pkg go/types, method (VarKind) String() string #70250
|
||||
pkg go/types, type VarKind uint8 #70250
|
||||
pkg hash, type Cloner interface { BlockSize, Clone, Reset, Size, Sum, Write } #69521
|
||||
pkg hash, type Cloner interface, BlockSize() int #69521
|
||||
pkg hash, type Cloner interface, Clone() (Cloner, error) #69521
|
||||
pkg hash, type Cloner interface, Reset() #69521
|
||||
pkg hash, type Cloner interface, Size() int #69521
|
||||
pkg hash, type Cloner interface, Sum([]uint8) []uint8 #69521
|
||||
pkg hash, type Cloner interface, Write([]uint8) (int, error) #69521
|
||||
pkg hash, type XOF interface { BlockSize, Read, Reset, Write } #69518
|
||||
pkg hash, type XOF interface, BlockSize() int #69518
|
||||
pkg hash, type XOF interface, Read([]uint8) (int, error) #69518
|
||||
pkg hash, type XOF interface, Reset() #69518
|
||||
pkg hash, type XOF interface, Write([]uint8) (int, error) #69518
|
||||
pkg hash/maphash, method (*Hash) Clone() (hash.Cloner, error) #69521
|
||||
pkg io/fs, func Lstat(FS, string) (FileInfo, error) #49580
|
||||
pkg io/fs, func ReadLink(FS, string) (string, error) #49580
|
||||
pkg io/fs, type ReadLinkFS interface { Lstat, Open, ReadLink } #49580
|
||||
pkg io/fs, type ReadLinkFS interface, Lstat(string) (FileInfo, error) #49580
|
||||
pkg io/fs, type ReadLinkFS interface, Open(string) (File, error) #49580
|
||||
pkg io/fs, type ReadLinkFS interface, ReadLink(string) (string, error) #49580
|
||||
pkg log/slog, func GroupAttrs(string, ...Attr) Attr #66365
|
||||
pkg log/slog, method (Record) Source() *Source #70280
|
||||
pkg mime/multipart, func FileContentDisposition(string, string) string #46771
|
||||
pkg net/http, func NewCrossOriginProtection() *CrossOriginProtection #73626
|
||||
pkg net/http, method (*CrossOriginProtection) AddInsecureBypassPattern(string) #73626
|
||||
pkg net/http, method (*CrossOriginProtection) AddTrustedOrigin(string) error #73626
|
||||
pkg net/http, method (*CrossOriginProtection) Check(*Request) error #73626
|
||||
pkg net/http, method (*CrossOriginProtection) Handler(Handler) Handler #73626
|
||||
pkg net/http, method (*CrossOriginProtection) SetDenyHandler(Handler) #73626
|
||||
pkg net/http, type CrossOriginProtection struct #73626
|
||||
pkg os, method (*Root) Chmod(string, fs.FileMode) error #67002
|
||||
pkg os, method (*Root) Chown(string, int, int) error #67002
|
||||
pkg os, method (*Root) Chtimes(string, time.Time, time.Time) error #67002
|
||||
pkg os, method (*Root) Lchown(string, int, int) error #67002
|
||||
pkg os, method (*Root) Link(string, string) error #67002
|
||||
pkg os, method (*Root) MkdirAll(string, fs.FileMode) error #67002
|
||||
pkg os, method (*Root) ReadFile(string) ([]uint8, error) #73126
|
||||
pkg os, method (*Root) Readlink(string) (string, error) #67002
|
||||
pkg os, method (*Root) RemoveAll(string) error #67002
|
||||
pkg os, method (*Root) Rename(string, string) error #67002
|
||||
pkg os, method (*Root) Symlink(string, string) error #67002
|
||||
pkg os, method (*Root) WriteFile(string, []uint8, fs.FileMode) error #73126
|
||||
pkg reflect, func TypeAssert[$0 interface{}](Value) ($0, bool) #62121
|
||||
pkg runtime, func SetDefaultGOMAXPROCS() #73193
|
||||
pkg runtime/trace, func NewFlightRecorder(FlightRecorderConfig) *FlightRecorder #63185
|
||||
pkg runtime/trace, method (*FlightRecorder) Enabled() bool #63185
|
||||
pkg runtime/trace, method (*FlightRecorder) Start() error #63185
|
||||
pkg runtime/trace, method (*FlightRecorder) Stop() #63185
|
||||
pkg runtime/trace, method (*FlightRecorder) WriteTo(io.Writer) (int64, error) #63185
|
||||
pkg runtime/trace, type FlightRecorder struct #63185
|
||||
pkg runtime/trace, type FlightRecorderConfig struct #63185
|
||||
pkg runtime/trace, type FlightRecorderConfig struct, MaxBytes uint64 #63185
|
||||
pkg runtime/trace, type FlightRecorderConfig struct, MinAge time.Duration #63185
|
||||
pkg sync, method (*WaitGroup) Go(func()) #63796
|
||||
pkg testing, method (*B) Attr(string, string) #43936
|
||||
pkg testing, method (*B) Output() io.Writer #59928
|
||||
pkg testing, method (*F) Attr(string, string) #43936
|
||||
pkg testing, method (*F) Output() io.Writer #59928
|
||||
pkg testing, method (*T) Attr(string, string) #43936
|
||||
pkg testing, method (*T) Output() io.Writer #59928
|
||||
pkg testing, type TB interface, Attr(string, string) #43936
|
||||
pkg testing, type TB interface, Output() io.Writer #59928
|
||||
pkg testing/fstest, method (MapFS) Lstat(string) (fs.FileInfo, error) #49580
|
||||
pkg testing/fstest, method (MapFS) ReadLink(string) (string, error) #49580
|
||||
pkg testing/synctest, func Test(*testing.T, func(*testing.T)) #67434
|
||||
pkg testing/synctest, func Wait() #67434
|
||||
pkg unicode, var CategoryAliases map[string]string #70780
|
||||
pkg unicode, var Cn *RangeTable #70780
|
||||
pkg unicode, var LC *RangeTable #70780
|
||||
180 api/go1.26.txt
@@ -1,180 +0,0 @@
|
||||
pkg bytes, method (*Buffer) Peek(int) ([]uint8, error) #73794
|
||||
pkg crypto, type Decapsulator interface { Decapsulate, Encapsulator } #75300
|
||||
pkg crypto, type Decapsulator interface, Decapsulate([]uint8) ([]uint8, error) #75300
|
||||
pkg crypto, type Decapsulator interface, Encapsulator() Encapsulator #75300
|
||||
pkg crypto, type Encapsulator interface { Bytes, Encapsulate } #75300
|
||||
pkg crypto, type Encapsulator interface, Bytes() []uint8 #75300
|
||||
pkg crypto, type Encapsulator interface, Encapsulate() ([]uint8, []uint8) #75300
|
||||
pkg crypto/ecdh, type KeyExchanger interface { Curve, ECDH, PublicKey } #75300
|
||||
pkg crypto/ecdh, type KeyExchanger interface, Curve() Curve #75300
|
||||
pkg crypto/ecdh, type KeyExchanger interface, ECDH(*PublicKey) ([]uint8, error) #75300
|
||||
pkg crypto/ecdh, type KeyExchanger interface, PublicKey() *PublicKey #75300
|
||||
pkg crypto/ecdsa, type PrivateKey struct, D //deprecated #63963
|
||||
pkg crypto/ecdsa, type PublicKey struct, X //deprecated #63963
|
||||
pkg crypto/ecdsa, type PublicKey struct, Y //deprecated #63963
|
||||
pkg crypto/fips140, func Enforced() bool #74630
|
||||
pkg crypto/fips140, func Version() string #75301
|
||||
pkg crypto/fips140, func WithoutEnforcement(func()) #74630
|
||||
pkg crypto/hpke, func AES128GCM() AEAD #75300
|
||||
pkg crypto/hpke, func AES256GCM() AEAD #75300
|
||||
pkg crypto/hpke, func ChaCha20Poly1305() AEAD #75300
|
||||
pkg crypto/hpke, func DHKEM(ecdh.Curve) KEM #75300
|
||||
pkg crypto/hpke, func ExportOnly() AEAD #75300
|
||||
pkg crypto/hpke, func HKDFSHA256() KDF #75300
|
||||
pkg crypto/hpke, func HKDFSHA384() KDF #75300
|
||||
pkg crypto/hpke, func HKDFSHA512() KDF #75300
|
||||
pkg crypto/hpke, func MLKEM1024() KEM #75300
|
||||
pkg crypto/hpke, func MLKEM1024P384() KEM #75300
|
||||
pkg crypto/hpke, func MLKEM768() KEM #75300
|
||||
pkg crypto/hpke, func MLKEM768P256() KEM #75300
|
||||
pkg crypto/hpke, func MLKEM768X25519() KEM #75300
|
||||
pkg crypto/hpke, func NewAEAD(uint16) (AEAD, error) #75300
|
||||
pkg crypto/hpke, func NewDHKEMPrivateKey(ecdh.KeyExchanger) (PrivateKey, error) #75300
|
||||
pkg crypto/hpke, func NewDHKEMPublicKey(*ecdh.PublicKey) (PublicKey, error) #75300
|
||||
pkg crypto/hpke, func NewHybridPrivateKey(crypto.Decapsulator, ecdh.KeyExchanger) (PrivateKey, error) #75300
|
||||
pkg crypto/hpke, func NewHybridPublicKey(crypto.Encapsulator, *ecdh.PublicKey) (PublicKey, error) #75300
|
||||
pkg crypto/hpke, func NewKDF(uint16) (KDF, error) #75300
|
||||
pkg crypto/hpke, func NewKEM(uint16) (KEM, error) #75300
|
||||
pkg crypto/hpke, func NewMLKEMPrivateKey(crypto.Decapsulator) (PrivateKey, error) #75300
|
||||
pkg crypto/hpke, func NewMLKEMPublicKey(crypto.Encapsulator) (PublicKey, error) #75300
|
||||
pkg crypto/hpke, func NewRecipient([]uint8, PrivateKey, KDF, AEAD, []uint8) (*Recipient, error) #75300
|
||||
pkg crypto/hpke, func NewSender(PublicKey, KDF, AEAD, []uint8) ([]uint8, *Sender, error) #75300
|
||||
pkg crypto/hpke, func Open(PrivateKey, KDF, AEAD, []uint8, []uint8) ([]uint8, error) #75300
|
||||
pkg crypto/hpke, func SHAKE128() KDF #75300
|
||||
pkg crypto/hpke, func SHAKE256() KDF #75300
|
||||
pkg crypto/hpke, func Seal(PublicKey, KDF, AEAD, []uint8, []uint8) ([]uint8, error) #75300
|
||||
pkg crypto/hpke, method (*Recipient) Export(string, int) ([]uint8, error) #75300
|
||||
pkg crypto/hpke, method (*Recipient) Open([]uint8, []uint8) ([]uint8, error) #75300
|
||||
pkg crypto/hpke, method (*Sender) Export(string, int) ([]uint8, error) #75300
|
||||
pkg crypto/hpke, method (*Sender) Seal([]uint8, []uint8) ([]uint8, error) #75300
|
||||
pkg crypto/hpke, type AEAD interface, ID() uint16 #75300
|
||||
pkg crypto/hpke, type AEAD interface, unexported methods #75300
|
||||
pkg crypto/hpke, type KDF interface, ID() uint16 #75300
|
||||
pkg crypto/hpke, type KDF interface, unexported methods #75300
|
||||
pkg crypto/hpke, type KEM interface, DeriveKeyPair([]uint8) (PrivateKey, error) #75300
|
||||
pkg crypto/hpke, type KEM interface, GenerateKey() (PrivateKey, error) #75300
|
||||
pkg crypto/hpke, type KEM interface, ID() uint16 #75300
|
||||
pkg crypto/hpke, type KEM interface, NewPrivateKey([]uint8) (PrivateKey, error) #75300
|
||||
pkg crypto/hpke, type KEM interface, NewPublicKey([]uint8) (PublicKey, error) #75300
|
||||
pkg crypto/hpke, type KEM interface, unexported methods #75300
|
||||
pkg crypto/hpke, type PrivateKey interface, Bytes() ([]uint8, error) #75300
|
||||
pkg crypto/hpke, type PrivateKey interface, KEM() KEM #75300
|
||||
pkg crypto/hpke, type PrivateKey interface, PublicKey() PublicKey #75300
|
||||
pkg crypto/hpke, type PrivateKey interface, unexported methods #75300
|
||||
pkg crypto/hpke, type PublicKey interface, Bytes() []uint8 #75300
|
||||
pkg crypto/hpke, type PublicKey interface, KEM() KEM #75300
|
||||
pkg crypto/hpke, type PublicKey interface, unexported methods #75300
|
||||
pkg crypto/hpke, type Recipient struct #75300
|
||||
pkg crypto/hpke, type Sender struct #75300
|
||||
pkg crypto/mlkem, method (*DecapsulationKey1024) Encapsulator() crypto.Encapsulator #75300
|
||||
pkg crypto/mlkem, method (*DecapsulationKey768) Encapsulator() crypto.Encapsulator #75300
|
||||
pkg crypto/mlkem/mlkemtest, func Encapsulate1024(*mlkem.EncapsulationKey1024, []uint8) ([]uint8, []uint8, error) #73627
|
||||
pkg crypto/mlkem/mlkemtest, func Encapsulate768(*mlkem.EncapsulationKey768, []uint8) ([]uint8, []uint8, error) #73627
|
||||
pkg crypto/rsa, func DecryptPKCS1v15 //deprecated #75302
|
||||
pkg crypto/rsa, func DecryptPKCS1v15SessionKey //deprecated #75302
|
||||
pkg crypto/rsa, func EncryptOAEPWithOptions(io.Reader, *PublicKey, []uint8, *OAEPOptions) ([]uint8, error) #65716
|
||||
pkg crypto/rsa, func EncryptPKCS1v15 //deprecated #75302
|
||||
pkg crypto/rsa, type PKCS1v15DecryptOptions //deprecated #75302
|
||||
pkg crypto/tls, const QUICErrorEvent = 10 #75108
|
||||
pkg crypto/tls, const QUICErrorEvent QUICEventKind #75108
|
||||
pkg crypto/tls, const SecP256r1MLKEM768 = 4587 #71206
|
||||
pkg crypto/tls, const SecP256r1MLKEM768 CurveID #71206
|
||||
pkg crypto/tls, const SecP384r1MLKEM1024 = 4589 #71206
|
||||
pkg crypto/tls, const SecP384r1MLKEM1024 CurveID #71206
|
||||
pkg crypto/tls, type ClientHelloInfo struct, HelloRetryRequest bool #74425
|
||||
pkg crypto/tls, type ConnectionState struct, HelloRetryRequest bool #74425
|
||||
pkg crypto/tls, type QUICEvent struct, Err error #75108
|
||||
pkg crypto/x509, func OIDFromASN1OID(asn1.ObjectIdentifier) (OID, error) #75325
|
||||
pkg crypto/x509, method (ExtKeyUsage) OID() OID #75325
|
||||
pkg crypto/x509, method (ExtKeyUsage) String() string #56866
|
||||
pkg crypto/x509, method (KeyUsage) String() string #56866
|
||||
pkg debug/elf, const R_LARCH_CALL36 = 110 #75562
|
||||
pkg debug/elf, const R_LARCH_CALL36 R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC32 = 13 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC32 R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC64 = 14 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC64 R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC64_HI12 = 118 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC64_HI12 R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC64_LO20 = 117 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC64_LO20 R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC64_PC_HI12 = 114 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC64_PC_HI12 R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC64_PC_LO20 = 113 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC64_PC_LO20 R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC_CALL = 120 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC_CALL R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC_HI20 = 115 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC_HI20 R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC_LD = 119 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC_LD R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC_LO12 = 116 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC_LO12 R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC_PCREL20_S2 = 126 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC_PCREL20_S2 R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC_PC_HI20 = 111 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC_PC_HI20 R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC_PC_LO12 = 112 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC_PC_LO12 R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_GD_PCREL20_S2 = 125 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_GD_PCREL20_S2 R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_LD_PCREL20_S2 = 124 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_LD_PCREL20_S2 R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_LE_ADD_R = 122 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_LE_ADD_R R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_LE_HI20_R = 121 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_LE_HI20_R R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_LE_LO12_R = 123 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_LE_LO12_R R_LARCH #75562
|
||||
pkg errors, func AsType[$0 error](error) ($0, bool) #51945
|
||||
pkg go/ast, func ParseDirective(token.Pos, string) (Directive, bool) #68021
|
||||
pkg go/ast, method (*Directive) End() token.Pos #68021
|
||||
pkg go/ast, method (*Directive) ParseArgs() ([]DirectiveArg, error) #68021
|
||||
pkg go/ast, method (*Directive) Pos() token.Pos #68021
|
||||
pkg go/ast, type BasicLit struct, ValueEnd token.Pos #76031
|
||||
pkg go/ast, type Directive struct #68021
|
||||
pkg go/ast, type Directive struct, Args string #68021
|
||||
pkg go/ast, type Directive struct, ArgsPos token.Pos #68021
|
||||
pkg go/ast, type Directive struct, Name string #68021
|
||||
pkg go/ast, type Directive struct, Slash token.Pos #68021
|
||||
pkg go/ast, type Directive struct, Tool string #68021
|
||||
pkg go/ast, type DirectiveArg struct #68021
|
||||
pkg go/ast, type DirectiveArg struct, Arg string #68021
|
||||
pkg go/ast, type DirectiveArg struct, Pos token.Pos #68021
|
||||
pkg go/token, method (*File) End() Pos #75849
|
||||
pkg log/slog, func NewMultiHandler(...Handler) *MultiHandler #65954
|
||||
pkg log/slog, method (*MultiHandler) Enabled(context.Context, Level) bool #65954
|
||||
pkg log/slog, method (*MultiHandler) Handle(context.Context, Record) error #65954
|
||||
pkg log/slog, method (*MultiHandler) WithAttrs([]Attr) Handler #65954
|
||||
pkg log/slog, method (*MultiHandler) WithGroup(string) Handler #65954
|
||||
pkg log/slog, type MultiHandler struct #65954
|
||||
pkg net, method (*Dialer) DialIP(context.Context, string, netip.Addr, netip.Addr) (*IPConn, error) #49097
|
||||
pkg net, method (*Dialer) DialTCP(context.Context, string, netip.AddrPort, netip.AddrPort) (*TCPConn, error) #49097
|
||||
pkg net, method (*Dialer) DialUDP(context.Context, string, netip.AddrPort, netip.AddrPort) (*UDPConn, error) #49097
|
||||
pkg net, method (*Dialer) DialUnix(context.Context, string, *UnixAddr, *UnixAddr) (*UnixConn, error) #49097
|
||||
pkg net/http, method (*ClientConn) Available() int #75772
|
||||
pkg net/http, method (*ClientConn) Close() error #75772
|
||||
pkg net/http, method (*ClientConn) Err() error #75772
|
||||
pkg net/http, method (*ClientConn) InFlight() int #75772
|
||||
pkg net/http, method (*ClientConn) Release() #75772
|
||||
pkg net/http, method (*ClientConn) Reserve() error #75772
|
||||
pkg net/http, method (*ClientConn) RoundTrip(*Request) (*Response, error) #75772
|
||||
pkg net/http, method (*ClientConn) SetStateHook(func(*ClientConn)) #75772
|
||||
pkg net/http, method (*Transport) NewClientConn(context.Context, string, string) (*ClientConn, error) #75772
|
||||
pkg net/http, type ClientConn struct #75772
|
||||
pkg net/http, type HTTP2Config struct, StrictMaxConcurrentRequests bool #67813
|
||||
pkg net/http/httputil, type ReverseProxy struct, Director //deprecated #73161
|
||||
pkg net/netip, method (Prefix) Compare(Prefix) int #61642
|
||||
pkg os, method (*Process) WithHandle(func(uintptr)) error #70352
|
||||
pkg os, var ErrNoHandle error #70352
|
||||
pkg reflect, method (Value) Fields() iter.Seq2[StructField, Value] #66631
|
||||
pkg reflect, method (Value) Methods() iter.Seq2[Method, Value] #66631
|
||||
pkg reflect, type Type interface, Fields() iter.Seq[StructField] #66631
|
||||
pkg reflect, type Type interface, Ins() iter.Seq[Type] #66631
|
||||
pkg reflect, type Type interface, Methods() iter.Seq[Method] #66631
|
||||
pkg reflect, type Type interface, Outs() iter.Seq[Type] #66631
|
||||
pkg testing, method (*B) ArtifactDir() string #71287
|
||||
pkg testing, method (*F) ArtifactDir() string #71287
|
||||
pkg testing, method (*T) ArtifactDir() string #71287
|
||||
pkg testing, type TB interface, ArtifactDir() string #71287
|
||||
pkg testing/cryptotest, func SetGlobalRandom(*testing.T, uint64) #70942
|
||||
@@ -1 +0,0 @@
|
||||
pkg go/scanner, method (*Scanner) End() token.Pos #74958
|
||||
@@ -1 +0,0 @@
|
||||
pkg net/http, type Server struct, DisableClientPriority bool #75500
|
||||
@@ -1 +0,0 @@
|
||||
pkg testing/synctest, func Sleep(time.Duration) #77169
|
||||
@@ -1,16 +0,0 @@
|
||||
pkg unicode, const Version = "17.0.0" #77266
|
||||
pkg unicode, var Beria_Erfe *RangeTable #77266
|
||||
pkg unicode, var Garay *RangeTable #77266
|
||||
pkg unicode, var Gurung_Khema *RangeTable #77266
|
||||
pkg unicode, var IDS_Unary_Operator *RangeTable #77266
|
||||
pkg unicode, var ID_Compat_Math_Continue *RangeTable #77266
|
||||
pkg unicode, var ID_Compat_Math_Start *RangeTable #77266
|
||||
pkg unicode, var Kirat_Rai *RangeTable #77266
|
||||
pkg unicode, var Modifier_Combining_Mark *RangeTable #77266
|
||||
pkg unicode, var Ol_Onal *RangeTable #77266
|
||||
pkg unicode, var Sidetic *RangeTable #77266
|
||||
pkg unicode, var Sunuwar *RangeTable #77266
|
||||
pkg unicode, var Tai_Yo *RangeTable #77266
|
||||
pkg unicode, var Todhri *RangeTable #77266
|
||||
pkg unicode, var Tolong_Siki *RangeTable #77266
|
||||
pkg unicode, var Tulu_Tigalari *RangeTable #77266
|
||||
@@ -1 +1,2 @@
|
||||
branch: master
|
||||
branch: release-branch.go1.22
|
||||
parent-branch: master
|
||||
|
||||
@@ -1,75 +0,0 @@
|
||||
# Release Notes
|
||||
|
||||
The `initial` and `next` subdirectories of this directory are for release notes.
|
||||
|
||||
## For developers
|
||||
|
||||
Release notes should be added to `next` by editing existing files or creating
|
||||
new files. **Do not add RELNOTE=yes comments in CLs.** Instead, add a file to
|
||||
the CL (or ask the author to do so).
|
||||
|
||||
At the end of the development cycle, the files will be merged by being
|
||||
concatenated in sorted order by pathname. Files in the directory matching the
|
||||
glob "*stdlib/*minor" are treated specially. They should be in subdirectories
|
||||
corresponding to standard library package paths, and headings for those package
|
||||
paths will be generated automatically.
|
||||
|
||||
Files in this repo's `api/next` directory must have corresponding files in
|
||||
`doc/next/*stdlib/*minor`.
|
||||
The files should be in the subdirectory for the package with the new
|
||||
API, and should be named after the issue number of the API proposal.
|
||||
For example, if the directory `6-stdlib/99-minor` is present,
|
||||
then an `api/next` file with the line
|
||||
|
||||
pkg net/http, function F #12345
|
||||
|
||||
should have a corresponding file named `doc/next/6-stdlib/99-minor/net/http/12345.md`.
|
||||
At a minimum, that file should contain either a full sentence or a TODO,
|
||||
ideally referring to a person with the responsibility to complete the note.
|
||||
|
||||
If your CL addresses an accepted proposal, mention the proposal issue number in
|
||||
your release note in the form `/issue/NUMBER`. A link to the issue in the text
|
||||
will have this form (see below). If you don't want to mention the issue in the
|
||||
text, add it as a comment:
|
||||
```
|
||||
<!-- go.dev/issue/12345 -->
|
||||
```
|
||||
If an accepted proposal is mentioned in a CL but not in the release notes, it will be
|
||||
flagged as a TODO by the automated tooling. That is true even for proposals that add API.
|
||||
|
||||
Use the following forms in your markdown:
|
||||
|
||||
[http.Request] # symbol documentation; auto-linked as in Go doc strings
|
||||
[Request] # short form, for symbols in the package being documented
|
||||
[net/http] # package link
|
||||
[#12345](/issue/12345) # GitHub issues
|
||||
[CL 6789](/cl/6789) # Gerrit changelists
|
||||
|
||||
To preview `next` content in merged form using a local instance of the website, run:
|
||||
|
||||
```
|
||||
go run golang.org/x/website/cmd/golangorg@latest -goroot=..
|
||||
```
|
||||
|
||||
Then open http://localhost:6060/doc/next. Refresh the page to see your latest edits.
|
||||
|
||||
## For the release team
|
||||
|
||||
The `relnote` tool, at `golang.org/x/build/cmd/relnote`, operates on the files
|
||||
in `doc/next`.
|
||||
|
||||
As a release cycle nears completion, run `relnote todo` to get a list of
|
||||
unfinished release note work.
|
||||
|
||||
To prepare the release notes for a release, run `relnote generate`.
|
||||
That will merge the `.md` files in `next` into a single file.
|
||||
Atomically (as close to it as possible) add that file to the `_content/doc` directory
|
||||
of the website repository and remove the `doc/next` directory in this repository.
|
||||
|
||||
To begin the next release development cycle, populate the contents of `next`
|
||||
with those of `initial`. From the repo root:
|
||||
|
||||
> cd doc
|
||||
> cp -R initial/ next
|
||||
|
||||
Then edit `next/1-intro.md` to refer to the next version.
|
||||
@@ -1039,12 +1039,6 @@ The value of <code>GOMIPS64</code> environment variable (<code>hardfloat</code>
|
||||
<code>GOMIPS64_hardfloat</code> or <code>GOMIPS64_softfloat</code>.
|
||||
</p>
|
||||
|
||||
<h3 id="riscv64">RISCV64</h3>
|
||||
|
||||
<p>
|
||||
Reference: <a href="/pkg/cmd/internal/obj/riscv">Go RISCV64 Assembly Instructions Reference Manual</a>
|
||||
</p>
|
||||
|
||||
<h3 id="unsupported_opcodes">Unsupported opcodes</h3>
|
||||
|
||||
<p>
|
||||
|
||||
doc/go1.17_spec.html (new file, 6864 lines; diff suppressed because it is too large)
@@ -82,7 +82,7 @@ while still insisting that races are errors and that tools can diagnose and repo
|
||||
<p>
|
||||
The following formal definition of Go's memory model closely follows
|
||||
the approach presented by Hans-J. Boehm and Sarita V. Adve in
|
||||
“<a href="https://dl.acm.org/doi/10.1145/1375581.1375591">Foundations of the C++ Concurrency Memory Model</a>”,
|
||||
“<a href="https://www.hpl.hp.com/techreports/2008/HPL-2008-56.pdf">Foundations of the C++ Concurrency Memory Model</a>”,
|
||||
published in PLDI 2008.
|
||||
The definition of data-race-free programs and the guarantee of sequential consistency
|
||||
for race-free programs are equivalent to the ones in that work.
|
||||
@@ -98,12 +98,12 @@ which in turn are made up of memory operations.
|
||||
A <i>memory operation</i> is modeled by four details:
|
||||
</p>
|
||||
<ul>
|
||||
<li>its kind, indicating whether it is an ordinary data read, an ordinary data write,
|
||||
or a <i>synchronizing operation</i> such as an atomic data access,
|
||||
a mutex operation, or a channel operation,</li>
|
||||
<li>its location in the program,</li>
|
||||
<li>the memory location or variable being accessed, and</li>
|
||||
<li>the values read or written by the operation.</li>
|
||||
<li>its kind, indicating whether it is an ordinary data read, an ordinary data write,
|
||||
or a <i>synchronizing operation</i> such as an atomic data access,
|
||||
a mutex operation, or a channel operation,
|
||||
<li>its location in the program,
|
||||
<li>the memory location or variable being accessed, and
|
||||
<li>the values read or written by the operation.
|
||||
</ul>
|
||||
<p>
|
||||
Some memory operations are <i>read-like</i>, including read, atomic read, mutex lock, and channel receive.
|
||||
@@ -162,8 +162,8 @@ where visible means that both of the following hold:
|
||||
</p>
|
||||
|
||||
<ol>
|
||||
<li><i>w</i> happens before <i>r</i>.</li>
|
||||
<li><i>w</i> does not happen before any other write <i>w'</i> (to <i>x</i>) that happens before <i>r</i>.</li>
|
||||
<li><i>w</i> happens before <i>r</i>.
|
||||
<li><i>w</i> does not happen before any other write <i>w'</i> (to <i>x</i>) that happens before <i>r</i>.
|
||||
</ol>
|
||||
|
||||
<p>
|
||||
@@ -231,7 +231,7 @@ do exactly this.
|
||||
|
||||
<p>
|
||||
A read of an array, struct, or complex number
|
||||
may be implemented as a read of each individual sub-value
|
||||
may by implemented as a read of each individual sub-value
|
||||
(array element, struct field, or real/imaginary component),
|
||||
in any order.
|
||||
Similarly, a write of an array, struct, or complex number
|
||||
@@ -453,7 +453,7 @@ crash, or do something else.)
|
||||
</p>
|
||||
|
||||
<p class="rule">
|
||||
The <i>k</i>th receive from a channel with capacity <i>C</i> is synchronized before the completion of the <i>k</i>+<i>C</i>th send on that channel.
|
||||
The <i>k</i>th receive on a channel with capacity <i>C</i> is synchronized before the completion of the <i>k</i>+<i>C</i>th send from that channel completes.
|
||||
</p>
|
||||
|
||||
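The capacity rule above is what justifies using a buffered channel as a counting semaphore; a minimal Go sketch under that reading (the capacity of 3 and the task names are arbitrary choices, not part of the memory model text):

```
package main

import "fmt"

func main() {
	const C = 3 // at most C tasks may run at once
	limit := make(chan struct{}, C)
	done := make(chan struct{})

	tasks := []string{"a", "b", "c", "d", "e"}
	for _, t := range tasks {
		t := t
		go func() {
			limit <- struct{}{} // the k+C-th send cannot complete before the k-th receive
			fmt.Println("running", t)
			<-limit // receiving frees a slot so a later send can complete
			done <- struct{}{}
		}()
	}
	for range tasks {
		<-done
	}
}
```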
<p>
|
||||
|
||||
doc/go_spec.html (888 lines; diff suppressed because it is too large)
doc/godebug.md (268 lines)
@@ -34,7 +34,6 @@ For example, if a Go program is running in an environment that contains
|
||||
|
||||
then that Go program will disable the use of HTTP/2 by default in both
|
||||
the HTTP client and the HTTP server.
|
||||
Unrecognized settings in the `GODEBUG` environment variable are ignored.
|
||||
It is also possible to set the default `GODEBUG` for a given program
|
||||
(discussed below).
|
||||
|
||||
@@ -89,40 +88,14 @@ Because this method of setting GODEBUG defaults was introduced only in Go 1.21,
|
||||
programs listing versions of Go earlier than Go 1.20 are configured to match Go 1.20,
|
||||
not the older version.
|
||||
|
||||
To override these defaults, starting in Go 1.23, the work module's `go.mod`
|
||||
or the workspace's `go.work` can list one or more `godebug` lines:
|
||||
|
||||
godebug (
|
||||
default=go1.21
|
||||
panicnil=1
|
||||
asynctimerchan=0
|
||||
)
|
||||
|
||||
The special key `default` indicates a Go version to take unspecified
|
||||
settings from. This allows setting the GODEBUG defaults separately
|
||||
from the Go language version in the module.
|
||||
In this example, the program is asking for Go 1.21 semantics and
|
||||
then asking for the old pre-Go 1.21 `panic(nil)` behavior and the
|
||||
new Go 1.23 `asynctimerchan=0` behavior.
|
||||
|
||||
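A sketch of how such a `godebug` block sits in a work module's `go.mod` (the module path and `go` version here are made-up placeholders):

```
module example.com/app

go 1.24

godebug (
	default=go1.21
	panicnil=1
	asynctimerchan=0
)
```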
Only the work module's `go.mod` is consulted for `godebug` directives.
|
||||
Any directives in required dependency modules are ignored.
|
||||
It is an error to list a `godebug` with an unrecognized setting.
|
||||
(Toolchains older than Go 1.23 reject all `godebug` lines, since they do not
|
||||
understand `godebug` at all.) When a workspace is in use, `godebug`
|
||||
directives in `go.mod` files are ignored, and `go.work` will be consulted
|
||||
for `godebug` directives instead.
|
||||
|
||||
The defaults from the `go` and `godebug` lines apply to all main
|
||||
packages that are built. For more fine-grained control,
|
||||
starting in Go 1.21, a main package's source files
|
||||
To override these defaults, a main package's source files
|
||||
can include one or more `//go:debug` directives at the top of the file
|
||||
(preceding the `package` statement).
|
||||
The `godebug` lines in the previous example would be written:
|
||||
Continuing the `panicnil` example, if the module or workspace is updated
|
||||
to say `go` `1.21`, the program can opt back into the old `panic(nil)`
|
||||
behavior by including this directive:
|
||||
|
||||
//go:debug default=go1.21
|
||||
//go:debug panicnil=1
|
||||
//go:debug asynctimerchan=0
|
||||
|
||||
Starting in Go 1.21, the Go toolchain treats a `//go:debug` directive
|
||||
with an unrecognized GODEBUG setting as an invalid program.
|
||||
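As a rough illustration of the file-level form, a main package could carry the directive ahead of its package clause; everything below other than the `//go:debug` line itself is a hypothetical example:

```
//go:debug panicnil=1

// With panicnil=1 this binary opts back into the pre-Go 1.21 behavior:
// recover() reports the nil panic value rather than a *runtime.PanicNilError.
// Such directives are honored only in main packages and must appear before
// the package clause.
package main

import "fmt"

func main() {
	defer func() {
		fmt.Println("recovered:", recover()) // prints "recovered: <nil>"
	}()
	panic(nil)
}
```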
@@ -153,222 +126,6 @@ for example,
|
||||
see the [runtime documentation](/pkg/runtime#hdr-Environment_Variables)
|
||||
and the [go command documentation](/cmd/go#hdr-Build_and_test_caching).
|
||||
|
||||
### Go 1.26
|
||||
|
||||
Go 1.26 added a new `httpcookiemaxnum` setting that controls the maximum number
|
||||
of cookies that net/http will accept when parsing HTTP headers. If the number of
|
||||
cookies in a header exceeds the number set in `httpcookiemaxnum`, cookie parsing
|
||||
will fail early. The default value is `httpcookiemaxnum=3000`. Setting
|
||||
`httpcookiemaxnum=0` will allow the cookie parsing to accept an indefinite
|
||||
number of cookies. To avoid denial of service attacks, this setting and default
|
||||
was backported to Go 1.25.2 and Go 1.24.8.
|
||||
|
||||
Go 1.26 added a new `urlmaxqueryparams` setting that controls the maximum number
|
||||
of query parameters that net/url will accept when parsing a URL-encoded query string.
|
||||
If the number of parameters exceeds the number set in `urlmaxqueryparams`,
|
||||
parsing will fail early. The default value is `urlmaxqueryparams=10000`.
|
||||
Setting `urlmaxqueryparams=0` disables the limit. To avoid denial of service
|
||||
attacks, this setting and default was backported to Go 1.25.6 and Go 1.24.12.
|
||||
|
||||
Go 1.26 added a new `urlstrictcolons` setting that controls whether `net/url.Parse`
|
||||
allows malformed hostnames containing colons outside of a bracketed IPv6 address.
|
||||
The default `urlstrictcolons=1` rejects URLs such as `http://localhost:1:2` or `http://::1/`.
|
||||
Colons are permitted as part of a bracketed IPv6 address, such as `http://[::1]/`.
|
||||
|
||||
Go 1.26 enabled two additional post-quantum key exchange mechanisms:
|
||||
SecP256r1MLKEM768 and SecP384r1MLKEM1024. The default can be reverted using the
|
||||
[`tlssecpmlkem` setting](/pkg/crypto/tls/#Config.CurvePreferences).
|
||||
|
||||
Go 1.26 added a new `tracebacklabels` setting that controls the inclusion of
|
||||
goroutine labels set through the `runtime/pprof` package. Setting `tracebacklabels=1`
|
||||
includes these key/value pairs in the goroutine status header of runtime
|
||||
tracebacks and debug=2 runtime/pprof stack dumps. This format may change in the future.
|
||||
(see go.dev/issue/76349)
|
||||
|
||||
Go 1.26 added a new `cryptocustomrand` setting that controls whether most crypto/...
|
||||
APIs ignore the random `io.Reader` parameter. For Go 1.26, it defaults
|
||||
to `cryptocustomrand=0`, ignoring the random parameters. Using `cryptocustomrand=1`
|
||||
reverts to the pre-Go 1.26 behavior.
|
||||
|
||||
### Go 1.25
|
||||
|
||||
Go 1.25 added a new `decoratemappings` setting that controls whether the Go
|
||||
runtime annotates OS anonymous memory mappings with context about their
|
||||
purpose. These annotations appear in /proc/self/maps and /proc/self/smaps as
|
||||
"[anon: Go: ...]". This setting is only used on Linux. For Go 1.25, it defaults
|
||||
to `decoratemappings=1`, enabling annotations. Using `decoratemappings=0`
|
||||
reverts to the pre-Go 1.25 behavior. This setting is fixed at program startup
|
||||
time, and can't be modified by changing the `GODEBUG` environment variable
|
||||
after the program starts.
|
||||
|
||||
Go 1.25 added a new `embedfollowsymlinks` setting that controls whether the
|
||||
Go command will follow symlinks to regular files when embedding files.
|
||||
The default value `embedfollowsymlinks=0` does not allow following
|
||||
symlinks. `embedfollowsymlinks=1` will allow following symlinks.
|
||||
|
||||
Go 1.25 added a new `containermaxprocs` setting that controls whether the Go
|
||||
runtime will consider cgroup CPU limits when setting the default GOMAXPROCS.
|
||||
The default value `containermaxprocs=1` will use cgroup limits in addition to
|
||||
the total logical CPU count and CPU affinity. `containermaxprocs=0` will
|
||||
disable consideration of cgroup limits. This setting only affects Linux.
|
||||
|
||||
Go 1.25 added a new `updatemaxprocs` setting that controls whether the Go
|
||||
runtime will periodically update GOMAXPROCS for new CPU affinity or cgroup
|
||||
limits. The default value `updatemaxprocs=1` will enable periodic updates.
|
||||
`updatemaxprocs=0` will disable periodic updates.
|
||||
|
||||
Go 1.25 disabled SHA-1 signature algorithms in TLS 1.2 according to RFC 9155.
|
||||
The default can be reverted using the `tlssha1=1` setting.
|
||||
|
||||
Go 1.25 switched to SHA-256 to fill in missing SubjectKeyId in
|
||||
crypto/x509.CreateCertificate. The setting `x509sha256skid=0` reverts to SHA-1.
|
||||
|
||||
Go 1.25 corrected the semantics of contention reports for runtime-internal locks,
|
||||
and so removed the [`runtimecontentionstacks` setting](/pkg/runtime#hdr-Environment_Variables).
|
||||
|
||||
Go 1.25 (starting with Go 1.25 RC 2) disabled build information stamping when
|
||||
multiple VCS are detected due to concerns around VCS injection attacks. This
|
||||
behavior and setting was backported to Go 1.24.5 and Go 1.23.11. This behavior
|
||||
can be re-enabled with the setting `allowmultiplevcs=1`.
|
||||
|
||||
### Go 1.24
|
||||
|
||||
Go 1.24 added a new `fips140` setting that controls whether the Go
|
||||
Cryptographic Module operates in FIPS 140-3 mode.
|
||||
The possible values are:
|
||||
- "off": no special support for FIPS 140-3 mode. This is the default.
|
||||
- "on": the Go Cryptographic Module operates in FIPS 140-3 mode.
|
||||
- "only": like "on", but cryptographic algorithms not approved by
|
||||
FIPS 140-3 return an error or panic.
|
||||
For more information, see [FIPS 140-3 Compliance](/doc/security/fips140).
|
||||
This setting is fixed at program startup time, and can't be modified
|
||||
by changing the `GODEBUG` environment variable after the program starts.
|
||||
|
||||
Go 1.24 changed the global [`math/rand.Seed`](/pkg/math/rand/#Seed) to be a
|
||||
no-op. This behavior is controlled by the `randseednop` setting.
|
||||
For Go 1.24 it defaults to `randseednop=1`.
|
||||
Using `randseednop=0` reverts to the pre-Go 1.24 behavior.
|
||||
|
||||
Go 1.24 added new values for the `multipathtcp` setting.
|
||||
The possible values for `multipathtcp` are now:
|
||||
- "0": disable MPTCP on dialers and listeners by default
|
||||
- "1": enable MPTCP on dialers and listeners by default
|
||||
- "2": enable MPTCP on listeners only by default
|
||||
- "3": enable MPTCP on dialers only by default
|
||||
|
||||
For Go 1.24, it now defaults to multipathtcp="2", thus
|
||||
enabled by default on listeners. Using multipathtcp="0" reverts to the
|
||||
pre-Go 1.24 behavior.
|
||||
|
||||
Go 1.24 changed the behavior of `go test -json` to emit build errors as JSON
|
||||
instead of text.
|
||||
These new JSON events are distinguished by new `Action` values,
|
||||
but can still cause problems with CI systems that aren't robust to these events.
|
||||
This behavior can be controlled with the `gotestjsonbuildtext` setting.
|
||||
Using `gotestjsonbuildtext=1` restores the 1.23 behavior.
|
||||
This setting will be removed in a future release, Go 1.28 at the earliest.
|
||||
|
||||
Go 1.24 changed [`crypto/rsa`](/pkg/crypto/rsa) to require RSA keys to be at
|
||||
least 1024 bits. This behavior can be controlled with the `rsa1024min` setting.
|
||||
Using `rsa1024min=0` restores the Go 1.23 behavior.
|
||||
|
||||
Go 1.24 introduced a mechanism for enabling platform specific Data Independent
|
||||
Timing (DIT) modes in the [`crypto/subtle`](/pkg/crypto/subtle) package. This
|
||||
mode can be enabled for an entire program with the `dataindependenttiming` setting.
|
||||
For Go 1.24 it defaults to `dataindependenttiming=0`. There is no change in default
|
||||
behavior from Go 1.23 when `dataindependenttiming` is unset.
|
||||
Using `dataindependenttiming=1` enables the DIT mode for the entire Go program.
|
||||
When enabled, DIT will be enabled when calling into C from Go. When enabled,
|
||||
calling into Go code from C will enable DIT, and disable it before returning to
|
||||
C if it was not enabled when Go code was entered.
|
||||
This currently only affects arm64 programs. For all other platforms it is a no-op.
|
||||
|
||||
Go 1.24 removed the `x509sha1` setting. `crypto/x509` no longer supports verifying
|
||||
signatures on certificates that use SHA-1 based signature algorithms.
|
||||
|
||||
Go 1.24 changes the default value of the [`x509usepolicies`
|
||||
setting](/pkg/crypto/x509/#CreateCertificate) from `0` to `1`. When marshalling
|
||||
certificates, policies are now taken from the
|
||||
[`Certificate.Policies`](/pkg/crypto/x509/#Certificate.Policies) field rather
|
||||
than the
|
||||
[`Certificate.PolicyIdentifiers`](/pkg/crypto/x509/#Certificate.PolicyIdentifiers)
|
||||
field by default.
|
||||
|
||||
Go 1.24 enabled the post-quantum key exchange mechanism
|
||||
X25519MLKEM768 by default. The default can be reverted using the
|
||||
[`tlsmlkem` setting](/pkg/crypto/tls/#Config.CurvePreferences).
|
||||
This can be useful when dealing with buggy TLS servers that do not handle large records correctly,
|
||||
causing a timeout during the handshake (see [TLS post-quantum TL;DR fail](https://tldr.fail/)).
|
||||
Go 1.24 also removed X25519Kyber768Draft00 and the Go 1.23 `tlskyber` setting.
|
||||
|
||||
Go 1.24 made [`ParsePKCS1PrivateKey`](/pkg/crypto/x509/#ParsePKCS1PrivateKey)
|
||||
use and validate the CRT parameters in the encoded private key. This behavior
|
||||
can be controlled with the `x509rsacrt` setting. Using `x509rsacrt=0` restores
|
||||
the Go 1.23 behavior.
|
||||
|
||||
### Go 1.23
|
||||
|
||||
Go 1.23 changed the channels created by package time to be unbuffered
|
||||
(synchronous), which makes correct use of the [`Timer.Stop`](/pkg/time/#Timer.Stop)
|
||||
and [`Timer.Reset`](/pkg/time/#Timer.Reset) method results much easier.
|
||||
The [`asynctimerchan` setting](/pkg/time/#NewTimer) disables this change.
|
||||
There are no runtime metrics for this change.
|
||||
This setting will be removed in Go 1.27.
|
||||
|
||||
Go 1.23 changed the mode bits reported by [`os.Lstat`](/pkg/os#Lstat) and [`os.Stat`](/pkg/os#Stat)
|
||||
for reparse points, which can be controlled with the `winsymlink` setting.
|
||||
As of Go 1.23 (`winsymlink=1`), mount points no longer have [`os.ModeSymlink`](/pkg/os#ModeSymlink)
|
||||
set, and reparse points that are not symlinks, Unix sockets, or dedup files now
|
||||
always have [`os.ModeIrregular`](/pkg/os#ModeIrregular) set. As a result of these changes,
|
||||
[`filepath.EvalSymlinks`](/pkg/path/filepath#EvalSymlinks) no longer evaluates
|
||||
mount points, which was a source of many inconsistencies and bugs.
|
||||
At previous versions (`winsymlink=0`), mount points are treated as symlinks,
|
||||
and other reparse points with non-default [`os.ModeType`](/pkg/os#ModeType) bits
|
||||
(such as [`os.ModeDir`](/pkg/os#ModeDir)) do not have the `ModeIrregular` bit set.
|
||||
|
||||
Go 1.23 changed [`os.Readlink`](/pkg/os#Readlink) and [`filepath.EvalSymlinks`](/pkg/path/filepath#EvalSymlinks)
|
||||
to avoid trying to normalize volumes to drive letters, which was not always even possible.
|
||||
This behavior is controlled by the `winreadlinkvolume` setting.
|
||||
For Go 1.23, it defaults to `winreadlinkvolume=1`.
|
||||
Previous versions default to `winreadlinkvolume=0`.
|
||||
|
||||
Go 1.23 enabled the experimental post-quantum key exchange mechanism
|
||||
X25519Kyber768Draft00 by default. The default can be reverted using the
|
||||
[`tlskyber` setting](/pkg/crypto/tls/#Config.CurvePreferences).
|
||||
This can be useful when dealing with buggy TLS servers that do not handle large records correctly,
|
||||
causing a timeout during the handshake (see [TLS post-quantum TL;DR fail](https://tldr.fail/)).
|
||||
|
||||
Go 1.23 changed the behavior of
|
||||
[crypto/x509.ParseCertificate](/pkg/crypto/x509/#ParseCertificate) to reject
|
||||
serial numbers that are negative. This change can be reverted with
|
||||
the [`x509negativeserial` setting](/pkg/crypto/x509/#ParseCertificate).
|
||||
|
||||
Go 1.23 re-enabled support in html/template for ECMAScript 6 template literals by default.
|
||||
The [`jstmpllitinterp` setting](/pkg/html/template#hdr-Security_Model) no longer has
|
||||
any effect.
|
||||
|
||||
Go 1.23 changed the default TLS cipher suites used by clients and servers when
|
||||
not explicitly configured, removing 3DES cipher suites. The default can be reverted
|
||||
using the [`tls3des` setting](/pkg/crypto/tls/#Config.CipherSuites).
|
||||
This setting will be removed in Go 1.27.
|
||||
|
||||
Go 1.23 changed the behavior of [`tls.X509KeyPair`](/pkg/crypto/tls#X509KeyPair)
|
||||
and [`tls.LoadX509KeyPair`](/pkg/crypto/tls#LoadX509KeyPair) to populate the
|
||||
Leaf field of the returned [`tls.Certificate`](/pkg/crypto/tls#Certificate).
|
||||
This behavior is controlled by the `x509keypairleaf` setting. For Go 1.23, it
|
||||
defaults to `x509keypairleaf=1`. Previous versions default to
|
||||
`x509keypairleaf=0`.
|
||||
This setting will be removed in Go 1.27.
|
||||
|
||||
Go 1.23 changed
|
||||
[`net/http.ServeContent`](/pkg/net/http#ServeContent),
|
||||
[`net/http.ServeFile`](/pkg/net/http#ServeFile), and
|
||||
[`net/http.ServeFS`](/pkg/net/http#ServeFS) to
|
||||
remove Cache-Control, Content-Encoding, Etag, and Last-Modified headers
|
||||
when serving an error. This behavior is controlled by
|
||||
the [`httpservecontentkeepheaders` setting](/pkg/net/http#ServeContent).
|
||||
Using `httpservecontentkeepheaders=1` restores the pre-Go 1.23 behavior.
|
||||
|
||||
### Go 1.22
|
||||
|
||||
Go 1.22 adds a configurable limit to control the maximum acceptable RSA key size
|
||||
@@ -391,32 +148,29 @@ for the explicit representation of [type aliases](/ref/spec#Type_declarations).
|
||||
Whether the type checker produces `Alias` types or not is controlled by the
|
||||
[`gotypesalias` setting](/pkg/go/types#Alias).
|
||||
For Go 1.22 it defaults to `gotypesalias=0`.
|
||||
For Go 1.23, `gotypesalias=1` will become the default.
|
||||
This setting will be removed in Go 1.27.
|
||||
For Go 1.23, `gotypealias=1` will become the default.
|
||||
This setting will be removed in a future release, Go 1.24 at the earliest.
|
||||
|
||||
Go 1.22 changed the default minimum TLS version supported by both servers
|
||||
and clients to TLS 1.2. The default can be reverted to TLS 1.0 using the
|
||||
[`tls10server` setting](/pkg/crypto/tls/#Config).
|
||||
This setting will be removed in Go 1.27.
|
||||
|
||||
Go 1.22 changed the default TLS cipher suites used by clients and servers when
|
||||
not explicitly configured, removing the cipher suites which used RSA based key
|
||||
exchange. The default can be reverted using the [`tlsrsakex` setting](/pkg/crypto/tls/#Config).
|
||||
This setting will be removed in Go 1.27.
|
||||
exchange. The default can be revert using the [`tlsrsakex` setting](/pkg/crypto/tls/#Config).
|
||||
|
||||
Go 1.22 disabled
|
||||
[`ConnectionState.ExportKeyingMaterial`](/pkg/crypto/tls/#ConnectionState.ExportKeyingMaterial)
|
||||
when the connection supports neither TLS 1.3 nor Extended Master Secret
|
||||
(implemented in Go 1.21). It can be reenabled with the [`tlsunsafeekm`
|
||||
setting](/pkg/crypto/tls/#ConnectionState.ExportKeyingMaterial).
|
||||
This setting will be removed in Go 1.27.
|
||||
|
||||
Go 1.22 changed how the runtime interacts with transparent huge pages on Linux.
|
||||
In particular, a common default Linux kernel configuration can result in
|
||||
significant memory overheads, and Go 1.22 no longer works around this default.
|
||||
To work around this issue without adjusting kernel settings, transparent huge
|
||||
pages can be disabled for Go memory with the
|
||||
[`disablethp` setting](/pkg/runtime#hdr-Environment_Variables).
|
||||
[`disablethp` setting](/pkg/runtime#hdr-Environment_Variable).
|
||||
This behavior was backported to Go 1.21.1, but the setting is only available
|
||||
starting with Go 1.21.6.
|
||||
This setting may be removed in a future release, and users impacted by this issue
|
||||
@@ -428,7 +182,7 @@ Go 1.22 added contention on runtime-internal locks to the [`mutex`
|
||||
profile](/pkg/runtime/pprof#Profile). Contention on these locks is always
|
||||
reported at `runtime._LostContendedRuntimeLock`. Complete stack traces of
|
||||
runtime locks can be enabled with the [`runtimecontentionstacks`
|
||||
setting](/pkg/runtime#hdr-Environment_Variables). These stack traces have
|
||||
setting](/pkg/runtime#hdr-Environment_Variable). These stack traces have
|
||||
non-standard semantics, see setting documentation for details.
|
||||
|
||||
Go 1.22 added a new [`crypto/x509.Certificate`](/pkg/crypto/x509/#Certificate)
|
||||
@@ -437,7 +191,7 @@ certificate policy OIDs with components larger than 31 bits. By default this
|
||||
field is only used during parsing, when it is populated with policy OIDs, but
|
||||
not used during marshaling. It can be used to marshal these larger OIDs, instead
|
||||
of the existing PolicyIdentifiers field, by using the
|
||||
[`x509usepolicies` setting](/pkg/crypto/x509/#CreateCertificate).
|
||||
[`x509usepolicies` setting.](/pkg/crypto/x509/#CreateCertificate).
|
||||
|
||||
|
||||
### Go 1.21
|
||||
@@ -505,7 +259,7 @@ There is no plan to remove this setting.
|
||||
|
||||
Go 1.18 removed support for SHA1 in most X.509 certificates,
|
||||
controlled by the [`x509sha1` setting](/pkg/crypto/x509#InsecureAlgorithmError).
|
||||
This setting was removed in Go 1.24.
|
||||
This setting will be removed in a future release, Go 1.22 at the earliest.
|
||||
|
||||
### Go 1.10
|
||||
|
||||
|
||||
@@ -1,8 +0,0 @@
|
||||
<style>
|
||||
main ul li { margin: 0.5em 0; }
|
||||
</style>
|
||||
|
||||
## DRAFT RELEASE NOTES — Introduction to Go 1.N {#introduction}
|
||||
|
||||
**Go 1.N is not yet released. These are work-in-progress release notes.
|
||||
Go 1.N is expected to be released in {Month} {Year}.**
|
||||
@@ -1,3 +0,0 @@
|
||||
## Changes to the language {#language}
|
||||
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
## Tools {#tools}
|
||||
|
||||
### Go command {#go-command}
|
||||
|
||||
### Cgo {#cgo}
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
## Runtime {#runtime}
|
||||
@@ -1,7 +0,0 @@
|
||||
## Compiler {#compiler}
|
||||
|
||||
## Assembler {#assembler}
|
||||
|
||||
## Linker {#linker}
|
||||
|
||||
|
||||
@@ -1,2 +0,0 @@
|
||||
## Standard library {#library}
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
### Minor changes to the library {#minor_library_changes}
|
||||
@@ -1 +0,0 @@
|
||||
API changes and other small changes to the standard library go here.
|
||||
@@ -1,2 +0,0 @@
|
||||
## Ports {#ports}
|
||||
|
||||
@@ -1,8 +0,0 @@
|
||||
<style>
|
||||
main ul li { margin: 0.5em 0; }
|
||||
</style>
|
||||
|
||||
## DRAFT RELEASE NOTES — Introduction to Go 1.27 {#introduction}
|
||||
|
||||
**Go 1.27 is not yet released. These are work-in-progress release notes.
|
||||
Go 1.27 is expected to be released in August 2026.**
|
||||
@@ -1,3 +0,0 @@
|
||||
## Changes to the language {#language}
|
||||
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
## Tools {#tools}
|
||||
|
||||
### Go command {#go-command}
|
||||
|
||||
### Cgo {#cgo}
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
## Runtime {#runtime}
|
||||
@@ -1,7 +0,0 @@
|
||||
## Compiler {#compiler}
|
||||
|
||||
## Assembler {#assembler}
|
||||
|
||||
## Linker {#linker}
|
||||
|
||||
|
||||
@@ -1,2 +0,0 @@
|
||||
## Standard library {#library}
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
### Minor changes to the library {#minor_library_changes}
|
||||
@@ -1 +0,0 @@
|
||||
API changes and other small changes to the standard library go here.
|
||||
@@ -1 +0,0 @@
|
||||
The scanner now allows retrieving the end position of a token via the new [Scanner.End] method.
|
||||
@@ -1,4 +0,0 @@
|
||||
HTTP/2 server now accepts client priority signals, as defined in RFC 9218,
|
||||
allowing it to prioritize serving HTTP/2 streams with higher priority. If the
|
||||
old behavior is preferred, where streams are served in a round-robin manner
|
||||
regardless of priority, [Server.DisableClientPriority] can be set to `true`.
|
||||
@@ -1 +0,0 @@
|
||||
The new [Sleep] helper function combines [time.Sleep] and [testing/synctest.Wait].
|
||||
@@ -1,4 +0,0 @@
|
||||
The unicode package and associated support throughout the system has been upgraded from Unicode 15 to Unicode 17.
|
||||
See the [Unicode 16.0.0](https://www.unicode.org/versions/Unicode16.0.0/) and
|
||||
[Unicode 17.0.0](https://www.unicode.org/versions/Unicode17.0.0/)
|
||||
release notes for information about the changes.
|
||||
@@ -1,8 +0,0 @@
|
||||
## Ports {#ports}
|
||||
|
||||
### Darwin {#darwin}
|
||||
|
||||
<!-- go.dev/issue/75836 -->
|
||||
As [announced](go1.26#darwin) in the Go 1.26 release notes,
|
||||
Go 1.27 requires macOS 13 Ventura or later;
|
||||
support for previous versions has been discontinued.
|
||||
@@ -1,46 +0,0 @@
|
||||
# Copyright 2024 The Go Authors. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style
|
||||
# license that can be found in the LICENSE file.
|
||||
|
||||
# Rules for building and testing new FIPS snapshots.
|
||||
# For example:
|
||||
#
|
||||
# make v1.2.3.zip
|
||||
# make v1.2.3.test
|
||||
#
|
||||
# and then if changes are needed, check them into master
|
||||
# and run 'make v1.2.3.rm' and repeat.
|
||||
#
|
||||
# Note that once published a snapshot zip file should never
|
||||
# be modified. We record the sha256 hashes of the zip files
|
||||
# in fips140.sum, and the cmd/go/internal/fips140 test checks
|
||||
# that the zips match.
|
||||
#
|
||||
# When the zip file is finalized, run 'make updatesum' to update
|
||||
# fips140.sum.
|
||||
|
||||
default:
|
||||
@echo nothing to make
|
||||
|
||||
# make v1.2.3.zip builds a v1.2.3.zip file
|
||||
# from the current origin/master.
|
||||
# copy and edit the 'go run' command by hand to use a different branch.
|
||||
v%.zip:
|
||||
git fetch origin master
|
||||
go run ../../src/cmd/go/internal/fips140/mkzip.go v$*
|
||||
|
||||
# normally mkzip refuses to overwrite an existing zip file.
|
||||
# make v1.2.3.rm removes the zip file and unpacked
|
||||
# copy from the module cache.
|
||||
v%.rm:
|
||||
rm -f v$*.zip
|
||||
chmod -R u+w $$(go env GOMODCACHE)/golang.org/fips140@v$* 2>/dev/null || true
|
||||
rm -rf $$(go env GOMODCACHE)/golang.org/fips140@v$*
|
||||
|
||||
# make v1.2.3.test runs the crypto tests using that snapshot.
|
||||
v%.test:
|
||||
GOFIPS140=v$* go test -short crypto...
|
||||
|
||||
# make updatesum updates the fips140.sum file.
|
||||
updatesum:
|
||||
go test cmd/go/internal/fips140 -update
|
||||
@@ -1,9 +0,0 @@
|
||||
This directory holds snapshots of the crypto/internal/fips140 tree
|
||||
that are being validated and certified for FIPS-140 use.
|
||||
The file x.txt (for example, inprocess.txt, certified.txt)
|
||||
defines the meaning of the FIPS version alias x, listing
|
||||
the exact version to use.
|
||||
|
||||
The zip files are created by cmd/go/internal/fips140/mkzip.go.
|
||||
The fips140.sum file lists checksums for the zip files.
|
||||
See the Makefile for recipes.
|
||||
@@ -1,13 +0,0 @@
|
||||
# SHA256 checksums of snapshot zip files in this directory.
|
||||
# These checksums are included in the FIPS security policy
|
||||
# (validation instructions sent to the lab) and MUST NOT CHANGE.
|
||||
# That is, the zip files themselves must not change.
|
||||
#
|
||||
# It is okay to add new zip files to the list, and it is okay to
|
||||
# remove zip files from the list when they are removed from
|
||||
# this directory. To update this file:
|
||||
#
|
||||
# go test cmd/go/internal/fips140 -update
|
||||
#
|
||||
v1.0.0-c2097c7c.zip daf3614e0406f67ae6323c902db3f953a1effb199142362a039e7526dfb9368b
|
||||
v1.26.0.zip 9b28f847fdf1db4a36cb2b2f8ec09443c039383f085630a03ecfaddf6db7ea23
|
||||
@@ -1 +0,0 @@
|
||||
v1.0.0-c2097c7c
|
||||
Binary file not shown.
@@ -1 +0,0 @@
|
||||
v1.0.0-c2097c7c
|
||||
Binary file not shown.
@@ -1,64 +0,0 @@
|
||||
# Copyright 2025 The Go Authors. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style
|
||||
# license that can be found in the LICENSE file.
|
||||
|
||||
# Mercurial extension to add a 'goreposum' command that
|
||||
# computes a hash of a remote repo's tag state.
|
||||
# Tag definitions can come from the .hgtags file stored in
|
||||
# any head of any branch, and the server protocol does not
|
||||
# expose the tags directly. However, the protocol does expose
|
||||
# the hashes of all the branch heads, so we can use a hash of
|
||||
# all those branch names and heads as a conservative snapshot
|
||||
# of the entire remote repo state, and use that as the tag sum.
|
||||
# Any change on the server then invalidates the tag sum,
|
||||
# even if it didn't have anything to do with tags, but at least
|
||||
# we will avoid re-cloning a server when there have been no
|
||||
# changes at all.
|
||||
#
|
||||
# Separately, this extension also adds a 'golookup' command that
|
||||
# returns the hash of a specific reference, like 'default' or a tag.
|
||||
# And golookup of a hash confirms that it still exists on the server.
|
||||
# We can use that to revalidate that specific versions still exist and
|
||||
# have the same meaning they did the last time we checked.
|
||||
#
|
||||
# Usage:
|
||||
#
|
||||
# hg --config "extensions.goreposum=$GOROOT/lib/hg/goreposum.py" goreposum REPOURL
|
||||
|
||||
import base64, hashlib, sys
|
||||
from mercurial import registrar, ui, hg, node
|
||||
from mercurial.i18n import _
|
||||
cmdtable = {}
|
||||
command = registrar.command(cmdtable)
|
||||
@command(b'goreposum', [], _('url'), norepo=True)
|
||||
def goreposum(ui, url):
|
||||
"""
|
||||
goreposum computes a checksum of all the named state in the remote repo.
|
||||
It hashes together all the branch names and hashes
|
||||
and then all the bookmark names and hashes.
|
||||
Tags are stored in .hgtags files in any of the branches,
|
||||
so the branch metadata includes the tags as well.
|
||||
"""
|
||||
h = hashlib.sha256()
|
||||
peer = hg.peer(ui, {}, url)
|
||||
for name, revs in peer.branchmap().items():
|
||||
h.update(name)
|
||||
for r in revs:
|
||||
h.update(b' ')
|
||||
h.update(r)
|
||||
h.update(b'\n')
|
||||
if (b'bookmarks' in peer.listkeys(b'namespaces')):
|
||||
for name, rev in peer.listkeys(b'bookmarks').items():
|
||||
h.update(name)
|
||||
h.update(b'=')
|
||||
h.update(rev)
|
||||
h.update(b'\n')
|
||||
print('r1:'+base64.standard_b64encode(h.digest()).decode('utf-8'))
|
||||
|
||||
@command(b'golookup', [], _('url rev'), norepo=True)
|
||||
def golookup(ui, url, rev):
|
||||
"""
|
||||
golookup looks up a single identifier in the repo,
|
||||
printing its hash.
|
||||
"""
|
||||
print(node.hex(hg.peer(ui, {}, url).lookup(rev)).decode('utf-8'))
|
||||
@@ -31,7 +31,7 @@ import (
|
||||
)
|
||||
|
||||
func usage() {
|
||||
fmt.Fprintf(os.Stderr, "usage: go run mkzip.go zoneinfo.zip\n")
|
||||
fmt.Fprintf(os.Stderr, "usage: go run mkzip.go ../../zoneinfo.zip\n")
|
||||
os.Exit(2)
|
||||
}
|
||||
|
||||
|
||||
@@ -24,8 +24,8 @@
|
||||
# in the CL match the update.bash in the CL.
|
||||
|
||||
# Versions to use.
|
||||
CODE=2025c
|
||||
DATA=2025c
|
||||
CODE=2023d
|
||||
DATA=2023d
|
||||
|
||||
set -e
|
||||
|
||||
@@ -40,12 +40,7 @@ curl -sS -L -O https://www.iana.org/time-zones/repository/releases/tzdata$DATA.t
|
||||
tar xzf tzcode$CODE.tar.gz
|
||||
tar xzf tzdata$DATA.tar.gz
|
||||
|
||||
# The PACKRATLIST and PACKRATDATA options are copied from Ubuntu:
|
||||
# https://git.launchpad.net/ubuntu/+source/tzdata/tree/debian/rules?h=debian/sid
|
||||
#
|
||||
# You can see the description of these make variables in the tzdata Makefile:
|
||||
# https://github.com/eggert/tz/blob/main/Makefile
|
||||
if ! make CFLAGS=-DSTD_INSPIRED AWK=awk TZDIR=zoneinfo PACKRATDATA=backzone PACKRATLIST=zone.tab posix_only >make.out 2>&1; then
|
||||
if ! make CFLAGS=-DSTD_INSPIRED AWK=awk TZDIR=zoneinfo posix_only >make.out 2>&1; then
|
||||
cat make.out
|
||||
exit 2
|
||||
fi
|
||||
|
||||
Binary file not shown.
@@ -1,23 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
# Copyright 2023 The Go Authors. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style
|
||||
# license that can be found in the LICENSE file.
|
||||
|
||||
case "$GOWASIRUNTIME" in
|
||||
"wasmedge")
|
||||
exec wasmedge --dir=/ --env PWD="$PWD" --env PATH="$PATH" ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}"
|
||||
;;
|
||||
"wasmer")
|
||||
exec wasmer run --dir=/ --env PWD="$PWD" --env PATH="$PATH" ${GOWASIRUNTIMEARGS:-} "$1" -- "${@:2}"
|
||||
;;
|
||||
"wazero")
|
||||
exec wazero run -mount /:/ -env-inherit -cachedir "${TMPDIR:-/tmp}"/wazero ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}"
|
||||
;;
|
||||
"wasmtime" | "")
|
||||
exec wasmtime run --dir=/ --env PWD="$PWD" --env PATH="$PATH" -W max-wasm-stack=8388608 ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}"
|
||||
;;
|
||||
*)
|
||||
echo "Unknown Go WASI runtime specified: $GOWASIRUNTIME"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
@@ -1,22 +1,19 @@
|
||||
#!/bin/sh
|
||||
|
||||
# This script configures clang to target the iOS simulator. If you'd like to
|
||||
# build for real iOS devices, change SDK to "iphoneos" and PLATFORM to "ios".
|
||||
# This uses the latest available iOS SDK, which is recommended. To select a
|
||||
# specific SDK, run 'xcodebuild -showsdks' to see the available SDKs and replace
|
||||
# iphonesimulator with one of them.
|
||||
|
||||
SDK=iphonesimulator
|
||||
PLATFORM=ios-simulator
|
||||
|
||||
# This uses the latest available iOS SDK, which is recommended.
|
||||
# To select a specific SDK, run 'xcodebuild -showsdks'
|
||||
# to see the available SDKs and replace iphoneos with one of them.
|
||||
if [ "$GOARCH" == "arm64" ]; then
|
||||
SDK=iphoneos
|
||||
PLATFORM=ios
|
||||
CLANGARCH="arm64"
|
||||
else
|
||||
SDK=iphonesimulator
|
||||
PLATFORM=ios-simulator
|
||||
CLANGARCH="x86_64"
|
||||
fi
|
||||
|
||||
SDK_PATH=`xcrun --sdk $SDK --show-sdk-path`
|
||||
|
||||
export IPHONEOS_DEPLOYMENT_TARGET=5.1
|
||||
# cmd/cgo doesn't support llvm-gcc-4.2, so we have to use clang.
|
||||
CLANG=`xcrun --sdk $SDK --find clang`
|
||||
|
||||
|
||||
@@ -1,21 +1,44 @@
|
||||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This program can be used as go_ios_$GOARCH_exec by the Go tool. It executes
|
||||
// binaries on the iOS Simulator using the XCode toolchain.
|
||||
// This program can be used as go_ios_$GOARCH_exec by the Go tool.
|
||||
// It executes binaries on an iOS device using the XCode toolchain
|
||||
// and the ios-deploy program: https://github.com/phonegap/ios-deploy
|
||||
//
|
||||
// This script supports an extra flag, -lldb, that pauses execution
|
||||
// just before the main program begins and allows the user to control
|
||||
// the remote lldb session. This flag is appended to the end of the
|
||||
// script's arguments and is not passed through to the underlying
|
||||
// binary.
|
||||
//
|
||||
// This script requires that three environment variables be set:
|
||||
//
|
||||
// GOIOS_DEV_ID: The codesigning developer id or certificate identifier
|
||||
// GOIOS_APP_ID: The provisioning app id prefix. Must support wildcard app ids.
|
||||
// GOIOS_TEAM_ID: The team id that owns the app id prefix.
|
||||
//
|
||||
// $GOROOT/misc/ios contains a script, detect.go, that attempts to autodetect these.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
const debug = false
|
||||
@@ -87,8 +110,18 @@ func runMain() (int, error) {
|
||||
return 1, err
|
||||
}
|
||||
|
||||
err = runOnSimulator(appdir)
|
||||
if goarch := os.Getenv("GOARCH"); goarch == "arm64" {
|
||||
err = runOnDevice(appdir)
|
||||
} else {
|
||||
err = runOnSimulator(appdir)
|
||||
}
|
||||
if err != nil {
|
||||
// If the lldb driver completed with an exit code, use that.
|
||||
if err, ok := err.(*exec.ExitError); ok {
|
||||
if ws, ok := err.Sys().(interface{ ExitStatus() int }); ok {
|
||||
return ws.ExitStatus(), nil
|
||||
}
|
||||
}
|
||||
return 1, err
|
||||
}
|
||||
return 0, nil
|
||||
@@ -102,6 +135,61 @@ func runOnSimulator(appdir string) error {
|
||||
return runSimulator(appdir, bundleID, os.Args[2:])
|
||||
}
|
||||
|
||||
func runOnDevice(appdir string) error {
|
||||
// e.g. B393DDEB490947F5A463FD074299B6C0AXXXXXXX
|
||||
devID = getenv("GOIOS_DEV_ID")
|
||||
|
||||
// e.g. Z8B3JBXXXX.org.golang.sample, Z8B3JBXXXX prefix is available at
|
||||
// https://developer.apple.com/membercenter/index.action#accountSummary as Team ID.
|
||||
appID = getenv("GOIOS_APP_ID")
|
||||
|
||||
// e.g. Z8B3JBXXXX, available at
|
||||
// https://developer.apple.com/membercenter/index.action#accountSummary as Team ID.
|
||||
teamID = getenv("GOIOS_TEAM_ID")
|
||||
|
||||
// Device IDs as listed with ios-deploy -c.
|
||||
deviceID = os.Getenv("GOIOS_DEVICE_ID")
|
||||
|
||||
if _, id, ok := strings.Cut(appID, "."); ok {
|
||||
bundleID = id
|
||||
}
|
||||
|
||||
if err := signApp(appdir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := uninstallDevice(bundleID); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := installDevice(appdir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := mountDevImage(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Kill any hanging debug bridges that might take up port 3222.
|
||||
exec.Command("killall", "idevicedebugserverproxy").Run()
|
||||
|
||||
closer, err := startDebugBridge()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
return runDevice(appdir, bundleID, os.Args[2:])
|
||||
}
|
||||
|
||||
func getenv(envvar string) string {
|
||||
s := os.Getenv(envvar)
|
||||
if s == "" {
|
||||
log.Fatalf("%s not set\nrun $GOROOT/misc/ios/detect.go to attempt to autodetect", envvar)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func assembleApp(appdir, bin string) error {
|
||||
if err := os.MkdirAll(appdir, 0755); err != nil {
|
||||
return err
|
||||
@@ -129,6 +217,236 @@ func assembleApp(appdir, bin string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func signApp(appdir string) error {
|
||||
entitlementsPath := filepath.Join(tmpdir, "Entitlements.plist")
|
||||
cmd := exec.Command(
|
||||
"codesign",
|
||||
"-f",
|
||||
"-s", devID,
|
||||
"--entitlements", entitlementsPath,
|
||||
appdir,
|
||||
)
|
||||
if debug {
|
||||
log.Println(strings.Join(cmd.Args, " "))
|
||||
}
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("codesign: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// mountDevImage ensures a developer image is mounted on the device.
|
||||
// The image contains the device lldb server for idevicedebugserverproxy
|
||||
// to connect to.
|
||||
func mountDevImage() error {
|
||||
// Check for existing mount.
|
||||
cmd := idevCmd(exec.Command("ideviceimagemounter", "-l", "-x"))
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
os.Stderr.Write(out)
|
||||
return fmt.Errorf("ideviceimagemounter: %v", err)
|
||||
}
|
||||
var info struct {
|
||||
Dict struct {
|
||||
Data []byte `xml:",innerxml"`
|
||||
} `xml:"dict"`
|
||||
}
|
||||
if err := xml.Unmarshal(out, &info); err != nil {
|
||||
return fmt.Errorf("mountDevImage: failed to decode mount information: %v", err)
|
||||
}
|
||||
dict, err := parsePlistDict(info.Dict.Data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("mountDevImage: failed to parse mount information: %v", err)
|
||||
}
|
||||
if dict["ImagePresent"] == "true" && dict["Status"] == "Complete" {
|
||||
return nil
|
||||
}
|
||||
// Some devices only give us an ImageSignature key.
|
||||
if _, exists := dict["ImageSignature"]; exists {
|
||||
return nil
|
||||
}
|
||||
// No image is mounted. Find a suitable image.
|
||||
imgPath, err := findDevImage()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sigPath := imgPath + ".signature"
|
||||
cmd = idevCmd(exec.Command("ideviceimagemounter", imgPath, sigPath))
|
||||
if out, err := cmd.CombinedOutput(); err != nil {
|
||||
os.Stderr.Write(out)
|
||||
return fmt.Errorf("ideviceimagemounter: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// findDevImage uses the device iOS version and build to locate a suitable
|
||||
// developer image.
|
||||
func findDevImage() (string, error) {
|
||||
cmd := idevCmd(exec.Command("ideviceinfo"))
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("ideviceinfo: %v", err)
|
||||
}
|
||||
var iosVer, buildVer string
|
||||
lines := bytes.Split(out, []byte("\n"))
|
||||
for _, line := range lines {
|
||||
key, val, ok := strings.Cut(string(line), ": ")
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
switch key {
|
||||
case "ProductVersion":
|
||||
iosVer = val
|
||||
case "BuildVersion":
|
||||
buildVer = val
|
||||
}
|
||||
}
|
||||
if iosVer == "" || buildVer == "" {
|
||||
return "", errors.New("failed to parse ideviceinfo output")
|
||||
}
|
||||
verSplit := strings.Split(iosVer, ".")
|
||||
if len(verSplit) > 2 {
|
||||
// Developer images are specific to major.minor ios version.
|
||||
// Cut off the patch version.
|
||||
iosVer = strings.Join(verSplit[:2], ".")
|
||||
}
|
||||
sdkBase := "/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/DeviceSupport"
|
||||
patterns := []string{fmt.Sprintf("%s (%s)", iosVer, buildVer), fmt.Sprintf("%s (*)", iosVer), fmt.Sprintf("%s*", iosVer)}
|
||||
for _, pattern := range patterns {
|
||||
matches, err := filepath.Glob(filepath.Join(sdkBase, pattern, "DeveloperDiskImage.dmg"))
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("findDevImage: %v", err)
|
||||
}
|
||||
if len(matches) > 0 {
|
||||
return matches[0], nil
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("failed to find matching developer image for iOS version %s build %s", iosVer, buildVer)
|
||||
}
|
||||
|
||||
// startDebugBridge ensures that the idevicedebugserverproxy runs on
|
||||
// port 3222.
|
||||
func startDebugBridge() (func(), error) {
|
||||
errChan := make(chan error, 1)
|
||||
cmd := idevCmd(exec.Command("idevicedebugserverproxy", "3222"))
|
||||
var stderr bytes.Buffer
|
||||
cmd.Stderr = &stderr
|
||||
if err := cmd.Start(); err != nil {
|
||||
return nil, fmt.Errorf("idevicedebugserverproxy: %v", err)
|
||||
}
|
||||
go func() {
|
||||
if err := cmd.Wait(); err != nil {
|
||||
if _, ok := err.(*exec.ExitError); ok {
|
||||
errChan <- fmt.Errorf("idevicedebugserverproxy: %s", stderr.Bytes())
|
||||
} else {
|
||||
errChan <- fmt.Errorf("idevicedebugserverproxy: %v", err)
|
||||
}
|
||||
}
|
||||
errChan <- nil
|
||||
}()
|
||||
closer := func() {
|
||||
cmd.Process.Kill()
|
||||
<-errChan
|
||||
}
|
||||
// Dial localhost:3222 to ensure the proxy is ready.
|
||||
delay := time.Second / 4
|
||||
for attempt := 0; attempt < 5; attempt++ {
|
||||
conn, err := net.DialTimeout("tcp", "localhost:3222", 5*time.Second)
|
||||
if err == nil {
|
||||
conn.Close()
|
||||
return closer, nil
|
||||
}
|
||||
select {
|
||||
case <-time.After(delay):
|
||||
delay *= 2
|
||||
case err := <-errChan:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
closer()
|
||||
return nil, errors.New("failed to set up idevicedebugserverproxy")
|
||||
}
|
||||
|
||||
// findDeviceAppPath returns the device path to the app with the
|
||||
// given bundle ID. It parses the output of ideviceinstaller -l -o xml,
|
||||
// looking for the bundle ID and the corresponding Path value.
|
||||
func findDeviceAppPath(bundleID string) (string, error) {
|
||||
cmd := idevCmd(exec.Command("ideviceinstaller", "-l", "-o", "xml"))
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
os.Stderr.Write(out)
|
||||
return "", fmt.Errorf("ideviceinstaller: -l -o xml %v", err)
|
||||
}
|
||||
var list struct {
|
||||
Apps []struct {
|
||||
Data []byte `xml:",innerxml"`
|
||||
} `xml:"array>dict"`
|
||||
}
|
||||
if err := xml.Unmarshal(out, &list); err != nil {
|
||||
return "", fmt.Errorf("failed to parse ideviceinstaller output: %v", err)
|
||||
}
|
||||
for _, app := range list.Apps {
|
||||
values, err := parsePlistDict(app.Data)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("findDeviceAppPath: failed to parse app dict: %v", err)
|
||||
}
|
||||
if values["CFBundleIdentifier"] == bundleID {
|
||||
if path, ok := values["Path"]; ok {
|
||||
return path, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("failed to find device path for bundle: %s", bundleID)
|
||||
}
|
||||
|
||||
// Parse an xml encoded plist. Plist values are mapped to string.
|
||||
func parsePlistDict(dict []byte) (map[string]string, error) {
|
||||
d := xml.NewDecoder(bytes.NewReader(dict))
|
||||
values := make(map[string]string)
|
||||
var key string
|
||||
var hasKey bool
|
||||
for {
|
||||
tok, err := d.Token()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if tok, ok := tok.(xml.StartElement); ok {
|
||||
if tok.Name.Local == "key" {
|
||||
if err := d.DecodeElement(&key, &tok); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hasKey = true
|
||||
} else if hasKey {
|
||||
var val string
|
||||
var err error
|
||||
switch n := tok.Name.Local; n {
|
||||
case "true", "false":
|
||||
// Bools are represented as <true/> and <false/>.
|
||||
val = n
|
||||
err = d.Skip()
|
||||
default:
|
||||
err = d.DecodeElement(&val, &tok)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
values[key] = val
|
||||
hasKey = false
|
||||
} else {
|
||||
if err := d.Skip(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
func installSimulator(appdir string) error {
	cmd := exec.Command(
		"xcrun", "simctl", "install",
@@ -142,20 +460,138 @@ func installSimulator(appdir string) error {
	return nil
}

func runSimulator(appdir, bundleID string, args []string) error {
	xcrunArgs := []string{"simctl", "spawn",
		"booted",
		appdir + "/gotest",
func uninstallDevice(bundleID string) error {
	cmd := idevCmd(exec.Command(
		"ideviceinstaller",
		"-U", bundleID,
	))
	if out, err := cmd.CombinedOutput(); err != nil {
		os.Stderr.Write(out)
		return fmt.Errorf("ideviceinstaller -U %q: %s", bundleID, err)
	}
	xcrunArgs = append(xcrunArgs, args...)
	cmd := exec.Command("xcrun", xcrunArgs...)
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	err := cmd.Run()
	return nil
}

func installDevice(appdir string) error {
	attempt := 0
	for {
		cmd := idevCmd(exec.Command(
			"ideviceinstaller",
			"-i", appdir,
		))
		if out, err := cmd.CombinedOutput(); err != nil {
			// Sometimes, installing the app fails for some reason.
			// Give the device a few seconds and try again.
			if attempt < 5 {
				time.Sleep(5 * time.Second)
				attempt++
				continue
			}
			os.Stderr.Write(out)
			return fmt.Errorf("ideviceinstaller -i %q: %v (%d attempts)", appdir, err, attempt)
		}
		return nil
	}
}

func idevCmd(cmd *exec.Cmd) *exec.Cmd {
	if deviceID != "" {
		// Inject -u device_id after the executable, but before the arguments.
		args := []string{cmd.Args[0], "-u", deviceID}
		cmd.Args = append(args, cmd.Args[1:]...)
	}
	return cmd
}
func runSimulator(appdir, bundleID string, args []string) error {
	cmd := exec.Command(
		"xcrun", "simctl", "launch",
		"--wait-for-debugger",
		"booted",
		bundleID,
	)
	out, err := cmd.CombinedOutput()
	if err != nil {
		os.Stderr.Write(out)
		return fmt.Errorf("xcrun simctl launch booted %q: %v", bundleID, err)
	}
	var processID int
	var ignore string
	if _, err := fmt.Sscanf(string(out), "%s %d", &ignore, &processID); err != nil {
		return fmt.Errorf("runSimulator: couldn't find processID from `simctl launch`: %v (%q)", err, out)
	}
	_, err = runLLDB("ios-simulator", appdir, strconv.Itoa(processID), args)
	return err
}

	return nil
func runDevice(appdir, bundleID string, args []string) error {
	attempt := 0
	for {
		// The device app path reported by the device might be stale, so retry
		// the lookup of the device path along with the lldb launching below.
		deviceapp, err := findDeviceAppPath(bundleID)
		if err != nil {
			// The device app path might not yet exist for a newly installed app.
			if attempt == 5 {
				return err
			}
			attempt++
			time.Sleep(5 * time.Second)
			continue
		}
		out, err := runLLDB("remote-ios", appdir, deviceapp, args)
		// If the program was not started it can be retried without papering over
		// real test failures.
		started := bytes.HasPrefix(out, []byte("lldb: running program"))
		if started || err == nil || attempt == 5 {
			return err
		}
		// Sometimes, the app was not yet ready to launch or the device path was
		// stale. Retry.
		attempt++
		time.Sleep(5 * time.Second)
	}
}

func runLLDB(target, appdir, deviceapp string, args []string) ([]byte, error) {
	var env []string
	for _, e := range os.Environ() {
		// Don't override TMPDIR, HOME, GOCACHE on the device.
		if strings.HasPrefix(e, "TMPDIR=") || strings.HasPrefix(e, "HOME=") || strings.HasPrefix(e, "GOCACHE=") {
			continue
		}
		env = append(env, e)
	}
	lldb := exec.Command(
		"python",
		"-", // Read script from stdin.
		target,
		appdir,
		deviceapp,
	)
	lldb.Args = append(lldb.Args, args...)
	lldb.Env = env
	lldb.Stdin = strings.NewReader(lldbDriver)
	lldb.Stdout = os.Stdout
	var out bytes.Buffer
	lldb.Stderr = io.MultiWriter(&out, os.Stderr)
	err := lldb.Start()
	if err == nil {
		// Forward SIGQUIT to the lldb driver which in turn will forward
		// to the running program.
		sigs := make(chan os.Signal, 1)
		signal.Notify(sigs, syscall.SIGQUIT)
		proc := lldb.Process
		go func() {
			for sig := range sigs {
				proc.Signal(sig)
			}
		}()
		err = lldb.Wait()
		signal.Stop(sigs)
		close(sigs)
	}
	return out.Bytes(), err
}
func copyLocalDir(dst, src string) error {
@@ -212,7 +648,7 @@ func copyLocalData(dstbase string) (pkgpath string, err error) {
	// Copy all immediate files and testdata directories between
	// the package being tested and the source root.
	pkgpath = ""
	for element := range strings.SplitSeq(finalPkgpath, string(filepath.Separator)) {
	for _, element := range strings.Split(finalPkgpath, string(filepath.Separator)) {
		if debug {
			log.Printf("copying %s", pkgpath)
		}
@@ -364,3 +800,112 @@ const resourceRules = `<?xml version="1.0" encoding="UTF-8"?>
</dict>
</plist>
`
const lldbDriver = `
import sys
import os
import signal

platform, exe, device_exe_or_pid, args = sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4:]

env = []
for k, v in os.environ.items():
	env.append(k + "=" + v)

sys.path.append('/Applications/Xcode.app/Contents/SharedFrameworks/LLDB.framework/Resources/Python')

import lldb

debugger = lldb.SBDebugger.Create()
debugger.SetAsync(True)
debugger.SkipLLDBInitFiles(True)

err = lldb.SBError()
target = debugger.CreateTarget(exe, None, platform, True, err)
if not target.IsValid() or not err.Success():
	sys.stderr.write("lldb: failed to setup up target: %s\n" % (err))
	sys.exit(1)

listener = debugger.GetListener()

if platform == 'remote-ios':
	target.modules[0].SetPlatformFileSpec(lldb.SBFileSpec(device_exe_or_pid))
	process = target.ConnectRemote(listener, 'connect://localhost:3222', None, err)
else:
	process = target.AttachToProcessWithID(listener, int(device_exe_or_pid), err)

if not err.Success():
	sys.stderr.write("lldb: failed to connect to remote target %s: %s\n" % (device_exe_or_pid, err))
	sys.exit(1)

# Don't stop on signals.
sigs = process.GetUnixSignals()
for i in range(0, sigs.GetNumSignals()):
	sig = sigs.GetSignalAtIndex(i)
	sigs.SetShouldStop(sig, False)
	sigs.SetShouldNotify(sig, False)

event = lldb.SBEvent()
running = False
prev_handler = None

def signal_handler(signal, frame):
	process.Signal(signal)

def run_program():
	# Forward SIGQUIT to the program.
	prev_handler = signal.signal(signal.SIGQUIT, signal_handler)
	# Tell the Go driver that the program is running and should not be retried.
	sys.stderr.write("lldb: running program\n")
	running = True
	# Process is stopped at attach/launch. Let it run.
	process.Continue()

if platform != 'remote-ios':
	# For the local emulator the program is ready to run.
	# For remote device runs, we need to wait for eStateConnected,
	# below.
	run_program()

while True:
	if not listener.WaitForEvent(1, event):
		continue
	if not lldb.SBProcess.EventIsProcessEvent(event):
		continue
	if running:
		# Pass through stdout and stderr.
		while True:
			out = process.GetSTDOUT(8192)
			if not out:
				break
			sys.stdout.write(out)
		while True:
			out = process.GetSTDERR(8192)
			if not out:
				break
			sys.stderr.write(out)
	state = process.GetStateFromEvent(event)
	if state in [lldb.eStateCrashed, lldb.eStateDetached, lldb.eStateUnloaded, lldb.eStateExited]:
		if running:
			signal.signal(signal.SIGQUIT, prev_handler)
		break
	elif state == lldb.eStateConnected:
		if platform == 'remote-ios':
			process.RemoteLaunch(args, env, None, None, None, None, 0, False, err)
			if not err.Success():
				sys.stderr.write("lldb: failed to launch remote process: %s\n" % (err))
				process.Kill()
				debugger.Terminate()
				sys.exit(1)
		run_program()

exitStatus = process.GetExitStatus()
exitDesc = process.GetExitDescription()
process.Kill()
debugger.Terminate()
if exitStatus == 0 and exitDesc is not None:
	# Ensure tests fail when killed by a signal.
	exitStatus = 123

sys.exit(exitStatus)
`
191 misc/linkcheck/linkcheck.go Normal file
@@ -0,0 +1,191 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// The linkcheck command finds missing links in the godoc website.
|
||||
// It crawls a URL recursively and notes URLs and URL fragments
|
||||
// that it's seen and prints a report of missing links at the end.
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
root = flag.String("root", "http://localhost:6060", "Root to crawl")
|
||||
verbose = flag.Bool("verbose", false, "verbose")
|
||||
)
|
||||
|
||||
var wg sync.WaitGroup // outstanding fetches
|
||||
var urlq = make(chan string) // URLs to crawl
|
||||
|
||||
// urlFrag is a URL and its optional #fragment (without the #)
|
||||
type urlFrag struct {
|
||||
url, frag string
|
||||
}
|
||||
|
||||
var (
|
||||
mu sync.Mutex
|
||||
crawled = make(map[string]bool) // URL without fragment -> true
|
||||
neededFrags = make(map[urlFrag][]string) // URL#frag -> who needs it
|
||||
)
|
||||
|
||||
var aRx = regexp.MustCompile(`<a href=['"]?(/[^\s'">]+)`)
|
||||
|
||||
// Owned by crawlLoop goroutine:
|
||||
var (
|
||||
linkSources = make(map[string][]string) // url no fragment -> sources
|
||||
fragExists = make(map[urlFrag]bool)
|
||||
problems []string
|
||||
)
|
||||
|
||||
func localLinks(body string) (links []string) {
|
||||
seen := map[string]bool{}
|
||||
mv := aRx.FindAllStringSubmatch(body, -1)
|
||||
for _, m := range mv {
|
||||
ref := m[1]
|
||||
if strings.HasPrefix(ref, "/src/") {
|
||||
continue
|
||||
}
|
||||
if !seen[ref] {
|
||||
seen[ref] = true
|
||||
links = append(links, m[1])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var idRx = regexp.MustCompile(`\bid=['"]?([^\s'">]+)`)
|
||||
|
||||
func pageIDs(body string) (ids []string) {
|
||||
mv := idRx.FindAllStringSubmatch(body, -1)
|
||||
for _, m := range mv {
|
||||
ids = append(ids, m[1])
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// url may contain a #fragment, and the fragment is then noted as needing to exist.
|
||||
func crawl(url string, sourceURL string) {
|
||||
if strings.Contains(url, "/devel/release") {
|
||||
return
|
||||
}
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
if u, frag, ok := strings.Cut(url, "#"); ok {
|
||||
url = u
|
||||
if frag != "" {
|
||||
uf := urlFrag{url, frag}
|
||||
neededFrags[uf] = append(neededFrags[uf], sourceURL)
|
||||
}
|
||||
}
|
||||
if crawled[url] {
|
||||
return
|
||||
}
|
||||
crawled[url] = true
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
urlq <- url
|
||||
}()
|
||||
}
|
||||
|
||||
func addProblem(url, errmsg string) {
|
||||
msg := fmt.Sprintf("Error on %s: %s (from %s)", url, errmsg, linkSources[url])
|
||||
if *verbose {
|
||||
log.Print(msg)
|
||||
}
|
||||
problems = append(problems, msg)
|
||||
}
|
||||
|
||||
func crawlLoop() {
|
||||
for url := range urlq {
|
||||
if err := doCrawl(url); err != nil {
|
||||
addProblem(url, err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func doCrawl(url string) error {
|
||||
defer wg.Done()
|
||||
|
||||
req, err := http.NewRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
res, err := http.DefaultTransport.RoundTrip(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Handle redirects.
|
||||
if res.StatusCode/100 == 3 {
|
||||
newURL, err := res.Location()
|
||||
if err != nil {
|
||||
return fmt.Errorf("resolving redirect: %v", err)
|
||||
}
|
||||
if !strings.HasPrefix(newURL.String(), *root) {
|
||||
// Skip off-site redirects.
|
||||
return nil
|
||||
}
|
||||
crawl(newURL.String(), url)
|
||||
return nil
|
||||
}
|
||||
if res.StatusCode != 200 {
|
||||
return errors.New(res.Status)
|
||||
}
|
||||
slurp, err := io.ReadAll(res.Body)
|
||||
res.Body.Close()
|
||||
if err != nil {
|
||||
log.Fatalf("Error reading %s body: %v", url, err)
|
||||
}
|
||||
if *verbose {
|
||||
log.Printf("Len of %s: %d", url, len(slurp))
|
||||
}
|
||||
body := string(slurp)
|
||||
for _, ref := range localLinks(body) {
|
||||
if *verbose {
|
||||
log.Printf(" links to %s", ref)
|
||||
}
|
||||
dest := *root + ref
|
||||
linkSources[dest] = append(linkSources[dest], url)
|
||||
crawl(dest, url)
|
||||
}
|
||||
for _, id := range pageIDs(body) {
|
||||
if *verbose {
|
||||
log.Printf(" url %s has #%s", url, id)
|
||||
}
|
||||
fragExists[urlFrag{url, id}] = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
go crawlLoop()
|
||||
crawl(*root, "")
|
||||
|
||||
wg.Wait()
|
||||
close(urlq)
|
||||
for uf, needers := range neededFrags {
|
||||
if !fragExists[uf] {
|
||||
problems = append(problems, fmt.Sprintf("Missing fragment for %+v from %v", uf, needers))
|
||||
}
|
||||
}
|
||||
|
||||
for _, s := range problems {
|
||||
fmt.Println(s)
|
||||
}
|
||||
if len(problems) > 0 {
|
||||
os.Exit(1)
|
||||
}
|
||||
}
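(Illustration only; not part of linkcheck.go.) A quick, self-contained demo of what the two regexps above extract from a page: local hrefs for crawling and id attributes for fragment checking. The sample HTML is invented.

package main

import (
	"fmt"
	"regexp"
)

var (
	aRx  = regexp.MustCompile(`<a href=['"]?(/[^\s'">]+)`)
	idRx = regexp.MustCompile(`\bid=['"]?([^\s'">]+)`)
)

func main() {
	const page = `<h2 id="Install"><a href="/doc/install">Install</a></h2>`
	for _, m := range aRx.FindAllStringSubmatch(page, -1) {
		fmt.Println("link:", m[1]) // /doc/install
	}
	for _, m := range idRx.FindAllStringSubmatch(page, -1) {
		fmt.Println("fragment id:", m[1]) // Install
	}
}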
|
||||
31 misc/wasm/go_wasip1_wasm_exec Executable file
@@ -0,0 +1,31 @@
#!/usr/bin/env bash
# Copyright 2023 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.

case "$GOWASIRUNTIME" in
"wasmedge")
	exec wasmedge --dir=/ --env PWD="$PWD" --env PATH="$PATH" ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}"
	;;
"wasmer")
	exec wasmer run --dir=/ --env PWD="$PWD" --env PATH="$PATH" ${GOWASIRUNTIMEARGS:-} "$1" -- "${@:2}"
	;;
"wazero")
	exec wazero run -mount /:/ -env-inherit -cachedir "${TMPDIR:-/tmp}"/wazero ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}"
	;;
"wasmtime" | "")
	# Match the major version in "wasmtime-cli 14.0.0". For versions before 14
	# we need to use the old CLI. This requires Bash v3.0 and above.
	# TODO(johanbrandhorst): Remove this condition once 1.22 is released.
	# From 1.23 onwards we'll only support the new wasmtime CLI.
	if [[ "$(wasmtime --version)" =~ wasmtime-cli[[:space:]]([0-9]+)\.[0-9]+\.[0-9]+ && "${BASH_REMATCH[1]}" -lt 14 ]]; then
		exec wasmtime run --dir=/ --env PWD="$PWD" --env PATH="$PATH" --max-wasm-stack 1048576 ${GOWASIRUNTIMEARGS:-} "$1" -- "${@:2}"
	else
		exec wasmtime run --dir=/ --env PWD="$PWD" --env PATH="$PATH" -W max-wasm-stack=1048576 ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}"
	fi
	;;
*)
	echo "Unknown Go WASI runtime specified: $GOWASIRUNTIME"
	exit 1
	;;
esac
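(Illustration only; not part of the diff.) A hedged sketch of how this wrapper is typically reached from Go: when cross-running with GOOS=wasip1, the go command looks for a go_wasip1_wasm_exec wrapper on PATH, and GOWASIRUNTIME picks one of the case branches above. The target package and runtime choice here are assumptions for the example.

package main

import (
	"os"
	"os/exec"
)

func main() {
	// Run the archive/tar tests under a WASI runtime; assumes the wrapper
	// script is on PATH and wazero is installed.
	cmd := exec.Command("go", "test", "archive/tar")
	cmd.Env = append(os.Environ(),
		"GOOS=wasip1", "GOARCH=wasm",
		"GOWASIRUNTIME=wazero", // or wasmtime (the default), wasmer, wasmedge
	)
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		os.Exit(1)
	}
}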
@@ -17,7 +17,7 @@ license that can be found in the LICENSE file.
|
||||
<script src="https://cdn.jsdelivr.net/npm/text-encoding@0.7.0/lib/encoding.min.js"></script>
|
||||
(see https://caniuse.com/#feat=textencoder)
|
||||
-->
|
||||
<script src="../../lib/wasm/wasm_exec.js"></script>
|
||||
<script src="wasm_exec.js"></script>
|
||||
<script>
|
||||
if (!WebAssembly.instantiateStreaming) { // polyfill
|
||||
WebAssembly.instantiateStreaming = async (resp, importObject) => {
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
if (!globalThis.fs) {
|
||||
let outputBuf = "";
|
||||
globalThis.fs = {
|
||||
constants: { O_WRONLY: -1, O_RDWR: -1, O_CREAT: -1, O_TRUNC: -1, O_APPEND: -1, O_EXCL: -1, O_DIRECTORY: -1 }, // unused
|
||||
constants: { O_WRONLY: -1, O_RDWR: -1, O_CREAT: -1, O_TRUNC: -1, O_APPEND: -1, O_EXCL: -1 }, // unused
|
||||
writeSync(fd, buf) {
|
||||
outputBuf += decoder.decode(buf);
|
||||
const nl = outputBuf.lastIndexOf("\n");
|
||||
@@ -73,14 +73,6 @@
|
||||
}
|
||||
}
|
||||
|
||||
if (!globalThis.path) {
|
||||
globalThis.path = {
|
||||
resolve(...pathSegments) {
|
||||
return pathSegments.join("/");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!globalThis.crypto) {
|
||||
throw new Error("globalThis.crypto is not available, polyfill required (crypto.getRandomValues only)");
|
||||
}
|
||||
@@ -216,16 +208,10 @@
|
||||
return decoder.decode(new DataView(this._inst.exports.mem.buffer, saddr, len));
|
||||
}
|
||||
|
||||
const testCallExport = (a, b) => {
|
||||
this._inst.exports.testExport0();
|
||||
return this._inst.exports.testExport(a, b);
|
||||
}
|
||||
|
||||
const timeOrigin = Date.now() - performance.now();
|
||||
this.importObject = {
|
||||
_gotest: {
|
||||
add: (a, b) => a + b,
|
||||
callExport: testCallExport,
|
||||
},
|
||||
gojs: {
|
||||
// Go's SP does not change as long as no Go code is running. Some operations (e.g. calls, getters and setters)
|
||||
@@ -11,7 +11,6 @@ if (process.argv.length < 3) {
|
||||
|
||||
globalThis.require = require;
|
||||
globalThis.fs = require("fs");
|
||||
globalThis.path = require("path");
|
||||
globalThis.TextEncoder = require("util").TextEncoder;
|
||||
globalThis.TextDecoder = require("util").TextDecoder;
|
||||
|
||||
@@ -31,22 +31,16 @@ Maintaining vendor directories

Before updating vendor directories, ensure that module mode is enabled.
Make sure that GO111MODULE is not set in the environment, or that it is
set to 'on' or 'auto', and if you use a go.work file, set GOWORK=off.

Also, ensure that 'go env GOROOT' shows the root of this Go source
tree. Otherwise, the results are undefined. It's recommended to build
Go from source and use that 'go' binary to update its source tree.
set to 'on' or 'auto'.

Requirements may be added, updated, and removed with 'go get'.
The vendor directory may be updated with 'go mod vendor'.
Tree inconsistencies are reported by 'go test cmd/internal/moddeps'.
A typical sequence might be:

	cd src # or src/cmd
	cd src
	go get golang.org/x/net@master
	go mod tidy
	go mod vendor
	go test cmd/internal/moddeps

Use caution when passing '-u' to 'go get'. The '-u' flag updates
modules providing all transitively imported packages, not only
@@ -55,22 +49,3 @@ the module providing the target package.
Note that 'go mod vendor' only copies packages that are transitively
imported by packages in the current module. If a new package is needed,
it should be imported before running 'go mod vendor'.

Go release cycle considerations
===============================

Applying changes to packages that are vendored follows the considerations
written down at go.dev/s/release.

When the Go tree is open for development, a specific change may be pulled in
at any time that it is needed. During the release freeze, the bar for changes
in vendored packages is the same as it is for changes in non-vendored packages.
After a major release is out, minor Go releases follow a more involved process
documented at go.dev/wiki/MinorReleases#cherry-pick-cls-for-vendored-golangorgx-packages.

In addition to individual updates that happen on demand, all dependencies in
the std and cmd modules are updated to their latest available versions at least
twice during every major release cycle. This is done to avoid the possibility of
some dependencies being left on very old versions and in turn make their eventual
update more disruptive. This recurring process is tracked in go.dev/issue/36905.
The golang.org/x/build/cmd/updatestd command exists to assist with that process.
@@ -10,4 +10,4 @@ if [ ! -f make.bash ]; then
fi
. ./make.bash "$@" --no-banner
bash run.bash --no-rebuild
../bin/go tool dist banner # print build info
$GOTOOLDIR/dist banner # print build info

20 src/all.bat
@@ -6,11 +6,17 @@

setlocal

if not exist make.bat (
	echo all.bat must be run from go\src
	exit /b 1
)
if exist make.bat goto ok
echo all.bat must be run from go\src
:: cannot exit: would kill parent command interpreter
goto end
:ok

call .\make.bat --no-banner || exit /b 1
call .\run.bat --no-rebuild || exit /b 1
..\bin\go tool dist banner
call .\make.bat --no-banner --no-local
if %GOBUILDFAIL%==1 goto end
call .\run.bat --no-rebuild --no-local
if %GOBUILDFAIL%==1 goto end
"%GOTOOLDIR%/dist" banner

:end
if x%GOBUILDEXIT%==x1 exit %GOBUILDFAIL%

@@ -13,4 +13,4 @@ if(! test -f make.rc){
. ./make.rc --no-banner $*
bind -b $GOROOT/bin /bin
./run.rc --no-rebuild
../bin/go tool dist banner # print build info
$GOTOOLDIR/dist banner # print build info
@@ -15,7 +15,6 @@ import (
|
||||
"fmt"
|
||||
"internal/godebug"
|
||||
"io/fs"
|
||||
"maps"
|
||||
"math"
|
||||
"path"
|
||||
"reflect"
|
||||
@@ -39,7 +38,6 @@ var (
|
||||
errMissData = errors.New("archive/tar: sparse file references non-existent data")
|
||||
errUnrefData = errors.New("archive/tar: sparse file contains unreferenced data")
|
||||
errWriteHole = errors.New("archive/tar: write non-NUL byte in sparse hole")
|
||||
errSparseTooLong = errors.New("archive/tar: sparse map too long")
|
||||
)
|
||||
|
||||
type headerError []string
|
||||
@@ -614,7 +612,7 @@ func (fi headerFileInfo) String() string {
|
||||
}
|
||||
|
||||
// sysStat, if non-nil, populates h from system-dependent fields of fi.
|
||||
var sysStat func(fi fs.FileInfo, h *Header, doNameLookups bool) error
|
||||
var sysStat func(fi fs.FileInfo, h *Header) error
|
||||
|
||||
const (
|
||||
// Mode constants from the USTAR spec:
|
||||
@@ -641,10 +639,6 @@ const (
|
||||
// Since fs.FileInfo's Name method only returns the base name of
|
||||
// the file it describes, it may be necessary to modify Header.Name
|
||||
// to provide the full path name of the file.
|
||||
//
|
||||
// If fi implements [FileInfoNames]
|
||||
// Header.Gname and Header.Uname
|
||||
// are provided by the methods of the interface.
|
||||
func FileInfoHeader(fi fs.FileInfo, link string) (*Header, error) {
|
||||
if fi == nil {
|
||||
return nil, errors.New("archive/tar: FileInfo is nil")
|
||||
@@ -698,45 +692,31 @@ func FileInfoHeader(fi fs.FileInfo, link string) (*Header, error) {
|
||||
h.Gname = sys.Gname
|
||||
h.AccessTime = sys.AccessTime
|
||||
h.ChangeTime = sys.ChangeTime
|
||||
h.Xattrs = maps.Clone(sys.Xattrs)
|
||||
if sys.Xattrs != nil {
|
||||
h.Xattrs = make(map[string]string)
|
||||
for k, v := range sys.Xattrs {
|
||||
h.Xattrs[k] = v
|
||||
}
|
||||
}
|
||||
if sys.Typeflag == TypeLink {
|
||||
// hard link
|
||||
h.Typeflag = TypeLink
|
||||
h.Size = 0
|
||||
h.Linkname = sys.Linkname
|
||||
}
|
||||
h.PAXRecords = maps.Clone(sys.PAXRecords)
|
||||
}
|
||||
var doNameLookups = true
|
||||
if iface, ok := fi.(FileInfoNames); ok {
|
||||
doNameLookups = false
|
||||
var err error
|
||||
h.Gname, err = iface.Gname()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
h.Uname, err = iface.Uname()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if sys.PAXRecords != nil {
|
||||
h.PAXRecords = make(map[string]string)
|
||||
for k, v := range sys.PAXRecords {
|
||||
h.PAXRecords[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
if sysStat != nil {
|
||||
return h, sysStat(fi, h, doNameLookups)
|
||||
return h, sysStat(fi, h)
|
||||
}
|
||||
return h, nil
|
||||
}
|
||||
|
||||
// FileInfoNames extends [fs.FileInfo].
|
||||
// Passing an instance of this to [FileInfoHeader] permits the caller
|
||||
// to avoid a system-dependent name lookup by specifying the Uname and Gname directly.
|
||||
type FileInfoNames interface {
|
||||
fs.FileInfo
|
||||
// Uname should give a user name.
|
||||
Uname() (string, error)
|
||||
// Gname should give a group name.
|
||||
Gname() (string, error)
|
||||
}
|
||||
|
||||
// isHeaderOnlyType checks if the given type flag is of the type that has no
|
||||
// data section even if a size is specified.
|
||||
func isHeaderOnlyType(flag byte) bool {
|
||||
|
||||
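(Illustration only; not part of the diff.) A compact sketch of the new FileInfoNames interface in use. The fake FileInfo below is invented for the example: because it implements Uname and Gname, FileInfoHeader fills those header fields directly and skips the system-dependent uid/gid name lookups.

package main

import (
	"archive/tar"
	"fmt"
	"io/fs"
	"time"
)

type staticInfo struct{}

func (staticInfo) Name() string           { return "file.txt" }
func (staticInfo) Size() int64            { return 0 }
func (staticInfo) Mode() fs.FileMode      { return 0644 }
func (staticInfo) ModTime() time.Time     { return time.Time{} }
func (staticInfo) IsDir() bool            { return false }
func (staticInfo) Sys() any               { return nil }
func (staticInfo) Uname() (string, error) { return "gopher", nil }
func (staticInfo) Gname() (string, error) { return "gophers", nil }

func main() {
	// staticInfo satisfies tar.FileInfoNames, so no os/user lookup happens.
	hdr, err := tar.FileInfoHeader(staticInfo{}, "")
	if err != nil {
		panic(err)
	}
	fmt.Println(hdr.Uname, hdr.Gname) // gopher gophers
}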
@@ -531,17 +531,12 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) {
|
||||
cntNewline int64
|
||||
buf bytes.Buffer
|
||||
blk block
|
||||
totalSize int
|
||||
)
|
||||
|
||||
// feedTokens copies data in blocks from r into buf until there are
|
||||
// at least cnt newlines in buf. It will not read more blocks than needed.
|
||||
feedTokens := func(n int64) error {
|
||||
for cntNewline < n {
|
||||
totalSize += len(blk)
|
||||
if totalSize > maxSpecialFileSize {
|
||||
return errSparseTooLong
|
||||
}
|
||||
if _, err := mustReadFull(r, blk[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -574,8 +569,8 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) {
|
||||
}
|
||||
|
||||
// Parse for all member entries.
|
||||
// numEntries is trusted after this since feedTokens limits the number of
|
||||
// tokens based on maxSpecialFileSize.
|
||||
// numEntries is trusted after this since a potential attacker must have
|
||||
// committed resources proportional to what this library used.
|
||||
if err := feedTokens(2 * numEntries); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -816,7 +811,9 @@ func (sr sparseFileReader) physicalRemaining() int64 {
|
||||
type zeroReader struct{}
|
||||
|
||||
func (zeroReader) Read(b []byte) (int, error) {
|
||||
clear(b)
|
||||
for i := range b {
|
||||
b[i] = 0
|
||||
}
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
|
||||
@@ -7,17 +7,14 @@ package tar
|
||||
import (
|
||||
"bytes"
|
||||
"compress/bzip2"
|
||||
"crypto/md5"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"internal/obscuretestdata"
|
||||
"io"
|
||||
"maps"
|
||||
"math"
|
||||
"os"
|
||||
"path"
|
||||
"reflect"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
@@ -26,11 +23,10 @@ import (
|
||||
|
||||
func TestReader(t *testing.T) {
|
||||
vectors := []struct {
|
||||
file string // Test input file
|
||||
obscured bool // Obscured with obscuretestdata package
|
||||
headers []*Header // Expected output headers
|
||||
chksums []string // CRC32 checksum of files, leave as nil if not checked
|
||||
err error // Expected error to occur
|
||||
file string // Test input file
|
||||
headers []*Header // Expected output headers
|
||||
chksums []string // MD5 checksum of files, leave as nil if not checked
|
||||
err error // Expected error to occur
|
||||
}{{
|
||||
file: "testdata/gnu.tar",
|
||||
headers: []*Header{{
|
||||
@@ -57,8 +53,8 @@ func TestReader(t *testing.T) {
|
||||
Format: FormatGNU,
|
||||
}},
|
||||
chksums: []string{
|
||||
"6cbd88fc",
|
||||
"ddac04b3",
|
||||
"e38b27eaccb4391bdec553a7f3ae6b2f",
|
||||
"c65bd2e50a56a2138bf1716f2fd56fe9",
|
||||
},
|
||||
}, {
|
||||
file: "testdata/sparse-formats.tar",
|
||||
@@ -151,11 +147,11 @@ func TestReader(t *testing.T) {
|
||||
Format: FormatGNU,
|
||||
}},
|
||||
chksums: []string{
|
||||
"5375e1d2",
|
||||
"5375e1d2",
|
||||
"5375e1d2",
|
||||
"5375e1d2",
|
||||
"8eb179ba",
|
||||
"6f53234398c2449fe67c1812d993012f",
|
||||
"6f53234398c2449fe67c1812d993012f",
|
||||
"6f53234398c2449fe67c1812d993012f",
|
||||
"6f53234398c2449fe67c1812d993012f",
|
||||
"b0061974914468de549a2af8ced10316",
|
||||
},
|
||||
}, {
|
||||
file: "testdata/star.tar",
|
||||
@@ -272,7 +268,7 @@ func TestReader(t *testing.T) {
|
||||
Format: FormatPAX,
|
||||
}},
|
||||
chksums: []string{
|
||||
"5fd7e86a",
|
||||
"0afb597b283fe61b5d4879669a350556",
|
||||
},
|
||||
}, {
|
||||
file: "testdata/pax-records.tar",
|
||||
@@ -525,9 +521,8 @@ func TestReader(t *testing.T) {
|
||||
file: "testdata/pax-nul-path.tar",
|
||||
err: ErrHeader,
|
||||
}, {
|
||||
file: "testdata/neg-size.tar.base64",
|
||||
obscured: true,
|
||||
err: ErrHeader,
|
||||
file: "testdata/neg-size.tar",
|
||||
err: ErrHeader,
|
||||
}, {
|
||||
file: "testdata/issue10968.tar",
|
||||
err: ErrHeader,
|
||||
@@ -624,32 +619,18 @@ func TestReader(t *testing.T) {
|
||||
},
|
||||
Format: FormatPAX,
|
||||
}},
|
||||
}, {
|
||||
// Small compressed file that uncompresses to
|
||||
// a file with a very large GNU 1.0 sparse map.
|
||||
file: "testdata/gnu-sparse-many-zeros.tar.bz2",
|
||||
err: errSparseTooLong,
|
||||
}}
|
||||
|
||||
for _, v := range vectors {
|
||||
t.Run(strings.TrimSuffix(path.Base(v.file), ".base64"), func(t *testing.T) {
|
||||
path := v.file
|
||||
if v.obscured {
|
||||
tf, err := obscuretestdata.DecodeToTempFile(path)
|
||||
if err != nil {
|
||||
t.Fatalf("obscuredtestdata.DecodeToTempFile(%s): %v", path, err)
|
||||
}
|
||||
path = tf
|
||||
}
|
||||
|
||||
f, err := os.Open(path)
|
||||
t.Run(path.Base(v.file), func(t *testing.T) {
|
||||
f, err := os.Open(v.file)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
var fr io.Reader = f
|
||||
if strings.HasSuffix(v.file, ".bz2") || strings.HasSuffix(v.file, ".bz2.base64") {
|
||||
if strings.HasSuffix(v.file, ".bz2") {
|
||||
fr = bzip2.NewReader(fr)
|
||||
}
|
||||
|
||||
@@ -674,7 +655,7 @@ func TestReader(t *testing.T) {
|
||||
if v.chksums == nil {
|
||||
continue
|
||||
}
|
||||
h := crc32.NewIEEE()
|
||||
h := md5.New()
|
||||
_, err = io.CopyBuffer(h, tr, rdbuf) // Effectively an incremental read
|
||||
if err != nil {
|
||||
break
|
||||
@@ -787,7 +768,7 @@ type readBadSeeker struct{ io.ReadSeeker }
|
||||
|
||||
func (rbs *readBadSeeker) Seek(int64, int) (int64, error) { return 0, fmt.Errorf("illegal seek") }
|
||||
|
||||
// TestReadTruncation tests the ending condition on various truncated files and
|
||||
// TestReadTruncation test the ending condition on various truncated files and
|
||||
// that truncated files are still detected even if the underlying io.Reader
|
||||
// satisfies io.Seeker.
|
||||
func TestReadTruncation(t *testing.T) {
|
||||
@@ -1036,7 +1017,7 @@ func TestParsePAX(t *testing.T) {
|
||||
for i, v := range vectors {
|
||||
r := strings.NewReader(v.in)
|
||||
got, err := parsePAX(r)
|
||||
if !maps.Equal(got, v.want) && !(len(got) == 0 && len(v.want) == 0) {
|
||||
if !reflect.DeepEqual(got, v.want) && !(len(got) == 0 && len(v.want) == 0) {
|
||||
t.Errorf("test %d, parsePAX():\ngot %v\nwant %v", i, got, v.want)
|
||||
}
|
||||
if ok := err == nil; ok != v.ok {
|
||||
@@ -1153,7 +1134,7 @@ func TestReadOldGNUSparseMap(t *testing.T) {
|
||||
v.input = v.input[copy(blk[:], v.input):]
|
||||
tr := Reader{r: bytes.NewReader(v.input)}
|
||||
got, err := tr.readOldGNUSparseMap(&hdr, &blk)
|
||||
if !slices.Equal(got, v.wantMap) {
|
||||
if !equalSparseEntries(got, v.wantMap) {
|
||||
t.Errorf("test %d, readOldGNUSparseMap(): got %v, want %v", i, got, v.wantMap)
|
||||
}
|
||||
if err != v.wantErr {
|
||||
@@ -1344,7 +1325,7 @@ func TestReadGNUSparsePAXHeaders(t *testing.T) {
|
||||
r := strings.NewReader(v.inputData + "#") // Add canary byte
|
||||
tr := Reader{curr: ®FileReader{r, int64(r.Len())}}
|
||||
got, err := tr.readGNUSparsePAXHeaders(&hdr)
|
||||
if !slices.Equal(got, v.wantMap) {
|
||||
if !equalSparseEntries(got, v.wantMap) {
|
||||
t.Errorf("test %d, readGNUSparsePAXHeaders(): got %v, want %v", i, got, v.wantMap)
|
||||
}
|
||||
if err != v.wantErr {
|
||||
|
||||
@@ -19,34 +19,34 @@ func init() {
|
||||
sysStat = statUnix
|
||||
}
|
||||
|
||||
// userMap and groupMap cache UID and GID lookups for performance reasons.
|
||||
// userMap and groupMap caches UID and GID lookups for performance reasons.
|
||||
// The downside is that renaming uname or gname by the OS never takes effect.
|
||||
var userMap, groupMap sync.Map // map[int]string
|
||||
|
||||
func statUnix(fi fs.FileInfo, h *Header, doNameLookups bool) error {
|
||||
func statUnix(fi fs.FileInfo, h *Header) error {
|
||||
sys, ok := fi.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
h.Uid = int(sys.Uid)
|
||||
h.Gid = int(sys.Gid)
|
||||
if doNameLookups {
|
||||
// Best effort at populating Uname and Gname.
|
||||
// The os/user functions may fail for any number of reasons
|
||||
// (not implemented on that platform, cgo not enabled, etc).
|
||||
if u, ok := userMap.Load(h.Uid); ok {
|
||||
h.Uname = u.(string)
|
||||
} else if u, err := user.LookupId(strconv.Itoa(h.Uid)); err == nil {
|
||||
h.Uname = u.Username
|
||||
userMap.Store(h.Uid, h.Uname)
|
||||
}
|
||||
if g, ok := groupMap.Load(h.Gid); ok {
|
||||
h.Gname = g.(string)
|
||||
} else if g, err := user.LookupGroupId(strconv.Itoa(h.Gid)); err == nil {
|
||||
h.Gname = g.Name
|
||||
groupMap.Store(h.Gid, h.Gname)
|
||||
}
|
||||
|
||||
// Best effort at populating Uname and Gname.
|
||||
// The os/user functions may fail for any number of reasons
|
||||
// (not implemented on that platform, cgo not enabled, etc).
|
||||
if u, ok := userMap.Load(h.Uid); ok {
|
||||
h.Uname = u.(string)
|
||||
} else if u, err := user.LookupId(strconv.Itoa(h.Uid)); err == nil {
|
||||
h.Uname = u.Username
|
||||
userMap.Store(h.Uid, h.Uname)
|
||||
}
|
||||
if g, ok := groupMap.Load(h.Gid); ok {
|
||||
h.Gname = g.(string)
|
||||
} else if g, err := user.LookupGroupId(strconv.Itoa(h.Gid)); err == nil {
|
||||
h.Gname = g.Name
|
||||
groupMap.Store(h.Gid, h.Gname)
|
||||
}
|
||||
|
||||
h.AccessTime = statAtime(sys)
|
||||
h.ChangeTime = statCtime(sys)
|
||||
|
||||
|
||||
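(Illustration only; not part of the diff.) A standalone sketch of the caching pattern used by statUnix above, with assumed values: a sync.Map memoizes uid-to-username lookups so repeated stats of files owned by the same user hit os/user only once.

package main

import (
	"fmt"
	"os/user"
	"strconv"
	"sync"
)

var userCache sync.Map // map[int]string

func uname(uid int) string {
	if u, ok := userCache.Load(uid); ok {
		return u.(string)
	}
	u, err := user.LookupId(strconv.Itoa(uid))
	if err != nil {
		return "" // best effort, mirroring statUnix
	}
	userCache.Store(uid, u.Username)
	return u.Username
}

func main() {
	fmt.Println(uname(0)) // typically "root" on Unix systems
}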
@@ -213,17 +213,15 @@ func parsePAXTime(s string) (time.Time, error) {
|
||||
}
|
||||
|
||||
// Parse the nanoseconds.
|
||||
// Initialize an array with '0's to handle right padding automatically.
|
||||
nanoDigits := [maxNanoSecondDigits]byte{'0', '0', '0', '0', '0', '0', '0', '0', '0'}
|
||||
for i := range len(sn) {
|
||||
switch c := sn[i]; {
|
||||
case c < '0' || c > '9':
|
||||
return time.Time{}, ErrHeader
|
||||
case i < len(nanoDigits):
|
||||
nanoDigits[i] = c
|
||||
}
|
||||
if strings.Trim(sn, "0123456789") != "" {
|
||||
return time.Time{}, ErrHeader
|
||||
}
|
||||
nsecs, _ := strconv.ParseInt(string(nanoDigits[:]), 10, 64) // Must succeed after validation
|
||||
if len(sn) < maxNanoSecondDigits {
|
||||
sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad
|
||||
} else {
|
||||
sn = sn[:maxNanoSecondDigits] // Right truncate
|
||||
}
|
||||
nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed
|
||||
if len(ss) > 0 && ss[0] == '-' {
|
||||
return time.Unix(secs, -1*nsecs), nil // Negative correction
|
||||
}
|
||||
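(Illustration only; not part of the diff.) A worked example of the nanosecond handling above, using a standalone helper rather than the package's internal API: the fractional PAX digits are right-padded to nine places, so "1.123" means 123000000ns, and anything beyond nine digits is dropped.

package main

import (
	"fmt"
	"strconv"
)

func nanos(frac string) int64 {
	// Start from nine '0' digits so short fractions are right-padded.
	digits := [9]byte{'0', '0', '0', '0', '0', '0', '0', '0', '0'}
	for i := 0; i < len(frac) && i < len(digits); i++ {
		digits[i] = frac[i]
	}
	n, _ := strconv.ParseInt(string(digits[:]), 10, 64)
	return n
}

func main() {
	fmt.Println(nanos("123"))          // 123000000
	fmt.Println(nanos("123456789123")) // 123456789 (extra digits truncated)
}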
@@ -312,7 +310,7 @@ func formatPAXRecord(k, v string) (string, error) {
|
||||
// "%d %s=%s\n" % (size, key, value)
|
||||
//
|
||||
// Keys and values should be UTF-8, but the number of bad writers out there
|
||||
// forces us to be more liberal.
|
||||
// forces us to be a more liberal.
|
||||
// Thus, we only reject all keys with NUL, and only reject NULs in values
|
||||
// for the PAX version of the USTAR string fields.
|
||||
// The key must not contain an '=' character.
|
||||
|
||||
@@ -439,66 +439,3 @@ func TestFormatPAXRecord(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkParsePAXTime(b *testing.B) {
|
||||
tests := []struct {
|
||||
name string
|
||||
in string
|
||||
want time.Time
|
||||
ok bool
|
||||
}{
|
||||
{
|
||||
name: "NoNanos",
|
||||
in: "123456",
|
||||
want: time.Unix(123456, 0),
|
||||
ok: true,
|
||||
},
|
||||
{
|
||||
name: "ExactNanos",
|
||||
in: "1.123456789",
|
||||
want: time.Unix(1, 123456789),
|
||||
ok: true,
|
||||
},
|
||||
{
|
||||
name: "WithNanoPadding",
|
||||
in: "1.123",
|
||||
want: time.Unix(1, 123000000),
|
||||
ok: true,
|
||||
},
|
||||
{
|
||||
name: "WithNanoTruncate",
|
||||
in: "1.123456789123",
|
||||
want: time.Unix(1, 123456789),
|
||||
ok: true,
|
||||
},
|
||||
{
|
||||
name: "TrailingError",
|
||||
in: "1.123abc",
|
||||
want: time.Time{},
|
||||
ok: false,
|
||||
},
|
||||
{
|
||||
name: "LeadingError",
|
||||
in: "1.abc123",
|
||||
want: time.Time{},
|
||||
ok: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
b.Run(tt.name, func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
for b.Loop() {
|
||||
ts, err := parsePAXTime(tt.in)
|
||||
if (err == nil) != tt.ok {
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.Fatal("expected error")
|
||||
}
|
||||
if !ts.Equal(tt.want) {
|
||||
b.Fatalf("time mismatch: got %v, want %v", ts, tt.want)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,13 +11,11 @@ import (
|
||||
"internal/testenv"
|
||||
"io"
|
||||
"io/fs"
|
||||
"maps"
|
||||
"math"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"slices"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -100,6 +98,10 @@ func (f *testFile) Seek(pos int64, whence int) (int64, error) {
|
||||
return f.pos, nil
|
||||
}
|
||||
|
||||
func equalSparseEntries(x, y []sparseEntry) bool {
|
||||
return (len(x) == 0 && len(y) == 0) || reflect.DeepEqual(x, y)
|
||||
}
|
||||
|
||||
func TestSparseEntries(t *testing.T) {
|
||||
vectors := []struct {
|
||||
in []sparseEntry
|
||||
@@ -196,11 +198,11 @@ func TestSparseEntries(t *testing.T) {
|
||||
continue
|
||||
}
|
||||
gotAligned := alignSparseEntries(append([]sparseEntry{}, v.in...), v.size)
|
||||
if !slices.Equal(gotAligned, v.wantAligned) {
|
||||
if !equalSparseEntries(gotAligned, v.wantAligned) {
|
||||
t.Errorf("test %d, alignSparseEntries():\ngot %v\nwant %v", i, gotAligned, v.wantAligned)
|
||||
}
|
||||
gotInverted := invertSparseEntries(append([]sparseEntry{}, v.in...), v.size)
|
||||
if !slices.Equal(gotInverted, v.wantInverted) {
|
||||
if !equalSparseEntries(gotInverted, v.wantInverted) {
|
||||
t.Errorf("test %d, inverseSparseEntries():\ngot %v\nwant %v", i, gotInverted, v.wantInverted)
|
||||
}
|
||||
}
|
||||
@@ -742,7 +744,7 @@ func TestHeaderAllowedFormats(t *testing.T) {
|
||||
if formats != v.formats {
|
||||
t.Errorf("test %d, allowedFormats(): got %v, want %v", i, formats, v.formats)
|
||||
}
|
||||
if formats&FormatPAX > 0 && !maps.Equal(paxHdrs, v.paxHdrs) && !(len(paxHdrs) == 0 && len(v.paxHdrs) == 0) {
|
||||
if formats&FormatPAX > 0 && !reflect.DeepEqual(paxHdrs, v.paxHdrs) && !(len(paxHdrs) == 0 && len(v.paxHdrs) == 0) {
|
||||
t.Errorf("test %d, allowedFormats():\ngot %v\nwant %s", i, paxHdrs, v.paxHdrs)
|
||||
}
|
||||
if (formats != FormatUnknown) && (err != nil) {
|
||||
@@ -846,53 +848,3 @@ func Benchmark(b *testing.B) {
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
var _ fileInfoNames = fileInfoNames{}
|
||||
|
||||
type fileInfoNames struct{}
|
||||
|
||||
func (f *fileInfoNames) Name() string {
|
||||
return "tmp"
|
||||
}
|
||||
|
||||
func (f *fileInfoNames) Size() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (f *fileInfoNames) Mode() fs.FileMode {
|
||||
return 0777
|
||||
}
|
||||
|
||||
func (f *fileInfoNames) ModTime() time.Time {
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
func (f *fileInfoNames) IsDir() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (f *fileInfoNames) Sys() any {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *fileInfoNames) Uname() (string, error) {
|
||||
return "Uname", nil
|
||||
}
|
||||
|
||||
func (f *fileInfoNames) Gname() (string, error) {
|
||||
return "Gname", nil
|
||||
}
|
||||
|
||||
func TestFileInfoHeaderUseFileInfoNames(t *testing.T) {
|
||||
info := &fileInfoNames{}
|
||||
header, err := FileInfoHeader(info, "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if header.Uname != "Uname" {
|
||||
t.Fatalf("header.Uname: got %s, want %s", header.Uname, "Uname")
|
||||
}
|
||||
if header.Gname != "Gname" {
|
||||
t.Fatalf("header.Gname: got %s, want %s", header.Gname, "Gname")
|
||||
}
|
||||
}
|
||||
|
||||
BIN src/archive/tar/testdata/gnu-sparse-big.tar vendored Normal file
Binary file not shown.
@@ -1,90 +0,0 @@
|
||||
Z251LXNwYXJzZQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAwMDAwMDAAMDAwMDAw
|
||||
MAAwMDAwMDAwADAwMDAwMDA2MDAwADAwMDAwMDAwMDAwADAyMjIyNwAgUwAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB1c3RhciAgAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwMDAwMDAwADAwMDAw
|
||||
MDAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAlQL4gAw
|
||||
MDAwMDAwMTAwMACAAAAAAAAABKgXxgAwMDAwMDAwMTAwMACAAAAAAAAABvwjqgAwMDAwMDAwMTAw
|
||||
MACAAAAAAAAACVAvjgAwMDAwMDAwMTAwMAABgAAAAAAAAA34R1gAAAAAAAAAAAAAAAAAAAAAAACA
|
||||
AAAAAAAAC6Q7cgAwMDAwMDAwMTAwMACAAAAAAAAADfhHVgAwMDAwMDAwMTAwMAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1
|
||||
Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5AAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAADAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2
|
||||
Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAMDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3
|
||||
ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OQAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4
|
||||
OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5AAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAADAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5
|
||||
MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAMDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkw
|
||||
MTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OQAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
||||
Binary file not shown.
BIN src/archive/tar/testdata/neg-size.tar vendored Normal file
Binary file not shown.
9 src/archive/tar/testdata/neg-size.tar.base64 vendored
@@ -1,9 +0,0 @@
|
||||
EwMwMBMDLTgyMTk1MDI5NnQTE4NzfINzEzAwcXfhZrsDMDAwAAAAEDAxMRNz9DEwMTAwdBMTg3N8
|
||||
g3NzADATc3yDc3P0eFMTc/QxMDEwMHQTE4NzfINzc/QwE3N8g3Nz9HFTMNR0MBMwMHEw9DAAAAAQ
|
||||
MDGAABAwMTETc/QxMDH0MHQTMDBx1OFmuwMAAAD/gICAADExNTYyMQAgMBP///+AMTAwdHhTMDB0
|
||||
MBMwMHF3MDEwMTAwdBMTg3N8g3Nz9HhTMDB0MBMwMHF34Wa7AzAwMAAAABAwMTETc/QxMDEwMHQT
|
||||
E4NzfINzc/QwE3N8g3Nz9HhTE3P0MTAxMDB0ExODc3yDc3MwMBNzfINzczB4UzAwdDATMDBxMDAA
|
||||
gAAAEDAxc/QxMDEwMHQTAAAAIOFmuwMwNAAAABAwMTET////gDEwMHR4UzAwdDATMDBxd+FmuwMw
|
||||
MDAAAAAQMDExE3ODc3P0eFMTc/QxMDEwMHQTE4NzfINzc/QzMTEwMzM2MjQ4NDYxMjgzODBzfINz
|
||||
c/R4UzAwdDATMDBxMDAwAAAAEDAxAAAQMDExE3P0MTAxMDB0EzAwcdThZrsDMDQAAAAQg3N8g3Nz
|
||||
9DATc3yDc3P0eFMwMHQwEzAwcTAwMAAAABAwMQAAEDAxMRNz9DEwMTAwdBMwMHgw4Wa7AwAAEDA=
|
||||
BIN src/archive/tar/testdata/pax-sparse-big.tar vendored Normal file
Binary file not shown.
108 src/archive/tar/testdata/pax-sparse-big.tar.base64 vendored
@@ -1,108 +0,0 @@
|
||||
UGF4SGVhZGVycy4wL3BheC1zcGFyc2UAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAwMDAwMDAAMDAwMDAw
|
||||
MAAwMDAwMDAwADAwMDAwMDAwMTcyADAwMDAwMDAwMDAwADAxMjIyNwAgeAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB1c3RhcgAwMAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAy
|
||||
MiBHTlUuc3BhcnNlLm1ham9yPTEKMjIgR05VLnNwYXJzZS5taW5vcj0wCjMwIEdOVS5zcGFyc2Uu
|
||||
bmFtZT1wYXgtc3BhcnNlCjM1IEdOVS5zcGFyc2UucmVhbHNpemU9NjAwMDAwMDAwMDAKMTMgc2l6
|
||||
ZT0zNTg0CgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEdO
|
||||
VVNwYXJzZUZpbGUuMC9wYXgtc3BhcnNlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwMDAwMDAwADAwMDAwMDAA
|
||||
MDAwMDAwMAAwMDAwMDAwNzAwMAAwMDAwMDAwMDAwMAAwMTM3MzcAIDAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAdXN0YXIAMDAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMDAwMDAwMAAwMDAwMDAw
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAANgo5
|
||||
OTk5OTk5NDg4CjUxMgoxOTk5OTk5OTQ4OAo1MTIKMjk5OTk5OTk0ODgKNTEyCjM5OTk5OTk5NDg4
|
||||
CjUxMgo0OTk5OTk5OTQ4OAo1MTIKNTk5OTk5OTk0ODgKNTEyCgAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAMDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3
|
||||
ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OQAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4
|
||||
[remainder of the base64-encoded tar test fixture omitted: repeating 0-9 digit blocks padded with NUL bytes]
BIN
src/archive/tar/testdata/writer-big-long.tar
vendored
Normal file
Binary file not shown.
@@ -1,27 +0,0 @@
[27 base64-encoded lines of the removed writer-big-long.tar.base64 fixture omitted: PAX headers for a 16 GiB file with a long repeated longname/ path]
@@ -9,9 +9,8 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"maps"
|
||||
"path"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
@@ -170,10 +169,16 @@ func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error {
|
||||
// Write PAX records to the output.
|
||||
isGlobal := hdr.Typeflag == TypeXGlobalHeader
|
||||
if len(paxHdrs) > 0 || isGlobal {
|
||||
// Sort keys for deterministic ordering.
|
||||
var keys []string
|
||||
for k := range paxHdrs {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
// Write each record to a buffer.
|
||||
var buf strings.Builder
|
||||
// Sort keys for deterministic ordering.
|
||||
for _, k := range slices.Sorted(maps.Keys(paxHdrs)) {
|
||||
for _, k := range keys {
|
||||
rec, err := formatPAXRecord(k, paxHdrs[k])
|
||||
if err != nil {
|
||||
return err
|
||||
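The two versions of writePAXHeader above differ only in how they get a deterministic key order: one collects the map keys and calls sort.Strings, the other ranges over slices.Sorted(maps.Keys(paxHdrs)). A minimal standalone sketch of that equivalence, using an invented record map and fmt output instead of the real formatPAXRecord plumbing (the slices/maps form needs Go 1.23 or newer):

package main

import (
	"fmt"
	"maps"
	"slices"
	"sort"
)

func main() {
	paxHdrs := map[string]string{"mtime": "1700000000", "path": "a/b", "size": "42"}

	// Older form: collect the keys, then sort.Strings.
	var keys []string
	for k := range paxHdrs {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		fmt.Printf("%s=%s\n", k, paxHdrs[k])
	}

	// Newer form: slices.Sorted over maps.Keys yields the same order.
	for _, k := range slices.Sorted(maps.Keys(paxHdrs)) {
		fmt.Printf("%s=%s\n", k, paxHdrs[k])
	}
}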
@@ -408,37 +413,25 @@ func (tw *Writer) AddFS(fsys fs.FS) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if name == "." {
|
||||
if d.IsDir() {
|
||||
return nil
|
||||
}
|
||||
info, err := d.Info()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
linkTarget := ""
|
||||
if typ := d.Type(); typ == fs.ModeSymlink {
|
||||
var err error
|
||||
linkTarget, err = fs.ReadLink(fsys, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else if !typ.IsRegular() && typ != fs.ModeDir {
|
||||
// TODO(#49580): Handle symlinks when fs.ReadLinkFS is available.
|
||||
if !info.Mode().IsRegular() {
|
||||
return errors.New("tar: cannot add non-regular file")
|
||||
}
|
||||
h, err := FileInfoHeader(info, linkTarget)
|
||||
h, err := FileInfoHeader(info, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
h.Name = name
|
||||
if d.IsDir() {
|
||||
h.Name += "/"
|
||||
}
|
||||
if err := tw.WriteHeader(h); err != nil {
|
||||
return err
|
||||
}
|
||||
if !d.Type().IsRegular() {
|
||||
return nil
|
||||
}
|
||||
f, err := fsys.Open(name)
|
||||
if err != nil {
|
||||
return err
|
||||
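The AddFS hunk above switches between a version that resolves symlinks with fs.ReadLink and one that rejects non-regular files outright; for ordinary files and directories the call looks the same either way. A small sketch using an in-memory fstest.MapFS (the file names are invented for illustration):

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"log"
	"os"
	"testing/fstest"
)

func main() {
	fsys := fstest.MapFS{
		"file.go":              {Data: []byte("hello")},
		"subfolder/another.go": {Data: []byte("world")},
		"emptyfolder":          {Mode: 0o755 | os.ModeDir},
	}

	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	if err := tw.AddFS(fsys); err != nil {
		log.Fatal(err)
	}
	if err := tw.Close(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("archive size:", buf.Len())
}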
@@ -675,7 +668,6 @@ func (sw *sparseFileWriter) ReadFrom(r io.Reader) (n int64, err error) {
|
||||
func (sw sparseFileWriter) logicalRemaining() int64 {
|
||||
return sw.sp[len(sw.sp)-1].endOffset() - sw.pos
|
||||
}
|
||||
|
||||
func (sw sparseFileWriter) physicalRemaining() int64 {
|
||||
return sw.fw.physicalRemaining()
|
||||
}
|
||||
|
||||
@@ -8,13 +8,11 @@ import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"internal/obscuretestdata"
|
||||
"io"
|
||||
"io/fs"
|
||||
"maps"
|
||||
"os"
|
||||
"path"
|
||||
"slices"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
@@ -75,9 +73,8 @@ func TestWriter(t *testing.T) {
|
||||
)
|
||||
|
||||
vectors := []struct {
|
||||
file string // Optional filename of expected output
|
||||
obscured bool // Whether file is obscured
|
||||
tests []testFnc
|
||||
file string // Optional filename of expected output
|
||||
tests []testFnc
|
||||
}{{
|
||||
// The writer test file was produced with this command:
|
||||
// tar (GNU tar) 1.26
|
||||
@@ -153,8 +150,7 @@ func TestWriter(t *testing.T) {
|
||||
// bsdtar -xvf writer-big-long.tar
|
||||
//
|
||||
// This file is in PAX format.
|
||||
file: "testdata/writer-big-long.tar.base64",
|
||||
obscured: true,
|
||||
file: "testdata/writer-big-long.tar",
|
||||
tests: []testFnc{
|
||||
testHeader{Header{
|
||||
Typeflag: TypeReg,
|
||||
@@ -396,8 +392,7 @@ func TestWriter(t *testing.T) {
|
||||
testClose{},
|
||||
},
|
||||
}, {
|
||||
file: "testdata/gnu-sparse-big.tar.base64",
|
||||
obscured: true,
|
||||
file: "testdata/gnu-sparse-big.tar",
|
||||
tests: []testFnc{
|
||||
testHeader{Header{
|
||||
Typeflag: TypeGNUSparse,
|
||||
@@ -429,8 +424,7 @@ func TestWriter(t *testing.T) {
|
||||
testClose{nil},
|
||||
},
|
||||
}, {
|
||||
file: "testdata/pax-sparse-big.tar.base64",
|
||||
obscured: true,
|
||||
file: "testdata/pax-sparse-big.tar",
|
||||
tests: []testFnc{
|
||||
testHeader{Header{
|
||||
Typeflag: TypeReg,
|
||||
@@ -488,7 +482,7 @@ func TestWriter(t *testing.T) {
|
||||
return x == y
|
||||
}
|
||||
for _, v := range vectors {
|
||||
t.Run(strings.TrimSuffix(path.Base(v.file), ".base64"), func(t *testing.T) {
|
||||
t.Run(path.Base(v.file), func(t *testing.T) {
|
||||
const maxSize = 10 << 10 // 10KiB
|
||||
buf := new(bytes.Buffer)
|
||||
tw := NewWriter(iotest.TruncateWriter(buf, maxSize))
|
||||
@@ -527,16 +521,7 @@ func TestWriter(t *testing.T) {
|
||||
}
|
||||
|
||||
if v.file != "" {
|
||||
path := v.file
|
||||
if v.obscured {
|
||||
tf, err := obscuretestdata.DecodeToTempFile(path)
|
||||
if err != nil {
|
||||
t.Fatalf("obscuretestdata.DecodeToTempFile(%s): %v", path, err)
|
||||
}
|
||||
path = tf
|
||||
}
|
||||
|
||||
want, err := os.ReadFile(path)
|
||||
want, err := os.ReadFile(v.file)
|
||||
if err != nil {
|
||||
t.Fatalf("ReadFile() = %v, want nil", err)
|
||||
}
|
||||
@@ -596,10 +581,10 @@ func TestPaxSymlink(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
hdr, err := FileInfoHeader(fileinfo, "")
|
||||
hdr.Typeflag = TypeSymlink
|
||||
if err != nil {
|
||||
t.Fatalf("os.Stat:1 %v", err)
|
||||
}
|
||||
hdr.Typeflag = TypeSymlink
|
||||
// Force a PAX long linkname to be written
|
||||
longLinkname := strings.Repeat("1234567890/1234567890", 10)
|
||||
hdr.Linkname = longLinkname
|
||||
@@ -717,7 +702,7 @@ func TestPaxXattrs(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !maps.Equal(hdr.Xattrs, xattrs) {
|
||||
if !reflect.DeepEqual(hdr.Xattrs, xattrs) {
|
||||
t.Fatalf("xattrs did not survive round trip: got %+v, want %+v",
|
||||
hdr.Xattrs, xattrs)
|
||||
}
|
||||
@@ -764,7 +749,7 @@ func TestPaxHeadersSorted(t *testing.T) {
|
||||
bytes.Index(buf.Bytes(), []byte("foo=foo")),
|
||||
bytes.Index(buf.Bytes(), []byte("qux=qux")),
|
||||
}
|
||||
if !slices.IsSorted(indices) {
|
||||
if !sort.IntsAreSorted(indices) {
|
||||
t.Fatal("PAX headers are not sorted")
|
||||
}
|
||||
}
|
||||
@@ -776,10 +761,10 @@ func TestUSTARLongName(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
hdr, err := FileInfoHeader(fileinfo, "")
|
||||
hdr.Typeflag = TypeDir
|
||||
if err != nil {
|
||||
t.Fatalf("os.Stat:1 %v", err)
|
||||
}
|
||||
hdr.Typeflag = TypeDir
|
||||
// Force a PAX long name to be written. The name was taken from a practical example
|
||||
// that fails and replaced ever char through numbers to anonymize the sample.
|
||||
longName := "/0000_0000000/00000-000000000/0000_0000000/00000-0000000000000/0000_0000000/00000-0000000-00000000/0000_0000000/00000000/0000_0000000/000/0000_0000000/00000000v00/0000_0000000/000000/0000_0000000/0000000/0000_0000000/00000y-00/0000/0000/00000000/0x000000/"
|
||||
@@ -1353,41 +1338,29 @@ func TestFileWriter(t *testing.T) {
|
||||
|
||||
func TestWriterAddFS(t *testing.T) {
|
||||
fsys := fstest.MapFS{
|
||||
"emptyfolder": {Mode: 0o755 | os.ModeDir},
|
||||
"file.go": {Data: []byte("hello")},
|
||||
"subfolder/another.go": {Data: []byte("world")},
|
||||
"symlink.go": {Mode: 0o777 | os.ModeSymlink, Data: []byte("file.go")},
|
||||
// Notably missing here is the "subfolder" directory. This makes sure even
|
||||
// if we don't have a subfolder directory listed.
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
tw := NewWriter(&buf)
|
||||
if err := tw.AddFS(fsys); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := tw.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Add subfolder into fsys to match what we'll read from the tar.
|
||||
fsys["subfolder"] = &fstest.MapFile{Mode: 0o555 | os.ModeDir}
|
||||
|
||||
// Test that we can get the files back from the archive
|
||||
tr := NewReader(&buf)
|
||||
|
||||
names := make([]string, 0, len(fsys))
|
||||
for name := range fsys {
|
||||
names = append(names, name)
|
||||
entries, err := fsys.ReadDir(".")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
sort.Strings(names)
|
||||
|
||||
entriesLeft := len(fsys)
|
||||
for _, name := range names {
|
||||
entriesLeft--
|
||||
|
||||
entryInfo, err := fsys.Lstat(name)
|
||||
if err != nil {
|
||||
t.Fatalf("getting entry info error: %v", err)
|
||||
var curfname string
|
||||
for _, entry := range entries {
|
||||
curfname = entry.Name()
|
||||
if entry.IsDir() {
|
||||
curfname += "/"
|
||||
continue
|
||||
}
|
||||
hdr, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
@@ -1397,42 +1370,22 @@ func TestWriterAddFS(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tmpName := name
|
||||
if entryInfo.IsDir() {
|
||||
tmpName += "/"
|
||||
}
|
||||
if hdr.Name != tmpName {
|
||||
t.Errorf("test fs has filename %v; archive header has %v",
|
||||
name, hdr.Name)
|
||||
data, err := io.ReadAll(tr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if entryInfo.Mode() != hdr.FileInfo().Mode() {
|
||||
t.Errorf("%s: test fs has mode %v; archive header has %v",
|
||||
name, entryInfo.Mode(), hdr.FileInfo().Mode())
|
||||
if hdr.Name != curfname {
|
||||
t.Fatalf("got filename %v, want %v",
|
||||
curfname, hdr.Name)
|
||||
}
|
||||
|
||||
switch entryInfo.Mode().Type() {
|
||||
case fs.ModeDir:
|
||||
// No additional checks necessary.
|
||||
case fs.ModeSymlink:
|
||||
origtarget := string(fsys[name].Data)
|
||||
if hdr.Linkname != origtarget {
|
||||
t.Fatalf("test fs has link content %s; archive header %v", origtarget, hdr.Linkname)
|
||||
}
|
||||
default:
|
||||
data, err := io.ReadAll(tr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
origdata := fsys[name].Data
|
||||
if string(data) != string(origdata) {
|
||||
t.Fatalf("test fs has file content %v; archive header has %v", origdata, data)
|
||||
}
|
||||
origdata := fsys[curfname].Data
|
||||
if string(data) != string(origdata) {
|
||||
t.Fatalf("got file content %v, want %v",
|
||||
data, origdata)
|
||||
}
|
||||
}
|
||||
if entriesLeft > 0 {
|
||||
t.Fatalf("not all entries are in the archive")
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriterAddFSNonRegularFiles(t *testing.T) {
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"bufio"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"hash/crc32"
|
||||
"internal/godebug"
|
||||
@@ -17,7 +16,7 @@ import (
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -805,9 +804,6 @@ func toValidName(name string) string {
|
||||
|
||||
func (r *Reader) initFileList() {
|
||||
r.fileListOnce.Do(func() {
|
||||
// Preallocate the minimum size of the index.
|
||||
// We may also synthesize additional directory entries.
|
||||
r.fileList = make([]fileListEntry, 0, len(r.File))
|
||||
// files and knownDirs map from a file/directory name
|
||||
// to an index into the r.fileList entry that we are
|
||||
// building. They are used to mark duplicate entries.
|
||||
@@ -834,16 +830,7 @@ func (r *Reader) initFileList() {
|
||||
continue
|
||||
}
|
||||
|
||||
dir := name
|
||||
for {
|
||||
if idx := strings.LastIndex(dir, "/"); idx < 0 {
|
||||
break
|
||||
} else {
|
||||
dir = dir[:idx]
|
||||
}
|
||||
if dirs[dir] {
|
||||
break
|
||||
}
|
||||
for dir := path.Dir(name); dir != "."; dir = path.Dir(dir) {
|
||||
dirs[dir] = true
|
||||
}
|
||||
|
||||
@@ -875,19 +862,14 @@ func (r *Reader) initFileList() {
|
||||
}
|
||||
}
|
||||
|
||||
slices.SortFunc(r.fileList, func(a, b fileListEntry) int {
|
||||
return fileEntryCompare(a.name, b.name)
|
||||
})
|
||||
sort.Slice(r.fileList, func(i, j int) bool { return fileEntryLess(r.fileList[i].name, r.fileList[j].name) })
|
||||
})
|
||||
}
|
||||
|
||||
func fileEntryCompare(x, y string) int {
|
||||
func fileEntryLess(x, y string) bool {
|
||||
xdir, xelem, _ := split(x)
|
||||
ydir, yelem, _ := split(y)
|
||||
if xdir != ydir {
|
||||
return strings.Compare(xdir, ydir)
|
||||
}
|
||||
return strings.Compare(xelem, yelem)
|
||||
return xdir < ydir || xdir == ydir && xelem < yelem
|
||||
}
|
||||
|
||||
// Open opens the named file in the ZIP archive,
|
||||
@@ -915,8 +897,14 @@ func (r *Reader) Open(name string) (fs.File, error) {
|
||||
}
|
||||
|
||||
func split(name string) (dir, elem string, isDir bool) {
|
||||
name, isDir = strings.CutSuffix(name, "/")
|
||||
i := strings.LastIndexByte(name, '/')
|
||||
if len(name) > 0 && name[len(name)-1] == '/' {
|
||||
isDir = true
|
||||
name = name[:len(name)-1]
|
||||
}
|
||||
i := len(name) - 1
|
||||
for i >= 0 && name[i] != '/' {
|
||||
i--
|
||||
}
|
||||
if i < 0 {
|
||||
return ".", name, isDir
|
||||
}
|
||||
@@ -932,12 +920,9 @@ func (r *Reader) openLookup(name string) *fileListEntry {
|
||||
|
||||
dir, elem, _ := split(name)
|
||||
files := r.fileList
|
||||
i, _ := slices.BinarySearchFunc(files, dir, func(a fileListEntry, dir string) (ret int) {
|
||||
idir, ielem, _ := split(a.name)
|
||||
if dir != idir {
|
||||
return strings.Compare(idir, dir)
|
||||
}
|
||||
return strings.Compare(ielem, elem)
|
||||
i := sort.Search(len(files), func(i int) bool {
|
||||
idir, ielem, _ := split(files[i].name)
|
||||
return idir > dir || idir == dir && ielem >= elem
|
||||
})
|
||||
if i < len(files) {
|
||||
fname := files[i].name
|
||||
@@ -950,21 +935,13 @@ func (r *Reader) openLookup(name string) *fileListEntry {
|
||||
|
||||
func (r *Reader) openReadDir(dir string) []fileListEntry {
|
||||
files := r.fileList
|
||||
i, _ := slices.BinarySearchFunc(files, dir, func(a fileListEntry, dir string) int {
|
||||
idir, _, _ := split(a.name)
|
||||
if dir != idir {
|
||||
return strings.Compare(idir, dir)
|
||||
}
|
||||
// find the first entry with dir
|
||||
return +1
|
||||
i := sort.Search(len(files), func(i int) bool {
|
||||
idir, _, _ := split(files[i].name)
|
||||
return idir >= dir
|
||||
})
|
||||
j, _ := slices.BinarySearchFunc(files, dir, func(a fileListEntry, dir string) int {
|
||||
jdir, _, _ := split(a.name)
|
||||
if dir != jdir {
|
||||
return strings.Compare(jdir, dir)
|
||||
}
|
||||
// find the last entry with dir
|
||||
return -1
|
||||
j := sort.Search(len(files), func(j int) bool {
|
||||
jdir, _, _ := split(files[j].name)
|
||||
return jdir > dir
|
||||
})
|
||||
return files[i:j]
|
||||
}
|
||||
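The openLookup and openReadDir hunks above express the same ordered lookup either as sort.Search with a boolean predicate or as slices.BinarySearchFunc with a three-way comparison. A standalone sketch of that equivalence on a plain sorted string slice (the names are made up):

package main

import (
	"cmp"
	"fmt"
	"slices"
	"sort"
)

func main() {
	names := []string{"a.txt", "dir/b.txt", "dir/c.txt", "z.txt"} // already sorted
	target := "dir/c.txt"

	// sort.Search: index of the first element >= target.
	i := sort.Search(len(names), func(i int) bool { return names[i] >= target })

	// slices.BinarySearchFunc: the same index, plus an exact-match flag.
	j, found := slices.BinarySearchFunc(names, target, func(a, b string) int {
		return cmp.Compare(a, b)
	})

	fmt.Println(i, j, found) // 2 2 true
}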
@@ -998,12 +975,6 @@ func (d *openDir) ReadDir(count int) ([]fs.DirEntry, error) {
|
||||
s, err := d.files[d.offset+i].stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if s.Name() == "." || !fs.ValidPath(s.Name()) {
|
||||
return nil, &fs.PathError{
|
||||
Op: "readdir",
|
||||
Path: d.e.name,
|
||||
Err: fmt.Errorf("invalid file name: %v", d.files[d.offset+i].name),
|
||||
}
|
||||
}
|
||||
list[i] = s
|
||||
}
|
||||
|
||||
@@ -8,15 +8,13 @@ import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"internal/obscuretestdata"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"slices"
|
||||
"strings"
|
||||
"testing"
|
||||
"testing/fstest"
|
||||
@@ -914,7 +912,9 @@ func returnRecursiveZip() (r io.ReaderAt, size int64) {
|
||||
// type zeros struct{}
|
||||
//
|
||||
// func (zeros) Read(b []byte) (int, error) {
|
||||
// clear(b)
|
||||
// for i := range b {
|
||||
// b[i] = 0
|
||||
// }
|
||||
// return len(b), nil
|
||||
// }
|
||||
//
|
||||
@@ -1214,6 +1214,7 @@ func TestFS(t *testing.T) {
|
||||
[]string{"a/b/c"},
|
||||
},
|
||||
} {
|
||||
test := test
|
||||
t.Run(test.file, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
z, err := OpenReader(test.file)
|
||||
@@ -1247,6 +1248,7 @@ func TestFSWalk(t *testing.T) {
|
||||
wantErr: true,
|
||||
},
|
||||
} {
|
||||
test := test
|
||||
t.Run(test.file, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
z, err := OpenReader(test.file)
|
||||
@@ -1274,56 +1276,13 @@ func TestFSWalk(t *testing.T) {
|
||||
} else if !test.wantErr && sawErr {
|
||||
t.Error("unexpected error")
|
||||
}
|
||||
if test.want != nil && !slices.Equal(files, test.want) {
|
||||
if test.want != nil && !reflect.DeepEqual(files, test.want) {
|
||||
t.Errorf("got %v want %v", files, test.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFSWalkBadFile(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var buf bytes.Buffer
|
||||
zw := NewWriter(&buf)
|
||||
hdr := &FileHeader{Name: "."}
|
||||
hdr.SetMode(fs.ModeDir | 0o755)
|
||||
w, err := zw.CreateHeader(hdr)
|
||||
if err != nil {
|
||||
t.Fatalf("create zip header: %v", err)
|
||||
}
|
||||
_, err = w.Write([]byte("some data"))
|
||||
if err != nil {
|
||||
t.Fatalf("write zip contents: %v", err)
|
||||
|
||||
}
|
||||
err = zw.Close()
|
||||
if err != nil {
|
||||
t.Fatalf("close zip writer: %v", err)
|
||||
|
||||
}
|
||||
|
||||
zr, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
|
||||
if err != nil {
|
||||
t.Fatalf("create zip reader: %v", err)
|
||||
|
||||
}
|
||||
var count int
|
||||
var errRepeat = errors.New("repeated call to path")
|
||||
err = fs.WalkDir(zr, ".", func(p string, d fs.DirEntry, err error) error {
|
||||
count++
|
||||
if count > 2 { // once for directory read, once for the error
|
||||
return errRepeat
|
||||
}
|
||||
return err
|
||||
})
|
||||
if err == nil {
|
||||
t.Fatalf("expected error from invalid file name")
|
||||
} else if errors.Is(err, errRepeat) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFSModTime(t *testing.T) {
|
||||
t.Parallel()
|
||||
z, err := OpenReader("testdata/subdir.zip")
|
||||
@@ -1623,7 +1582,7 @@ func TestCVE202141772(t *testing.T) {
|
||||
t.Errorf("Opening %q with fs.FS API succeeded", f.Name)
|
||||
}
|
||||
}
|
||||
if !slices.Equal(names, entryNames) {
|
||||
if !reflect.DeepEqual(names, entryNames) {
|
||||
t.Errorf("Unexpected file entries: %q", names)
|
||||
}
|
||||
if _, err := r.Open(""); err == nil {
|
||||
@@ -1736,7 +1695,7 @@ func TestInsecurePaths(t *testing.T) {
|
||||
for _, f := range zr.File {
|
||||
gotPaths = append(gotPaths, f.Name)
|
||||
}
|
||||
if !slices.Equal(gotPaths, []string{path}) {
|
||||
if !reflect.DeepEqual(gotPaths, []string{path}) {
|
||||
t.Errorf("NewReader for archive with file %q: got files %q", path, gotPaths)
|
||||
continue
|
||||
}
|
||||
@@ -1761,7 +1720,7 @@ func TestDisableInsecurePathCheck(t *testing.T) {
|
||||
for _, f := range zr.File {
|
||||
gotPaths = append(gotPaths, f.Name)
|
||||
}
|
||||
if want := []string{name}; !slices.Equal(gotPaths, want) {
|
||||
if want := []string{name}; !reflect.DeepEqual(gotPaths, want) {
|
||||
t.Errorf("NewReader with zipinsecurepath=1: got files %q, want %q", gotPaths, want)
|
||||
}
|
||||
}
|
||||
@@ -1875,83 +1834,3 @@ func TestBaseOffsetPlusOverflow(t *testing.T) {
|
||||
// as the section reader offset & size were < 0.
|
||||
NewReader(bytes.NewReader(data), int64(len(data))+1875)
|
||||
}
|
||||
|
||||
func BenchmarkReaderOneDeepDir(b *testing.B) {
|
||||
var buf bytes.Buffer
|
||||
zw := NewWriter(&buf)
|
||||
|
||||
for i := range 4000 {
|
||||
name := strings.Repeat("a/", i) + "data"
|
||||
zw.CreateHeader(&FileHeader{
|
||||
Name: name,
|
||||
Method: Store,
|
||||
})
|
||||
}
|
||||
|
||||
if err := zw.Close(); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
data := buf.Bytes()
|
||||
|
||||
for b.Loop() {
|
||||
zr, err := NewReader(bytes.NewReader(data), int64(len(data)))
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
zr.Open("does-not-exist")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReaderManyDeepDirs(b *testing.B) {
|
||||
var buf bytes.Buffer
|
||||
zw := NewWriter(&buf)
|
||||
|
||||
for i := range 2850 {
|
||||
name := fmt.Sprintf("%x", i)
|
||||
name = strings.Repeat("/"+name, i+1)[1:]
|
||||
|
||||
zw.CreateHeader(&FileHeader{
|
||||
Name: name,
|
||||
Method: Store,
|
||||
})
|
||||
}
|
||||
|
||||
if err := zw.Close(); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
data := buf.Bytes()
|
||||
|
||||
for b.Loop() {
|
||||
zr, err := NewReader(bytes.NewReader(data), int64(len(data)))
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
zr.Open("does-not-exist")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReaderManyShallowFiles(b *testing.B) {
|
||||
var buf bytes.Buffer
|
||||
zw := NewWriter(&buf)
|
||||
|
||||
for i := range 310000 {
|
||||
name := fmt.Sprintf("%v", i)
|
||||
zw.CreateHeader(&FileHeader{
|
||||
Name: name,
|
||||
Method: Store,
|
||||
})
|
||||
}
|
||||
|
||||
if err := zw.Close(); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
data := buf.Bytes()
|
||||
|
||||
for b.Loop() {
|
||||
zr, err := NewReader(bytes.NewReader(data), int64(len(data)))
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
zr.Open("does-not-exist")
|
||||
}
|
||||
}
|
||||
|
||||
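The benchmarks above iterate with for b.Loop() rather than the classic for i := 0; i < b.N; i++ counter. A minimal sketch of that shape with an invented workload (testing.B.Loop requires Go 1.24 or newer):

package zipbench

import (
	"bytes"
	"testing"
)

// BenchmarkBufferWrite is an invented workload; only the loop shape matters here.
func BenchmarkBufferWrite(b *testing.B) {
	data := bytes.Repeat([]byte("x"), 1024)

	for b.Loop() {
		var buf bytes.Buffer
		buf.Write(data)
	}
}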
@@ -143,9 +143,9 @@ type FileHeader struct {
// Deprecated: Use CompressedSize64 instead.
CompressedSize uint32

// UncompressedSize is the uncompressed size of the file in bytes.
// UncompressedSize is the compressed size of the file in bytes.
// If either the uncompressed or compressed size of the file
// does not fit in 32 bits, UncompressedSize is set to ^uint32(0).
// does not fit in 32 bits, CompressedSize is set to ^uint32(0).
//
// Deprecated: Use UncompressedSize64 instead.
UncompressedSize uint32

@@ -213,8 +213,7 @@ func (w *Writer) Close() error {
// The name must be a relative path: it must not start with a drive
// letter (e.g. C:) or leading slash, and only forward slashes are
// allowed. To create a directory instead of a file, add a trailing
// slash to the name. Duplicate names will not overwrite previous entries
// and are appended to the zip file.
// slash to the name.
// The file's contents must be written to the [io.Writer] before the next
// call to [Writer.Create], [Writer.CreateHeader], or [Writer.Close].
func (w *Writer) Create(name string) (io.Writer, error) {
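Both versions of the Create documentation above agree on the naming rules: forward slashes only, no drive letter or leading slash, a trailing slash for a directory entry, and each entry must be fully written before the next Create, CreateHeader, or Close call. A minimal sketch with invented entry names:

package main

import (
	"archive/zip"
	"bytes"
	"io"
	"log"
)

func main() {
	var buf bytes.Buffer
	zw := zip.NewWriter(&buf)

	if _, err := zw.Create("docs/"); err != nil { // trailing slash: a directory entry
		log.Fatal(err)
	}
	w, err := zw.Create("docs/readme.txt") // relative path, forward slashes only
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.WriteString(w, "hello"); err != nil { // finish before the next Create or Close
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("archive is %d bytes", buf.Len())
}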
@@ -434,10 +433,6 @@ func writeHeader(w io.Writer, h *header) error {
|
||||
// [Writer.CreateHeader], [Writer.CreateRaw], or [Writer.Close].
|
||||
//
|
||||
// In contrast to [Writer.CreateHeader], the bytes passed to Writer are not compressed.
|
||||
//
|
||||
// CreateRaw's argument is stored in w. If the argument is a pointer to the embedded
|
||||
// [FileHeader] in a [File] obtained from a [Reader] created from in-memory data,
|
||||
// then w will refer to all of that memory.
|
||||
func (w *Writer) CreateRaw(fh *FileHeader) (io.Writer, error) {
|
||||
if err := w.prepare(fh); err != nil {
|
||||
return nil, err
|
||||
@@ -476,10 +471,7 @@ func (w *Writer) Copy(f *File) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Copy the FileHeader so w doesn't store a pointer to the data
|
||||
// of f's entire archive. See #65499.
|
||||
fh := f.FileHeader
|
||||
fw, err := w.CreateRaw(&fh)
|
||||
fw, err := w.CreateRaw(&f.FileHeader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
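The Writer.Copy hunk above is about whether CreateRaw receives a pointer into f.FileHeader, which can keep the source archive's memory alive (see #65499), or a pointer to a local copy. A hedged sketch of the copy-then-CreateRaw idiom; the archive contents here are invented:

package main

import (
	"archive/zip"
	"bytes"
	"io"
	"log"
)

func main() {
	// Build a tiny source archive in memory.
	var src bytes.Buffer
	zw := zip.NewWriter(&src)
	w, err := zw.Create("hello.txt")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.WriteString(w, "hello"); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	zr, err := zip.NewReader(bytes.NewReader(src.Bytes()), int64(src.Len()))
	if err != nil {
		log.Fatal(err)
	}

	// Re-emit each entry without recompressing. Copying the FileHeader value
	// first keeps the output writer from retaining a pointer into zr's data.
	var dst bytes.Buffer
	out := zip.NewWriter(&dst)
	for _, f := range zr.File {
		fh := f.FileHeader // value copy rather than &f.FileHeader
		fw, err := out.CreateRaw(&fh)
		if err != nil {
			log.Fatal(err)
		}
		rr, err := f.OpenRaw()
		if err != nil {
			log.Fatal(err)
		}
		if _, err := io.Copy(fw, rr); err != nil {
			log.Fatal(err)
		}
	}
	if err := out.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("copied archive is %d bytes", dst.Len())
}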
@@ -505,14 +497,14 @@ func (w *Writer) AddFS(fsys fs.FS) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if name == "." {
|
||||
if d.IsDir() {
|
||||
return nil
|
||||
}
|
||||
info, err := d.Info()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !d.IsDir() && !info.Mode().IsRegular() {
|
||||
if !info.Mode().IsRegular() {
|
||||
return errors.New("zip: cannot add non-regular file")
|
||||
}
|
||||
h, err := FileInfoHeader(info)
|
||||
@@ -520,17 +512,11 @@ func (w *Writer) AddFS(fsys fs.FS) error {
|
||||
return err
|
||||
}
|
||||
h.Name = name
|
||||
if d.IsDir() {
|
||||
h.Name += "/"
|
||||
}
|
||||
h.Method = Deflate
|
||||
fw, err := w.CreateHeader(h)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if d.IsDir() {
|
||||
return nil
|
||||
}
|
||||
f, err := fsys.Open(name)
|
||||
if err != nil {
|
||||
return err
|
||||
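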
@@ -615,7 +601,7 @@ func (w *fileWriter) writeDataDescriptor() error {
|
||||
}
|
||||
// Write data descriptor. This is more complicated than one would
|
||||
// think, see e.g. comments in zipfile.c:putextended() and
|
||||
// https://bugs.openjdk.org/browse/JDK-7073588.
|
||||
// http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=7073588.
|
||||
// The approach here is to write 8 byte sizes if needed without
|
||||
// adding a zip64 extra in the local header (too late anyway).
|
||||
var buf []byte
|
||||
|
||||
@@ -108,7 +108,7 @@ func TestWriter(t *testing.T) {
|
||||
|
||||
// TestWriterComment is test for EOCD comment read/write.
|
||||
func TestWriterComment(t *testing.T) {
|
||||
tests := []struct {
|
||||
var tests = []struct {
|
||||
comment string
|
||||
ok bool
|
||||
}{
|
||||
@@ -158,7 +158,7 @@ func TestWriterComment(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestWriterUTF8(t *testing.T) {
|
||||
utf8Tests := []struct {
|
||||
var utf8Tests = []struct {
|
||||
name string
|
||||
comment string
|
||||
nonUTF8 bool
|
||||
@@ -619,32 +619,32 @@ func TestWriterAddFS(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
w := NewWriter(buf)
|
||||
tests := []WriteTest{
|
||||
{Name: "emptyfolder", Mode: 0o755 | os.ModeDir},
|
||||
{Name: "file.go", Data: []byte("hello"), Mode: 0644},
|
||||
{Name: "subfolder/another.go", Data: []byte("world"), Mode: 0644},
|
||||
// Notably missing here is the "subfolder" directory. This makes sure even
|
||||
// if we don't have a subfolder directory listed.
|
||||
{
|
||||
Name: "file.go",
|
||||
Data: []byte("hello"),
|
||||
Mode: 0644,
|
||||
},
|
||||
{
|
||||
Name: "subfolder/another.go",
|
||||
Data: []byte("world"),
|
||||
Mode: 0644,
|
||||
},
|
||||
}
|
||||
err := w.AddFS(writeTestsToFS(tests))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := w.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Add subfolder into fsys to match what we'll read from the zip.
|
||||
tests = append(tests[:2:2], WriteTest{Name: "subfolder", Mode: 0o555 | os.ModeDir}, tests[2])
|
||||
|
||||
// read it back
|
||||
r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for i, wt := range tests {
|
||||
if wt.Mode.IsDir() {
|
||||
wt.Name += "/"
|
||||
}
|
||||
testReadFile(t, r.File[i], &wt)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,14 +8,13 @@ package zip
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"cmp"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"internal/testenv"
|
||||
"io"
|
||||
"runtime"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -215,8 +214,9 @@ func (r *rleBuffer) ReadAt(p []byte, off int64) (n int, err error) {
|
||||
if len(p) == 0 {
|
||||
return
|
||||
}
|
||||
skipParts, _ := slices.BinarySearchFunc(r.buf, off, func(rb repeatedByte, off int64) int {
|
||||
return cmp.Compare(rb.off+rb.n, off)
|
||||
skipParts := sort.Search(len(r.buf), func(i int) bool {
|
||||
part := &r.buf[i]
|
||||
return part.off+part.n > off
|
||||
})
|
||||
parts := r.buf[skipParts:]
|
||||
if len(parts) > 0 {
|
||||
@@ -814,6 +814,8 @@ func TestSuffixSaver(t *testing.T) {
|
||||
type zeros struct{}
|
||||
|
||||
func (zeros) Read(p []byte) (int, error) {
|
||||
clear(p)
|
||||
for i := range p {
|
||||
p[i] = 0
|
||||
}
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
@@ -29,9 +29,6 @@ var (
// Buffered input.

// Reader implements buffering for an io.Reader object.
// A new Reader is created by calling [NewReader] or [NewReaderSize];
// alternatively the zero value of a Reader may be used after calling [Reset]
// on it.
type Reader struct {
buf []byte
rd io.Reader // reader provided by the client
@@ -133,10 +130,9 @@ func (b *Reader) readErr() error {
}

// Peek returns the next n bytes without advancing the reader. The bytes stop
// being valid at the next read call. If necessary, Peek will read more bytes
// into the buffer in order to make n bytes available. If Peek returns fewer
// than n bytes, it also returns an error explaining why the read is short.
// The error is [ErrBufferFull] if n is larger than b's buffer size.
// being valid at the next read call. If Peek returns fewer than n bytes, it
// also returns an error explaining why the read is short. The error is
// [ErrBufferFull] if n is larger than b's buffer size.
//
// Calling Peek prevents a [Reader.UnreadByte] or [Reader.UnreadRune] call from succeeding
// until the next read operation.
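Both versions of the Peek documentation above describe the same observable behavior: the returned bytes are only valid until the next read, and a short result comes with an explanatory error. A short usage sketch:

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	r := bufio.NewReader(strings.NewReader("golang"))

	b, err := r.Peek(2) // look at "go" without consuming it
	fmt.Printf("peek=%q err=%v\n", b, err)

	rest, _ := r.ReadString('\n') // the reader still starts at the beginning
	fmt.Printf("rest=%q\n", rest)
}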
@@ -311,7 +307,10 @@ func (b *Reader) ReadRune() (r rune, size int, err error) {
|
||||
if b.r == b.w {
|
||||
return 0, 0, b.readErr()
|
||||
}
|
||||
r, size = utf8.DecodeRune(b.buf[b.r:b.w])
|
||||
r, size = rune(b.buf[b.r]), 1
|
||||
if r >= utf8.RuneSelf {
|
||||
r, size = utf8.DecodeRune(b.buf[b.r:b.w])
|
||||
}
|
||||
b.r += size
|
||||
b.lastByte = int(b.buf[b.r-1])
|
||||
b.lastRuneSize = size
|
||||
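The ReadRune hunk above only changes whether single-byte runes bypass utf8.DecodeRune; callers see identical results. A sketch of that ASCII fast path on its own, outside bufio:

package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	for _, buf := range [][]byte{[]byte("a"), []byte("é")} {
		// Fast path: a byte below utf8.RuneSelf (0x80) is its own rune.
		r, size := rune(buf[0]), 1
		if r >= utf8.RuneSelf {
			r, size = utf8.DecodeRune(buf)
		}
		fmt.Printf("%q -> rune %q, size %d\n", buf, r, size)
	}
}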
@@ -516,11 +515,9 @@ func (b *Reader) WriteTo(w io.Writer) (n int64, err error) {
|
||||
b.lastByte = -1
|
||||
b.lastRuneSize = -1
|
||||
|
||||
if b.r < b.w {
|
||||
n, err = b.writeBuf(w)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n, err = b.writeBuf(w)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if r, ok := b.rd.(io.WriterTo); ok {
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"internal/asan"
|
||||
"io"
|
||||
"math/rand"
|
||||
"strconv"
|
||||
@@ -586,9 +585,6 @@ func TestWriteInvalidRune(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestReadStringAllocs(t *testing.T) {
|
||||
if asan.Enabled {
|
||||
t.Skip("test allocates more with -asan; see #70079")
|
||||
}
|
||||
r := strings.NewReader(" foo foo 42 42 42 42 42 42 42 42 4.2 4.2 4.2 4.2\n")
|
||||
buf := NewReader(r)
|
||||
allocs := testing.AllocsPerRun(100, func() {
|
||||
@@ -640,7 +636,7 @@ func TestWriter(t *testing.T) {
|
||||
for l := 0; l < len(written); l++ {
|
||||
if written[l] != data[l] {
|
||||
t.Errorf("wrong bytes written")
|
||||
t.Errorf("want=%q", data[:len(written)])
|
||||
t.Errorf("want=%q", data[0:len(written)])
|
||||
t.Errorf("have=%q", written)
|
||||
}
|
||||
}
|
||||
@@ -939,6 +935,7 @@ func (t *testReader) Read(buf []byte) (n int, err error) {
|
||||
}
|
||||
|
||||
func testReadLine(t *testing.T, input []byte) {
|
||||
//for stride := 1; stride < len(input); stride++ {
|
||||
for stride := 1; stride < 2; stride++ {
|
||||
done := 0
|
||||
reader := testReader{input, stride}
|
||||
@@ -1149,7 +1146,7 @@ func (w errorWriterToTest) Write(p []byte) (int, error) {
|
||||
var errorWriterToTests = []errorWriterToTest{
|
||||
{1, 0, nil, io.ErrClosedPipe, io.ErrClosedPipe},
|
||||
{0, 1, io.ErrClosedPipe, nil, io.ErrClosedPipe},
|
||||
{0, 0, io.ErrUnexpectedEOF, io.ErrClosedPipe, io.ErrUnexpectedEOF},
|
||||
{0, 0, io.ErrUnexpectedEOF, io.ErrClosedPipe, io.ErrClosedPipe},
|
||||
{0, 1, io.EOF, nil, nil},
|
||||
}
|
||||
|
||||
|
||||
@@ -33,33 +33,6 @@ func ExampleWriter_AvailableBuffer() {
|
||||
// Output: 1 2 3 4
|
||||
}
|
||||
|
||||
// ExampleWriter_ReadFrom demonstrates how to use the ReadFrom method of Writer.
|
||||
func ExampleWriter_ReadFrom() {
|
||||
var buf bytes.Buffer
|
||||
writer := bufio.NewWriter(&buf)
|
||||
|
||||
data := "Hello, world!\nThis is a ReadFrom example."
|
||||
reader := strings.NewReader(data)
|
||||
|
||||
n, err := writer.ReadFrom(reader)
|
||||
if err != nil {
|
||||
fmt.Println("ReadFrom Error:", err)
|
||||
return
|
||||
}
|
||||
|
||||
if err = writer.Flush(); err != nil {
|
||||
fmt.Println("Flush Error:", err)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Println("Bytes written:", n)
|
||||
fmt.Println("Buffer contents:", buf.String())
|
||||
// Output:
|
||||
// Bytes written: 41
|
||||
// Buffer contents: Hello, world!
|
||||
// This is a ReadFrom example.
|
||||
}
|
||||
|
||||
// The simplest use of a Scanner, to read standard input as a set of lines.
|
||||
func ExampleScanner_lines() {
|
||||
scanner := bufio.NewScanner(os.Stdin)
|
||||
|
||||
@@ -1,96 +0,0 @@
|
||||
// Copyright 2025 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build unix
|
||||
|
||||
package bufio_test
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"net"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestCopyUnixpacket tests that we can use bufio when copying
|
||||
// across a unixpacket socket. This used to fail due to an unnecessary
|
||||
// empty Write call that was interpreted as an EOF.
|
||||
func TestCopyUnixpacket(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
socket := filepath.Join(tmpDir, "unixsock")
|
||||
|
||||
// Start a unixpacket server.
|
||||
addr := &net.UnixAddr{
|
||||
Name: socket,
|
||||
Net: "unixpacket",
|
||||
}
|
||||
server, err := net.ListenUnix("unixpacket", addr)
|
||||
if err != nil {
|
||||
t.Skipf("skipping test because opening a unixpacket socket failed: %v", err)
|
||||
}
|
||||
|
||||
// Start a goroutine for the server to accept one connection
|
||||
// and read all the data sent on the connection,
|
||||
// reporting the number of bytes read on ch.
|
||||
ch := make(chan int, 1)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
tot := 0
|
||||
defer func() {
|
||||
ch <- tot
|
||||
}()
|
||||
|
||||
serverConn, err := server.Accept()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
buf := make([]byte, 1024)
|
||||
for {
|
||||
n, err := serverConn.Read(buf)
|
||||
tot += n
|
||||
if err == io.EOF {
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
clientConn, err := net.DialUnix("unixpacket", nil, addr)
|
||||
if err != nil {
|
||||
// Leaves the server goroutine hanging. Oh well.
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
defer wg.Wait()
|
||||
defer clientConn.Close()
|
||||
|
||||
const data = "data"
|
||||
r := bufio.NewReader(strings.NewReader(data))
|
||||
n, err := io.Copy(clientConn, r)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if n != int64(len(data)) {
|
||||
t.Errorf("io.Copy returned %d, want %d", n, len(data))
|
||||
}
|
||||
|
||||
clientConn.Close()
|
||||
tot := <-ch
|
||||
|
||||
if tot != len(data) {
|
||||
t.Errorf("server read %d, want %d", tot, len(data))
|
||||
}
|
||||
}
|
||||
@@ -260,11 +260,8 @@ func (s *Scanner) setErr(err error) {
}
}

// Buffer controls memory allocation by the Scanner.
// It sets the initial buffer to use when scanning
// Buffer sets the initial buffer to use when scanning
// and the maximum size of buffer that may be allocated during scanning.
// The contents of the buffer are ignored.
//
// The maximum token size must be less than the larger of max and cap(buf).
// If max <= cap(buf), [Scanner.Scan] will use this buffer only and do no allocation.
//

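Both wordings of the Buffer documentation above describe the same contract: Scan starts from the supplied buffer and may grow it up to max, and if max <= cap(buf) it allocates nothing further. A short usage sketch with invented sizes:

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	const maxTokenSize = 64 * 1024 // invented limit

	sc := bufio.NewScanner(strings.NewReader("one\ntwo\nthree\n"))
	// Start from a small buffer but allow tokens up to maxTokenSize.
	sc.Buffer(make([]byte, 0, 4096), maxTokenSize)

	for sc.Scan() {
		fmt.Println(sc.Text())
	}
	if err := sc.Err(); err != nil {
		fmt.Println("scan error:", err)
	}
}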
@@ -41,7 +41,7 @@ GOROOT="$(cd .. && pwd)"

gettargets() {
../bin/go tool dist list | sed -e 's|/|-|' |
grep -E -v '^(android|ios)' # need C toolchain even for cross-compiling
egrep -v '^(android|ios)' # need C toolchain even for cross-compiling
echo linux-arm-arm5
}

Some files were not shown because too many files have changed in this diff.