mirror of
https://github.com/golang/go.git
synced 2026-01-30 23:52:05 +03:00
Compare commits
91 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
7a2cfb70b0 | ||
|
|
c72a2bad68 | ||
|
|
2c0a0fc6b9 | ||
|
|
c855149768 | ||
|
|
ec6e84df74 | ||
|
|
15e01a2e43 | ||
|
|
45aade7f1e | ||
|
|
c01c4d41d6 | ||
|
|
25177ecde0 | ||
|
|
e4772831d3 | ||
|
|
9facf1f2c2 | ||
|
|
0fa31cb69f | ||
|
|
d89fda21d5 | ||
|
|
acde84cf1b | ||
|
|
c57e2bd22c | ||
|
|
2aaa388971 | ||
|
|
22fdd35c24 | ||
|
|
a991f9c34d | ||
|
|
6644ed63b1 | ||
|
|
ab44565bcd | ||
|
|
9cbbf5e0f4 | ||
|
|
d04e3cbc92 | ||
|
|
bb8230f805 | ||
|
|
fdb8413fe5 | ||
|
|
1dde0b4844 | ||
|
|
3417000c69 | ||
|
|
1576793c51 | ||
|
|
59b7d40774 | ||
|
|
69c8cfe29b | ||
|
|
194de8fbfa | ||
|
|
5164a865e3 | ||
|
|
25f042daec | ||
|
|
be062b7f61 | ||
|
|
d8adc6c4c7 | ||
|
|
847cb6f9ca | ||
|
|
777f43ab27 | ||
|
|
3726f07c46 | ||
|
|
c390a1c22e | ||
|
|
1207de4f6c | ||
|
|
a0d15cb9c8 | ||
|
|
958f3a0309 | ||
|
|
6ba3a8a6ba | ||
|
|
5472853843 | ||
|
|
cfe0ae0b70 | ||
|
|
58babf6e0b | ||
|
|
8d79bf799b | ||
|
|
35c010ad6d | ||
|
|
6495ce0495 | ||
|
|
7fc8312673 | ||
|
|
cc16cdf48f | ||
|
|
9563300f6e | ||
|
|
f8080edefd | ||
|
|
ed07b321ae | ||
|
|
3b2e846e11 | ||
|
|
fbddfae62f | ||
|
|
c8c6f9abfb | ||
|
|
a74951c5af | ||
|
|
e6598e7baa | ||
|
|
82575f76b8 | ||
|
|
a886959aa2 | ||
|
|
80ff7cd35a | ||
|
|
69234ded30 | ||
|
|
032ac075c2 | ||
|
|
fa8ff1a46d | ||
|
|
53487e5477 | ||
|
|
3d1f1f27cf | ||
|
|
6de5a7180c | ||
|
|
9625a7faae | ||
|
|
9c939a1e60 | ||
|
|
7afe17bbdb | ||
|
|
8002845759 | ||
|
|
9166d2feec | ||
|
|
76346b3543 | ||
|
|
3c9340557c | ||
|
|
dbecb416d1 | ||
|
|
6885bad7dd | ||
|
|
ec7d6094e6 | ||
|
|
63b0f805cd | ||
|
|
7adb012205 | ||
|
|
c9940fe2a9 | ||
|
|
3509415eca | ||
|
|
559c77592f | ||
|
|
f5e4e45ef7 | ||
|
|
30b6fd60a6 | ||
|
|
7e4d6c2bcb | ||
|
|
8bd4ed6cbb | ||
|
|
7dff7439dc | ||
|
|
62c3a6350b | ||
|
|
eba9e08766 | ||
|
|
f3bdcda88a | ||
|
|
362f22d2d2 |
@@ -1,7 +1,7 @@
|
||||
name: Language Change Proposals
|
||||
description: Changes to the language
|
||||
labels: ["Proposal", "LanguageChange", "LanguageChangeReview"]
|
||||
title: "proposal: spec: proposal title"
|
||||
labels: ["Proposal", "v2", "LanguageChange"]
|
||||
title: "proposal: Go 2: proposal title"
|
||||
|
||||
|
||||
body:
|
||||
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -30,7 +30,6 @@ _testmain.go
|
||||
/misc/cgo/testso/main
|
||||
/pkg/
|
||||
/src/*.*/
|
||||
/src/_artifacts/
|
||||
/src/cmd/cgo/zdefaultcc.go
|
||||
/src/cmd/dist/dist
|
||||
/src/cmd/go/internal/cfg/zdefaultcc.go
|
||||
@@ -38,7 +37,7 @@ _testmain.go
|
||||
/src/go/build/zcgo.go
|
||||
/src/go/doc/headscan
|
||||
/src/internal/buildcfg/zbootstrap.go
|
||||
/src/internal/runtime/sys/zversion.go
|
||||
/src/runtime/internal/sys/zversion.go
|
||||
/src/unicode/maketables
|
||||
/src/time/tzdata/zzipdata.go
|
||||
/test.out
|
||||
|
||||
4
LICENSE
4
LICENSE
@@ -1,4 +1,4 @@
|
||||
Copyright 2009 The Go Authors.
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google LLC nor the names of its
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
|
||||
223
api/go1.24.txt
223
api/go1.24.txt
@@ -1,223 +0,0 @@
|
||||
pkg bytes, func FieldsFuncSeq([]uint8, func(int32) bool) iter.Seq[[]uint8] #61901
|
||||
pkg bytes, func FieldsSeq([]uint8) iter.Seq[[]uint8] #61901
|
||||
pkg bytes, func Lines([]uint8) iter.Seq[[]uint8] #61901
|
||||
pkg bytes, func SplitAfterSeq([]uint8, []uint8) iter.Seq[[]uint8] #61901
|
||||
pkg bytes, func SplitSeq([]uint8, []uint8) iter.Seq[[]uint8] #61901
|
||||
pkg crypto/cipher, func NewCFBDecrypter //deprecated #69445
|
||||
pkg crypto/cipher, func NewCFBEncrypter //deprecated #69445
|
||||
pkg crypto/cipher, func NewGCMWithRandomNonce(Block) (AEAD, error) #69981
|
||||
pkg crypto/cipher, func NewOFB //deprecated #69445
|
||||
pkg crypto/fips140, func Enabled() bool #70123
|
||||
pkg crypto/hkdf, func Expand[$0 hash.Hash](func() $0, []uint8, string, int) ([]uint8, error) #61477
|
||||
pkg crypto/hkdf, func Extract[$0 hash.Hash](func() $0, []uint8, []uint8) ([]uint8, error) #61477
|
||||
pkg crypto/hkdf, func Key[$0 hash.Hash](func() $0, []uint8, []uint8, string, int) ([]uint8, error) #61477
|
||||
pkg crypto/mlkem, const CiphertextSize1024 = 1568 #70122
|
||||
pkg crypto/mlkem, const CiphertextSize1024 ideal-int #70122
|
||||
pkg crypto/mlkem, const CiphertextSize768 = 1088 #70122
|
||||
pkg crypto/mlkem, const CiphertextSize768 ideal-int #70122
|
||||
pkg crypto/mlkem, const EncapsulationKeySize1024 = 1568 #70122
|
||||
pkg crypto/mlkem, const EncapsulationKeySize1024 ideal-int #70122
|
||||
pkg crypto/mlkem, const EncapsulationKeySize768 = 1184 #70122
|
||||
pkg crypto/mlkem, const EncapsulationKeySize768 ideal-int #70122
|
||||
pkg crypto/mlkem, const SeedSize = 64 #70122
|
||||
pkg crypto/mlkem, const SeedSize ideal-int #70122
|
||||
pkg crypto/mlkem, const SharedKeySize = 32 #70122
|
||||
pkg crypto/mlkem, const SharedKeySize ideal-int #70122
|
||||
pkg crypto/mlkem, func GenerateKey1024() (*DecapsulationKey1024, error) #70122
|
||||
pkg crypto/mlkem, func GenerateKey768() (*DecapsulationKey768, error) #70122
|
||||
pkg crypto/mlkem, func NewDecapsulationKey1024([]uint8) (*DecapsulationKey1024, error) #70122
|
||||
pkg crypto/mlkem, func NewDecapsulationKey768([]uint8) (*DecapsulationKey768, error) #70122
|
||||
pkg crypto/mlkem, func NewEncapsulationKey1024([]uint8) (*EncapsulationKey1024, error) #70122
|
||||
pkg crypto/mlkem, func NewEncapsulationKey768([]uint8) (*EncapsulationKey768, error) #70122
|
||||
pkg crypto/mlkem, method (*DecapsulationKey1024) Bytes() []uint8 #70122
|
||||
pkg crypto/mlkem, method (*DecapsulationKey1024) Decapsulate([]uint8) ([]uint8, error) #70122
|
||||
pkg crypto/mlkem, method (*DecapsulationKey1024) EncapsulationKey() *EncapsulationKey1024 #70122
|
||||
pkg crypto/mlkem, method (*DecapsulationKey768) Bytes() []uint8 #70122
|
||||
pkg crypto/mlkem, method (*DecapsulationKey768) Decapsulate([]uint8) ([]uint8, error) #70122
|
||||
pkg crypto/mlkem, method (*DecapsulationKey768) EncapsulationKey() *EncapsulationKey768 #70122
|
||||
pkg crypto/mlkem, method (*EncapsulationKey1024) Bytes() []uint8 #70122
|
||||
pkg crypto/mlkem, method (*EncapsulationKey1024) Encapsulate() ([]uint8, []uint8) #70122
|
||||
pkg crypto/mlkem, method (*EncapsulationKey768) Bytes() []uint8 #70122
|
||||
pkg crypto/mlkem, method (*EncapsulationKey768) Encapsulate() ([]uint8, []uint8) #70122
|
||||
pkg crypto/mlkem, type DecapsulationKey1024 struct #70122
|
||||
pkg crypto/mlkem, type DecapsulationKey768 struct #70122
|
||||
pkg crypto/mlkem, type EncapsulationKey1024 struct #70122
|
||||
pkg crypto/mlkem, type EncapsulationKey768 struct #70122
|
||||
pkg crypto/pbkdf2, func Key[$0 hash.Hash](func() $0, string, []uint8, int, int) ([]uint8, error) #69488
|
||||
pkg crypto/rand, func Text() string #67057
|
||||
pkg crypto/sha3, func New224() *SHA3 #69982
|
||||
pkg crypto/sha3, func New256() *SHA3 #69982
|
||||
pkg crypto/sha3, func New384() *SHA3 #69982
|
||||
pkg crypto/sha3, func New512() *SHA3 #69982
|
||||
pkg crypto/sha3, func NewCSHAKE128([]uint8, []uint8) *SHAKE #69982
|
||||
pkg crypto/sha3, func NewCSHAKE256([]uint8, []uint8) *SHAKE #69982
|
||||
pkg crypto/sha3, func NewSHAKE128() *SHAKE #69982
|
||||
pkg crypto/sha3, func NewSHAKE256() *SHAKE #69982
|
||||
pkg crypto/sha3, func Sum224([]uint8) [28]uint8 #69982
|
||||
pkg crypto/sha3, func Sum256([]uint8) [32]uint8 #69982
|
||||
pkg crypto/sha3, func Sum384([]uint8) [48]uint8 #69982
|
||||
pkg crypto/sha3, func Sum512([]uint8) [64]uint8 #69982
|
||||
pkg crypto/sha3, func SumSHAKE128([]uint8, int) []uint8 #69982
|
||||
pkg crypto/sha3, func SumSHAKE256([]uint8, int) []uint8 #69982
|
||||
pkg crypto/sha3, method (*SHA3) AppendBinary([]uint8) ([]uint8, error) #69982
|
||||
pkg crypto/sha3, method (*SHA3) BlockSize() int #69982
|
||||
pkg crypto/sha3, method (*SHA3) MarshalBinary() ([]uint8, error) #69982
|
||||
pkg crypto/sha3, method (*SHA3) Reset() #69982
|
||||
pkg crypto/sha3, method (*SHA3) Size() int #69982
|
||||
pkg crypto/sha3, method (*SHA3) Sum([]uint8) []uint8 #69982
|
||||
pkg crypto/sha3, method (*SHA3) UnmarshalBinary([]uint8) error #69982
|
||||
pkg crypto/sha3, method (*SHA3) Write([]uint8) (int, error) #69982
|
||||
pkg crypto/sha3, method (*SHAKE) AppendBinary([]uint8) ([]uint8, error) #69982
|
||||
pkg crypto/sha3, method (*SHAKE) BlockSize() int #69982
|
||||
pkg crypto/sha3, method (*SHAKE) MarshalBinary() ([]uint8, error) #69982
|
||||
pkg crypto/sha3, method (*SHAKE) Read([]uint8) (int, error) #69982
|
||||
pkg crypto/sha3, method (*SHAKE) Reset() #69982
|
||||
pkg crypto/sha3, method (*SHAKE) UnmarshalBinary([]uint8) error #69982
|
||||
pkg crypto/sha3, method (*SHAKE) Write([]uint8) (int, error) #69982
|
||||
pkg crypto/sha3, type SHA3 struct #69982
|
||||
pkg crypto/sha3, type SHAKE struct #69982
|
||||
pkg crypto/subtle, func WithDataIndependentTiming(func()) #66450
|
||||
pkg crypto/tls, const X25519MLKEM768 = 4588 #69985
|
||||
pkg crypto/tls, const X25519MLKEM768 CurveID #69985
|
||||
pkg crypto/tls, type ClientHelloInfo struct, Extensions []uint16 #32936
|
||||
pkg crypto/tls, type Config struct, EncryptedClientHelloKeys []EncryptedClientHelloKey #68500
|
||||
pkg crypto/tls, type EncryptedClientHelloKey struct #68500
|
||||
pkg crypto/tls, type EncryptedClientHelloKey struct, Config []uint8 #68500
|
||||
pkg crypto/tls, type EncryptedClientHelloKey struct, PrivateKey []uint8 #68500
|
||||
pkg crypto/tls, type EncryptedClientHelloKey struct, SendAsRetry bool #68500
|
||||
pkg crypto/x509, const NoValidChains = 10 #68484
|
||||
pkg crypto/x509, const NoValidChains InvalidReason #68484
|
||||
pkg crypto/x509, method (OID) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg crypto/x509, method (OID) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg crypto/x509, type Certificate struct, InhibitAnyPolicy int #68484
|
||||
pkg crypto/x509, type Certificate struct, InhibitAnyPolicyZero bool #68484
|
||||
pkg crypto/x509, type Certificate struct, InhibitPolicyMapping int #68484
|
||||
pkg crypto/x509, type Certificate struct, InhibitPolicyMappingZero bool #68484
|
||||
pkg crypto/x509, type Certificate struct, PolicyMappings []PolicyMapping #68484
|
||||
pkg crypto/x509, type Certificate struct, RequireExplicitPolicy int #68484
|
||||
pkg crypto/x509, type Certificate struct, RequireExplicitPolicyZero bool #68484
|
||||
pkg crypto/x509, type PolicyMapping struct #68484
|
||||
pkg crypto/x509, type PolicyMapping struct, IssuerDomainPolicy OID #68484
|
||||
pkg crypto/x509, type PolicyMapping struct, SubjectDomainPolicy OID #68484
|
||||
pkg crypto/x509, type VerifyOptions struct, CertificatePolicies []OID #68484
|
||||
pkg debug/elf, const VER_FLG_BASE = 1 #63952
|
||||
pkg debug/elf, const VER_FLG_BASE DynamicVersionFlag #63952
|
||||
pkg debug/elf, const VER_FLG_INFO = 4 #63952
|
||||
pkg debug/elf, const VER_FLG_INFO DynamicVersionFlag #63952
|
||||
pkg debug/elf, const VER_FLG_WEAK = 2 #63952
|
||||
pkg debug/elf, const VER_FLG_WEAK DynamicVersionFlag #63952
|
||||
pkg debug/elf, method (*File) DynamicVersionNeeds() ([]DynamicVersionNeed, error) #63952
|
||||
pkg debug/elf, method (*File) DynamicVersions() ([]DynamicVersion, error) #63952
|
||||
pkg debug/elf, type DynamicVersion struct #63952
|
||||
pkg debug/elf, type DynamicVersion struct, Deps []string #63952
|
||||
pkg debug/elf, type DynamicVersion struct, Flags DynamicVersionFlag #63952
|
||||
pkg debug/elf, type DynamicVersion struct, Name string #63952
|
||||
pkg debug/elf, type DynamicVersion struct, Index uint16 #63952
|
||||
pkg debug/elf, type DynamicVersionDep struct #63952
|
||||
pkg debug/elf, type DynamicVersionDep struct, Dep string #63952
|
||||
pkg debug/elf, type DynamicVersionDep struct, Flags DynamicVersionFlag #63952
|
||||
pkg debug/elf, type DynamicVersionDep struct, Index uint16 #63952
|
||||
pkg debug/elf, type DynamicVersionFlag uint16 #63952
|
||||
pkg debug/elf, type DynamicVersionNeed struct #63952
|
||||
pkg debug/elf, type DynamicVersionNeed struct, Name string #63952
|
||||
pkg debug/elf, type DynamicVersionNeed struct, Needs []DynamicVersionDep #63952
|
||||
pkg debug/elf, type Symbol struct, HasVersion bool #63952
|
||||
pkg debug/elf, type Symbol struct, VersionIndex VersionIndex #63952
|
||||
pkg debug/elf, method (VersionIndex) Index() uint16 #63952
|
||||
pkg debug/elf, method (VersionIndex) IsHidden() bool #63952
|
||||
pkg debug/elf, type VersionIndex uint16 #63952
|
||||
pkg encoding, type BinaryAppender interface { AppendBinary } #62384
|
||||
pkg encoding, type BinaryAppender interface, AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg encoding, type TextAppender interface { AppendText } #62384
|
||||
pkg encoding, type TextAppender interface, AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg go/types, method (*Interface) EmbeddedTypes() iter.Seq[Type] #66626
|
||||
pkg go/types, method (*Interface) ExplicitMethods() iter.Seq[*Func] #66626
|
||||
pkg go/types, method (*Interface) Methods() iter.Seq[*Func] #66626
|
||||
pkg go/types, method (*MethodSet) Methods() iter.Seq[*Selection] #66626
|
||||
pkg go/types, method (*Named) Methods() iter.Seq[*Func] #66626
|
||||
pkg go/types, method (*Scope) Children() iter.Seq[*Scope] #66626
|
||||
pkg go/types, method (*Struct) Fields() iter.Seq[*Var] #66626
|
||||
pkg go/types, method (*Tuple) Variables() iter.Seq[*Var] #66626
|
||||
pkg go/types, method (*TypeList) Types() iter.Seq[Type] #66626
|
||||
pkg go/types, method (*TypeParamList) TypeParams() iter.Seq[*TypeParam] #66626
|
||||
pkg go/types, method (*Union) Terms() iter.Seq[*Term] #66626
|
||||
pkg hash/maphash, func Comparable[$0 comparable](Seed, $0) uint64 #54670
|
||||
pkg hash/maphash, func WriteComparable[$0 comparable](*Hash, $0) #54670
|
||||
pkg log/slog, method (*LevelVar) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg log/slog, method (Level) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg log/slog, var DiscardHandler Handler #62005
|
||||
pkg math/big, method (*Float) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg math/big, method (*Int) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg math/big, method (*Rat) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg math/rand/v2, method (*ChaCha8) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg math/rand/v2, method (*PCG) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg net, method (IP) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg net/http, method (*Protocols) SetHTTP1(bool) #67814
|
||||
pkg net/http, method (*Protocols) SetHTTP2(bool) #67814
|
||||
pkg net/http, method (*Protocols) SetUnencryptedHTTP2(bool) #67816
|
||||
pkg net/http, method (Protocols) HTTP1() bool #67814
|
||||
pkg net/http, method (Protocols) HTTP2() bool #67814
|
||||
pkg net/http, method (Protocols) String() string #67814
|
||||
pkg net/http, method (Protocols) UnencryptedHTTP2() bool #67816
|
||||
pkg net/http, type HTTP2Config struct #67813
|
||||
pkg net/http, type HTTP2Config struct, CountError func(string) #67813
|
||||
pkg net/http, type HTTP2Config struct, MaxConcurrentStreams int #67813
|
||||
pkg net/http, type HTTP2Config struct, MaxDecoderHeaderTableSize int #67813
|
||||
pkg net/http, type HTTP2Config struct, MaxEncoderHeaderTableSize int #67813
|
||||
pkg net/http, type HTTP2Config struct, MaxReadFrameSize int #67813
|
||||
pkg net/http, type HTTP2Config struct, MaxReceiveBufferPerConnection int #67813
|
||||
pkg net/http, type HTTP2Config struct, MaxReceiveBufferPerStream int #67813
|
||||
pkg net/http, type HTTP2Config struct, PermitProhibitedCipherSuites bool #67813
|
||||
pkg net/http, type HTTP2Config struct, PingTimeout time.Duration #67813
|
||||
pkg net/http, type HTTP2Config struct, SendPingTimeout time.Duration #67813
|
||||
pkg net/http, type HTTP2Config struct, WriteByteTimeout time.Duration #67813
|
||||
pkg net/http, type Protocols struct #67814
|
||||
pkg net/http, type Server struct, HTTP2 *HTTP2Config #67813
|
||||
pkg net/http, type Server struct, Protocols *Protocols #67814
|
||||
pkg net/http, type Transport struct, HTTP2 *HTTP2Config #67813
|
||||
pkg net/http, type Transport struct, Protocols *Protocols #67814
|
||||
pkg net/netip, method (Addr) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg net/netip, method (Addr) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg net/netip, method (AddrPort) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg net/netip, method (AddrPort) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg net/netip, method (Prefix) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg net/netip, method (Prefix) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg net/url, method (*URL) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg os, func OpenInRoot(string, string) (*File, error) #67002
|
||||
pkg os, func OpenRoot(string) (*Root, error) #67002
|
||||
pkg os, method (*Root) Close() error #67002
|
||||
pkg os, method (*Root) Create(string) (*File, error) #67002
|
||||
pkg os, method (*Root) FS() fs.FS #67002
|
||||
pkg os, method (*Root) Lstat(string) (fs.FileInfo, error) #67002
|
||||
pkg os, method (*Root) Mkdir(string, fs.FileMode) error #67002
|
||||
pkg os, method (*Root) Name() string #67002
|
||||
pkg os, method (*Root) Open(string) (*File, error) #67002
|
||||
pkg os, method (*Root) OpenFile(string, int, fs.FileMode) (*File, error) #67002
|
||||
pkg os, method (*Root) OpenRoot(string) (*Root, error) #67002
|
||||
pkg os, method (*Root) Remove(string) error #67002
|
||||
pkg os, method (*Root) Stat(string) (fs.FileInfo, error) #67002
|
||||
pkg os, type Root struct #67002
|
||||
pkg regexp, method (*Regexp) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg runtime, func AddCleanup[$0 interface{}, $1 interface{}](*$0, func($1), $1) Cleanup #67535
|
||||
pkg runtime, func GOROOT //deprecated #51473
|
||||
pkg runtime, method (Cleanup) Stop() #67535
|
||||
pkg runtime, type Cleanup struct #67535
|
||||
pkg strings, func FieldsFuncSeq(string, func(int32) bool) iter.Seq[string] #61901
|
||||
pkg strings, func FieldsSeq(string) iter.Seq[string] #61901
|
||||
pkg strings, func Lines(string) iter.Seq[string] #61901
|
||||
pkg strings, func SplitAfterSeq(string, string) iter.Seq[string] #61901
|
||||
pkg strings, func SplitSeq(string, string) iter.Seq[string] #61901
|
||||
pkg testing, method (*B) Chdir(string) #62516
|
||||
pkg testing, method (*B) Context() context.Context #36532
|
||||
pkg testing, method (*B) Loop() bool #61515
|
||||
pkg testing, method (*F) Chdir(string) #62516
|
||||
pkg testing, method (*F) Context() context.Context #36532
|
||||
pkg testing, method (*T) Chdir(string) #62516
|
||||
pkg testing, method (*T) Context() context.Context #36532
|
||||
pkg testing, type TB interface, Chdir(string) #62516
|
||||
pkg testing, type TB interface, Context() context.Context #36532
|
||||
pkg time, method (Time) AppendBinary([]uint8) ([]uint8, error) #62384
|
||||
pkg time, method (Time) AppendText([]uint8) ([]uint8, error) #62384
|
||||
pkg weak, func Make[$0 interface{}](*$0) Pointer[$0] #67552
|
||||
pkg weak, method (Pointer[$0]) Value() *$0 #67552
|
||||
pkg weak, type Pointer[$0 interface{}] struct #67552
|
||||
111
api/go1.25.txt
111
api/go1.25.txt
@@ -1,111 +0,0 @@
|
||||
pkg crypto, func SignMessage(Signer, io.Reader, []uint8, SignerOpts) ([]uint8, error) #63405
|
||||
pkg crypto, type MessageSigner interface { Public, Sign, SignMessage } #63405
|
||||
pkg crypto, type MessageSigner interface, Public() PublicKey #63405
|
||||
pkg crypto, type MessageSigner interface, Sign(io.Reader, []uint8, SignerOpts) ([]uint8, error) #63405
|
||||
pkg crypto, type MessageSigner interface, SignMessage(io.Reader, []uint8, SignerOpts) ([]uint8, error) #63405
|
||||
pkg crypto/ecdsa, func ParseRawPrivateKey(elliptic.Curve, []uint8) (*PrivateKey, error) #63963
|
||||
pkg crypto/ecdsa, func ParseUncompressedPublicKey(elliptic.Curve, []uint8) (*PublicKey, error) #63963
|
||||
pkg crypto/ecdsa, method (*PrivateKey) Bytes() ([]uint8, error) #63963
|
||||
pkg crypto/ecdsa, method (*PublicKey) Bytes() ([]uint8, error) #63963
|
||||
pkg crypto/sha3, method (*SHA3) Clone() (hash.Cloner, error) #69521
|
||||
pkg crypto/tls, type Config struct, GetEncryptedClientHelloKeys func(*ClientHelloInfo) ([]EncryptedClientHelloKey, error) #71920
|
||||
pkg crypto/tls, type ConnectionState struct, CurveID CurveID #67516
|
||||
pkg debug/elf, const PT_RISCV_ATTRIBUTES = 1879048195 #72843
|
||||
pkg debug/elf, const PT_RISCV_ATTRIBUTES ProgType #72843
|
||||
pkg debug/elf, const SHT_RISCV_ATTRIBUTES = 1879048195 #72843
|
||||
pkg debug/elf, const SHT_RISCV_ATTRIBUTES SectionType #72843
|
||||
pkg go/ast, const FilterFuncDuplicates //deprecated #73088
|
||||
pkg go/ast, const FilterImportDuplicates //deprecated #73088
|
||||
pkg go/ast, const FilterUnassociatedComments //deprecated #73088
|
||||
pkg go/ast, func FilterPackage //deprecated #73088
|
||||
pkg go/ast, func MergePackageFiles //deprecated #73088
|
||||
pkg go/ast, func PackageExports //deprecated #73088
|
||||
pkg go/ast, func PreorderStack(Node, []Node, func(Node, []Node) bool) #73319
|
||||
pkg go/ast, type MergeMode //deprecated #73088
|
||||
pkg go/parser, func ParseDir //deprecated #71122
|
||||
pkg go/token, method (*FileSet) AddExistingFiles(...*File) #73205
|
||||
pkg go/types, const FieldVar = 6 #70250
|
||||
pkg go/types, const FieldVar VarKind #70250
|
||||
pkg go/types, const LocalVar = 2 #70250
|
||||
pkg go/types, const LocalVar VarKind #70250
|
||||
pkg go/types, const PackageVar = 1 #70250
|
||||
pkg go/types, const PackageVar VarKind #70250
|
||||
pkg go/types, const ParamVar = 4 #70250
|
||||
pkg go/types, const ParamVar VarKind #70250
|
||||
pkg go/types, const RecvVar = 3 #70250
|
||||
pkg go/types, const RecvVar VarKind #70250
|
||||
pkg go/types, const ResultVar = 5 #70250
|
||||
pkg go/types, const ResultVar VarKind #70250
|
||||
pkg go/types, func LookupSelection(Type, bool, *Package, string) (Selection, bool) #70737
|
||||
pkg go/types, method (*Var) Kind() VarKind #70250
|
||||
pkg go/types, method (*Var) SetKind(VarKind) #70250
|
||||
pkg go/types, method (VarKind) String() string #70250
|
||||
pkg go/types, type VarKind uint8 #70250
|
||||
pkg hash, type Cloner interface { BlockSize, Clone, Reset, Size, Sum, Write } #69521
|
||||
pkg hash, type Cloner interface, BlockSize() int #69521
|
||||
pkg hash, type Cloner interface, Clone() (Cloner, error) #69521
|
||||
pkg hash, type Cloner interface, Reset() #69521
|
||||
pkg hash, type Cloner interface, Size() int #69521
|
||||
pkg hash, type Cloner interface, Sum([]uint8) []uint8 #69521
|
||||
pkg hash, type Cloner interface, Write([]uint8) (int, error) #69521
|
||||
pkg hash, type XOF interface { BlockSize, Read, Reset, Write } #69518
|
||||
pkg hash, type XOF interface, BlockSize() int #69518
|
||||
pkg hash, type XOF interface, Read([]uint8) (int, error) #69518
|
||||
pkg hash, type XOF interface, Reset() #69518
|
||||
pkg hash, type XOF interface, Write([]uint8) (int, error) #69518
|
||||
pkg hash/maphash, method (*Hash) Clone() (hash.Cloner, error) #69521
|
||||
pkg io/fs, func Lstat(FS, string) (FileInfo, error) #49580
|
||||
pkg io/fs, func ReadLink(FS, string) (string, error) #49580
|
||||
pkg io/fs, type ReadLinkFS interface { Lstat, Open, ReadLink } #49580
|
||||
pkg io/fs, type ReadLinkFS interface, Lstat(string) (FileInfo, error) #49580
|
||||
pkg io/fs, type ReadLinkFS interface, Open(string) (File, error) #49580
|
||||
pkg io/fs, type ReadLinkFS interface, ReadLink(string) (string, error) #49580
|
||||
pkg log/slog, func GroupAttrs(string, ...Attr) Attr #66365
|
||||
pkg log/slog, method (Record) Source() *Source #70280
|
||||
pkg mime/multipart, func FileContentDisposition(string, string) string #46771
|
||||
pkg net/http, func NewCrossOriginProtection() *CrossOriginProtection #73626
|
||||
pkg net/http, method (*CrossOriginProtection) AddInsecureBypassPattern(string) #73626
|
||||
pkg net/http, method (*CrossOriginProtection) AddTrustedOrigin(string) error #73626
|
||||
pkg net/http, method (*CrossOriginProtection) Check(*Request) error #73626
|
||||
pkg net/http, method (*CrossOriginProtection) Handler(Handler) Handler #73626
|
||||
pkg net/http, method (*CrossOriginProtection) SetDenyHandler(Handler) #73626
|
||||
pkg net/http, type CrossOriginProtection struct #73626
|
||||
pkg os, method (*Root) Chmod(string, fs.FileMode) error #67002
|
||||
pkg os, method (*Root) Chown(string, int, int) error #67002
|
||||
pkg os, method (*Root) Chtimes(string, time.Time, time.Time) error #67002
|
||||
pkg os, method (*Root) Lchown(string, int, int) error #67002
|
||||
pkg os, method (*Root) Link(string, string) error #67002
|
||||
pkg os, method (*Root) MkdirAll(string, fs.FileMode) error #67002
|
||||
pkg os, method (*Root) ReadFile(string) ([]uint8, error) #73126
|
||||
pkg os, method (*Root) Readlink(string) (string, error) #67002
|
||||
pkg os, method (*Root) RemoveAll(string) error #67002
|
||||
pkg os, method (*Root) Rename(string, string) error #67002
|
||||
pkg os, method (*Root) Symlink(string, string) error #67002
|
||||
pkg os, method (*Root) WriteFile(string, []uint8, fs.FileMode) error #73126
|
||||
pkg reflect, func TypeAssert[$0 interface{}](Value) ($0, bool) #62121
|
||||
pkg runtime, func SetDefaultGOMAXPROCS() #73193
|
||||
pkg runtime/trace, func NewFlightRecorder(FlightRecorderConfig) *FlightRecorder #63185
|
||||
pkg runtime/trace, method (*FlightRecorder) Enabled() bool #63185
|
||||
pkg runtime/trace, method (*FlightRecorder) Start() error #63185
|
||||
pkg runtime/trace, method (*FlightRecorder) Stop() #63185
|
||||
pkg runtime/trace, method (*FlightRecorder) WriteTo(io.Writer) (int64, error) #63185
|
||||
pkg runtime/trace, type FlightRecorder struct #63185
|
||||
pkg runtime/trace, type FlightRecorderConfig struct #63185
|
||||
pkg runtime/trace, type FlightRecorderConfig struct, MaxBytes uint64 #63185
|
||||
pkg runtime/trace, type FlightRecorderConfig struct, MinAge time.Duration #63185
|
||||
pkg sync, method (*WaitGroup) Go(func()) #63796
|
||||
pkg testing, method (*B) Attr(string, string) #43936
|
||||
pkg testing, method (*B) Output() io.Writer #59928
|
||||
pkg testing, method (*F) Attr(string, string) #43936
|
||||
pkg testing, method (*F) Output() io.Writer #59928
|
||||
pkg testing, method (*T) Attr(string, string) #43936
|
||||
pkg testing, method (*T) Output() io.Writer #59928
|
||||
pkg testing, type TB interface, Attr(string, string) #43936
|
||||
pkg testing, type TB interface, Output() io.Writer #59928
|
||||
pkg testing/fstest, method (MapFS) Lstat(string) (fs.FileInfo, error) #49580
|
||||
pkg testing/fstest, method (MapFS) ReadLink(string) (string, error) #49580
|
||||
pkg testing/synctest, func Test(*testing.T, func(*testing.T)) #67434
|
||||
pkg testing/synctest, func Wait() #67434
|
||||
pkg unicode, var CategoryAliases map[string]string #70780
|
||||
pkg unicode, var Cn *RangeTable #70780
|
||||
pkg unicode, var LC *RangeTable #70780
|
||||
180
api/go1.26.txt
180
api/go1.26.txt
@@ -1,180 +0,0 @@
|
||||
pkg bytes, method (*Buffer) Peek(int) ([]uint8, error) #73794
|
||||
pkg crypto, type Decapsulator interface { Decapsulate, Encapsulator } #75300
|
||||
pkg crypto, type Decapsulator interface, Decapsulate([]uint8) ([]uint8, error) #75300
|
||||
pkg crypto, type Decapsulator interface, Encapsulator() Encapsulator #75300
|
||||
pkg crypto, type Encapsulator interface { Bytes, Encapsulate } #75300
|
||||
pkg crypto, type Encapsulator interface, Bytes() []uint8 #75300
|
||||
pkg crypto, type Encapsulator interface, Encapsulate() ([]uint8, []uint8) #75300
|
||||
pkg crypto/ecdh, type KeyExchanger interface { Curve, ECDH, PublicKey } #75300
|
||||
pkg crypto/ecdh, type KeyExchanger interface, Curve() Curve #75300
|
||||
pkg crypto/ecdh, type KeyExchanger interface, ECDH(*PublicKey) ([]uint8, error) #75300
|
||||
pkg crypto/ecdh, type KeyExchanger interface, PublicKey() *PublicKey #75300
|
||||
pkg crypto/ecdsa, type PrivateKey struct, D //deprecated #63963
|
||||
pkg crypto/ecdsa, type PublicKey struct, X //deprecated #63963
|
||||
pkg crypto/ecdsa, type PublicKey struct, Y //deprecated #63963
|
||||
pkg crypto/fips140, func Enforced() bool #74630
|
||||
pkg crypto/fips140, func Version() string #75301
|
||||
pkg crypto/fips140, func WithoutEnforcement(func()) #74630
|
||||
pkg crypto/hpke, func AES128GCM() AEAD #75300
|
||||
pkg crypto/hpke, func AES256GCM() AEAD #75300
|
||||
pkg crypto/hpke, func ChaCha20Poly1305() AEAD #75300
|
||||
pkg crypto/hpke, func DHKEM(ecdh.Curve) KEM #75300
|
||||
pkg crypto/hpke, func ExportOnly() AEAD #75300
|
||||
pkg crypto/hpke, func HKDFSHA256() KDF #75300
|
||||
pkg crypto/hpke, func HKDFSHA384() KDF #75300
|
||||
pkg crypto/hpke, func HKDFSHA512() KDF #75300
|
||||
pkg crypto/hpke, func MLKEM1024() KEM #75300
|
||||
pkg crypto/hpke, func MLKEM1024P384() KEM #75300
|
||||
pkg crypto/hpke, func MLKEM768() KEM #75300
|
||||
pkg crypto/hpke, func MLKEM768P256() KEM #75300
|
||||
pkg crypto/hpke, func MLKEM768X25519() KEM #75300
|
||||
pkg crypto/hpke, func NewAEAD(uint16) (AEAD, error) #75300
|
||||
pkg crypto/hpke, func NewDHKEMPrivateKey(ecdh.KeyExchanger) (PrivateKey, error) #75300
|
||||
pkg crypto/hpke, func NewDHKEMPublicKey(*ecdh.PublicKey) (PublicKey, error) #75300
|
||||
pkg crypto/hpke, func NewHybridPrivateKey(crypto.Decapsulator, ecdh.KeyExchanger) (PrivateKey, error) #75300
|
||||
pkg crypto/hpke, func NewHybridPublicKey(crypto.Encapsulator, *ecdh.PublicKey) (PublicKey, error) #75300
|
||||
pkg crypto/hpke, func NewKDF(uint16) (KDF, error) #75300
|
||||
pkg crypto/hpke, func NewKEM(uint16) (KEM, error) #75300
|
||||
pkg crypto/hpke, func NewMLKEMPrivateKey(crypto.Decapsulator) (PrivateKey, error) #75300
|
||||
pkg crypto/hpke, func NewMLKEMPublicKey(crypto.Encapsulator) (PublicKey, error) #75300
|
||||
pkg crypto/hpke, func NewRecipient([]uint8, PrivateKey, KDF, AEAD, []uint8) (*Recipient, error) #75300
|
||||
pkg crypto/hpke, func NewSender(PublicKey, KDF, AEAD, []uint8) ([]uint8, *Sender, error) #75300
|
||||
pkg crypto/hpke, func Open(PrivateKey, KDF, AEAD, []uint8, []uint8) ([]uint8, error) #75300
|
||||
pkg crypto/hpke, func SHAKE128() KDF #75300
|
||||
pkg crypto/hpke, func SHAKE256() KDF #75300
|
||||
pkg crypto/hpke, func Seal(PublicKey, KDF, AEAD, []uint8, []uint8) ([]uint8, error) #75300
|
||||
pkg crypto/hpke, method (*Recipient) Export(string, int) ([]uint8, error) #75300
|
||||
pkg crypto/hpke, method (*Recipient) Open([]uint8, []uint8) ([]uint8, error) #75300
|
||||
pkg crypto/hpke, method (*Sender) Export(string, int) ([]uint8, error) #75300
|
||||
pkg crypto/hpke, method (*Sender) Seal([]uint8, []uint8) ([]uint8, error) #75300
|
||||
pkg crypto/hpke, type AEAD interface, ID() uint16 #75300
|
||||
pkg crypto/hpke, type AEAD interface, unexported methods #75300
|
||||
pkg crypto/hpke, type KDF interface, ID() uint16 #75300
|
||||
pkg crypto/hpke, type KDF interface, unexported methods #75300
|
||||
pkg crypto/hpke, type KEM interface, DeriveKeyPair([]uint8) (PrivateKey, error) #75300
|
||||
pkg crypto/hpke, type KEM interface, GenerateKey() (PrivateKey, error) #75300
|
||||
pkg crypto/hpke, type KEM interface, ID() uint16 #75300
|
||||
pkg crypto/hpke, type KEM interface, NewPrivateKey([]uint8) (PrivateKey, error) #75300
|
||||
pkg crypto/hpke, type KEM interface, NewPublicKey([]uint8) (PublicKey, error) #75300
|
||||
pkg crypto/hpke, type KEM interface, unexported methods #75300
|
||||
pkg crypto/hpke, type PrivateKey interface, Bytes() ([]uint8, error) #75300
|
||||
pkg crypto/hpke, type PrivateKey interface, KEM() KEM #75300
|
||||
pkg crypto/hpke, type PrivateKey interface, PublicKey() PublicKey #75300
|
||||
pkg crypto/hpke, type PrivateKey interface, unexported methods #75300
|
||||
pkg crypto/hpke, type PublicKey interface, Bytes() []uint8 #75300
|
||||
pkg crypto/hpke, type PublicKey interface, KEM() KEM #75300
|
||||
pkg crypto/hpke, type PublicKey interface, unexported methods #75300
|
||||
pkg crypto/hpke, type Recipient struct #75300
|
||||
pkg crypto/hpke, type Sender struct #75300
|
||||
pkg crypto/mlkem, method (*DecapsulationKey1024) Encapsulator() crypto.Encapsulator #75300
|
||||
pkg crypto/mlkem, method (*DecapsulationKey768) Encapsulator() crypto.Encapsulator #75300
|
||||
pkg crypto/mlkem/mlkemtest, func Encapsulate1024(*mlkem.EncapsulationKey1024, []uint8) ([]uint8, []uint8, error) #73627
|
||||
pkg crypto/mlkem/mlkemtest, func Encapsulate768(*mlkem.EncapsulationKey768, []uint8) ([]uint8, []uint8, error) #73627
|
||||
pkg crypto/rsa, func DecryptPKCS1v15 //deprecated #75302
|
||||
pkg crypto/rsa, func DecryptPKCS1v15SessionKey //deprecated #75302
|
||||
pkg crypto/rsa, func EncryptOAEPWithOptions(io.Reader, *PublicKey, []uint8, *OAEPOptions) ([]uint8, error) #65716
|
||||
pkg crypto/rsa, func EncryptPKCS1v15 //deprecated #75302
|
||||
pkg crypto/rsa, type PKCS1v15DecryptOptions //deprecated #75302
|
||||
pkg crypto/tls, const QUICErrorEvent = 10 #75108
|
||||
pkg crypto/tls, const QUICErrorEvent QUICEventKind #75108
|
||||
pkg crypto/tls, const SecP256r1MLKEM768 = 4587 #71206
|
||||
pkg crypto/tls, const SecP256r1MLKEM768 CurveID #71206
|
||||
pkg crypto/tls, const SecP384r1MLKEM1024 = 4589 #71206
|
||||
pkg crypto/tls, const SecP384r1MLKEM1024 CurveID #71206
|
||||
pkg crypto/tls, type ClientHelloInfo struct, HelloRetryRequest bool #74425
|
||||
pkg crypto/tls, type ConnectionState struct, HelloRetryRequest bool #74425
|
||||
pkg crypto/tls, type QUICEvent struct, Err error #75108
|
||||
pkg crypto/x509, func OIDFromASN1OID(asn1.ObjectIdentifier) (OID, error) #75325
|
||||
pkg crypto/x509, method (ExtKeyUsage) OID() OID #75325
|
||||
pkg crypto/x509, method (ExtKeyUsage) String() string #56866
|
||||
pkg crypto/x509, method (KeyUsage) String() string #56866
|
||||
pkg debug/elf, const R_LARCH_CALL36 = 110 #75562
|
||||
pkg debug/elf, const R_LARCH_CALL36 R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC32 = 13 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC32 R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC64 = 14 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC64 R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC64_HI12 = 118 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC64_HI12 R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC64_LO20 = 117 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC64_LO20 R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC64_PC_HI12 = 114 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC64_PC_HI12 R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC64_PC_LO20 = 113 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC64_PC_LO20 R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC_CALL = 120 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC_CALL R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC_HI20 = 115 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC_HI20 R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC_LD = 119 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC_LD R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC_LO12 = 116 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC_LO12 R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC_PCREL20_S2 = 126 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC_PCREL20_S2 R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC_PC_HI20 = 111 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC_PC_HI20 R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC_PC_LO12 = 112 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_DESC_PC_LO12 R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_GD_PCREL20_S2 = 125 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_GD_PCREL20_S2 R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_LD_PCREL20_S2 = 124 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_LD_PCREL20_S2 R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_LE_ADD_R = 122 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_LE_ADD_R R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_LE_HI20_R = 121 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_LE_HI20_R R_LARCH #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_LE_LO12_R = 123 #75562
|
||||
pkg debug/elf, const R_LARCH_TLS_LE_LO12_R R_LARCH #75562
|
||||
pkg errors, func AsType[$0 error](error) ($0, bool) #51945
|
||||
pkg go/ast, func ParseDirective(token.Pos, string) (Directive, bool) #68021
|
||||
pkg go/ast, method (*Directive) End() token.Pos #68021
|
||||
pkg go/ast, method (*Directive) ParseArgs() ([]DirectiveArg, error) #68021
|
||||
pkg go/ast, method (*Directive) Pos() token.Pos #68021
|
||||
pkg go/ast, type BasicLit struct, ValueEnd token.Pos #76031
|
||||
pkg go/ast, type Directive struct #68021
|
||||
pkg go/ast, type Directive struct, Args string #68021
|
||||
pkg go/ast, type Directive struct, ArgsPos token.Pos #68021
|
||||
pkg go/ast, type Directive struct, Name string #68021
|
||||
pkg go/ast, type Directive struct, Slash token.Pos #68021
|
||||
pkg go/ast, type Directive struct, Tool string #68021
|
||||
pkg go/ast, type DirectiveArg struct #68021
|
||||
pkg go/ast, type DirectiveArg struct, Arg string #68021
|
||||
pkg go/ast, type DirectiveArg struct, Pos token.Pos #68021
|
||||
pkg go/token, method (*File) End() Pos #75849
|
||||
pkg log/slog, func NewMultiHandler(...Handler) *MultiHandler #65954
|
||||
pkg log/slog, method (*MultiHandler) Enabled(context.Context, Level) bool #65954
|
||||
pkg log/slog, method (*MultiHandler) Handle(context.Context, Record) error #65954
|
||||
pkg log/slog, method (*MultiHandler) WithAttrs([]Attr) Handler #65954
|
||||
pkg log/slog, method (*MultiHandler) WithGroup(string) Handler #65954
|
||||
pkg log/slog, type MultiHandler struct #65954
|
||||
pkg net, method (*Dialer) DialIP(context.Context, string, netip.Addr, netip.Addr) (*IPConn, error) #49097
|
||||
pkg net, method (*Dialer) DialTCP(context.Context, string, netip.AddrPort, netip.AddrPort) (*TCPConn, error) #49097
|
||||
pkg net, method (*Dialer) DialUDP(context.Context, string, netip.AddrPort, netip.AddrPort) (*UDPConn, error) #49097
|
||||
pkg net, method (*Dialer) DialUnix(context.Context, string, *UnixAddr, *UnixAddr) (*UnixConn, error) #49097
|
||||
pkg net/http, method (*ClientConn) Available() int #75772
|
||||
pkg net/http, method (*ClientConn) Close() error #75772
|
||||
pkg net/http, method (*ClientConn) Err() error #75772
|
||||
pkg net/http, method (*ClientConn) InFlight() int #75772
|
||||
pkg net/http, method (*ClientConn) Release() #75772
|
||||
pkg net/http, method (*ClientConn) Reserve() error #75772
|
||||
pkg net/http, method (*ClientConn) RoundTrip(*Request) (*Response, error) #75772
|
||||
pkg net/http, method (*ClientConn) SetStateHook(func(*ClientConn)) #75772
|
||||
pkg net/http, method (*Transport) NewClientConn(context.Context, string, string) (*ClientConn, error) #75772
|
||||
pkg net/http, type ClientConn struct #75772
|
||||
pkg net/http, type HTTP2Config struct, StrictMaxConcurrentRequests bool #67813
|
||||
pkg net/http/httputil, type ReverseProxy struct, Director //deprecated #73161
|
||||
pkg net/netip, method (Prefix) Compare(Prefix) int #61642
|
||||
pkg os, method (*Process) WithHandle(func(uintptr)) error #70352
|
||||
pkg os, var ErrNoHandle error #70352
|
||||
pkg reflect, method (Value) Fields() iter.Seq2[StructField, Value] #66631
|
||||
pkg reflect, method (Value) Methods() iter.Seq2[Method, Value] #66631
|
||||
pkg reflect, type Type interface, Fields() iter.Seq[StructField] #66631
|
||||
pkg reflect, type Type interface, Ins() iter.Seq[Type] #66631
|
||||
pkg reflect, type Type interface, Methods() iter.Seq[Method] #66631
|
||||
pkg reflect, type Type interface, Outs() iter.Seq[Type] #66631
|
||||
pkg testing, method (*B) ArtifactDir() string #71287
|
||||
pkg testing, method (*F) ArtifactDir() string #71287
|
||||
pkg testing, method (*T) ArtifactDir() string #71287
|
||||
pkg testing, type TB interface, ArtifactDir() string #71287
|
||||
pkg testing/cryptotest, func SetGlobalRandom(*testing.T, uint64) #70942
|
||||
@@ -1,2 +1,2 @@
|
||||
branch: dev.simd
|
||||
branch: release-branch.go1.23
|
||||
parent-branch: master
|
||||
|
||||
@@ -70,6 +70,6 @@ To begin the next release development cycle, populate the contents of `next`
|
||||
with those of `initial`. From the repo root:
|
||||
|
||||
> cd doc
|
||||
> cp -R initial/ next
|
||||
> cp -r initial/* next
|
||||
|
||||
Then edit `next/1-intro.md` to refer to the next version.
|
||||
|
||||
@@ -1039,12 +1039,6 @@ The value of <code>GOMIPS64</code> environment variable (<code>hardfloat</code>
|
||||
<code>GOMIPS64_hardfloat</code> or <code>GOMIPS64_softfloat</code>.
|
||||
</p>
|
||||
|
||||
<h3 id="riscv64">RISCV64</h3>
|
||||
|
||||
<p>
|
||||
Reference: <a href="/pkg/cmd/internal/obj/riscv">Go RISCV64 Assembly Instructions Reference Manual</a>
|
||||
</p>
|
||||
|
||||
<h3 id="unsupported_opcodes">Unsupported opcodes</h3>
|
||||
|
||||
<p>
|
||||
|
||||
6864
doc/go1.17_spec.html
Normal file
6864
doc/go1.17_spec.html
Normal file
File diff suppressed because it is too large
Load Diff
@@ -82,7 +82,7 @@ while still insisting that races are errors and that tools can diagnose and repo
|
||||
<p>
|
||||
The following formal definition of Go's memory model closely follows
|
||||
the approach presented by Hans-J. Boehm and Sarita V. Adve in
|
||||
“<a href="https://dl.acm.org/doi/10.1145/1375581.1375591">Foundations of the C++ Concurrency Memory Model</a>”,
|
||||
“<a href="https://www.hpl.hp.com/techreports/2008/HPL-2008-56.pdf">Foundations of the C++ Concurrency Memory Model</a>”,
|
||||
published in PLDI 2008.
|
||||
The definition of data-race-free programs and the guarantee of sequential consistency
|
||||
for race-free programs are equivalent to the ones in that work.
|
||||
@@ -231,7 +231,7 @@ do exactly this.
|
||||
|
||||
<p>
|
||||
A read of an array, struct, or complex number
|
||||
may be implemented as a read of each individual sub-value
|
||||
may by implemented as a read of each individual sub-value
|
||||
(array element, struct field, or real/imaginary component),
|
||||
in any order.
|
||||
Similarly, a write of an array, struct, or complex number
|
||||
@@ -453,7 +453,7 @@ crash, or do something else.)
|
||||
</p>
|
||||
|
||||
<p class="rule">
|
||||
The <i>k</i>th receive from a channel with capacity <i>C</i> is synchronized before the completion of the <i>k</i>+<i>C</i>th send on that channel.
|
||||
The <i>k</i>th receive on a channel with capacity <i>C</i> is synchronized before the completion of the <i>k</i>+<i>C</i>th send from that channel completes.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
|
||||
675
doc/go_spec.html
675
doc/go_spec.html
File diff suppressed because it is too large
Load Diff
170
doc/godebug.md
170
doc/godebug.md
@@ -34,7 +34,6 @@ For example, if a Go program is running in an environment that contains
|
||||
|
||||
then that Go program will disable the use of HTTP/2 by default in both
|
||||
the HTTP client and the HTTP server.
|
||||
Unrecognized settings in the `GODEBUG` environment variable are ignored.
|
||||
It is also possible to set the default `GODEBUG` for a given program
|
||||
(discussed below).
|
||||
|
||||
@@ -109,9 +108,7 @@ Only the work module's `go.mod` is consulted for `godebug` directives.
|
||||
Any directives in required dependency modules are ignored.
|
||||
It is an error to list a `godebug` with an unrecognized setting.
|
||||
(Toolchains older than Go 1.23 reject all `godebug` lines, since they do not
|
||||
understand `godebug` at all.) When a workspace is in use, `godebug`
|
||||
directives in `go.mod` files are ignored, and `go.work` will be consulted
|
||||
for `godebug` directives instead.
|
||||
understand `godebug` at all.)
|
||||
|
||||
The defaults from the `go` and `godebug` lines apply to all main
|
||||
packages that are built. For more fine-grained control,
|
||||
@@ -153,152 +150,6 @@ for example,
|
||||
see the [runtime documentation](/pkg/runtime#hdr-Environment_Variables)
|
||||
and the [go command documentation](/cmd/go#hdr-Build_and_test_caching).
|
||||
|
||||
### Go 1.26
|
||||
|
||||
Go 1.26 added a new `httpcookiemaxnum` setting that controls the maximum number
|
||||
of cookies that net/http will accept when parsing HTTP headers. If the number of
|
||||
cookie in a header exceeds the number set in `httpcookiemaxnum`, cookie parsing
|
||||
will fail early. The default value is `httpcookiemaxnum=3000`. Setting
|
||||
`httpcookiemaxnum=0` will allow the cookie parsing to accept an indefinite
|
||||
number of cookies. To avoid denial of service attacks, this setting and default
|
||||
was backported to Go 1.25.2 and Go 1.24.8.
|
||||
|
||||
Go 1.26 added a new `urlstrictcolons` setting that controls whether `net/url.Parse`
|
||||
allows malformed hostnames containing colons outside of a bracketed IPv6 address.
|
||||
The default `urlstrictcolons=1` rejects URLs such as `http://localhost:1:2` or `http://::1/`.
|
||||
Colons are permitted as part of a bracketed IPv6 address, such as `http://[::1]/`.
|
||||
|
||||
Go 1.26 enabled two additional post-quantum key exchange mechanisms:
|
||||
SecP256r1MLKEM768 and SecP384r1MLKEM1024. The default can be reverted using the
|
||||
[`tlssecpmlkem` setting](/pkg/crypto/tls/#Config.CurvePreferences).
|
||||
|
||||
Go 1.26 added a new `tracebacklabels` setting that controls the inclusion of
|
||||
goroutine labels set through the the `runtime/pprof` package. Setting `tracebacklabels=1`
|
||||
includes these key/value pairs in the goroutine status header of runtime
|
||||
tracebacks and debug=2 runtime/pprof stack dumps. This format may change in the future.
|
||||
(see go.dev/issue/76349)
|
||||
|
||||
Go 1.26 added a new `cryptocustomrand` setting that controls whether most crypto/...
|
||||
APIs ignore the random `io.Reader` parameter. For Go 1.26, it defaults
|
||||
to `cryptocustomrand=0`, ignoring the random parameters. Using `cryptocustomrand=1`
|
||||
reverts to the pre-Go 1.26 behavior.
|
||||
|
||||
### Go 1.25
|
||||
|
||||
Go 1.25 added a new `decoratemappings` setting that controls whether the Go
|
||||
runtime annotates OS anonymous memory mappings with context about their
|
||||
purpose. These annotations appear in /proc/self/maps and /proc/self/smaps as
|
||||
"[anon: Go: ...]". This setting is only used on Linux. For Go 1.25, it defaults
|
||||
to `decoratemappings=1`, enabling annotations. Using `decoratemappings=0`
|
||||
reverts to the pre-Go 1.25 behavior. This setting is fixed at program startup
|
||||
time, and can't be modified by changing the `GODEBUG` environment variable
|
||||
after the program starts.
|
||||
|
||||
Go 1.25 added a new `embedfollowsymlinks` setting that controls whether the
|
||||
Go command will follow symlinks to regular files embedding files.
|
||||
The default value `embedfollowsymlinks=0` does not allow following
|
||||
symlinks. `embedfollowsymlinks=1` will allow following symlinks.
|
||||
|
||||
Go 1.25 added a new `containermaxprocs` setting that controls whether the Go
|
||||
runtime will consider cgroup CPU limits when setting the default GOMAXPROCS.
|
||||
The default value `containermaxprocs=1` will use cgroup limits in addition to
|
||||
the total logical CPU count and CPU affinity. `containermaxprocs=0` will
|
||||
disable consideration of cgroup limits. This setting only affects Linux.
|
||||
|
||||
Go 1.25 added a new `updatemaxprocs` setting that controls whether the Go
|
||||
runtime will periodically update GOMAXPROCS for new CPU affinity or cgroup
|
||||
limits. The default value `updatemaxprocs=1` will enable periodic updates.
|
||||
`updatemaxprocs=0` will disable periodic updates.
|
||||
|
||||
Go 1.25 disabled SHA-1 signature algorithms in TLS 1.2 according to RFC 9155.
|
||||
The default can be reverted using the `tlssha1=1` setting.
|
||||
|
||||
Go 1.25 switched to SHA-256 to fill in missing SubjectKeyId in
|
||||
crypto/x509.CreateCertificate. The setting `x509sha256skid=0` reverts to SHA-1.
|
||||
|
||||
Go 1.25 corrected the semantics of contention reports for runtime-internal locks,
|
||||
and so removed the [`runtimecontentionstacks` setting](/pkg/runtime#hdr-Environment_Variables).
|
||||
|
||||
Go 1.25 (starting with Go 1.25 RC 2) disabled build information stamping when
|
||||
multiple VCS are detected due to concerns around VCS injection attacks. This
|
||||
behavior and setting was backported to Go 1.24.5 and Go 1.23.11. This behavior
|
||||
can be renabled with the setting `allowmultiplevcs=1`.
|
||||
|
||||
### Go 1.24
|
||||
|
||||
Go 1.24 added a new `fips140` setting that controls whether the Go
|
||||
Cryptographic Module operates in FIPS 140-3 mode.
|
||||
The possible values are:
|
||||
- "off": no special support for FIPS 140-3 mode. This is the default.
|
||||
- "on": the Go Cryptographic Module operates in FIPS 140-3 mode.
|
||||
- "only": like "on", but cryptographic algorithms not approved by
|
||||
FIPS 140-3 return an error or panic.
|
||||
For more information, see [FIPS 140-3 Compliance](/doc/security/fips140).
|
||||
This setting is fixed at program startup time, and can't be modified
|
||||
by changing the `GODEBUG` environment variable after the program starts.
|
||||
|
||||
Go 1.24 changed the global [`math/rand.Seed`](/pkg/math/rand/#Seed) to be a
|
||||
no-op. This behavior is controlled by the `randseednop` setting.
|
||||
For Go 1.24 it defaults to `randseednop=1`.
|
||||
Using `randseednop=0` reverts to the pre-Go 1.24 behavior.
|
||||
|
||||
Go 1.24 added new values for the `multipathtcp` setting.
|
||||
The possible values for `multipathtcp` are now:
|
||||
- "0": disable MPTCP on dialers and listeners by default
|
||||
- "1": enable MPTCP on dialers and listeners by default
|
||||
- "2": enable MPTCP on listeners only by default
|
||||
- "3": enable MPTCP on dialers only by default
|
||||
|
||||
For Go 1.24, it now defaults to multipathtcp="2", thus
|
||||
enabled by default on listeners. Using multipathtcp="0" reverts to the
|
||||
pre-Go 1.24 behavior.
|
||||
|
||||
Go 1.24 changed the behavior of `go test -json` to emit build errors as JSON
|
||||
instead of text.
|
||||
These new JSON events are distinguished by new `Action` values,
|
||||
but can still cause problems with CI systems that aren't robust to these events.
|
||||
This behavior can be controlled with the `gotestjsonbuildtext` setting.
|
||||
Using `gotestjsonbuildtext=1` restores the 1.23 behavior.
|
||||
This setting will be removed in a future release, Go 1.28 at the earliest.
|
||||
|
||||
Go 1.24 changed [`crypto/rsa`](/pkg/crypto/rsa) to require RSA keys to be at
|
||||
least 1024 bits. This behavior can be controlled with the `rsa1024min` setting.
|
||||
Using `rsa1024min=0` restores the Go 1.23 behavior.
|
||||
|
||||
Go 1.24 introduced a mechanism for enabling platform specific Data Independent
|
||||
Timing (DIT) modes in the [`crypto/subtle`](/pkg/crypto/subtle) package. This
|
||||
mode can be enabled for an entire program with the `dataindependenttiming` setting.
|
||||
For Go 1.24 it defaults to `dataindependenttiming=0`. There is no change in default
|
||||
behavior from Go 1.23 when `dataindependenttiming` is unset.
|
||||
Using `dataindependenttiming=1` enables the DIT mode for the entire Go program.
|
||||
When enabled, DIT will be enabled when calling into C from Go. When enabled,
|
||||
calling into Go code from C will enable DIT, and disable it before returning to
|
||||
C if it was not enabled when Go code was entered.
|
||||
This currently only affects arm64 programs. For all other platforms it is a no-op.
|
||||
|
||||
Go 1.24 removed the `x509sha1` setting. `crypto/x509` no longer supports verifying
|
||||
signatures on certificates that use SHA-1 based signature algorithms.
|
||||
|
||||
Go 1.24 changes the default value of the [`x509usepolicies`
|
||||
setting.](/pkg/crypto/x509/#CreateCertificate) from `0` to `1`. When marshalling
|
||||
certificates, policies are now taken from the
|
||||
[`Certificate.Policies`](/pkg/crypto/x509/#Certificate.Policies) field rather
|
||||
than the
|
||||
[`Certificate.PolicyIdentifiers`](/pkg/crypto/x509/#Certificate.PolicyIdentifiers)
|
||||
field by default.
|
||||
|
||||
Go 1.24 enabled the post-quantum key exchange mechanism
|
||||
X25519MLKEM768 by default. The default can be reverted using the
|
||||
[`tlsmlkem` setting](/pkg/crypto/tls/#Config.CurvePreferences).
|
||||
This can be useful when dealing with buggy TLS servers that do not handle large records correctly,
|
||||
causing a timeout during the handshake (see [TLS post-quantum TL;DR fail](https://tldr.fail/)).
|
||||
Go 1.24 also removed X25519Kyber768Draft00 and the Go 1.23 `tlskyber` setting.
|
||||
|
||||
Go 1.24 made [`ParsePKCS1PrivateKey`](/pkg/crypto/x509/#ParsePKCS1PrivateKey)
|
||||
use and validate the CRT parameters in the encoded private key. This behavior
|
||||
can be controlled with the `x509rsacrt` setting. Using `x509rsacrt=0` restores
|
||||
the Go 1.23 behavior.
|
||||
|
||||
### Go 1.23
|
||||
|
||||
Go 1.23 changed the channels created by package time to be unbuffered
|
||||
@@ -306,7 +157,7 @@ Go 1.23 changed the channels created by package time to be unbuffered
|
||||
and [`Timer.Reset`](/pkg/time/#Timer.Reset) method results much easier.
|
||||
The [`asynctimerchan` setting](/pkg/time/#NewTimer) disables this change.
|
||||
There are no runtime metrics for this change,
|
||||
This setting will be removed in Go 1.27.
|
||||
This setting may be removed in a future release, Go 1.27 at the earliest.
|
||||
|
||||
Go 1.23 changed the mode bits reported by [`os.Lstat`](/pkg/os#Lstat) and [`os.Stat`](/pkg/os#Stat)
|
||||
for reparse points, which can be controlled with the `winsymlink` setting.
|
||||
@@ -328,8 +179,6 @@ Previous versions default to `winreadlinkvolume=0`.
|
||||
Go 1.23 enabled the experimental post-quantum key exchange mechanism
|
||||
X25519Kyber768Draft00 by default. The default can be reverted using the
|
||||
[`tlskyber` setting](/pkg/crypto/tls/#Config.CurvePreferences).
|
||||
This can be useful when dealing with buggy TLS servers that do not handle large records correctly,
|
||||
causing a timeout during the handshake (see [TLS post-quantum TL;DR fail](https://tldr.fail/)).
|
||||
|
||||
Go 1.23 changed the behavior of
|
||||
[crypto/x509.ParseCertificate](/pkg/crypto/x509/#ParseCertificate) to reject
|
||||
@@ -343,7 +192,6 @@ any effect.
|
||||
Go 1.23 changed the default TLS cipher suites used by clients and servers when
|
||||
not explicitly configured, removing 3DES cipher suites. The default can be reverted
|
||||
using the [`tls3des` setting](/pkg/crypto/tls/#Config.CipherSuites).
|
||||
This setting will be removed in Go 1.27.
|
||||
|
||||
Go 1.23 changed the behavior of [`tls.X509KeyPair`](/pkg/crypto/tls#X509KeyPair)
|
||||
and [`tls.LoadX509KeyPair`](/pkg/crypto/tls#LoadX509KeyPair) to populate the
|
||||
@@ -351,7 +199,6 @@ Leaf field of the returned [`tls.Certificate`](/pkg/crypto/tls#Certificate).
|
||||
This behavior is controlled by the `x509keypairleaf` setting. For Go 1.23, it
|
||||
defaults to `x509keypairleaf=1`. Previous versions default to
|
||||
`x509keypairleaf=0`.
|
||||
This setting will be removed in Go 1.27.
|
||||
|
||||
Go 1.23 changed
|
||||
[`net/http.ServeContent`](/pkg/net/http#ServeContent),
|
||||
@@ -385,31 +232,28 @@ Whether the type checker produces `Alias` types or not is controlled by the
|
||||
[`gotypesalias` setting](/pkg/go/types#Alias).
|
||||
For Go 1.22 it defaults to `gotypesalias=0`.
|
||||
For Go 1.23, `gotypesalias=1` will become the default.
|
||||
This setting will be removed in Go 1.27.
|
||||
This setting will be removed in a future release, Go 1.27 at the earliest.
|
||||
|
||||
Go 1.22 changed the default minimum TLS version supported by both servers
and clients to TLS 1.2. The default can be reverted to TLS 1.0 using the
[`tls10server` setting](/pkg/crypto/tls/#Config).
This setting will be removed in Go 1.27.

Go 1.22 changed the default TLS cipher suites used by clients and servers when
not explicitly configured, removing the cipher suites which used RSA based key
exchange. The default can be reverted using the [`tlsrsakex` setting](/pkg/crypto/tls/#Config).
This setting will be removed in Go 1.27.

Go 1.22 disabled
[`ConnectionState.ExportKeyingMaterial`](/pkg/crypto/tls/#ConnectionState.ExportKeyingMaterial)
when the connection supports neither TLS 1.3 nor Extended Master Secret
(implemented in Go 1.21). It can be reenabled with the [`tlsunsafeekm`
setting](/pkg/crypto/tls/#ConnectionState.ExportKeyingMaterial).
This setting will be removed in Go 1.27.

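Instead of relying on these GODEBUG reverts, a program can also state its policy explicitly in `tls.Config`, which takes precedence over the defaults either way. A minimal sketch, not part of the original text:

```go
package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	cfg := &tls.Config{
		// Make the Go 1.22+ defaults explicit instead of depending on
		// tls10server/tlsrsakex: TLS 1.2 minimum, no RSA key exchange.
		MinVersion: tls.VersionTLS12,
		CipherSuites: []uint16{
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
		},
	}
	fmt.Printf("min=%x suites=%d\n", cfg.MinVersion, len(cfg.CipherSuites))
}
```
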
Go 1.22 changed how the runtime interacts with transparent huge pages on Linux.
In particular, a common default Linux kernel configuration can result in
significant memory overheads, and Go 1.22 no longer works around this default.
To work around this issue without adjusting kernel settings, transparent huge
pages can be disabled for Go memory with the
[`disablethp` setting](/pkg/runtime#hdr-Environment_Variables).
[`disablethp` setting](/pkg/runtime#hdr-Environment_Variable).
This behavior was backported to Go 1.21.1, but the setting is only available
starting with Go 1.21.6.
This setting may be removed in a future release, and users impacted by this issue
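A minimal sketch, not part of the original text and assuming `disablethp` accepts a `//go:debug` default like other documented settings, of opting in from the main package rather than the environment:

```go
//go:debug disablethp=1

package main

import "fmt"

func main() {
	// The directive above applies GODEBUG=disablethp=1 to this binary by
	// default; an explicit GODEBUG environment setting still overrides it.
	fmt.Println("transparent huge pages disabled for Go memory by default")
}
```
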
@@ -421,7 +265,7 @@ Go 1.22 added contention on runtime-internal locks to the [`mutex`
profile](/pkg/runtime/pprof#Profile). Contention on these locks is always
reported at `runtime._LostContendedRuntimeLock`. Complete stack traces of
runtime locks can be enabled with the [`runtimecontentionstacks`
setting](/pkg/runtime#hdr-Environment_Variables). These stack traces have
setting](/pkg/runtime#hdr-Environment_Variable). These stack traces have
non-standard semantics, see setting documentation for details.

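A minimal sketch, not part of the original text, of turning on the mutex profile so that this contention is visible in a pprof dump:

```go
package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
)

func main() {
	runtime.SetMutexProfileFraction(1) // sample every contention event

	// Generate a little contention so the profile has something to show.
	var mu sync.Mutex
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				mu.Lock()
				mu.Unlock()
			}
		}()
	}
	wg.Wait()

	// Runtime-internal lock contention appears here too, attributed to
	// runtime._LostContendedRuntimeLock unless runtimecontentionstacks is set.
	pprof.Lookup("mutex").WriteTo(os.Stdout, 1)
}
```
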
Go 1.22 added a new [`crypto/x509.Certificate`](/pkg/crypto/x509/#Certificate)
@@ -430,7 +274,7 @@ certificate policy OIDs with components larger than 31 bits. By default this
field is only used during parsing, when it is populated with policy OIDs, but
not used during marshaling. It can be used to marshal these larger OIDs, instead
of the existing PolicyIdentifiers field, by using the
[`x509usepolicies` setting](/pkg/crypto/x509/#CreateCertificate).
[`x509usepolicies` setting.](/pkg/crypto/x509/#CreateCertificate).

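A minimal sketch, not part of the original text, of placing a large-component policy OID in the newer `Policies` field; with `x509usepolicies=1`, `CreateCertificate` marshals `Policies` instead of `PolicyIdentifiers`:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"math/big"
	"time"
)

func main() {
	// A policy OID whose last component does not fit in 31 bits.
	oid, err := x509.OIDFromInts([]uint64{1, 3, 6, 1, 4, 1, 4294967296})
	if err != nil {
		panic(err)
	}
	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "example"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(time.Hour),
		Policies:     []x509.OID{oid},
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	fmt.Println(len(der), err)
}
```
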
### Go 1.21

@@ -498,7 +342,7 @@ There is no plan to remove this setting.

Go 1.18 removed support for SHA1 in most X.509 certificates,
controlled by the [`x509sha1` setting](/pkg/crypto/x509#InsecureAlgorithmError).
This setting was removed in Go 1.24.
This setting will be removed in a future release, Go 1.22 at the earliest.

### Go 1.10

@@ -1,3 +1,9 @@
<!--
NOTE: In this document and others in this directory, the convention is to
set fixed-width phrases with non-fixed-width spaces, as in
`hello` `world`.
-->

<style>
main ul li { margin: 0.5em 0; }
</style>

@@ -1 +1,3 @@
### Minor changes to the library {#minor_library_changes}

@@ -1,46 +0,0 @@
|
||||
# Copyright 2024 The Go Authors. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style
|
||||
# license that can be found in the LICENSE file.
|
||||
|
||||
# Rules for building and testing new FIPS snapshots.
|
||||
# For example:
|
||||
#
|
||||
# make v1.2.3.zip
|
||||
# make v1.2.3.test
|
||||
#
|
||||
# and then if changes are needed, check them into master
|
||||
# and run 'make v1.2.3.rm' and repeat.
|
||||
#
|
||||
# Note that once published a snapshot zip file should never
|
||||
# be modified. We record the sha256 hashes of the zip files
|
||||
# in fips140.sum, and the cmd/go/internal/fips140 test checks
|
||||
# that the zips match.
|
||||
#
|
||||
# When the zip file is finalized, run 'make updatesum' to update
|
||||
# fips140.sum.
|
||||
|
||||
default:
|
||||
@echo nothing to make
|
||||
|
||||
# make v1.2.3.zip builds a v1.2.3.zip file
|
||||
# from the current origin/master.
|
||||
# copy and edit the 'go run' command by hand to use a different branch.
|
||||
v%.zip:
|
||||
git fetch origin master
|
||||
go run ../../src/cmd/go/internal/fips140/mkzip.go v$*
|
||||
|
||||
# normally mkzip refuses to overwrite an existing zip file.
|
||||
# make v1.2.3.rm removes the zip file and unpacked
|
||||
# copy from the module cache.
|
||||
v%.rm:
|
||||
rm -f v$*.zip
|
||||
chmod -R u+w $$(go env GOMODCACHE)/golang.org/fips140@v$* 2>/dev/null || true
|
||||
rm -rf $$(go env GOMODCACHE)/golang.org/fips140@v$*
|
||||
|
||||
# make v1.2.3.test runs the crypto tests using that snapshot.
|
||||
v%.test:
|
||||
GOFIPS140=v$* go test -short crypto...
|
||||
|
||||
# make updatesum updates the fips140.sum file.
|
||||
updatesum:
|
||||
go test cmd/go/internal/fips140 -update
|
||||
@@ -1,9 +0,0 @@
|
||||
This directory holds snapshots of the crypto/internal/fips140 tree
|
||||
that are being validated and certified for FIPS-140 use.
|
||||
The file x.txt (for example, inprocess.txt, certified.txt)
|
||||
defines the meaning of the FIPS version alias x, listing
|
||||
the exact version to use.
|
||||
|
||||
The zip files are created by cmd/go/internal/fips140/mkzip.go.
|
||||
The fips140.sum file lists checksums for the zip files.
|
||||
See the Makefile for recipes.
|
||||
@@ -1,13 +0,0 @@
|
||||
# SHA256 checksums of snapshot zip files in this directory.
|
||||
# These checksums are included in the FIPS security policy
|
||||
# (validation instructions sent to the lab) and MUST NOT CHANGE.
|
||||
# That is, the zip files themselves must not change.
|
||||
#
|
||||
# It is okay to add new zip files to the list, and it is okay to
|
||||
# remove zip files from the list when they are removed from
|
||||
# this directory. To update this file:
|
||||
#
|
||||
# go test cmd/go/internal/fips140 -update
|
||||
#
|
||||
v1.0.0-c2097c7c.zip daf3614e0406f67ae6323c902db3f953a1effb199142362a039e7526dfb9368b
|
||||
v1.1.0-rc1.zip ea94f8c3885294c9efe1bd8f9b6e86daeb25b6aff2aeb20707cd9a5101f6f54e
|
||||
@@ -1 +0,0 @@
|
||||
v1.0.0-c2097c7c
|
||||
Binary file not shown.
@@ -1 +0,0 @@
|
||||
v1.0.0-c2097c7c
|
||||
Binary file not shown.
@@ -1,64 +0,0 @@
|
||||
# Copyright 2025 The Go Authors. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style
|
||||
# license that can be found in the LICENSE file.
|
||||
|
||||
# Mercurial extension to add a 'goreposum' command that
|
||||
# computes a hash of a remote repo's tag state.
|
||||
# Tag definitions can come from the .hgtags file stored in
|
||||
# any head of any branch, and the server protocol does not
|
||||
# expose the tags directly. However, the protocol does expose
|
||||
# the hashes of all the branch heads, so we can use a hash of
|
||||
# all those branch names and heads as a conservative snapshot
|
||||
# of the entire remote repo state, and use that as the tag sum.
|
||||
# Any change on the server then invalidates the tag sum,
|
||||
# even if it didn't have anything to do with tags, but at least
|
||||
# we will avoid re-cloning a server when there have been no
|
||||
# changes at all.
|
||||
#
|
||||
# Separately, this extension also adds a 'golookup' command that
|
||||
# returns the hash of a specific reference, like 'default' or a tag.
|
||||
# And golookup of a hash confirms that it still exists on the server.
|
||||
# We can use that to revalidate that specific versions still exist and
|
||||
# have the same meaning they did the last time we checked.
|
||||
#
|
||||
# Usage:
|
||||
#
|
||||
# hg --config "extensions.goreposum=$GOROOT/lib/hg/goreposum.py" goreposum REPOURL
|
||||
|
||||
import base64, hashlib, sys
|
||||
from mercurial import registrar, ui, hg, node
|
||||
from mercurial.i18n import _
|
||||
cmdtable = {}
|
||||
command = registrar.command(cmdtable)
|
||||
@command(b'goreposum', [], _('url'), norepo=True)
|
||||
def goreposum(ui, url):
|
||||
"""
|
||||
goreposum computes a checksum of all the named state in the remote repo.
|
||||
It hashes together all the branch names and hashes
|
||||
and then all the bookmark names and hashes.
|
||||
Tags are stored in .hgtags files in any of the branches,
|
||||
so the branch metadata includes the tags as well.
|
||||
"""
|
||||
h = hashlib.sha256()
|
||||
peer = hg.peer(ui, {}, url)
|
||||
for name, revs in peer.branchmap().items():
|
||||
h.update(name)
|
||||
for r in revs:
|
||||
h.update(b' ')
|
||||
h.update(r)
|
||||
h.update(b'\n')
|
||||
if (b'bookmarks' in peer.listkeys(b'namespaces')):
|
||||
for name, rev in peer.listkeys(b'bookmarks').items():
|
||||
h.update(name)
|
||||
h.update(b'=')
|
||||
h.update(rev)
|
||||
h.update(b'\n')
|
||||
print('r1:'+base64.standard_b64encode(h.digest()).decode('utf-8'))
|
||||
|
||||
@command(b'golookup', [], _('url rev'), norepo=True)
|
||||
def golookup(ui, url, rev):
|
||||
"""
|
||||
golookup looks up a single identifier in the repo,
|
||||
printing its hash.
|
||||
"""
|
||||
print(node.hex(hg.peer(ui, {}, url).lookup(rev)).decode('utf-8'))
|
||||
@@ -24,8 +24,8 @@
|
||||
# in the CL match the update.bash in the CL.
|
||||
|
||||
# Versions to use.
|
||||
CODE=2025c
|
||||
DATA=2025c
|
||||
CODE=2024a
|
||||
DATA=2024a
|
||||
|
||||
set -e
|
||||
|
||||
@@ -40,12 +40,7 @@ curl -sS -L -O https://www.iana.org/time-zones/repository/releases/tzdata$DATA.t
|
||||
tar xzf tzcode$CODE.tar.gz
|
||||
tar xzf tzdata$DATA.tar.gz
|
||||
|
||||
# The PACKRATLIST and PACKRATDATA options are copied from Ubuntu:
|
||||
# https://git.launchpad.net/ubuntu/+source/tzdata/tree/debian/rules?h=debian/sid
|
||||
#
|
||||
# You can see the description of these make variables in the tzdata Makefile:
|
||||
# https://github.com/eggert/tz/blob/main/Makefile
|
||||
if ! make CFLAGS=-DSTD_INSPIRED AWK=awk TZDIR=zoneinfo PACKRATDATA=backzone PACKRATLIST=zone.tab posix_only >make.out 2>&1; then
|
||||
if ! make CFLAGS=-DSTD_INSPIRED AWK=awk TZDIR=zoneinfo posix_only >make.out 2>&1; then
|
||||
cat make.out
|
||||
exit 2
|
||||
fi
|
||||
|
||||
Binary file not shown.
@@ -212,7 +212,7 @@ func copyLocalData(dstbase string) (pkgpath string, err error) {
|
||||
// Copy all immediate files and testdata directories between
|
||||
// the package being tested and the source root.
|
||||
pkgpath = ""
|
||||
for element := range strings.SplitSeq(finalPkgpath, string(filepath.Separator)) {
|
||||
for _, element := range strings.Split(finalPkgpath, string(filepath.Separator)) {
|
||||
if debug {
|
||||
log.Printf("copying %s", pkgpath)
|
||||
}
|
||||
|
||||
191
misc/linkcheck/linkcheck.go
Normal file
@@ -0,0 +1,191 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// The linkcheck command finds missing links in the godoc website.
|
||||
// It crawls a URL recursively and notes URLs and URL fragments
|
||||
// that it's seen and prints a report of missing links at the end.
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
root = flag.String("root", "http://localhost:6060", "Root to crawl")
|
||||
verbose = flag.Bool("verbose", false, "verbose")
|
||||
)
|
||||
|
||||
var wg sync.WaitGroup // outstanding fetches
|
||||
var urlq = make(chan string) // URLs to crawl
|
||||
|
||||
// urlFrag is a URL and its optional #fragment (without the #)
|
||||
type urlFrag struct {
|
||||
url, frag string
|
||||
}
|
||||
|
||||
var (
|
||||
mu sync.Mutex
|
||||
crawled = make(map[string]bool) // URL without fragment -> true
|
||||
neededFrags = make(map[urlFrag][]string) // URL#frag -> who needs it
|
||||
)
|
||||
|
||||
var aRx = regexp.MustCompile(`<a href=['"]?(/[^\s'">]+)`)
|
||||
|
||||
// Owned by crawlLoop goroutine:
|
||||
var (
|
||||
linkSources = make(map[string][]string) // url no fragment -> sources
|
||||
fragExists = make(map[urlFrag]bool)
|
||||
problems []string
|
||||
)
|
||||
|
||||
func localLinks(body string) (links []string) {
|
||||
seen := map[string]bool{}
|
||||
mv := aRx.FindAllStringSubmatch(body, -1)
|
||||
for _, m := range mv {
|
||||
ref := m[1]
|
||||
if strings.HasPrefix(ref, "/src/") {
|
||||
continue
|
||||
}
|
||||
if !seen[ref] {
|
||||
seen[ref] = true
|
||||
links = append(links, m[1])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var idRx = regexp.MustCompile(`\bid=['"]?([^\s'">]+)`)
|
||||
|
||||
func pageIDs(body string) (ids []string) {
|
||||
mv := idRx.FindAllStringSubmatch(body, -1)
|
||||
for _, m := range mv {
|
||||
ids = append(ids, m[1])
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// url may contain a #fragment, and the fragment is then noted as needing to exist.
|
||||
func crawl(url string, sourceURL string) {
|
||||
if strings.Contains(url, "/devel/release") {
|
||||
return
|
||||
}
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
if u, frag, ok := strings.Cut(url, "#"); ok {
|
||||
url = u
|
||||
if frag != "" {
|
||||
uf := urlFrag{url, frag}
|
||||
neededFrags[uf] = append(neededFrags[uf], sourceURL)
|
||||
}
|
||||
}
|
||||
if crawled[url] {
|
||||
return
|
||||
}
|
||||
crawled[url] = true
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
urlq <- url
|
||||
}()
|
||||
}
|
||||
|
||||
func addProblem(url, errmsg string) {
|
||||
msg := fmt.Sprintf("Error on %s: %s (from %s)", url, errmsg, linkSources[url])
|
||||
if *verbose {
|
||||
log.Print(msg)
|
||||
}
|
||||
problems = append(problems, msg)
|
||||
}
|
||||
|
||||
func crawlLoop() {
|
||||
for url := range urlq {
|
||||
if err := doCrawl(url); err != nil {
|
||||
addProblem(url, err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func doCrawl(url string) error {
|
||||
defer wg.Done()
|
||||
|
||||
req, err := http.NewRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
res, err := http.DefaultTransport.RoundTrip(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Handle redirects.
|
||||
if res.StatusCode/100 == 3 {
|
||||
newURL, err := res.Location()
|
||||
if err != nil {
|
||||
return fmt.Errorf("resolving redirect: %v", err)
|
||||
}
|
||||
if !strings.HasPrefix(newURL.String(), *root) {
|
||||
// Skip off-site redirects.
|
||||
return nil
|
||||
}
|
||||
crawl(newURL.String(), url)
|
||||
return nil
|
||||
}
|
||||
if res.StatusCode != 200 {
|
||||
return errors.New(res.Status)
|
||||
}
|
||||
slurp, err := io.ReadAll(res.Body)
|
||||
res.Body.Close()
|
||||
if err != nil {
|
||||
log.Fatalf("Error reading %s body: %v", url, err)
|
||||
}
|
||||
if *verbose {
|
||||
log.Printf("Len of %s: %d", url, len(slurp))
|
||||
}
|
||||
body := string(slurp)
|
||||
for _, ref := range localLinks(body) {
|
||||
if *verbose {
|
||||
log.Printf(" links to %s", ref)
|
||||
}
|
||||
dest := *root + ref
|
||||
linkSources[dest] = append(linkSources[dest], url)
|
||||
crawl(dest, url)
|
||||
}
|
||||
for _, id := range pageIDs(body) {
|
||||
if *verbose {
|
||||
log.Printf(" url %s has #%s", url, id)
|
||||
}
|
||||
fragExists[urlFrag{url, id}] = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
go crawlLoop()
|
||||
crawl(*root, "")
|
||||
|
||||
wg.Wait()
|
||||
close(urlq)
|
||||
for uf, needers := range neededFrags {
|
||||
if !fragExists[uf] {
|
||||
problems = append(problems, fmt.Sprintf("Missing fragment for %+v from %v", uf, needers))
|
||||
}
|
||||
}
|
||||
|
||||
for _, s := range problems {
|
||||
fmt.Println(s)
|
||||
}
|
||||
if len(problems) > 0 {
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
@@ -14,7 +14,7 @@ case "$GOWASIRUNTIME" in
|
||||
exec wazero run -mount /:/ -env-inherit -cachedir "${TMPDIR:-/tmp}"/wazero ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}"
|
||||
;;
|
||||
"wasmtime" | "")
|
||||
exec wasmtime run --dir=/ --env PWD="$PWD" --env PATH="$PATH" -W max-wasm-stack=8388608 ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}"
|
||||
exec wasmtime run --dir=/ --env PWD="$PWD" --env PATH="$PATH" -W max-wasm-stack=1048576 ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}"
|
||||
;;
|
||||
*)
|
||||
echo "Unknown Go WASI runtime specified: $GOWASIRUNTIME"
|
||||
@@ -17,7 +17,7 @@ license that can be found in the LICENSE file.
|
||||
<script src="https://cdn.jsdelivr.net/npm/text-encoding@0.7.0/lib/encoding.min.js"></script>
|
||||
(see https://caniuse.com/#feat=textencoder)
|
||||
-->
|
||||
<script src="../../lib/wasm/wasm_exec.js"></script>
|
||||
<script src="wasm_exec.js"></script>
|
||||
<script>
|
||||
if (!WebAssembly.instantiateStreaming) { // polyfill
|
||||
WebAssembly.instantiateStreaming = async (resp, importObject) => {
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
if (!globalThis.fs) {
|
||||
let outputBuf = "";
|
||||
globalThis.fs = {
|
||||
constants: { O_WRONLY: -1, O_RDWR: -1, O_CREAT: -1, O_TRUNC: -1, O_APPEND: -1, O_EXCL: -1, O_DIRECTORY: -1 }, // unused
|
||||
constants: { O_WRONLY: -1, O_RDWR: -1, O_CREAT: -1, O_TRUNC: -1, O_APPEND: -1, O_EXCL: -1 }, // unused
|
||||
writeSync(fd, buf) {
|
||||
outputBuf += decoder.decode(buf);
|
||||
const nl = outputBuf.lastIndexOf("\n");
|
||||
@@ -73,14 +73,6 @@
|
||||
}
|
||||
}
|
||||
|
||||
if (!globalThis.path) {
|
||||
globalThis.path = {
|
||||
resolve(...pathSegments) {
|
||||
return pathSegments.join("/");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!globalThis.crypto) {
|
||||
throw new Error("globalThis.crypto is not available, polyfill required (crypto.getRandomValues only)");
|
||||
}
|
||||
@@ -216,16 +208,10 @@
|
||||
return decoder.decode(new DataView(this._inst.exports.mem.buffer, saddr, len));
|
||||
}
|
||||
|
||||
const testCallExport = (a, b) => {
|
||||
this._inst.exports.testExport0();
|
||||
return this._inst.exports.testExport(a, b);
|
||||
}
|
||||
|
||||
const timeOrigin = Date.now() - performance.now();
|
||||
this.importObject = {
|
||||
_gotest: {
|
||||
add: (a, b) => a + b,
|
||||
callExport: testCallExport,
|
||||
},
|
||||
gojs: {
|
||||
// Go's SP does not change as long as no Go code is running. Some operations (e.g. calls, getters and setters)
|
||||
@@ -11,7 +11,6 @@ if (process.argv.length < 3) {
|
||||
|
||||
globalThis.require = require;
|
||||
globalThis.fs = require("fs");
|
||||
globalThis.path = require("path");
|
||||
globalThis.TextEncoder = require("util").TextEncoder;
|
||||
globalThis.TextDecoder = require("util").TextDecoder;
|
||||
|
||||
@@ -33,10 +33,6 @@ Before updating vendor directories, ensure that module mode is enabled.
|
||||
Make sure that GO111MODULE is not set in the environment, or that it is
|
||||
set to 'on' or 'auto', and if you use a go.work file, set GOWORK=off.
|
||||
|
||||
Also, ensure that 'go env GOROOT' shows the root of this Go source
|
||||
tree. Otherwise, the results are undefined. It's recommended to build
|
||||
Go from source and use that 'go' binary to update its source tree.
|
||||
|
||||
Requirements may be added, updated, and removed with 'go get'.
|
||||
The vendor directory may be updated with 'go mod vendor'.
|
||||
A typical sequence might be:
|
||||
|
||||
@@ -10,4 +10,4 @@ if [ ! -f make.bash ]; then
|
||||
fi
|
||||
. ./make.bash "$@" --no-banner
|
||||
bash run.bash --no-rebuild
|
||||
../bin/go tool dist banner # print build info
|
||||
"$GOTOOLDIR/dist" banner # print build info
|
||||
|
||||
20
src/all.bat
@@ -6,11 +6,17 @@
|
||||
|
||||
setlocal
|
||||
|
||||
if not exist make.bat (
|
||||
echo all.bat must be run from go\src
|
||||
exit /b 1
|
||||
)
|
||||
if exist make.bat goto ok
|
||||
echo all.bat must be run from go\src
|
||||
:: cannot exit: would kill parent command interpreter
|
||||
goto end
|
||||
:ok
|
||||
|
||||
call .\make.bat --no-banner || exit /b 1
|
||||
call .\run.bat --no-rebuild || exit /b 1
|
||||
..\bin\go tool dist banner
|
||||
call .\make.bat --no-banner --no-local
|
||||
if %GOBUILDFAIL%==1 goto end
|
||||
call .\run.bat --no-rebuild --no-local
|
||||
if %GOBUILDFAIL%==1 goto end
|
||||
"%GOTOOLDIR%/dist" banner
|
||||
|
||||
:end
|
||||
if x%GOBUILDEXIT%==x1 exit %GOBUILDFAIL%
|
||||
|
||||
@@ -13,4 +13,4 @@ if(! test -f make.rc){
|
||||
. ./make.rc --no-banner $*
|
||||
bind -b $GOROOT/bin /bin
|
||||
./run.rc --no-rebuild
|
||||
../bin/go tool dist banner # print build info
|
||||
$GOTOOLDIR/dist banner # print build info
|
||||
|
||||
@@ -15,7 +15,6 @@ import (
|
||||
"fmt"
|
||||
"internal/godebug"
|
||||
"io/fs"
|
||||
"maps"
|
||||
"math"
|
||||
"path"
|
||||
"reflect"
|
||||
@@ -39,7 +38,6 @@ var (
|
||||
errMissData = errors.New("archive/tar: sparse file references non-existent data")
|
||||
errUnrefData = errors.New("archive/tar: sparse file contains unreferenced data")
|
||||
errWriteHole = errors.New("archive/tar: write non-NUL byte in sparse hole")
|
||||
errSparseTooLong = errors.New("archive/tar: sparse map too long")
|
||||
)
|
||||
|
||||
type headerError []string
|
||||
@@ -698,14 +696,24 @@ func FileInfoHeader(fi fs.FileInfo, link string) (*Header, error) {
|
||||
h.Gname = sys.Gname
|
||||
h.AccessTime = sys.AccessTime
|
||||
h.ChangeTime = sys.ChangeTime
|
||||
h.Xattrs = maps.Clone(sys.Xattrs)
|
||||
if sys.Xattrs != nil {
|
||||
h.Xattrs = make(map[string]string)
|
||||
for k, v := range sys.Xattrs {
|
||||
h.Xattrs[k] = v
|
||||
}
|
||||
}
|
||||
if sys.Typeflag == TypeLink {
|
||||
// hard link
|
||||
h.Typeflag = TypeLink
|
||||
h.Size = 0
|
||||
h.Linkname = sys.Linkname
|
||||
}
|
||||
h.PAXRecords = maps.Clone(sys.PAXRecords)
|
||||
if sys.PAXRecords != nil {
|
||||
h.PAXRecords = make(map[string]string)
|
||||
for k, v := range sys.PAXRecords {
|
||||
h.PAXRecords[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
var doNameLookups = true
|
||||
if iface, ok := fi.(FileInfoNames); ok {
|
||||
|
||||
@@ -531,17 +531,12 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) {
|
||||
cntNewline int64
|
||||
buf bytes.Buffer
|
||||
blk block
|
||||
totalSize int
|
||||
)
|
||||
|
||||
// feedTokens copies data in blocks from r into buf until there are
|
||||
// at least cnt newlines in buf. It will not read more blocks than needed.
|
||||
feedTokens := func(n int64) error {
|
||||
for cntNewline < n {
|
||||
totalSize += len(blk)
|
||||
if totalSize > maxSpecialFileSize {
|
||||
return errSparseTooLong
|
||||
}
|
||||
if _, err := mustReadFull(r, blk[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -574,8 +569,8 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) {
|
||||
}
|
||||
|
||||
// Parse for all member entries.
|
||||
// numEntries is trusted after this since feedTokens limits the number of
|
||||
// tokens based on maxSpecialFileSize.
|
||||
// numEntries is trusted after this since a potential attacker must have
|
||||
// committed resources proportional to what this library used.
|
||||
if err := feedTokens(2 * numEntries); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -7,17 +7,14 @@ package tar
|
||||
import (
|
||||
"bytes"
|
||||
"compress/bzip2"
|
||||
"crypto/md5"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"internal/obscuretestdata"
|
||||
"io"
|
||||
"maps"
|
||||
"math"
|
||||
"os"
|
||||
"path"
|
||||
"reflect"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
@@ -26,11 +23,10 @@ import (
|
||||
|
||||
func TestReader(t *testing.T) {
|
||||
vectors := []struct {
|
||||
file string // Test input file
|
||||
obscured bool // Obscured with obscuretestdata package
|
||||
headers []*Header // Expected output headers
|
||||
chksums []string // CRC32 checksum of files, leave as nil if not checked
|
||||
err error // Expected error to occur
|
||||
file string // Test input file
|
||||
headers []*Header // Expected output headers
|
||||
chksums []string // MD5 checksum of files, leave as nil if not checked
|
||||
err error // Expected error to occur
|
||||
}{{
|
||||
file: "testdata/gnu.tar",
|
||||
headers: []*Header{{
|
||||
@@ -57,8 +53,8 @@ func TestReader(t *testing.T) {
|
||||
Format: FormatGNU,
|
||||
}},
|
||||
chksums: []string{
|
||||
"6cbd88fc",
|
||||
"ddac04b3",
|
||||
"e38b27eaccb4391bdec553a7f3ae6b2f",
|
||||
"c65bd2e50a56a2138bf1716f2fd56fe9",
|
||||
},
|
||||
}, {
|
||||
file: "testdata/sparse-formats.tar",
|
||||
@@ -151,11 +147,11 @@ func TestReader(t *testing.T) {
|
||||
Format: FormatGNU,
|
||||
}},
|
||||
chksums: []string{
|
||||
"5375e1d2",
|
||||
"5375e1d2",
|
||||
"5375e1d2",
|
||||
"5375e1d2",
|
||||
"8eb179ba",
|
||||
"6f53234398c2449fe67c1812d993012f",
|
||||
"6f53234398c2449fe67c1812d993012f",
|
||||
"6f53234398c2449fe67c1812d993012f",
|
||||
"6f53234398c2449fe67c1812d993012f",
|
||||
"b0061974914468de549a2af8ced10316",
|
||||
},
|
||||
}, {
|
||||
file: "testdata/star.tar",
|
||||
@@ -272,7 +268,7 @@ func TestReader(t *testing.T) {
|
||||
Format: FormatPAX,
|
||||
}},
|
||||
chksums: []string{
|
||||
"5fd7e86a",
|
||||
"0afb597b283fe61b5d4879669a350556",
|
||||
},
|
||||
}, {
|
||||
file: "testdata/pax-records.tar",
|
||||
@@ -525,9 +521,8 @@ func TestReader(t *testing.T) {
|
||||
file: "testdata/pax-nul-path.tar",
|
||||
err: ErrHeader,
|
||||
}, {
|
||||
file: "testdata/neg-size.tar.base64",
|
||||
obscured: true,
|
||||
err: ErrHeader,
|
||||
file: "testdata/neg-size.tar",
|
||||
err: ErrHeader,
|
||||
}, {
|
||||
file: "testdata/issue10968.tar",
|
||||
err: ErrHeader,
|
||||
@@ -624,32 +619,18 @@ func TestReader(t *testing.T) {
|
||||
},
|
||||
Format: FormatPAX,
|
||||
}},
|
||||
}, {
|
||||
// Small compressed file that uncompresses to
|
||||
// a file with a very large GNU 1.0 sparse map.
|
||||
file: "testdata/gnu-sparse-many-zeros.tar.bz2",
|
||||
err: errSparseTooLong,
|
||||
}}
|
||||
|
||||
for _, v := range vectors {
|
||||
t.Run(strings.TrimSuffix(path.Base(v.file), ".base64"), func(t *testing.T) {
|
||||
path := v.file
|
||||
if v.obscured {
|
||||
tf, err := obscuretestdata.DecodeToTempFile(path)
|
||||
if err != nil {
|
||||
t.Fatalf("obscuredtestdata.DecodeToTempFile(%s): %v", path, err)
|
||||
}
|
||||
path = tf
|
||||
}
|
||||
|
||||
f, err := os.Open(path)
|
||||
t.Run(path.Base(v.file), func(t *testing.T) {
|
||||
f, err := os.Open(v.file)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
var fr io.Reader = f
|
||||
if strings.HasSuffix(v.file, ".bz2") || strings.HasSuffix(v.file, ".bz2.base64") {
|
||||
if strings.HasSuffix(v.file, ".bz2") {
|
||||
fr = bzip2.NewReader(fr)
|
||||
}
|
||||
|
||||
@@ -674,7 +655,7 @@ func TestReader(t *testing.T) {
|
||||
if v.chksums == nil {
|
||||
continue
|
||||
}
|
||||
h := crc32.NewIEEE()
|
||||
h := md5.New()
|
||||
_, err = io.CopyBuffer(h, tr, rdbuf) // Effectively an incremental read
|
||||
if err != nil {
|
||||
break
|
||||
@@ -787,7 +768,7 @@ type readBadSeeker struct{ io.ReadSeeker }
|
||||
|
||||
func (rbs *readBadSeeker) Seek(int64, int) (int64, error) { return 0, fmt.Errorf("illegal seek") }
|
||||
|
||||
// TestReadTruncation tests the ending condition on various truncated files and
|
||||
// TestReadTruncation test the ending condition on various truncated files and
|
||||
// that truncated files are still detected even if the underlying io.Reader
|
||||
// satisfies io.Seeker.
|
||||
func TestReadTruncation(t *testing.T) {
|
||||
@@ -1036,7 +1017,7 @@ func TestParsePAX(t *testing.T) {
|
||||
for i, v := range vectors {
|
||||
r := strings.NewReader(v.in)
|
||||
got, err := parsePAX(r)
|
||||
if !maps.Equal(got, v.want) && !(len(got) == 0 && len(v.want) == 0) {
|
||||
if !reflect.DeepEqual(got, v.want) && !(len(got) == 0 && len(v.want) == 0) {
|
||||
t.Errorf("test %d, parsePAX():\ngot %v\nwant %v", i, got, v.want)
|
||||
}
|
||||
if ok := err == nil; ok != v.ok {
|
||||
@@ -1153,7 +1134,7 @@ func TestReadOldGNUSparseMap(t *testing.T) {
|
||||
v.input = v.input[copy(blk[:], v.input):]
|
||||
tr := Reader{r: bytes.NewReader(v.input)}
|
||||
got, err := tr.readOldGNUSparseMap(&hdr, &blk)
|
||||
if !slices.Equal(got, v.wantMap) {
|
||||
if !equalSparseEntries(got, v.wantMap) {
|
||||
t.Errorf("test %d, readOldGNUSparseMap(): got %v, want %v", i, got, v.wantMap)
|
||||
}
|
||||
if err != v.wantErr {
|
||||
@@ -1344,7 +1325,7 @@ func TestReadGNUSparsePAXHeaders(t *testing.T) {
|
||||
r := strings.NewReader(v.inputData + "#") // Add canary byte
|
||||
tr := Reader{curr: ®FileReader{r, int64(r.Len())}}
|
||||
got, err := tr.readGNUSparsePAXHeaders(&hdr)
|
||||
if !slices.Equal(got, v.wantMap) {
|
||||
if !equalSparseEntries(got, v.wantMap) {
|
||||
t.Errorf("test %d, readGNUSparsePAXHeaders(): got %v, want %v", i, got, v.wantMap)
|
||||
}
|
||||
if err != v.wantErr {
|
||||
|
||||
@@ -19,7 +19,7 @@ func init() {
|
||||
sysStat = statUnix
|
||||
}
|
||||
|
||||
// userMap and groupMap cache UID and GID lookups for performance reasons.
|
||||
// userMap and groupMap caches UID and GID lookups for performance reasons.
|
||||
// The downside is that renaming uname or gname by the OS never takes effect.
|
||||
var userMap, groupMap sync.Map // map[int]string
|
||||
|
||||
|
||||
@@ -213,17 +213,15 @@ func parsePAXTime(s string) (time.Time, error) {
|
||||
}
|
||||
|
||||
// Parse the nanoseconds.
|
||||
// Initialize an array with '0's to handle right padding automatically.
|
||||
nanoDigits := [maxNanoSecondDigits]byte{'0', '0', '0', '0', '0', '0', '0', '0', '0'}
|
||||
for i := range len(sn) {
|
||||
switch c := sn[i]; {
|
||||
case c < '0' || c > '9':
|
||||
return time.Time{}, ErrHeader
|
||||
case i < len(nanoDigits):
|
||||
nanoDigits[i] = c
|
||||
}
|
||||
if strings.Trim(sn, "0123456789") != "" {
|
||||
return time.Time{}, ErrHeader
|
||||
}
|
||||
nsecs, _ := strconv.ParseInt(string(nanoDigits[:]), 10, 64) // Must succeed after validation
|
||||
if len(sn) < maxNanoSecondDigits {
|
||||
sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad
|
||||
} else {
|
||||
sn = sn[:maxNanoSecondDigits] // Right truncate
|
||||
}
|
||||
nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed
|
||||
if len(ss) > 0 && ss[0] == '-' {
|
||||
return time.Unix(secs, -1*nsecs), nil // Negative correction
|
||||
}
|
||||
@@ -312,7 +310,7 @@ func formatPAXRecord(k, v string) (string, error) {
|
||||
// "%d %s=%s\n" % (size, key, value)
|
||||
//
|
||||
// Keys and values should be UTF-8, but the number of bad writers out there
|
||||
// forces us to be more liberal.
|
||||
// forces us to be a more liberal.
|
||||
// Thus, we only reject all keys with NUL, and only reject NULs in values
|
||||
// for the PAX version of the USTAR string fields.
|
||||
// The key must not contain an '=' character.
|
||||
|
||||
@@ -439,66 +439,3 @@ func TestFormatPAXRecord(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkParsePAXTime(b *testing.B) {
|
||||
tests := []struct {
|
||||
name string
|
||||
in string
|
||||
want time.Time
|
||||
ok bool
|
||||
}{
|
||||
{
|
||||
name: "NoNanos",
|
||||
in: "123456",
|
||||
want: time.Unix(123456, 0),
|
||||
ok: true,
|
||||
},
|
||||
{
|
||||
name: "ExactNanos",
|
||||
in: "1.123456789",
|
||||
want: time.Unix(1, 123456789),
|
||||
ok: true,
|
||||
},
|
||||
{
|
||||
name: "WithNanoPadding",
|
||||
in: "1.123",
|
||||
want: time.Unix(1, 123000000),
|
||||
ok: true,
|
||||
},
|
||||
{
|
||||
name: "WithNanoTruncate",
|
||||
in: "1.123456789123",
|
||||
want: time.Unix(1, 123456789),
|
||||
ok: true,
|
||||
},
|
||||
{
|
||||
name: "TrailingError",
|
||||
in: "1.123abc",
|
||||
want: time.Time{},
|
||||
ok: false,
|
||||
},
|
||||
{
|
||||
name: "LeadingError",
|
||||
in: "1.abc123",
|
||||
want: time.Time{},
|
||||
ok: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
b.Run(tt.name, func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
for b.Loop() {
|
||||
ts, err := parsePAXTime(tt.in)
|
||||
if (err == nil) != tt.ok {
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.Fatal("expected error")
|
||||
}
|
||||
if !ts.Equal(tt.want) {
|
||||
b.Fatalf("time mismatch: got %v, want %v", ts, tt.want)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,13 +11,11 @@ import (
|
||||
"internal/testenv"
|
||||
"io"
|
||||
"io/fs"
|
||||
"maps"
|
||||
"math"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"slices"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -100,6 +98,10 @@ func (f *testFile) Seek(pos int64, whence int) (int64, error) {
|
||||
return f.pos, nil
|
||||
}
|
||||
|
||||
func equalSparseEntries(x, y []sparseEntry) bool {
|
||||
return (len(x) == 0 && len(y) == 0) || reflect.DeepEqual(x, y)
|
||||
}
|
||||
|
||||
func TestSparseEntries(t *testing.T) {
|
||||
vectors := []struct {
|
||||
in []sparseEntry
|
||||
@@ -196,11 +198,11 @@ func TestSparseEntries(t *testing.T) {
|
||||
continue
|
||||
}
|
||||
gotAligned := alignSparseEntries(append([]sparseEntry{}, v.in...), v.size)
|
||||
if !slices.Equal(gotAligned, v.wantAligned) {
|
||||
if !equalSparseEntries(gotAligned, v.wantAligned) {
|
||||
t.Errorf("test %d, alignSparseEntries():\ngot %v\nwant %v", i, gotAligned, v.wantAligned)
|
||||
}
|
||||
gotInverted := invertSparseEntries(append([]sparseEntry{}, v.in...), v.size)
|
||||
if !slices.Equal(gotInverted, v.wantInverted) {
|
||||
if !equalSparseEntries(gotInverted, v.wantInverted) {
|
||||
t.Errorf("test %d, inverseSparseEntries():\ngot %v\nwant %v", i, gotInverted, v.wantInverted)
|
||||
}
|
||||
}
|
||||
@@ -742,7 +744,7 @@ func TestHeaderAllowedFormats(t *testing.T) {
|
||||
if formats != v.formats {
|
||||
t.Errorf("test %d, allowedFormats(): got %v, want %v", i, formats, v.formats)
|
||||
}
|
||||
if formats&FormatPAX > 0 && !maps.Equal(paxHdrs, v.paxHdrs) && !(len(paxHdrs) == 0 && len(v.paxHdrs) == 0) {
|
||||
if formats&FormatPAX > 0 && !reflect.DeepEqual(paxHdrs, v.paxHdrs) && !(len(paxHdrs) == 0 && len(v.paxHdrs) == 0) {
|
||||
t.Errorf("test %d, allowedFormats():\ngot %v\nwant %s", i, paxHdrs, v.paxHdrs)
|
||||
}
|
||||
if (formats != FormatUnknown) && (err != nil) {
|
||||
|
||||
BIN
src/archive/tar/testdata/gnu-sparse-big.tar
vendored
Normal file
Binary file not shown.
@@ -1,90 +0,0 @@
|
||||
Z251LXNwYXJzZQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAwMDAwMDAAMDAwMDAw
|
||||
MAAwMDAwMDAwADAwMDAwMDA2MDAwADAwMDAwMDAwMDAwADAyMjIyNwAgUwAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB1c3RhciAgAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwMDAwMDAwADAwMDAw
|
||||
MDAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAlQL4gAw
|
||||
MDAwMDAwMTAwMACAAAAAAAAABKgXxgAwMDAwMDAwMTAwMACAAAAAAAAABvwjqgAwMDAwMDAwMTAw
|
||||
MACAAAAAAAAACVAvjgAwMDAwMDAwMTAwMAABgAAAAAAAAA34R1gAAAAAAAAAAAAAAAAAAAAAAACA
|
||||
AAAAAAAAC6Q7cgAwMDAwMDAwMTAwMACAAAAAAAAADfhHVgAwMDAwMDAwMTAwMAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1
|
||||
Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5AAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAADAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2
|
||||
Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAMDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3
|
||||
ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OQAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4
|
||||
OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5AAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAADAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5
|
||||
MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAMDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkw
|
||||
MTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OQAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
||||
Binary file not shown.
BIN
src/archive/tar/testdata/neg-size.tar
vendored
Normal file
Binary file not shown.
9
src/archive/tar/testdata/neg-size.tar.base64
vendored
@@ -1,9 +0,0 @@
|
||||
EwMwMBMDLTgyMTk1MDI5NnQTE4NzfINzEzAwcXfhZrsDMDAwAAAAEDAxMRNz9DEwMTAwdBMTg3N8
|
||||
g3NzADATc3yDc3P0eFMTc/QxMDEwMHQTE4NzfINzc/QwE3N8g3Nz9HFTMNR0MBMwMHEw9DAAAAAQ
|
||||
MDGAABAwMTETc/QxMDH0MHQTMDBx1OFmuwMAAAD/gICAADExNTYyMQAgMBP///+AMTAwdHhTMDB0
|
||||
MBMwMHF3MDEwMTAwdBMTg3N8g3Nz9HhTMDB0MBMwMHF34Wa7AzAwMAAAABAwMTETc/QxMDEwMHQT
|
||||
E4NzfINzc/QwE3N8g3Nz9HhTE3P0MTAxMDB0ExODc3yDc3MwMBNzfINzczB4UzAwdDATMDBxMDAA
|
||||
gAAAEDAxc/QxMDEwMHQTAAAAIOFmuwMwNAAAABAwMTET////gDEwMHR4UzAwdDATMDBxd+FmuwMw
|
||||
MDAAAAAQMDExE3ODc3P0eFMTc/QxMDEwMHQTE4NzfINzc/QzMTEwMzM2MjQ4NDYxMjgzODBzfINz
|
||||
c/R4UzAwdDATMDBxMDAwAAAAEDAxAAAQMDExE3P0MTAxMDB0EzAwcdThZrsDMDQAAAAQg3N8g3Nz
|
||||
9DATc3yDc3P0eFMwMHQwEzAwcTAwMAAAABAwMQAAEDAxMRNz9DEwMTAwdBMwMHgw4Wa7AwAAEDA=
|
||||
BIN
src/archive/tar/testdata/pax-sparse-big.tar
vendored
Normal file
Binary file not shown.
108
src/archive/tar/testdata/pax-sparse-big.tar.base64
vendored
@@ -1,108 +0,0 @@
|
||||
UGF4SGVhZGVycy4wL3BheC1zcGFyc2UAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAwMDAwMDAAMDAwMDAw
|
||||
MAAwMDAwMDAwADAwMDAwMDAwMTcyADAwMDAwMDAwMDAwADAxMjIyNwAgeAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB1c3RhcgAwMAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAy
|
||||
MiBHTlUuc3BhcnNlLm1ham9yPTEKMjIgR05VLnNwYXJzZS5taW5vcj0wCjMwIEdOVS5zcGFyc2Uu
|
||||
bmFtZT1wYXgtc3BhcnNlCjM1IEdOVS5zcGFyc2UucmVhbHNpemU9NjAwMDAwMDAwMDAKMTMgc2l6
|
||||
ZT0zNTg0CgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEdO
|
||||
VVNwYXJzZUZpbGUuMC9wYXgtc3BhcnNlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwMDAwMDAwADAwMDAwMDAA
|
||||
MDAwMDAwMAAwMDAwMDAwNzAwMAAwMDAwMDAwMDAwMAAwMTM3MzcAIDAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAdXN0YXIAMDAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMDAwMDAwMAAwMDAwMDAw
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAANgo5
|
||||
OTk5OTk5NDg4CjUxMgoxOTk5OTk5OTQ4OAo1MTIKMjk5OTk5OTk0ODgKNTEyCjM5OTk5OTk5NDg4
|
||||
CjUxMgo0OTk5OTk5OTQ4OAo1MTIKNTk5OTk5OTk0ODgKNTEyCgAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAMDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3
|
||||
ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OQAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4
|
||||
OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5AAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAADAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5
|
||||
MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAMDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkw
|
||||
MTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OQAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAx
|
||||
MjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5AAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAADAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEy
|
||||
MzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
BIN
src/archive/tar/testdata/writer-big-long.tar
vendored
Normal file
Binary file not shown.
@@ -1,27 +0,0 @@
|
||||
bG9uZ25hbWUvbG9uZ25hbWUvbG9uZ25hbWUvbG9uZ25hbWUvbG9uZ25hbWUvbG9uZ25hbWUvbG9u
|
||||
Z25hbWUvbG9uZ25hbWUvbG9uZ25hbWUvbG9uZ25hbWUvbG9uZ25hbWUvbDAwMDAwMDAAMDAwMDAw
|
||||
MAAwMDAwMDAwADAwMDAwMDAwMjU2ADAwMDAwMDAwMDAwADAzMTQyMAAgeAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB1c3RhcgAwMAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAx
|
||||
NTQgcGF0aD1sb25nbmFtZS9sb25nbmFtZS9sb25nbmFtZS9sb25nbmFtZS9sb25nbmFtZS9sb25n
|
||||
bmFtZS9sb25nbmFtZS9sb25nbmFtZS9sb25nbmFtZS9sb25nbmFtZS9sb25nbmFtZS9sb25nbmFt
|
||||
ZS9sb25nbmFtZS9sb25nbmFtZS9sb25nbmFtZS8xNmdpZy50eHQKMjAgc2l6ZT0xNzE3OTg2OTE4
|
||||
NAoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGxv
|
||||
bmduYW1lL2xvbmduYW1lL2xvbmduYW1lL2xvbmduYW1lL2xvbmduYW1lL2xvbmduYW1lL2xvbmdu
|
||||
YW1lL2xvbmduYW1lL2xvbmduYW1lL2xvbmduYW1lL2xvbmduYW1lL2wwMDAwNjQ0ADAwMDE3NTAA
|
||||
MDAwMTc1MAAwMDAwMDAwMDAwMAAxMjMzMjc3MDUwNwAwMzY0NjIAIDAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAdXN0YXIAMDBndWlsbGF1bWUAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAGd1aWxsYXVtZQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMDAwMDAwMAAwMDAwMDAw
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"maps"
|
||||
"path"
|
||||
"slices"
|
||||
"strings"
|
||||
@@ -170,10 +169,16 @@ func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error {
|
||||
// Write PAX records to the output.
|
||||
isGlobal := hdr.Typeflag == TypeXGlobalHeader
|
||||
if len(paxHdrs) > 0 || isGlobal {
|
||||
// Sort keys for deterministic ordering.
|
||||
var keys []string
|
||||
for k := range paxHdrs {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
slices.Sort(keys)
|
||||
|
||||
// Write each record to a buffer.
|
||||
var buf strings.Builder
|
||||
// Sort keys for deterministic ordering.
|
||||
for _, k := range slices.Sorted(maps.Keys(paxHdrs)) {
|
||||
for _, k := range keys {
|
||||
rec, err := formatPAXRecord(k, paxHdrs[k])
|
||||
if err != nil {
|
||||
return err
|
||||
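The loop above uses the `slices.Sorted(maps.Keys(...))` idiom for deterministic ordering. A standalone sketch, not part of the original change, of the same pattern:

```go
package main

import (
	"fmt"
	"maps"
	"slices"
)

func main() {
	records := map[string]string{"path": "a/b", "size": "42", "mtime": "1"}
	// slices.Sorted drains the iterator returned by maps.Keys into a sorted
	// slice, giving a stable iteration order over the map.
	for _, k := range slices.Sorted(maps.Keys(records)) {
		fmt.Println(k, "=", records[k])
	}
}
```
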
@@ -408,37 +413,25 @@ func (tw *Writer) AddFS(fsys fs.FS) error {
if err != nil {
return err
}
if name == "." {
if d.IsDir() {
return nil
}
info, err := d.Info()
if err != nil {
return err
}
linkTarget := ""
if typ := d.Type(); typ == fs.ModeSymlink {
var err error
linkTarget, err = fs.ReadLink(fsys, name)
if err != nil {
return err
}
} else if !typ.IsRegular() && typ != fs.ModeDir {
// TODO(#49580): Handle symlinks when fs.ReadLinkFS is available.
if !info.Mode().IsRegular() {
return errors.New("tar: cannot add non-regular file")
}
h, err := FileInfoHeader(info, linkTarget)
h, err := FileInfoHeader(info, "")
if err != nil {
return err
}
h.Name = name
if d.IsDir() {
h.Name += "/"
}
if err := tw.WriteHeader(h); err != nil {
return err
}
if !d.Type().IsRegular() {
return nil
}
f, err := fsys.Open(name)
if err != nil {
return err
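For context on the `linkTarget` plumbing above: `tar.FileInfoHeader` cannot discover a symlink's target on its own, so the caller has to read it and pass it in, otherwise `Linkname` stays empty. A minimal standalone sketch using `os.Lstat` and `os.Readlink` (the path is hypothetical, and this is not the `AddFS` code itself):

```go
package main

import (
	"archive/tar"
	"fmt"
	"log"
	"os"
)

// headerFor builds a tar header for path, reading the link target first
// when the entry is a symlink.
func headerFor(path string) (*tar.Header, error) {
	fi, err := os.Lstat(path)
	if err != nil {
		return nil, err
	}
	link := ""
	if fi.Mode()&os.ModeSymlink != 0 {
		// The target must be read separately and passed to FileInfoHeader,
		// otherwise the header's Linkname field stays empty.
		if link, err = os.Readlink(path); err != nil {
			return nil, err
		}
	}
	return tar.FileInfoHeader(fi, link)
}

func main() {
	h, err := headerFor("/tmp/example-symlink") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(h.Name, "->", h.Linkname)
}
```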
@@ -675,7 +668,6 @@ func (sw *sparseFileWriter) ReadFrom(r io.Reader) (n int64, err error) {
|
||||
func (sw sparseFileWriter) logicalRemaining() int64 {
|
||||
return sw.sp[len(sw.sp)-1].endOffset() - sw.pos
|
||||
}
|
||||
|
||||
func (sw sparseFileWriter) physicalRemaining() int64 {
|
||||
return sw.fw.physicalRemaining()
|
||||
}
|
||||
|
||||
@@ -8,14 +8,12 @@ import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"internal/obscuretestdata"
|
||||
"io"
|
||||
"io/fs"
|
||||
"maps"
|
||||
"os"
|
||||
"path"
|
||||
"reflect"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
"testing/fstest"
|
||||
@@ -75,9 +73,8 @@ func TestWriter(t *testing.T) {
|
||||
)
|
||||
|
||||
vectors := []struct {
|
||||
file string // Optional filename of expected output
|
||||
obscured bool // Whether file is obscured
|
||||
tests []testFnc
|
||||
file string // Optional filename of expected output
|
||||
tests []testFnc
|
||||
}{{
|
||||
// The writer test file was produced with this command:
|
||||
// tar (GNU tar) 1.26
|
||||
@@ -153,8 +150,7 @@ func TestWriter(t *testing.T) {
|
||||
// bsdtar -xvf writer-big-long.tar
|
||||
//
|
||||
// This file is in PAX format.
|
||||
file: "testdata/writer-big-long.tar.base64",
|
||||
obscured: true,
|
||||
file: "testdata/writer-big-long.tar",
|
||||
tests: []testFnc{
|
||||
testHeader{Header{
|
||||
Typeflag: TypeReg,
|
||||
@@ -396,8 +392,7 @@ func TestWriter(t *testing.T) {
|
||||
testClose{},
|
||||
},
|
||||
}, {
|
||||
file: "testdata/gnu-sparse-big.tar.base64",
|
||||
obscured: true,
|
||||
file: "testdata/gnu-sparse-big.tar",
|
||||
tests: []testFnc{
|
||||
testHeader{Header{
|
||||
Typeflag: TypeGNUSparse,
|
||||
@@ -429,8 +424,7 @@ func TestWriter(t *testing.T) {
|
||||
testClose{nil},
|
||||
},
|
||||
}, {
|
||||
file: "testdata/pax-sparse-big.tar.base64",
|
||||
obscured: true,
|
||||
file: "testdata/pax-sparse-big.tar",
|
||||
tests: []testFnc{
|
||||
testHeader{Header{
|
||||
Typeflag: TypeReg,
|
||||
@@ -488,7 +482,7 @@ func TestWriter(t *testing.T) {
|
||||
return x == y
|
||||
}
|
||||
for _, v := range vectors {
|
||||
t.Run(strings.TrimSuffix(path.Base(v.file), ".base64"), func(t *testing.T) {
|
||||
t.Run(path.Base(v.file), func(t *testing.T) {
|
||||
const maxSize = 10 << 10 // 10KiB
|
||||
buf := new(bytes.Buffer)
|
||||
tw := NewWriter(iotest.TruncateWriter(buf, maxSize))
|
||||
@@ -527,16 +521,7 @@ func TestWriter(t *testing.T) {
|
||||
}
|
||||
|
||||
if v.file != "" {
|
||||
path := v.file
|
||||
if v.obscured {
|
||||
tf, err := obscuretestdata.DecodeToTempFile(path)
|
||||
if err != nil {
|
||||
t.Fatalf("obscuretestdata.DecodeToTempFile(%s): %v", path, err)
|
||||
}
|
||||
path = tf
|
||||
}
|
||||
|
||||
want, err := os.ReadFile(path)
|
||||
want, err := os.ReadFile(v.file)
|
||||
if err != nil {
|
||||
t.Fatalf("ReadFile() = %v, want nil", err)
|
||||
}
|
||||
@@ -717,7 +702,7 @@ func TestPaxXattrs(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !maps.Equal(hdr.Xattrs, xattrs) {
|
||||
if !reflect.DeepEqual(hdr.Xattrs, xattrs) {
|
||||
t.Fatalf("xattrs did not survive round trip: got %+v, want %+v",
|
||||
hdr.Xattrs, xattrs)
|
||||
}
|
||||
@@ -1353,41 +1338,29 @@ func TestFileWriter(t *testing.T) {
|
||||
|
||||
func TestWriterAddFS(t *testing.T) {
|
||||
fsys := fstest.MapFS{
|
||||
"emptyfolder": {Mode: 0o755 | os.ModeDir},
|
||||
"file.go": {Data: []byte("hello")},
|
||||
"subfolder/another.go": {Data: []byte("world")},
|
||||
"symlink.go": {Mode: 0o777 | os.ModeSymlink, Data: []byte("file.go")},
|
||||
// Notably missing here is the "subfolder" directory. This makes sure even
|
||||
// if we don't have a subfolder directory listed.
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
tw := NewWriter(&buf)
|
||||
if err := tw.AddFS(fsys); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := tw.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Add subfolder into fsys to match what we'll read from the tar.
|
||||
fsys["subfolder"] = &fstest.MapFile{Mode: 0o555 | os.ModeDir}
|
||||
|
||||
// Test that we can get the files back from the archive
|
||||
tr := NewReader(&buf)
|
||||
|
||||
names := make([]string, 0, len(fsys))
|
||||
for name := range fsys {
|
||||
names = append(names, name)
|
||||
entries, err := fsys.ReadDir(".")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
sort.Strings(names)
|
||||
|
||||
entriesLeft := len(fsys)
|
||||
for _, name := range names {
|
||||
entriesLeft--
|
||||
|
||||
entryInfo, err := fsys.Lstat(name)
|
||||
if err != nil {
|
||||
t.Fatalf("getting entry info error: %v", err)
|
||||
var curfname string
|
||||
for _, entry := range entries {
|
||||
curfname = entry.Name()
|
||||
if entry.IsDir() {
|
||||
curfname += "/"
|
||||
continue
|
||||
}
|
||||
hdr, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
@@ -1397,42 +1370,22 @@ func TestWriterAddFS(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tmpName := name
|
||||
if entryInfo.IsDir() {
|
||||
tmpName += "/"
|
||||
}
|
||||
if hdr.Name != tmpName {
|
||||
t.Errorf("test fs has filename %v; archive header has %v",
|
||||
name, hdr.Name)
|
||||
data, err := io.ReadAll(tr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if entryInfo.Mode() != hdr.FileInfo().Mode() {
|
||||
t.Errorf("%s: test fs has mode %v; archive header has %v",
|
||||
name, entryInfo.Mode(), hdr.FileInfo().Mode())
|
||||
if hdr.Name != curfname {
|
||||
t.Fatalf("got filename %v, want %v",
|
||||
curfname, hdr.Name)
|
||||
}
|
||||
|
||||
switch entryInfo.Mode().Type() {
|
||||
case fs.ModeDir:
|
||||
// No additional checks necessary.
|
||||
case fs.ModeSymlink:
|
||||
origtarget := string(fsys[name].Data)
|
||||
if hdr.Linkname != origtarget {
|
||||
t.Fatalf("test fs has link content %s; archive header %v", origtarget, hdr.Linkname)
|
||||
}
|
||||
default:
|
||||
data, err := io.ReadAll(tr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
origdata := fsys[name].Data
|
||||
if string(data) != string(origdata) {
|
||||
t.Fatalf("test fs has file content %v; archive header has %v", origdata, data)
|
||||
}
|
||||
origdata := fsys[curfname].Data
|
||||
if string(data) != string(origdata) {
|
||||
t.Fatalf("got file content %v, want %v",
|
||||
data, origdata)
|
||||
}
|
||||
}
|
||||
if entriesLeft > 0 {
|
||||
t.Fatalf("not all entries are in the archive")
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriterAddFSNonRegularFiles(t *testing.T) {
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"bufio"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"hash/crc32"
|
||||
"internal/godebug"
|
||||
@@ -805,9 +804,6 @@ func toValidName(name string) string {
|
||||
|
||||
func (r *Reader) initFileList() {
|
||||
r.fileListOnce.Do(func() {
|
||||
// Preallocate the minimum size of the index.
|
||||
// We may also synthesize additional directory entries.
|
||||
r.fileList = make([]fileListEntry, 0, len(r.File))
|
||||
// files and knownDirs map from a file/directory name
|
||||
// to an index into the r.fileList entry that we are
|
||||
// building. They are used to mark duplicate entries.
|
||||
@@ -906,8 +902,14 @@ func (r *Reader) Open(name string) (fs.File, error) {
|
||||
}
|
||||
|
||||
func split(name string) (dir, elem string, isDir bool) {
|
||||
name, isDir = strings.CutSuffix(name, "/")
|
||||
i := strings.LastIndexByte(name, '/')
|
||||
if len(name) > 0 && name[len(name)-1] == '/' {
|
||||
isDir = true
|
||||
name = name[:len(name)-1]
|
||||
}
|
||||
i := len(name) - 1
|
||||
for i >= 0 && name[i] != '/' {
|
||||
i--
|
||||
}
|
||||
if i < 0 {
|
||||
return ".", name, isDir
|
||||
}
|
||||
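The rewritten `split` above trims an optional trailing slash with `strings.CutSuffix` and then cuts at the last `/` found by `strings.LastIndexByte`. The hunk stops before the final return, so the sketch below fills that tail in as an assumption; it is a standalone illustration, not the archive/zip source:

```go
package main

import (
	"fmt"
	"strings"
)

// split mirrors the shape of the helper in the hunk above: drop one optional
// trailing slash, then break the name at the last remaining '/'.
func split(name string) (dir, elem string, isDir bool) {
	name, isDir = strings.CutSuffix(name, "/")
	i := strings.LastIndexByte(name, '/')
	if i < 0 {
		return ".", name, isDir
	}
	return name[:i], name[i+1:], isDir // assumed tail, not shown in the hunk
}

func main() {
	fmt.Println(split("a/b/c/")) // a/b c true
	fmt.Println(split("a/b/c"))  // a/b c false
	fmt.Println(split("c"))      // . c false
}
```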
@@ -989,12 +991,6 @@ func (d *openDir) ReadDir(count int) ([]fs.DirEntry, error) {
|
||||
s, err := d.files[d.offset+i].stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if s.Name() == "." || !fs.ValidPath(s.Name()) {
|
||||
return nil, &fs.PathError{
|
||||
Op: "readdir",
|
||||
Path: d.e.name,
|
||||
Err: fmt.Errorf("invalid file name: %v", d.files[d.offset+i].name),
|
||||
}
|
||||
}
|
||||
list[i] = s
|
||||
}
|
||||
|
||||
@@ -8,14 +8,13 @@ import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"internal/obscuretestdata"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"slices"
|
||||
"strings"
|
||||
"testing"
|
||||
"testing/fstest"
|
||||
@@ -1213,6 +1212,7 @@ func TestFS(t *testing.T) {
|
||||
[]string{"a/b/c"},
|
||||
},
|
||||
} {
|
||||
test := test
|
||||
t.Run(test.file, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
z, err := OpenReader(test.file)
|
||||
@@ -1246,6 +1246,7 @@ func TestFSWalk(t *testing.T) {
|
||||
wantErr: true,
|
||||
},
|
||||
} {
|
||||
test := test
|
||||
t.Run(test.file, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
z, err := OpenReader(test.file)
|
||||
@@ -1273,56 +1274,13 @@ func TestFSWalk(t *testing.T) {
|
||||
} else if !test.wantErr && sawErr {
|
||||
t.Error("unexpected error")
|
||||
}
|
||||
if test.want != nil && !slices.Equal(files, test.want) {
|
||||
if test.want != nil && !reflect.DeepEqual(files, test.want) {
|
||||
t.Errorf("got %v want %v", files, test.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFSWalkBadFile(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var buf bytes.Buffer
|
||||
zw := NewWriter(&buf)
|
||||
hdr := &FileHeader{Name: "."}
|
||||
hdr.SetMode(fs.ModeDir | 0o755)
|
||||
w, err := zw.CreateHeader(hdr)
|
||||
if err != nil {
|
||||
t.Fatalf("create zip header: %v", err)
|
||||
}
|
||||
_, err = w.Write([]byte("some data"))
|
||||
if err != nil {
|
||||
t.Fatalf("write zip contents: %v", err)
|
||||
|
||||
}
|
||||
err = zw.Close()
|
||||
if err != nil {
|
||||
t.Fatalf("close zip writer: %v", err)
|
||||
|
||||
}
|
||||
|
||||
zr, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
|
||||
if err != nil {
|
||||
t.Fatalf("create zip reader: %v", err)
|
||||
|
||||
}
|
||||
var count int
|
||||
var errRepeat = errors.New("repeated call to path")
|
||||
err = fs.WalkDir(zr, ".", func(p string, d fs.DirEntry, err error) error {
|
||||
count++
|
||||
if count > 2 { // once for directory read, once for the error
|
||||
return errRepeat
|
||||
}
|
||||
return err
|
||||
})
|
||||
if err == nil {
|
||||
t.Fatalf("expected error from invalid file name")
|
||||
} else if errors.Is(err, errRepeat) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFSModTime(t *testing.T) {
|
||||
t.Parallel()
|
||||
z, err := OpenReader("testdata/subdir.zip")
|
||||
@@ -1622,7 +1580,7 @@ func TestCVE202141772(t *testing.T) {
|
||||
t.Errorf("Opening %q with fs.FS API succeeded", f.Name)
|
||||
}
|
||||
}
|
||||
if !slices.Equal(names, entryNames) {
|
||||
if !reflect.DeepEqual(names, entryNames) {
|
||||
t.Errorf("Unexpected file entries: %q", names)
|
||||
}
|
||||
if _, err := r.Open(""); err == nil {
|
||||
@@ -1735,7 +1693,7 @@ func TestInsecurePaths(t *testing.T) {
|
||||
for _, f := range zr.File {
|
||||
gotPaths = append(gotPaths, f.Name)
|
||||
}
|
||||
if !slices.Equal(gotPaths, []string{path}) {
|
||||
if !reflect.DeepEqual(gotPaths, []string{path}) {
|
||||
t.Errorf("NewReader for archive with file %q: got files %q", path, gotPaths)
|
||||
continue
|
||||
}
|
||||
@@ -1760,7 +1718,7 @@ func TestDisableInsecurePathCheck(t *testing.T) {
|
||||
for _, f := range zr.File {
|
||||
gotPaths = append(gotPaths, f.Name)
|
||||
}
|
||||
if want := []string{name}; !slices.Equal(gotPaths, want) {
|
||||
if want := []string{name}; !reflect.DeepEqual(gotPaths, want) {
|
||||
t.Errorf("NewReader with zipinsecurepath=1: got files %q, want %q", gotPaths, want)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -505,14 +505,14 @@ func (w *Writer) AddFS(fsys fs.FS) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if name == "." {
|
||||
if d.IsDir() {
|
||||
return nil
|
||||
}
|
||||
info, err := d.Info()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !d.IsDir() && !info.Mode().IsRegular() {
|
||||
if !info.Mode().IsRegular() {
|
||||
return errors.New("zip: cannot add non-regular file")
|
||||
}
|
||||
h, err := FileInfoHeader(info)
|
||||
@@ -520,17 +520,11 @@ func (w *Writer) AddFS(fsys fs.FS) error {
|
||||
return err
|
||||
}
|
||||
h.Name = name
|
||||
if d.IsDir() {
|
||||
h.Name += "/"
|
||||
}
|
||||
h.Method = Deflate
|
||||
fw, err := w.CreateHeader(h)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if d.IsDir() {
|
||||
return nil
|
||||
}
|
||||
f, err := fsys.Open(name)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
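The hunk above is the body of `(*zip.Writer).AddFS`. As a usage-side illustration (a sketch assuming the Go 1.22+ `AddFS` API, not part of the change itself), an in-memory tree can be archived like this:

```go
package main

import (
	"archive/zip"
	"bytes"
	"log"
	"testing/fstest"
)

func main() {
	// An in-memory file tree; fstest.MapFS is handy for examples and tests.
	fsys := fstest.MapFS{
		"file.go":              {Data: []byte("hello")},
		"subfolder/another.go": {Data: []byte("world")},
	}

	var buf bytes.Buffer
	zw := zip.NewWriter(&buf)
	if err := zw.AddFS(fsys); err != nil { // walks fsys and writes each regular file
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("archive is %d bytes", buf.Len())
}
```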
@@ -108,7 +108,7 @@ func TestWriter(t *testing.T) {
|
||||
|
||||
// TestWriterComment is test for EOCD comment read/write.
|
||||
func TestWriterComment(t *testing.T) {
|
||||
tests := []struct {
|
||||
var tests = []struct {
|
||||
comment string
|
||||
ok bool
|
||||
}{
|
||||
@@ -158,7 +158,7 @@ func TestWriterComment(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestWriterUTF8(t *testing.T) {
|
||||
utf8Tests := []struct {
|
||||
var utf8Tests = []struct {
|
||||
name string
|
||||
comment string
|
||||
nonUTF8 bool
|
||||
@@ -619,32 +619,32 @@ func TestWriterAddFS(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
w := NewWriter(buf)
|
||||
tests := []WriteTest{
|
||||
{Name: "emptyfolder", Mode: 0o755 | os.ModeDir},
|
||||
{Name: "file.go", Data: []byte("hello"), Mode: 0644},
|
||||
{Name: "subfolder/another.go", Data: []byte("world"), Mode: 0644},
|
||||
// Notably missing here is the "subfolder" directory. This makes sure even
|
||||
// if we don't have a subfolder directory listed.
|
||||
{
|
||||
Name: "file.go",
|
||||
Data: []byte("hello"),
|
||||
Mode: 0644,
|
||||
},
|
||||
{
|
||||
Name: "subfolder/another.go",
|
||||
Data: []byte("world"),
|
||||
Mode: 0644,
|
||||
},
|
||||
}
|
||||
err := w.AddFS(writeTestsToFS(tests))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := w.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Add subfolder into fsys to match what we'll read from the zip.
|
||||
tests = append(tests[:2:2], WriteTest{Name: "subfolder", Mode: 0o555 | os.ModeDir}, tests[2])
|
||||
|
||||
// read it back
|
||||
r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for i, wt := range tests {
|
||||
if wt.Mode.IsDir() {
|
||||
wt.Name += "/"
|
||||
}
|
||||
testReadFile(t, r.File[i], &wt)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -29,9 +29,6 @@ var (
|
||||
// Buffered input.
|
||||
|
||||
// Reader implements buffering for an io.Reader object.
|
||||
// A new Reader is created by calling [NewReader] or [NewReaderSize];
|
||||
// alternatively the zero value of a Reader may be used after calling [Reset]
|
||||
// on it.
|
||||
type Reader struct {
|
||||
buf []byte
|
||||
rd io.Reader // reader provided by the client
|
||||
@@ -133,10 +130,9 @@ func (b *Reader) readErr() error {
|
||||
}
|
||||
|
||||
// Peek returns the next n bytes without advancing the reader. The bytes stop
|
||||
// being valid at the next read call. If necessary, Peek will read more bytes
|
||||
// into the buffer in order to make n bytes available. If Peek returns fewer
|
||||
// than n bytes, it also returns an error explaining why the read is short.
|
||||
// The error is [ErrBufferFull] if n is larger than b's buffer size.
|
||||
// being valid at the next read call. If Peek returns fewer than n bytes, it
|
||||
// also returns an error explaining why the read is short. The error is
|
||||
// [ErrBufferFull] if n is larger than b's buffer size.
|
||||
//
|
||||
// Calling Peek prevents a [Reader.UnreadByte] or [Reader.UnreadRune] call from succeeding
|
||||
// until the next read operation.
|
||||
@@ -311,7 +307,10 @@ func (b *Reader) ReadRune() (r rune, size int, err error) {
|
||||
if b.r == b.w {
|
||||
return 0, 0, b.readErr()
|
||||
}
|
||||
r, size = utf8.DecodeRune(b.buf[b.r:b.w])
|
||||
r, size = rune(b.buf[b.r]), 1
|
||||
if r >= utf8.RuneSelf {
|
||||
r, size = utf8.DecodeRune(b.buf[b.r:b.w])
|
||||
}
|
||||
b.r += size
|
||||
b.lastByte = int(b.buf[b.r-1])
|
||||
b.lastRuneSize = size
|
||||
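The `ReadRune` hunk above adds a single-byte fast path: a byte below `utf8.RuneSelf` is already a complete rune, so `utf8.DecodeRune` is only called for multi-byte sequences. A standalone sketch of that pattern, outside bufio:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

// nextRune shows the fast path in isolation: ASCII bytes decode to themselves
// with size 1; only bytes >= utf8.RuneSelf need the full decoder.
func nextRune(p []byte) (r rune, size int) {
	r, size = rune(p[0]), 1
	if r >= utf8.RuneSelf {
		r, size = utf8.DecodeRune(p)
	}
	return r, size
}

func main() {
	s := []byte("aé☺")
	for len(s) > 0 {
		r, n := nextRune(s)
		fmt.Printf("%q takes %d byte(s)\n", r, n)
		s = s[n:]
	}
}
```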
@@ -516,11 +515,9 @@ func (b *Reader) WriteTo(w io.Writer) (n int64, err error) {
|
||||
b.lastByte = -1
|
||||
b.lastRuneSize = -1
|
||||
|
||||
if b.r < b.w {
|
||||
n, err = b.writeBuf(w)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n, err = b.writeBuf(w)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if r, ok := b.rd.(io.WriterTo); ok {
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"internal/asan"
|
||||
"io"
|
||||
"math/rand"
|
||||
"strconv"
|
||||
@@ -586,9 +585,6 @@ func TestWriteInvalidRune(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestReadStringAllocs(t *testing.T) {
|
||||
if asan.Enabled {
|
||||
t.Skip("test allocates more with -asan; see #70079")
|
||||
}
|
||||
r := strings.NewReader(" foo foo 42 42 42 42 42 42 42 42 4.2 4.2 4.2 4.2\n")
|
||||
buf := NewReader(r)
|
||||
allocs := testing.AllocsPerRun(100, func() {
|
||||
@@ -640,7 +636,7 @@ func TestWriter(t *testing.T) {
|
||||
for l := 0; l < len(written); l++ {
|
||||
if written[l] != data[l] {
|
||||
t.Errorf("wrong bytes written")
|
||||
t.Errorf("want=%q", data[:len(written)])
|
||||
t.Errorf("want=%q", data[0:len(written)])
|
||||
t.Errorf("have=%q", written)
|
||||
}
|
||||
}
|
||||
@@ -939,6 +935,7 @@ func (t *testReader) Read(buf []byte) (n int, err error) {
|
||||
}
|
||||
|
||||
func testReadLine(t *testing.T, input []byte) {
|
||||
//for stride := 1; stride < len(input); stride++ {
|
||||
for stride := 1; stride < 2; stride++ {
|
||||
done := 0
|
||||
reader := testReader{input, stride}
|
||||
@@ -1149,7 +1146,7 @@ func (w errorWriterToTest) Write(p []byte) (int, error) {
|
||||
var errorWriterToTests = []errorWriterToTest{
|
||||
{1, 0, nil, io.ErrClosedPipe, io.ErrClosedPipe},
|
||||
{0, 1, io.ErrClosedPipe, nil, io.ErrClosedPipe},
|
||||
{0, 0, io.ErrUnexpectedEOF, io.ErrClosedPipe, io.ErrUnexpectedEOF},
|
||||
{0, 0, io.ErrUnexpectedEOF, io.ErrClosedPipe, io.ErrClosedPipe},
|
||||
{0, 1, io.EOF, nil, nil},
|
||||
}
|
||||
|
||||
|
||||
@@ -33,33 +33,6 @@ func ExampleWriter_AvailableBuffer() {
|
||||
// Output: 1 2 3 4
|
||||
}
|
||||
|
||||
// ExampleWriter_ReadFrom demonstrates how to use the ReadFrom method of Writer.
|
||||
func ExampleWriter_ReadFrom() {
|
||||
var buf bytes.Buffer
|
||||
writer := bufio.NewWriter(&buf)
|
||||
|
||||
data := "Hello, world!\nThis is a ReadFrom example."
|
||||
reader := strings.NewReader(data)
|
||||
|
||||
n, err := writer.ReadFrom(reader)
|
||||
if err != nil {
|
||||
fmt.Println("ReadFrom Error:", err)
|
||||
return
|
||||
}
|
||||
|
||||
if err = writer.Flush(); err != nil {
|
||||
fmt.Println("Flush Error:", err)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Println("Bytes written:", n)
|
||||
fmt.Println("Buffer contents:", buf.String())
|
||||
// Output:
|
||||
// Bytes written: 41
|
||||
// Buffer contents: Hello, world!
|
||||
// This is a ReadFrom example.
|
||||
}
|
||||
|
||||
// The simplest use of a Scanner, to read standard input as a set of lines.
|
||||
func ExampleScanner_lines() {
|
||||
scanner := bufio.NewScanner(os.Stdin)
|
||||
|
||||
@@ -1,96 +0,0 @@
|
||||
// Copyright 2025 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build unix
|
||||
|
||||
package bufio_test
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"net"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestCopyUnixpacket tests that we can use bufio when copying
|
||||
// across a unixpacket socket. This used to fail due to an unnecessary
|
||||
// empty Write call that was interpreted as an EOF.
|
||||
func TestCopyUnixpacket(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
socket := filepath.Join(tmpDir, "unixsock")
|
||||
|
||||
// Start a unixpacket server.
|
||||
addr := &net.UnixAddr{
|
||||
Name: socket,
|
||||
Net: "unixpacket",
|
||||
}
|
||||
server, err := net.ListenUnix("unixpacket", addr)
|
||||
if err != nil {
|
||||
t.Skipf("skipping test because opening a unixpacket socket failed: %v", err)
|
||||
}
|
||||
|
||||
// Start a goroutine for the server to accept one connection
|
||||
// and read all the data sent on the connection,
|
||||
// reporting the number of bytes read on ch.
|
||||
ch := make(chan int, 1)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
tot := 0
|
||||
defer func() {
|
||||
ch <- tot
|
||||
}()
|
||||
|
||||
serverConn, err := server.Accept()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
buf := make([]byte, 1024)
|
||||
for {
|
||||
n, err := serverConn.Read(buf)
|
||||
tot += n
|
||||
if err == io.EOF {
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
clientConn, err := net.DialUnix("unixpacket", nil, addr)
|
||||
if err != nil {
|
||||
// Leaves the server goroutine hanging. Oh well.
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
defer wg.Wait()
|
||||
defer clientConn.Close()
|
||||
|
||||
const data = "data"
|
||||
r := bufio.NewReader(strings.NewReader(data))
|
||||
n, err := io.Copy(clientConn, r)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if n != int64(len(data)) {
|
||||
t.Errorf("io.Copy returned %d, want %d", n, len(data))
|
||||
}
|
||||
|
||||
clientConn.Close()
|
||||
tot := <-ch
|
||||
|
||||
if tot != len(data) {
|
||||
t.Errorf("server read %d, want %d", tot, len(data))
|
||||
}
|
||||
}
|
||||
@@ -260,11 +260,8 @@ func (s *Scanner) setErr(err error) {
|
||||
}
|
||||
}
|
||||
|
||||
// Buffer controls memory allocation by the Scanner.
|
||||
// It sets the initial buffer to use when scanning
|
||||
// Buffer sets the initial buffer to use when scanning
|
||||
// and the maximum size of buffer that may be allocated during scanning.
|
||||
// The contents of the buffer are ignored.
|
||||
//
|
||||
// The maximum token size must be less than the larger of max and cap(buf).
|
||||
// If max <= cap(buf), [Scanner.Scan] will use this buffer only and do no allocation.
|
||||
//
|
||||
|
||||
@@ -162,12 +162,12 @@ func delete(m map[Type]Type1, key Type)
|
||||
|
||||
// The len built-in function returns the length of v, according to its type:
|
||||
//
|
||||
// - Array: the number of elements in v.
|
||||
// - Pointer to array: the number of elements in *v (even if v is nil).
|
||||
// - Slice, or map: the number of elements in v; if v is nil, len(v) is zero.
|
||||
// - String: the number of bytes in v.
|
||||
// - Channel: the number of elements queued (unread) in the channel buffer;
|
||||
// if v is nil, len(v) is zero.
|
||||
// Array: the number of elements in v.
|
||||
// Pointer to array: the number of elements in *v (even if v is nil).
|
||||
// Slice, or map: the number of elements in v; if v is nil, len(v) is zero.
|
||||
// String: the number of bytes in v.
|
||||
// Channel: the number of elements queued (unread) in the channel buffer;
|
||||
// if v is nil, len(v) is zero.
|
||||
//
|
||||
// For some arguments, such as a string literal or a simple array expression, the
|
||||
// result can be a constant. See the Go language specification's "Length and
|
||||
@@ -176,12 +176,12 @@ func len(v Type) int
|
||||
|
||||
// The cap built-in function returns the capacity of v, according to its type:
|
||||
//
|
||||
// - Array: the number of elements in v (same as len(v)).
|
||||
// - Pointer to array: the number of elements in *v (same as len(v)).
|
||||
// - Slice: the maximum length the slice can reach when resliced;
|
||||
// if v is nil, cap(v) is zero.
|
||||
// - Channel: the channel buffer capacity, in units of elements;
|
||||
// if v is nil, cap(v) is zero.
|
||||
// Array: the number of elements in v (same as len(v)).
|
||||
// Pointer to array: the number of elements in *v (same as len(v)).
|
||||
// Slice: the maximum length the slice can reach when resliced;
|
||||
// if v is nil, cap(v) is zero.
|
||||
// Channel: the channel buffer capacity, in units of elements;
|
||||
// if v is nil, cap(v) is zero.
|
||||
//
|
||||
// For some arguments, such as a simple array expression, the result can be a
|
||||
// constant. See the Go language specification's "Length and capacity" section for
|
||||
@@ -194,18 +194,18 @@ func cap(v Type) int
|
||||
// argument, not a pointer to it. The specification of the result depends on
|
||||
// the type:
|
||||
//
|
||||
// - Slice: The size specifies the length. The capacity of the slice is
|
||||
// equal to its length. A second integer argument may be provided to
|
||||
// specify a different capacity; it must be no smaller than the
|
||||
// length. For example, make([]int, 0, 10) allocates an underlying array
|
||||
// of size 10 and returns a slice of length 0 and capacity 10 that is
|
||||
// backed by this underlying array.
|
||||
// - Map: An empty map is allocated with enough space to hold the
|
||||
// specified number of elements. The size may be omitted, in which case
|
||||
// a small starting size is allocated.
|
||||
// - Channel: The channel's buffer is initialized with the specified
|
||||
// buffer capacity. If zero, or the size is omitted, the channel is
|
||||
// unbuffered.
|
||||
// Slice: The size specifies the length. The capacity of the slice is
|
||||
// equal to its length. A second integer argument may be provided to
|
||||
// specify a different capacity; it must be no smaller than the
|
||||
// length. For example, make([]int, 0, 10) allocates an underlying array
|
||||
// of size 10 and returns a slice of length 0 and capacity 10 that is
|
||||
// backed by this underlying array.
|
||||
// Map: An empty map is allocated with enough space to hold the
|
||||
// specified number of elements. The size may be omitted, in which case
|
||||
// a small starting size is allocated.
|
||||
// Channel: The channel's buffer is initialized with the specified
|
||||
// buffer capacity. If zero, or the size is omitted, the channel is
|
||||
// unbuffered.
|
||||
func make(t Type, size ...IntegerType) Type
|
||||
|
||||
// The max built-in function returns the largest value of a fixed number of
|
||||
@@ -247,7 +247,7 @@ func imag(c ComplexType) FloatType
|
||||
// to the zero value of the respective element type. If the argument
|
||||
// type is a type parameter, the type parameter's type set must
|
||||
// contain only map or slice types, and clear performs the operation
|
||||
// implied by the type argument. If t is nil, clear is a no-op.
|
||||
// implied by the type argument.
|
||||
func clear[T ~[]Type | ~map[Type]Type1](t T)
|
||||
|
||||
// The close built-in function closes a channel, which must be either
|
||||
|
||||
@@ -21,12 +21,6 @@ type Buffer struct {
|
||||
buf []byte // contents are the bytes buf[off : len(buf)]
|
||||
off int // read at &buf[off], write at &buf[len(buf)]
|
||||
lastRead readOp // last read operation, so that Unread* can work correctly.
|
||||
|
||||
// Copying and modifying a non-zero Buffer is prone to error,
|
||||
// but we cannot employ the noCopy trick used by WaitGroup and Mutex,
|
||||
// which causes vet's copylocks checker to report misuse, as vet
|
||||
// cannot reliably distinguish the zero and non-zero cases.
|
||||
// See #26462, #25907, #47276, #48398 for history.
|
||||
}
|
||||
|
||||
// The readOp constants describe the last action performed on
|
||||
@@ -77,18 +71,6 @@ func (b *Buffer) String() string {
|
||||
return string(b.buf[b.off:])
|
||||
}
|
||||
|
||||
// Peek returns the next n bytes without advancing the buffer.
|
||||
// If Peek returns fewer than n bytes, it also returns [io.EOF].
|
||||
// The slice is only valid until the next call to a read or write method.
|
||||
// The slice aliases the buffer content at least until the next buffer modification,
|
||||
// so immediate changes to the slice will affect the result of future reads.
|
||||
func (b *Buffer) Peek(n int) ([]byte, error) {
|
||||
if b.Len() < n {
|
||||
return b.buf[b.off:], io.EOF
|
||||
}
|
||||
return b.buf[b.off : b.off+n], nil
|
||||
}
|
||||
|
||||
// empty reports whether the unread portion of the buffer is empty.
|
||||
func (b *Buffer) empty() bool { return len(b.buf) <= b.off }
|
||||
|
||||
@@ -265,8 +247,8 @@ func growSlice(b []byte, n int) []byte {
|
||||
c = 2 * cap(b)
|
||||
}
|
||||
b2 := append([]byte(nil), make([]byte, c)...)
|
||||
i := copy(b2, b)
|
||||
return b2[:i]
|
||||
copy(b2, b)
|
||||
return b2[:len(b)]
|
||||
}
|
||||
|
||||
// WriteTo writes data to w until the buffer is drained or an error occurs.
|
||||
|
||||
@@ -213,7 +213,7 @@ func TestLargeByteWrites(t *testing.T) {
|
||||
func TestLargeStringReads(t *testing.T) {
|
||||
var buf Buffer
|
||||
for i := 3; i < 30; i += 3 {
|
||||
s := fillString(t, "TestLargeReads (1)", &buf, "", 5, testString[:len(testString)/i])
|
||||
s := fillString(t, "TestLargeReads (1)", &buf, "", 5, testString[0:len(testString)/i])
|
||||
empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(testString)))
|
||||
}
|
||||
check(t, "TestLargeStringReads (3)", &buf, "")
|
||||
@@ -222,7 +222,7 @@ func TestLargeStringReads(t *testing.T) {
|
||||
func TestLargeByteReads(t *testing.T) {
|
||||
var buf Buffer
|
||||
for i := 3; i < 30; i += 3 {
|
||||
s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[:len(testBytes)/i])
|
||||
s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
|
||||
empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(testString)))
|
||||
}
|
||||
check(t, "TestLargeByteReads (3)", &buf, "")
|
||||
@@ -274,7 +274,7 @@ func TestNil(t *testing.T) {
|
||||
func TestReadFrom(t *testing.T) {
|
||||
var buf Buffer
|
||||
for i := 3; i < 30; i += 3 {
|
||||
s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[:len(testBytes)/i])
|
||||
s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
|
||||
var b Buffer
|
||||
b.ReadFrom(&buf)
|
||||
empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(testString)))
|
||||
@@ -337,7 +337,7 @@ func TestReadFromNegativeReader(t *testing.T) {
|
||||
func TestWriteTo(t *testing.T) {
|
||||
var buf Buffer
|
||||
for i := 3; i < 30; i += 3 {
|
||||
s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[:len(testBytes)/i])
|
||||
s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
|
||||
var b Buffer
|
||||
buf.WriteTo(&b)
|
||||
empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(testString)))
|
||||
@@ -354,7 +354,7 @@ func TestWriteAppend(t *testing.T) {
|
||||
got.Write(b)
|
||||
}
|
||||
if !Equal(got.Bytes(), want) {
|
||||
t.Fatalf("Bytes() = %q, want %q", &got, want)
|
||||
t.Fatalf("Bytes() = %q, want %q", got, want)
|
||||
}
|
||||
|
||||
// With a sufficiently sized buffer, there should be no allocations.
|
||||
@@ -531,40 +531,6 @@ func TestReadString(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
var peekTests = []struct {
|
||||
buffer string
|
||||
skip int
|
||||
n int
|
||||
expected string
|
||||
err error
|
||||
}{
|
||||
{"", 0, 0, "", nil},
|
||||
{"aaa", 0, 3, "aaa", nil},
|
||||
{"foobar", 0, 2, "fo", nil},
|
||||
{"a", 0, 2, "a", io.EOF},
|
||||
{"helloworld", 4, 3, "owo", nil},
|
||||
{"helloworld", 5, 5, "world", nil},
|
||||
{"helloworld", 5, 6, "world", io.EOF},
|
||||
{"helloworld", 10, 1, "", io.EOF},
|
||||
}
|
||||
|
||||
func TestPeek(t *testing.T) {
|
||||
for _, test := range peekTests {
|
||||
buf := NewBufferString(test.buffer)
|
||||
buf.Next(test.skip)
|
||||
bytes, err := buf.Peek(test.n)
|
||||
if string(bytes) != test.expected {
|
||||
t.Errorf("expected %q, got %q", test.expected, bytes)
|
||||
}
|
||||
if err != test.err {
|
||||
t.Errorf("expected error %v, got %v", test.err, err)
|
||||
}
|
||||
if buf.Len() != len(test.buffer)-test.skip {
|
||||
t.Errorf("bad length after peek: %d, want %d", buf.Len(), len(test.buffer)-test.skip)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReadString(b *testing.B) {
|
||||
const n = 32 << 10
|
||||
|
||||
|
||||
@@ -8,7 +8,6 @@ package bytes
|
||||
|
||||
import (
|
||||
"internal/bytealg"
|
||||
"math/bits"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
_ "unsafe" // for linkname
|
||||
@@ -137,7 +136,6 @@ func LastIndexByte(s []byte, c byte) int {
|
||||
// If r is [utf8.RuneError], it returns the first instance of any
|
||||
// invalid UTF-8 byte sequence.
|
||||
func IndexRune(s []byte, r rune) int {
|
||||
const haveFastIndex = bytealg.MaxBruteForce > 0
|
||||
switch {
|
||||
case 0 <= r && r < utf8.RuneSelf:
|
||||
return IndexByte(s, byte(r))
|
||||
@@ -153,64 +151,9 @@ func IndexRune(s []byte, r rune) int {
|
||||
case !utf8.ValidRune(r):
|
||||
return -1
|
||||
default:
|
||||
// Search for rune r using the last byte of its UTF-8 encoded form.
|
||||
// The distribution of the last byte is more uniform compared to the
|
||||
// first byte which has a 78% chance of being [240, 243, 244].
|
||||
var b [utf8.UTFMax]byte
|
||||
n := utf8.EncodeRune(b[:], r)
|
||||
last := n - 1
|
||||
i := last
|
||||
fails := 0
|
||||
for i < len(s) {
|
||||
if s[i] != b[last] {
|
||||
o := IndexByte(s[i+1:], b[last])
|
||||
if o < 0 {
|
||||
return -1
|
||||
}
|
||||
i += o + 1
|
||||
}
|
||||
// Step backwards comparing bytes.
|
||||
for j := 1; j < n; j++ {
|
||||
if s[i-j] != b[last-j] {
|
||||
goto next
|
||||
}
|
||||
}
|
||||
return i - last
|
||||
next:
|
||||
fails++
|
||||
i++
|
||||
if (haveFastIndex && fails > bytealg.Cutover(i)) && i < len(s) ||
|
||||
(!haveFastIndex && fails >= 4+i>>4 && i < len(s)) {
|
||||
goto fallback
|
||||
}
|
||||
}
|
||||
return -1
|
||||
|
||||
fallback:
|
||||
// Switch to bytealg.Index, if available, or a brute force search when
|
||||
// IndexByte returns too many false positives.
|
||||
if haveFastIndex {
|
||||
if j := bytealg.Index(s[i-last:], b[:n]); j >= 0 {
|
||||
return i + j - last
|
||||
}
|
||||
} else {
|
||||
// If bytealg.Index is not available a brute force search is
|
||||
// ~1.5-3x faster than Rabin-Karp since n is small.
|
||||
c0 := b[last]
|
||||
c1 := b[last-1] // There are at least 2 chars to match
|
||||
loop:
|
||||
for ; i < len(s); i++ {
|
||||
if s[i] == c0 && s[i-1] == c1 {
|
||||
for k := 2; k < n; k++ {
|
||||
if s[i-k] != b[last-k] {
|
||||
continue loop
|
||||
}
|
||||
}
|
||||
return i - last
|
||||
}
|
||||
}
|
||||
}
|
||||
return -1
|
||||
return Index(s, b[:n])
|
||||
}
|
||||
}
|
||||
|
||||
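The long hunk above concerns `IndexRune`'s strategy of scanning for the last byte of the rune's UTF-8 encoding and then verifying the bytes before it. The sketch below is a simplified standalone version of that idea (without the cutover-to-`bytealg.Index` heuristics), not the bytes package code itself:

```go
package main

import (
	"bytes"
	"fmt"
	"unicode/utf8"
)

// indexRuneByLastByte encodes r, uses IndexByte to find candidates for its
// final byte (whose distribution is more uniform than the first byte's), and
// confirms a candidate by comparing the full encoded sequence.
func indexRuneByLastByte(s []byte, r rune) int {
	var b [utf8.UTFMax]byte
	n := utf8.EncodeRune(b[:], r)
	last := n - 1
	for i := last; i < len(s); i++ {
		if s[i] != b[last] {
			o := bytes.IndexByte(s[i+1:], b[last])
			if o < 0 {
				return -1
			}
			i += o + 1 // jump to the next candidate final byte
		}
		if bytes.Equal(s[i-last:i+1], b[:n]) {
			return i - last
		}
	}
	return -1
}

func main() {
	fmt.Println(indexRuneByLastByte([]byte("oxoxoxoxox☺"), '☺')) // 10
}
```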
@@ -451,9 +394,7 @@ var asciiSpace = [256]uint8{'\t': 1, '\n': 1, '\v': 1, '\f': 1, '\r': 1, ' ': 1}
|
||||
// Fields interprets s as a sequence of UTF-8-encoded code points.
|
||||
// It splits the slice s around each instance of one or more consecutive white space
|
||||
// characters, as defined by [unicode.IsSpace], returning a slice of subslices of s or an
|
||||
// empty slice if s contains only white space. Every element of the returned slice is
|
||||
// non-empty. Unlike [Split], leading and trailing runs of white space characters
|
||||
// are discarded.
|
||||
// empty slice if s contains only white space.
|
||||
func Fields(s []byte) [][]byte {
|
||||
// First count the fields.
|
||||
// This is an exact count if s is ASCII, otherwise it is an approximation.
|
||||
@@ -507,9 +448,7 @@ func Fields(s []byte) [][]byte {
|
||||
// FieldsFunc interprets s as a sequence of UTF-8-encoded code points.
|
||||
// It splits the slice s at each run of code points c satisfying f(c) and
|
||||
// returns a slice of subslices of s. If all code points in s satisfy f(c), or
|
||||
// len(s) == 0, an empty slice is returned. Every element of the returned slice is
|
||||
// non-empty. Unlike [Split], leading and trailing runs of code points
|
||||
// satisfying f(c) are discarded.
|
||||
// len(s) == 0, an empty slice is returned.
|
||||
//
|
||||
// FieldsFunc makes no guarantees about the order in which it calls f(c)
|
||||
// and assumes that f always returns the same value for a given c.
|
||||
@@ -528,7 +467,11 @@ func FieldsFunc(s []byte, f func(rune) bool) [][]byte {
|
||||
// more efficient, possibly due to cache effects.
|
||||
start := -1 // valid span start if >= 0
|
||||
for i := 0; i < len(s); {
|
||||
r, size := utf8.DecodeRune(s[i:])
|
||||
size := 1
|
||||
r := rune(s[i])
|
||||
if r >= utf8.RuneSelf {
|
||||
r, size = utf8.DecodeRune(s[i:])
|
||||
}
|
||||
if f(r) {
|
||||
if start >= 0 {
|
||||
spans = append(spans, span{start, i})
|
||||
@@ -592,7 +535,7 @@ func Join(s [][]byte, sep []byte) []byte {
|
||||
|
||||
// HasPrefix reports whether the byte slice s begins with prefix.
|
||||
func HasPrefix(s, prefix []byte) bool {
|
||||
return len(s) >= len(prefix) && Equal(s[:len(prefix)], prefix)
|
||||
return len(s) >= len(prefix) && Equal(s[0:len(prefix)], prefix)
|
||||
}
|
||||
|
||||
// HasSuffix reports whether the byte slice s ends with suffix.
|
||||
@@ -610,7 +553,11 @@ func Map(mapping func(r rune) rune, s []byte) []byte {
|
||||
// fine. It could also shrink but that falls out naturally.
|
||||
b := make([]byte, 0, len(s))
|
||||
for i := 0; i < len(s); {
|
||||
r, wid := utf8.DecodeRune(s[i:])
|
||||
wid := 1
|
||||
r := rune(s[i])
|
||||
if r >= utf8.RuneSelf {
|
||||
r, wid = utf8.DecodeRune(s[i:])
|
||||
}
|
||||
r = mapping(r)
|
||||
if r >= 0 {
|
||||
b = utf8.AppendRune(b, r)
|
||||
@@ -647,11 +594,10 @@ func Repeat(b []byte, count int) []byte {
|
||||
if count < 0 {
|
||||
panic("bytes: negative Repeat count")
|
||||
}
|
||||
hi, lo := bits.Mul(uint(len(b)), uint(count))
|
||||
if hi > 0 || lo > uint(maxInt) {
|
||||
if len(b) > maxInt/count {
|
||||
panic("bytes: Repeat output length overflow")
|
||||
}
|
||||
n := int(lo) // lo = len(b) * count
|
||||
n := len(b) * count
|
||||
|
||||
if len(b) == 0 {
|
||||
return []byte{}
|
||||
@@ -678,7 +624,10 @@ func Repeat(b []byte, count int) []byte {
|
||||
nb := bytealg.MakeNoZero(n)[:n:n]
|
||||
bp := copy(nb, b)
|
||||
for bp < n {
|
||||
chunk := min(bp, chunkMax)
|
||||
chunk := bp
|
||||
if chunk > chunkMax {
|
||||
chunk = chunkMax
|
||||
}
|
||||
bp += copy(nb[bp:], nb[:chunk])
|
||||
}
|
||||
return nb
|
||||
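The `Repeat` hunk above swaps a division-based overflow guard for one built on the full-width product from `math/bits.Mul`: any non-zero high word, or a low word above `MaxInt`, means `len(b) * count` does not fit in an int. A small standalone sketch of that check (assuming non-negative inputs):

```go
package main

import (
	"fmt"
	"math"
	"math/bits"
)

// mulFits reports whether x*y fits in an int, using the full-width product
// the same way the hunk above does. x and y are assumed non-negative.
func mulFits(x, y int) (n int, ok bool) {
	hi, lo := bits.Mul(uint(x), uint(y))
	if hi > 0 || lo > uint(math.MaxInt) {
		return 0, false
	}
	return int(lo), true
}

func main() {
	fmt.Println(mulFits(1<<20, 1<<10))     // 1073741824 true
	fmt.Println(mulFits(math.MaxInt/2, 3)) // 0 false
}
```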
@@ -909,7 +858,11 @@ func LastIndexFunc(s []byte, f func(r rune) bool) int {
|
||||
func indexFunc(s []byte, f func(r rune) bool, truth bool) int {
|
||||
start := 0
|
||||
for start < len(s) {
|
||||
r, wid := utf8.DecodeRune(s[start:])
|
||||
wid := 1
|
||||
r := rune(s[start])
|
||||
if r >= utf8.RuneSelf {
|
||||
r, wid = utf8.DecodeRune(s[start:])
|
||||
}
|
||||
if f(r) == truth {
|
||||
return start
|
||||
}
|
||||
@@ -1040,7 +993,10 @@ func trimLeftASCII(s []byte, as *asciiSet) []byte {
|
||||
|
||||
func trimLeftUnicode(s []byte, cutset string) []byte {
|
||||
for len(s) > 0 {
|
||||
r, n := utf8.DecodeRune(s)
|
||||
r, n := rune(s[0]), 1
|
||||
if r >= utf8.RuneSelf {
|
||||
r, n = utf8.DecodeRune(s)
|
||||
}
|
||||
if !containsRune(cutset, r) {
|
||||
break
|
||||
}
|
||||
@@ -1102,34 +1058,41 @@ func trimRightUnicode(s []byte, cutset string) []byte {
|
||||
// TrimSpace returns a subslice of s by slicing off all leading and
|
||||
// trailing white space, as defined by Unicode.
|
||||
func TrimSpace(s []byte) []byte {
|
||||
// Fast path for ASCII: look for the first ASCII non-space byte.
|
||||
for lo, c := range s {
|
||||
// Fast path for ASCII: look for the first ASCII non-space byte
|
||||
start := 0
|
||||
for ; start < len(s); start++ {
|
||||
c := s[start]
|
||||
if c >= utf8.RuneSelf {
|
||||
// If we run into a non-ASCII byte, fall back to the
|
||||
// slower unicode-aware method on the remaining bytes.
|
||||
return TrimFunc(s[lo:], unicode.IsSpace)
|
||||
// slower unicode-aware method on the remaining bytes
|
||||
return TrimFunc(s[start:], unicode.IsSpace)
|
||||
}
|
||||
if asciiSpace[c] != 0 {
|
||||
continue
|
||||
}
|
||||
s = s[lo:]
|
||||
// Now look for the first ASCII non-space byte from the end.
|
||||
for hi := len(s) - 1; hi >= 0; hi-- {
|
||||
c := s[hi]
|
||||
if c >= utf8.RuneSelf {
|
||||
return TrimFunc(s[:hi+1], unicode.IsSpace)
|
||||
}
|
||||
if asciiSpace[c] == 0 {
|
||||
// At this point, s[:hi+1] starts and ends with ASCII
|
||||
// non-space bytes, so we're done. Non-ASCII cases have
|
||||
// already been handled above.
|
||||
return s[:hi+1]
|
||||
}
|
||||
if asciiSpace[c] == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
// Special case to preserve previous TrimLeftFunc behavior,
|
||||
// returning nil instead of empty slice if all spaces.
|
||||
return nil
|
||||
|
||||
// Now look for the first ASCII non-space byte from the end
|
||||
stop := len(s)
|
||||
for ; stop > start; stop-- {
|
||||
c := s[stop-1]
|
||||
if c >= utf8.RuneSelf {
|
||||
return TrimFunc(s[start:stop], unicode.IsSpace)
|
||||
}
|
||||
if asciiSpace[c] == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// At this point s[start:stop] starts and ends with an ASCII
|
||||
// non-space bytes, so we're done. Non-ASCII cases have already
|
||||
// been handled above.
|
||||
if start == stop {
|
||||
// Special case to preserve previous TrimLeftFunc behavior,
|
||||
// returning nil instead of empty slice if all spaces.
|
||||
return nil
|
||||
}
|
||||
return s[start:stop]
|
||||
}
|
||||
|
||||
// Runes interprets s as a sequence of UTF-8-encoded code points.
|
||||
@@ -1170,22 +1133,19 @@ func Replace(s, old, new []byte, n int) []byte {
|
||||
t := make([]byte, len(s)+n*(len(new)-len(old)))
|
||||
w := 0
|
||||
start := 0
|
||||
if len(old) > 0 {
|
||||
for range n {
|
||||
j := start + Index(s[start:], old)
|
||||
w += copy(t[w:], s[start:j])
|
||||
w += copy(t[w:], new)
|
||||
start = j + len(old)
|
||||
for i := 0; i < n; i++ {
|
||||
j := start
|
||||
if len(old) == 0 {
|
||||
if i > 0 {
|
||||
_, wid := utf8.DecodeRune(s[start:])
|
||||
j += wid
|
||||
}
|
||||
} else {
|
||||
j += Index(s[start:], old)
|
||||
}
|
||||
} else { // len(old) == 0
|
||||
w += copy(t[w:], s[start:j])
|
||||
w += copy(t[w:], new)
|
||||
for range n - 1 {
|
||||
_, wid := utf8.DecodeRune(s[start:])
|
||||
j := start + wid
|
||||
w += copy(t[w:], s[start:j])
|
||||
w += copy(t[w:], new)
|
||||
start = j
|
||||
}
|
||||
start = j + len(old)
|
||||
}
|
||||
w += copy(t[w:], s[start:])
|
||||
return t[0:w]
|
||||
@@ -1206,7 +1166,7 @@ func ReplaceAll(s, old, new []byte) []byte {
|
||||
func EqualFold(s, t []byte) bool {
|
||||
// ASCII fast path
|
||||
i := 0
|
||||
for n := min(len(s), len(t)); i < n; i++ {
|
||||
for ; i < len(s) && i < len(t); i++ {
|
||||
sr := s[i]
|
||||
tr := t[i]
|
||||
if sr|tr >= utf8.RuneSelf {
|
||||
@@ -1236,10 +1196,19 @@ hasUnicode:
|
||||
t = t[i:]
|
||||
for len(s) != 0 && len(t) != 0 {
|
||||
// Extract first rune from each.
|
||||
sr, size := utf8.DecodeRune(s)
|
||||
s = s[size:]
|
||||
tr, size := utf8.DecodeRune(t)
|
||||
t = t[size:]
|
||||
var sr, tr rune
|
||||
if s[0] < utf8.RuneSelf {
|
||||
sr, s = rune(s[0]), s[1:]
|
||||
} else {
|
||||
r, size := utf8.DecodeRune(s)
|
||||
sr, s = r, s[size:]
|
||||
}
|
||||
if t[0] < utf8.RuneSelf {
|
||||
tr, t = rune(t[0]), t[1:]
|
||||
} else {
|
||||
r, size := utf8.DecodeRune(t)
|
||||
tr, t = r, t[size:]
|
||||
}
|
||||
|
||||
// If they match, keep going; if not, return false.
|
||||
|
||||
|
||||
@@ -7,12 +7,10 @@ package bytes_test
|
||||
import (
|
||||
. "bytes"
|
||||
"fmt"
|
||||
"internal/asan"
|
||||
"internal/testenv"
|
||||
"iter"
|
||||
"math"
|
||||
"math/rand"
|
||||
"slices"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"unicode"
|
||||
@@ -20,6 +18,18 @@ import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func eq(a, b []string) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < len(a); i++ {
|
||||
if a[i] != b[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func sliceOfString(s [][]byte) []string {
|
||||
result := make([]string, len(s))
|
||||
for i, v := range s {
|
||||
@@ -28,37 +38,6 @@ func sliceOfString(s [][]byte) []string {
|
||||
return result
|
||||
}
|
||||
|
||||
func collect(t *testing.T, seq iter.Seq[[]byte]) [][]byte {
|
||||
out := slices.Collect(seq)
|
||||
out1 := slices.Collect(seq)
|
||||
if !slices.Equal(sliceOfString(out), sliceOfString(out1)) {
|
||||
t.Fatalf("inconsistent seq:\n%s\n%s", out, out1)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
type LinesTest struct {
|
||||
a string
|
||||
b []string
|
||||
}
|
||||
|
||||
var linesTests = []LinesTest{
|
||||
{a: "abc\nabc\n", b: []string{"abc\n", "abc\n"}},
|
||||
{a: "abc\r\nabc", b: []string{"abc\r\n", "abc"}},
|
||||
{a: "abc\r\n", b: []string{"abc\r\n"}},
|
||||
{a: "\nabc", b: []string{"\n", "abc"}},
|
||||
{a: "\nabc\n\n", b: []string{"\n", "abc\n", "\n"}},
|
||||
}
|
||||
|
||||
func TestLines(t *testing.T) {
|
||||
for _, s := range linesTests {
|
||||
result := sliceOfString(slices.Collect(Lines([]byte(s.a))))
|
||||
if !slices.Equal(result, s.b) {
|
||||
t.Errorf(`slices.Collect(Lines(%q)) = %q; want %q`, s.a, result, s.b)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// For ease of reading, the test cases use strings that are converted to byte
|
||||
// slices before invoking the functions.
|
||||
|
||||
@@ -198,11 +177,6 @@ var indexTests = []BinOpTest{
|
||||
{"oxoxoxoxoxoxoxoxoxoxoxox", "oy", -1},
|
||||
// test fallback to Rabin-Karp.
|
||||
{"000000000000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000000001", 5},
|
||||
// test fallback to IndexRune
|
||||
{"oxoxoxoxoxoxoxoxoxoxox☺", "☺", 22},
|
||||
// invalid UTF-8 byte sequence (must be longer than bytealg.MaxBruteForce to
|
||||
// test that we don't use IndexRune)
|
||||
{"xx0123456789012345678901234567890123456789012345678901234567890120123456789012345678901234567890123456xxx\xed\x9f\xc0", "\xed\x9f\xc0", 105},
|
||||
}
|
||||
|
||||
var lastIndexTests = []BinOpTest{
|
||||
@@ -451,31 +425,6 @@ func TestIndexRune(t *testing.T) {
|
||||
{"some_text=some_value", '=', 9},
|
||||
{"☺a", 'a', 3},
|
||||
{"a☻☺b", '☺', 4},
|
||||
{"𠀳𠀗𠀾𠁄𠀧𠁆𠁂𠀫𠀖𠀪𠀲𠀴𠁀𠀨𠀿", '𠀿', 56},
|
||||
|
||||
// 2 bytes
|
||||
{"ӆ", 'ӆ', 0},
|
||||
{"a", 'ӆ', -1},
|
||||
{" ӆ", 'ӆ', 2},
|
||||
{" a", 'ӆ', -1},
|
||||
{strings.Repeat("ц", 64) + "ӆ", 'ӆ', 128}, // test cutover
|
||||
{strings.Repeat("ц", 64), 'ӆ', -1},
|
||||
|
||||
// 3 bytes
|
||||
{"Ꚁ", 'Ꚁ', 0},
|
||||
{"a", 'Ꚁ', -1},
|
||||
{" Ꚁ", 'Ꚁ', 2},
|
||||
{" a", 'Ꚁ', -1},
|
||||
{strings.Repeat("Ꙁ", 64) + "Ꚁ", 'Ꚁ', 192}, // test cutover
|
||||
{strings.Repeat("Ꙁ", 64) + "Ꚁ", '䚀', -1}, // 'Ꚁ' and '䚀' share the same last two bytes
|
||||
|
||||
// 4 bytes
|
||||
{"𡌀", '𡌀', 0},
|
||||
{"a", '𡌀', -1},
|
||||
{" 𡌀", '𡌀', 2},
|
||||
{" a", '𡌀', -1},
|
||||
{strings.Repeat("𡋀", 64) + "𡌀", '𡌀', 256}, // test cutover
|
||||
{strings.Repeat("𡋀", 64) + "𡌀", '𣌀', -1}, // '𡌀' and '𣌀' share the same last two bytes
|
||||
|
||||
// RuneError should match any invalid UTF-8 byte sequence.
|
||||
{"<22>", '<27>', 0},
|
||||
@@ -489,13 +438,6 @@ func TestIndexRune(t *testing.T) {
|
||||
{"a☺b☻c☹d\xe2\x98<39>\xff<66>\xed\xa0\x80", -1, -1},
|
||||
{"a☺b☻c☹d\xe2\x98<39>\xff<66>\xed\xa0\x80", 0xD800, -1}, // Surrogate pair
|
||||
{"a☺b☻c☹d\xe2\x98<39>\xff<66>\xed\xa0\x80", utf8.MaxRune + 1, -1},
|
||||
|
||||
// Test the cutover to bytealg.Index when it is triggered in
|
||||
// the middle of rune that contains consecutive runs of equal bytes.
|
||||
{"aaaaaKKKK\U000bc104", '\U000bc104', 17}, // cutover: (n + 16) / 8
|
||||
{"aaaaaKKKK鄄", '鄄', 17},
|
||||
{"aaKKKKKa\U000bc104", '\U000bc104', 18}, // cutover: 4 + n>>4
|
||||
{"aaKKKKKa鄄", '鄄', 18},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
if got := IndexRune([]byte(tt.in), tt.rune); got != tt.want {
|
||||
@@ -643,21 +585,6 @@ func BenchmarkIndexRuneASCII(b *testing.B) {
|
||||
benchBytes(b, indexSizes, bmIndexRuneASCII(IndexRune))
|
||||
}
|
||||
|
||||
func BenchmarkIndexRuneUnicode(b *testing.B) {
|
||||
b.Run("Latin", func(b *testing.B) {
|
||||
// Latin is mostly 1, 2, 3 byte runes.
|
||||
benchBytes(b, indexSizes, bmIndexRuneUnicode(unicode.Latin, 'é'))
|
||||
})
|
||||
b.Run("Cyrillic", func(b *testing.B) {
|
||||
// Cyrillic is mostly 2 and 3 byte runes.
|
||||
benchBytes(b, indexSizes, bmIndexRuneUnicode(unicode.Cyrillic, 'Ꙁ'))
|
||||
})
|
||||
b.Run("Han", func(b *testing.B) {
|
||||
// Han consists only of 3 and 4 byte runes.
|
||||
benchBytes(b, indexSizes, bmIndexRuneUnicode(unicode.Han, '𠀿'))
|
||||
})
|
||||
}
|
||||
|
||||
func bmIndexRuneASCII(index func([]byte, rune) int) func(b *testing.B, n int) {
|
||||
return func(b *testing.B, n int) {
|
||||
buf := bmbuf[0:n]
|
||||
@@ -688,61 +615,6 @@ func bmIndexRune(index func([]byte, rune) int) func(b *testing.B, n int) {
|
||||
}
|
||||
}
|
||||
|
||||
func bmIndexRuneUnicode(rt *unicode.RangeTable, needle rune) func(b *testing.B, n int) {
|
||||
var rs []rune
|
||||
for _, r16 := range rt.R16 {
|
||||
for r := rune(r16.Lo); r <= rune(r16.Hi); r += rune(r16.Stride) {
|
||||
if r != needle {
|
||||
rs = append(rs, r)
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, r32 := range rt.R32 {
|
||||
for r := rune(r32.Lo); r <= rune(r32.Hi); r += rune(r32.Stride) {
|
||||
if r != needle {
|
||||
rs = append(rs, r)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Shuffle the runes so that they are not in descending order.
|
||||
// The sort is deterministic since this is used for benchmarks,
|
||||
// which need to be repeatable.
|
||||
rr := rand.New(rand.NewSource(1))
|
||||
rr.Shuffle(len(rs), func(i, j int) {
|
||||
rs[i], rs[j] = rs[j], rs[i]
|
||||
})
|
||||
uchars := string(rs)
|
||||
|
||||
return func(b *testing.B, n int) {
|
||||
buf := bmbuf[0:n]
|
||||
o := copy(buf, uchars)
|
||||
for o < len(buf) {
|
||||
o += copy(buf[o:], uchars)
|
||||
}
|
||||
|
||||
// Make space for the needle rune at the end of buf.
|
||||
m := utf8.RuneLen(needle)
|
||||
for o := m; o > 0; {
|
||||
_, sz := utf8.DecodeLastRune(buf)
|
||||
copy(buf[len(buf)-sz:], "\x00\x00\x00\x00")
|
||||
buf = buf[:len(buf)-sz]
|
||||
o -= sz
|
||||
}
|
||||
buf = utf8.AppendRune(buf[:n-m], needle)
|
||||
|
||||
n -= m // adjust for rune len
|
||||
for i := 0; i < b.N; i++ {
|
||||
j := IndexRune(buf, needle)
|
||||
if j != n {
|
||||
b.Fatal("bad index", j)
|
||||
}
|
||||
}
|
||||
for i := range buf {
|
||||
buf[i] = '\x00'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkEqual(b *testing.B) {
|
||||
b.Run("0", func(b *testing.B) {
|
||||
var buf [4]byte
|
||||
@@ -891,7 +763,9 @@ func BenchmarkCountSingle(b *testing.B) {
|
||||
b.Fatal("bad count", j, expect)
|
||||
}
|
||||
}
|
||||
clear(buf)
|
||||
for i := 0; i < len(buf); i++ {
|
||||
buf[i] = 0
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -934,18 +808,10 @@ func TestSplit(t *testing.T) {
|
||||
}
|
||||
|
||||
result := sliceOfString(a)
|
||||
if !slices.Equal(result, tt.a) {
|
||||
if !eq(result, tt.a) {
|
||||
t.Errorf(`Split(%q, %q, %d) = %v; want %v`, tt.s, tt.sep, tt.n, result, tt.a)
|
||||
continue
|
||||
}
|
||||
|
||||
if tt.n < 0 {
|
||||
b := sliceOfString(slices.Collect(SplitSeq([]byte(tt.s), []byte(tt.sep))))
|
||||
if !slices.Equal(b, tt.a) {
|
||||
t.Errorf(`collect(SplitSeq(%q, %q)) = %v; want %v`, tt.s, tt.sep, b, tt.a)
|
||||
}
|
||||
}
|
||||
|
||||
if tt.n == 0 || len(a) == 0 {
|
||||
continue
|
||||
}
|
||||
@@ -959,9 +825,9 @@ func TestSplit(t *testing.T) {
|
||||
t.Errorf(`Join(Split(%q, %q, %d), %q) = %q`, tt.s, tt.sep, tt.n, tt.sep, s)
|
||||
}
|
||||
if tt.n < 0 {
|
||||
b := sliceOfString(Split([]byte(tt.s), []byte(tt.sep)))
|
||||
if !slices.Equal(result, b) {
|
||||
t.Errorf("Split disagrees with SplitN(%q, %q, %d) = %v; want %v", tt.s, tt.sep, tt.n, b, a)
|
||||
b := Split([]byte(tt.s), []byte(tt.sep))
|
||||
if !reflect.DeepEqual(a, b) {
|
||||
t.Errorf("Split disagrees withSplitN(%q, %q, %d) = %v; want %v", tt.s, tt.sep, tt.n, b, a)
|
||||
}
|
||||
}
|
||||
if len(a) > 0 {
|
||||
@@ -1000,18 +866,11 @@ func TestSplitAfter(t *testing.T) {
|
||||
}
|
||||
|
||||
result := sliceOfString(a)
|
||||
if !slices.Equal(result, tt.a) {
|
||||
if !eq(result, tt.a) {
|
||||
t.Errorf(`Split(%q, %q, %d) = %v; want %v`, tt.s, tt.sep, tt.n, result, tt.a)
|
||||
continue
|
||||
}
|
||||
|
||||
if tt.n < 0 {
|
||||
b := sliceOfString(slices.Collect(SplitAfterSeq([]byte(tt.s), []byte(tt.sep))))
|
||||
if !slices.Equal(b, tt.a) {
|
||||
t.Errorf(`collect(SplitAfterSeq(%q, %q)) = %v; want %v`, tt.s, tt.sep, b, tt.a)
|
||||
}
|
||||
}
|
||||
|
||||
if want := tt.a[len(tt.a)-1] + "z"; string(x) != want {
|
||||
t.Errorf("last appended result was %s; want %s", x, want)
|
||||
}
|
||||
@@ -1021,9 +880,9 @@ func TestSplitAfter(t *testing.T) {
|
||||
t.Errorf(`Join(Split(%q, %q, %d), %q) = %q`, tt.s, tt.sep, tt.n, tt.sep, s)
|
||||
}
|
||||
if tt.n < 0 {
|
||||
b := sliceOfString(SplitAfter([]byte(tt.s), []byte(tt.sep)))
|
||||
if !slices.Equal(result, b) {
|
||||
t.Errorf("SplitAfter disagrees with SplitAfterN(%q, %q, %d) = %v; want %v", tt.s, tt.sep, tt.n, b, a)
|
||||
b := SplitAfter([]byte(tt.s), []byte(tt.sep))
|
||||
if !reflect.DeepEqual(a, b) {
|
||||
t.Errorf("SplitAfter disagrees withSplitAfterN(%q, %q, %d) = %v; want %v", tt.s, tt.sep, tt.n, b, a)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1060,16 +919,11 @@ func TestFields(t *testing.T) {
|
||||
}
|
||||
|
||||
result := sliceOfString(a)
|
||||
if !slices.Equal(result, tt.a) {
|
||||
if !eq(result, tt.a) {
|
||||
t.Errorf("Fields(%q) = %v; want %v", tt.s, a, tt.a)
|
||||
continue
|
||||
}
|
||||
|
||||
result2 := sliceOfString(collect(t, FieldsSeq([]byte(tt.s))))
|
||||
if !slices.Equal(result2, tt.a) {
|
||||
t.Errorf(`collect(FieldsSeq(%q)) = %v; want %v`, tt.s, result2, tt.a)
|
||||
}
|
||||
|
||||
if string(b) != tt.s {
|
||||
t.Errorf("slice changed to %s; want %s", string(b), tt.s)
|
||||
}
|
||||
@@ -1085,7 +939,7 @@ func TestFieldsFunc(t *testing.T) {
|
||||
for _, tt := range fieldstests {
|
||||
a := FieldsFunc([]byte(tt.s), unicode.IsSpace)
|
||||
result := sliceOfString(a)
|
||||
if !slices.Equal(result, tt.a) {
|
||||
if !eq(result, tt.a) {
|
||||
t.Errorf("FieldsFunc(%q, unicode.IsSpace) = %v; want %v", tt.s, a, tt.a)
|
||||
continue
|
||||
}
|
||||
@@ -1108,15 +962,10 @@ func TestFieldsFunc(t *testing.T) {
|
||||
}
|
||||
|
||||
result := sliceOfString(a)
|
||||
if !slices.Equal(result, tt.a) {
|
||||
if !eq(result, tt.a) {
|
||||
t.Errorf("FieldsFunc(%q) = %v, want %v", tt.s, a, tt.a)
|
||||
}
|
||||
|
||||
result2 := sliceOfString(collect(t, FieldsFuncSeq([]byte(tt.s), pred)))
|
||||
if !slices.Equal(result2, tt.a) {
|
||||
t.Errorf(`collect(FieldsFuncSeq(%q)) = %v; want %v`, tt.s, result2, tt.a)
|
||||
}
|
||||
|
||||
if string(b) != tt.s {
|
||||
t.Errorf("slice changed to %s; want %s", b, tt.s)
|
||||
}
|
||||
@@ -1224,7 +1073,7 @@ func TestMap(t *testing.T) {
|
||||
// Run a couple of awful growth/shrinkage tests
|
||||
a := tenRunes('a')
|
||||
|
||||
// 1. Grow. This triggers two reallocations in Map.
|
||||
// 1. Grow. This triggers two reallocations in Map.
|
||||
maxRune := func(r rune) rune { return unicode.MaxRune }
|
||||
m := Map(maxRune, []byte(a))
|
||||
expect := tenRunes(unicode.MaxRune)
|
||||
@@ -1437,6 +1286,18 @@ func TestRepeatCatchesOverflow(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func runesEqual(a, b []rune) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
for i, r := range a {
|
||||
if r != b[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
type RunesTest struct {
|
||||
in string
|
||||
out []rune
|
||||
@@ -1457,7 +1318,7 @@ func TestRunes(t *testing.T) {
|
||||
for _, tt := range RunesTests {
|
||||
tin := []byte(tt.in)
|
||||
a := Runes(tin)
|
||||
if !slices.Equal(a, tt.out) {
|
||||
if !runesEqual(a, tt.out) {
|
||||
t.Errorf("Runes(%q) = %v; want %v", tin, a, tt.out)
|
||||
continue
|
||||
}
|
||||
@@ -1785,20 +1646,9 @@ var ReplaceTests = []ReplaceTest{
|
||||
|
||||
func TestReplace(t *testing.T) {
|
||||
for _, tt := range ReplaceTests {
|
||||
var (
|
||||
in = []byte(tt.in)
|
||||
old = []byte(tt.old)
|
||||
new = []byte(tt.new)
|
||||
)
|
||||
if !asan.Enabled {
|
||||
allocs := testing.AllocsPerRun(10, func() { Replace(in, old, new, tt.n) })
|
||||
if allocs > 1 {
|
||||
t.Errorf("Replace(%q, %q, %q, %d) allocates %.2f objects", tt.in, tt.old, tt.new, tt.n, allocs)
|
||||
}
|
||||
}
|
||||
in = append(in, "<spare>"...)
|
||||
in := append([]byte(tt.in), "<spare>"...)
|
||||
in = in[:len(tt.in)]
|
||||
out := Replace(in, old, new, tt.n)
|
||||
out := Replace(in, []byte(tt.old), []byte(tt.new), tt.n)
|
||||
if s := string(out); s != tt.out {
|
||||
t.Errorf("Replace(%q, %q, %q, %d) = %q, want %q", tt.in, tt.old, tt.new, tt.n, s, tt.out)
|
||||
}
|
||||
@@ -1806,7 +1656,7 @@ func TestReplace(t *testing.T) {
|
||||
t.Errorf("Replace(%q, %q, %q, %d) didn't copy", tt.in, tt.old, tt.new, tt.n)
|
||||
}
|
||||
if tt.n == -1 {
|
||||
out := ReplaceAll(in, old, new)
|
||||
out := ReplaceAll(in, []byte(tt.old), []byte(tt.new))
|
||||
if s := string(out); s != tt.out {
|
||||
t.Errorf("ReplaceAll(%q, %q, %q) = %q, want %q", tt.in, tt.old, tt.new, s, tt.out)
|
||||
}
|
||||
@@ -1814,69 +1664,6 @@ func TestReplace(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func FuzzReplace(f *testing.F) {
|
||||
for _, tt := range ReplaceTests {
|
||||
f.Add([]byte(tt.in), []byte(tt.old), []byte(tt.new), tt.n)
|
||||
}
|
||||
f.Fuzz(func(t *testing.T, in, old, new []byte, n int) {
|
||||
differentImpl := func(in, old, new []byte, n int) []byte {
|
||||
var out Buffer
|
||||
if n < 0 {
|
||||
n = math.MaxInt
|
||||
}
|
||||
for i := 0; i < len(in); {
|
||||
if n == 0 {
|
||||
out.Write(in[i:])
|
||||
break
|
||||
}
|
||||
if HasPrefix(in[i:], old) {
|
||||
out.Write(new)
|
||||
i += len(old)
|
||||
n--
|
||||
if len(old) != 0 {
|
||||
continue
|
||||
}
|
||||
if i == len(in) {
|
||||
break
|
||||
}
|
||||
}
|
||||
if len(old) == 0 {
|
||||
_, length := utf8.DecodeRune(in[i:])
|
||||
out.Write(in[i : i+length])
|
||||
i += length
|
||||
} else {
|
||||
out.WriteByte(in[i])
|
||||
i++
|
||||
}
|
||||
}
|
||||
if len(old) == 0 && n != 0 {
|
||||
out.Write(new)
|
||||
}
|
||||
return out.Bytes()
|
||||
}
|
||||
if simple, replace := differentImpl(in, old, new, n), Replace(in, old, new, n); !slices.Equal(simple, replace) {
|
||||
t.Errorf("The two implementations do not match %q != %q for Replace(%q, %q, %q, %d)", simple, replace, in, old, new, n)
|
||||
}
|
||||
})
|
||||
}
|
||||
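FuzzReplace above is a differential fuzz target: it cross-checks the optimized Replace against a straightforward reference implementation built on Buffer, HasPrefix and utf8.DecodeRune, and fails on any input where the two disagree. A hedged usage note (standard go test fuzzing flags, run from the src/bytes directory of a checkout):

go test -run=NONE -fuzz=FuzzReplace -fuzztime=30s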
|
||||
func BenchmarkReplace(b *testing.B) {
|
||||
for _, tt := range ReplaceTests {
|
||||
desc := fmt.Sprintf("%q %q %q %d", tt.in, tt.old, tt.new, tt.n)
|
||||
var (
|
||||
in = []byte(tt.in)
|
||||
old = []byte(tt.old)
|
||||
new = []byte(tt.new)
|
||||
)
|
||||
b.Run(desc, func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
for b.Loop() {
|
||||
Replace(in, old, new, tt.n)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type TitleTest struct {
|
||||
in, out string
|
||||
}
|
||||
@@ -2126,9 +1913,8 @@ func TestContainsFunc(t *testing.T) {
|
||||
var makeFieldsInput = func() []byte {
|
||||
x := make([]byte, 1<<20)
|
||||
// Input is ~10% space, ~10% 2-byte UTF-8, rest ASCII non-space.
|
||||
r := rand.New(rand.NewSource(99))
|
||||
for i := range x {
|
||||
switch r.Intn(10) {
|
||||
switch rand.Intn(10) {
|
||||
case 0:
|
||||
x[i] = ' '
|
||||
case 1:
|
||||
@@ -2147,9 +1933,8 @@ var makeFieldsInput = func() []byte {
|
||||
var makeFieldsInputASCII = func() []byte {
|
||||
x := make([]byte, 1<<20)
|
||||
// Input is ~10% space, rest ASCII non-space.
|
||||
r := rand.New(rand.NewSource(99))
|
||||
for i := range x {
|
||||
if r.Intn(10) == 0 {
|
||||
if rand.Intn(10) == 0 {
|
||||
x[i] = ' '
|
||||
} else {
|
||||
x[i] = 'x'
|
||||
@@ -2246,9 +2031,8 @@ func makeBenchInputHard() []byte {
|
||||
"hello", "world",
|
||||
}
|
||||
x := make([]byte, 0, 1<<20)
|
||||
r := rand.New(rand.NewSource(99))
|
||||
for {
|
||||
i := r.Intn(len(tokens))
|
||||
i := rand.Intn(len(tokens))
|
||||
if len(x)+len(tokens[i]) >= 1<<20 {
|
||||
break
|
||||
}
|
||||
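The three hunks above switch the benchmark-input generators between the package-level math/rand functions and a locally seeded *rand.Rand. A minimal sketch of the seeded-source pattern (illustrative only, not the benchmark code itself):

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	// A locally seeded source yields a reproducible sequence and leaves the
	// package-level generator untouched, so benchmark inputs stay stable.
	r := rand.New(rand.NewSource(99))
	fmt.Println(r.Intn(10), r.Intn(10), r.Intn(10))
}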
@@ -2260,11 +2044,6 @@ func makeBenchInputHard() []byte {
|
||||
var benchInputHard = makeBenchInputHard()
|
||||
|
||||
func benchmarkIndexHard(b *testing.B, sep []byte) {
|
||||
n := Index(benchInputHard, sep)
|
||||
if n < 0 {
|
||||
n = len(benchInputHard)
|
||||
}
|
||||
b.SetBytes(int64(n))
|
||||
for i := 0; i < b.N; i++ {
|
||||
Index(benchInputHard, sep)
|
||||
}
|
||||
|
||||
@@ -245,9 +245,9 @@ func ExampleCut() {
|
||||
}
|
||||
|
||||
func ExampleCutPrefix() {
|
||||
show := func(s, prefix string) {
|
||||
after, found := bytes.CutPrefix([]byte(s), []byte(prefix))
|
||||
fmt.Printf("CutPrefix(%q, %q) = %q, %v\n", s, prefix, after, found)
|
||||
show := func(s, sep string) {
|
||||
after, found := bytes.CutPrefix([]byte(s), []byte(sep))
|
||||
fmt.Printf("CutPrefix(%q, %q) = %q, %v\n", s, sep, after, found)
|
||||
}
|
||||
show("Gopher", "Go")
|
||||
show("Gopher", "ph")
|
||||
@@ -257,9 +257,9 @@ func ExampleCutPrefix() {
|
||||
}
|
||||
|
||||
func ExampleCutSuffix() {
|
||||
show := func(s, suffix string) {
|
||||
before, found := bytes.CutSuffix([]byte(s), []byte(suffix))
|
||||
fmt.Printf("CutSuffix(%q, %q) = %q, %v\n", s, suffix, before, found)
|
||||
show := func(s, sep string) {
|
||||
before, found := bytes.CutSuffix([]byte(s), []byte(sep))
|
||||
fmt.Printf("CutSuffix(%q, %q) = %q, %v\n", s, sep, before, found)
|
||||
}
|
||||
show("Gopher", "Go")
|
||||
show("Gopher", "er")
|
||||
@@ -502,10 +502,10 @@ func ExampleTitle() {
|
||||
|
||||
func ExampleToTitle() {
|
||||
fmt.Printf("%s\n", bytes.ToTitle([]byte("loud noises")))
|
||||
fmt.Printf("%s\n", bytes.ToTitle([]byte("брат")))
|
||||
fmt.Printf("%s\n", bytes.ToTitle([]byte("хлеб")))
|
||||
// Output:
|
||||
// LOUD NOISES
|
||||
// БРАТ
|
||||
// ХЛЕБ
|
||||
}
|
||||
|
||||
func ExampleToTitleSpecial() {
|
||||
@@ -628,93 +628,3 @@ func ExampleToUpperSpecial() {
|
||||
// Original : ahoj vývojári golang
|
||||
// ToUpper : AHOJ VÝVOJÁRİ GOLANG
|
||||
}
|
||||
|
||||
func ExampleLines() {
|
||||
text := []byte("Hello\nWorld\nGo Programming\n")
|
||||
for line := range bytes.Lines(text) {
|
||||
fmt.Printf("%q\n", line)
|
||||
}
|
||||
|
||||
// Output:
|
||||
// "Hello\n"
|
||||
// "World\n"
|
||||
// "Go Programming\n"
|
||||
}
|
||||
|
||||
func ExampleSplitSeq() {
|
||||
s := []byte("a,b,c,d")
|
||||
for part := range bytes.SplitSeq(s, []byte(",")) {
|
||||
fmt.Printf("%q\n", part)
|
||||
}
|
||||
|
||||
// Output:
|
||||
// "a"
|
||||
// "b"
|
||||
// "c"
|
||||
// "d"
|
||||
}
|
||||
|
||||
func ExampleSplitAfterSeq() {
|
||||
s := []byte("a,b,c,d")
|
||||
for part := range bytes.SplitAfterSeq(s, []byte(",")) {
|
||||
fmt.Printf("%q\n", part)
|
||||
}
|
||||
|
||||
// Output:
|
||||
// "a,"
|
||||
// "b,"
|
||||
// "c,"
|
||||
// "d"
|
||||
}
|
||||
|
||||
func ExampleFieldsSeq() {
|
||||
text := []byte("The quick brown fox")
|
||||
fmt.Println("Split byte slice into fields:")
|
||||
for word := range bytes.FieldsSeq(text) {
|
||||
fmt.Printf("%q\n", word)
|
||||
}
|
||||
|
||||
textWithSpaces := []byte(" lots of spaces ")
|
||||
fmt.Println("\nSplit byte slice with multiple spaces:")
|
||||
for word := range bytes.FieldsSeq(textWithSpaces) {
|
||||
fmt.Printf("%q\n", word)
|
||||
}
|
||||
|
||||
// Output:
|
||||
// Split byte slice into fields:
|
||||
// "The"
|
||||
// "quick"
|
||||
// "brown"
|
||||
// "fox"
|
||||
//
|
||||
// Split byte slice with multiple spaces:
|
||||
// "lots"
|
||||
// "of"
|
||||
// "spaces"
|
||||
}
|
||||
|
||||
func ExampleFieldsFuncSeq() {
|
||||
text := []byte("The quick brown fox")
|
||||
fmt.Println("Split on whitespace(similar to FieldsSeq):")
|
||||
for word := range bytes.FieldsFuncSeq(text, unicode.IsSpace) {
|
||||
fmt.Printf("%q\n", word)
|
||||
}
|
||||
|
||||
mixedText := []byte("abc123def456ghi")
|
||||
fmt.Println("\nSplit on digits:")
|
||||
for word := range bytes.FieldsFuncSeq(mixedText, unicode.IsDigit) {
|
||||
fmt.Printf("%q\n", word)
|
||||
}
|
||||
|
||||
// Output:
|
||||
// Split on whitespace(similar to FieldsSeq):
|
||||
// "The"
|
||||
// "quick"
|
||||
// "brown"
|
||||
// "fox"
|
||||
//
|
||||
// Split on digits:
|
||||
// "abc"
|
||||
// "def"
|
||||
// "ghi"
|
||||
}
|
||||
|
||||
@@ -1,137 +0,0 @@
|
||||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bytes
|
||||
|
||||
import (
|
||||
"iter"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// Lines returns an iterator over the newline-terminated lines in the byte slice s.
|
||||
// The lines yielded by the iterator include their terminating newlines.
|
||||
// If s is empty, the iterator yields no lines at all.
|
||||
// If s does not end in a newline, the final yielded line will not end in a newline.
|
||||
// It returns a single-use iterator.
|
||||
func Lines(s []byte) iter.Seq[[]byte] {
|
||||
return func(yield func([]byte) bool) {
|
||||
for len(s) > 0 {
|
||||
var line []byte
|
||||
if i := IndexByte(s, '\n'); i >= 0 {
|
||||
line, s = s[:i+1], s[i+1:]
|
||||
} else {
|
||||
line, s = s, nil
|
||||
}
|
||||
if !yield(line[:len(line):len(line)]) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
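A minimal usage sketch for Lines (not part of the diff; it assumes the API above and a toolchain with range-over-function iterators), showing the case called out in the doc comment where the input does not end in a newline:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	// "a\n" keeps its terminating newline; the final line "b" has none,
	// because the input itself does not end in '\n'.
	for line := range bytes.Lines([]byte("a\nb")) {
		fmt.Printf("%q\n", line)
	}
	// Output:
	// "a\n"
	// "b"
}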
|
||||
// splitSeq is SplitSeq or SplitAfterSeq, configured by how many
|
||||
// bytes of sep to include in the results (none or all).
|
||||
func splitSeq(s, sep []byte, sepSave int) iter.Seq[[]byte] {
|
||||
return func(yield func([]byte) bool) {
|
||||
if len(sep) == 0 {
|
||||
for len(s) > 0 {
|
||||
_, size := utf8.DecodeRune(s)
|
||||
if !yield(s[:size:size]) {
|
||||
return
|
||||
}
|
||||
s = s[size:]
|
||||
}
|
||||
return
|
||||
}
|
||||
for {
|
||||
i := Index(s, sep)
|
||||
if i < 0 {
|
||||
break
|
||||
}
|
||||
frag := s[:i+sepSave]
|
||||
if !yield(frag[:len(frag):len(frag)]) {
|
||||
return
|
||||
}
|
||||
s = s[i+len(sep):]
|
||||
}
|
||||
yield(s[:len(s):len(s)])
|
||||
}
|
||||
}
|
||||
|
||||
// SplitSeq returns an iterator over all subslices of s separated by sep.
|
||||
// The iterator yields the same subslices that would be returned by [Split](s, sep),
|
||||
// but without constructing a new slice containing the subslices.
|
||||
// It returns a single-use iterator.
|
||||
func SplitSeq(s, sep []byte) iter.Seq[[]byte] {
|
||||
return splitSeq(s, sep, 0)
|
||||
}
|
||||
|
||||
// SplitAfterSeq returns an iterator over subslices of s split after each instance of sep.
|
||||
// The iterator yields the same subslices that would be returned by [SplitAfter](s, sep),
|
||||
// but without constructing a new slice containing the subslices.
|
||||
// It returns a single-use iterator.
|
||||
func SplitAfterSeq(s, sep []byte) iter.Seq[[]byte] {
|
||||
return splitSeq(s, sep, len(sep))
|
||||
}
|
||||
|
||||
// FieldsSeq returns an iterator over subslices of s split around runs of
|
||||
// whitespace characters, as defined by [unicode.IsSpace].
|
||||
// The iterator yields the same subslices that would be returned by [Fields](s),
|
||||
// but without constructing a new slice containing the subslices.
|
||||
func FieldsSeq(s []byte) iter.Seq[[]byte] {
|
||||
return func(yield func([]byte) bool) {
|
||||
start := -1
|
||||
for i := 0; i < len(s); {
|
||||
size := 1
|
||||
r := rune(s[i])
|
||||
isSpace := asciiSpace[s[i]] != 0
|
||||
if r >= utf8.RuneSelf {
|
||||
r, size = utf8.DecodeRune(s[i:])
|
||||
isSpace = unicode.IsSpace(r)
|
||||
}
|
||||
if isSpace {
|
||||
if start >= 0 {
|
||||
if !yield(s[start:i:i]) {
|
||||
return
|
||||
}
|
||||
start = -1
|
||||
}
|
||||
} else if start < 0 {
|
||||
start = i
|
||||
}
|
||||
i += size
|
||||
}
|
||||
if start >= 0 {
|
||||
yield(s[start:len(s):len(s)])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// FieldsFuncSeq returns an iterator over subslices of s split around runs of
|
||||
// Unicode code points satisfying f(c).
|
||||
// The iterator yields the same subslices that would be returned by [FieldsFunc](s),
|
||||
// but without constructing a new slice containing the subslices.
|
||||
func FieldsFuncSeq(s []byte, f func(rune) bool) iter.Seq[[]byte] {
|
||||
return func(yield func([]byte) bool) {
|
||||
start := -1
|
||||
for i := 0; i < len(s); {
|
||||
r, size := utf8.DecodeRune(s[i:])
|
||||
if f(r) {
|
||||
if start >= 0 {
|
||||
if !yield(s[start:i:i]) {
|
||||
return
|
||||
}
|
||||
start = -1
|
||||
}
|
||||
} else if start < 0 {
|
||||
start = i
|
||||
}
|
||||
i += size
|
||||
}
|
||||
if start >= 0 {
|
||||
yield(s[start:len(s):len(s)])
|
||||
}
|
||||
}
|
||||
}
|
||||
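The iterators in this file also honor early exit from the consumer: breaking out of the range loop makes yield return false and iteration stops. A minimal sketch (assuming the FieldsSeq API added above):

package main

import (
	"bytes"
	"fmt"
)

func main() {
	for field := range bytes.FieldsSeq([]byte("alpha beta gamma")) {
		fmt.Printf("%q\n", field) // prints "alpha" only
		break                     // yield returns false; no further fields are produced
	}
}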
@@ -1,56 +0,0 @@
|
||||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bytes_test
|
||||
|
||||
import (
|
||||
. "bytes"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func BenchmarkSplitSeqEmptySeparator(b *testing.B) {
|
||||
for range b.N {
|
||||
for range SplitSeq(benchInputHard, nil) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkSplitSeqSingleByteSeparator(b *testing.B) {
|
||||
sep := []byte("/")
|
||||
for range b.N {
|
||||
for range SplitSeq(benchInputHard, sep) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkSplitSeqMultiByteSeparator(b *testing.B) {
|
||||
sep := []byte("hello")
|
||||
for range b.N {
|
||||
for range SplitSeq(benchInputHard, sep) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkSplitAfterSeqEmptySeparator(b *testing.B) {
|
||||
for range b.N {
|
||||
for range SplitAfterSeq(benchInputHard, nil) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkSplitAfterSeqSingleByteSeparator(b *testing.B) {
|
||||
sep := []byte("/")
|
||||
for range b.N {
|
||||
for range SplitAfterSeq(benchInputHard, sep) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkSplitAfterSeqMultiByteSeparator(b *testing.B) {
|
||||
sep := []byte("hello")
|
||||
for range b.N {
|
||||
for range SplitAfterSeq(benchInputHard, sep) {
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -6,16 +6,27 @@
|
||||
|
||||
setlocal
|
||||
|
||||
go tool dist env -w -p >env.bat || exit /b 1
|
||||
set GOBUILDFAIL=0
|
||||
|
||||
go tool dist env -w -p >env.bat
|
||||
if errorlevel 1 goto fail
|
||||
call .\env.bat
|
||||
del env.bat
|
||||
echo.
|
||||
|
||||
if not exist %GOTOOLDIR%\dist.exe (
|
||||
echo cannot find %GOTOOLDIR%\dist.exe; nothing to clean
|
||||
exit /b 1
|
||||
)
|
||||
if exist %GOTOOLDIR%\dist.exe goto distok
|
||||
echo cannot find %GOTOOLDIR%\dist; nothing to clean
|
||||
goto fail
|
||||
:distok
|
||||
|
||||
"%GOBIN%\go" clean -i std
|
||||
"%GOBIN%\go" tool dist clean
|
||||
"%GOBIN%\go" clean -i cmd
|
||||
|
||||
goto end
|
||||
|
||||
:fail
|
||||
set GOBUILDFAIL=1
|
||||
|
||||
:end
|
||||
if x%GOBUILDEXIT%==x1 exit %GOBUILDFAIL%
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
)
|
||||
|
||||
@@ -27,6 +28,26 @@ func TestMain(m *testing.M) {
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
// addr2linePath returns the path to the "addr2line" binary to run.
|
||||
func addr2linePath(t testing.TB) string {
|
||||
t.Helper()
|
||||
testenv.MustHaveExec(t)
|
||||
|
||||
addr2linePathOnce.Do(func() {
|
||||
addr2lineExePath, addr2linePathErr = os.Executable()
|
||||
})
|
||||
if addr2linePathErr != nil {
|
||||
t.Fatal(addr2linePathErr)
|
||||
}
|
||||
return addr2lineExePath
|
||||
}
|
||||
|
||||
var (
|
||||
addr2linePathOnce sync.Once
|
||||
addr2lineExePath string
|
||||
addr2linePathErr error
|
||||
)
|
||||
|
||||
func loadSyms(t *testing.T, dbgExePath string) map[string]string {
|
||||
cmd := testenv.Command(t, testenv.GoToolPath(t), "tool", "nm", dbgExePath)
|
||||
out, err := cmd.CombinedOutput()
|
||||
@@ -49,7 +70,7 @@ func loadSyms(t *testing.T, dbgExePath string) map[string]string {
|
||||
}
|
||||
|
||||
func runAddr2Line(t *testing.T, dbgExePath, addr string) (funcname, path, lineno string) {
|
||||
cmd := testenv.Command(t, testenv.Executable(t), dbgExePath)
|
||||
cmd := testenv.Command(t, addr2linePath(t), dbgExePath)
|
||||
cmd.Stdin = strings.NewReader(addr)
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
@@ -87,22 +108,27 @@ func testAddr2Line(t *testing.T, dbgExePath, addr string) {
|
||||
// Debug paths are stored slash-separated, so convert to system-native.
|
||||
srcPath = filepath.FromSlash(srcPath)
|
||||
fi2, err := os.Stat(srcPath)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Stat failed: %v", err)
|
||||
}
|
||||
if !os.SameFile(fi1, fi2) {
|
||||
t.Fatalf("addr2line_test.go and %s are not same file", srcPath)
|
||||
}
|
||||
if want := "102"; srcLineNo != want {
|
||||
if want := "124"; srcLineNo != want {
|
||||
t.Fatalf("line number = %v; want %s", srcLineNo, want)
|
||||
}
|
||||
}
|
||||
|
||||
// This is line 101. The test depends on that.
|
||||
// This is line 123. The test depends on that.
|
||||
func TestAddr2Line(t *testing.T) {
|
||||
testenv.MustHaveGoBuild(t)
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
tmpDir, err := os.MkdirTemp("", "TestAddr2Line")
|
||||
if err != nil {
|
||||
t.Fatal("TempDir failed: ", err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
// Build copy of test binary with debug symbols,
|
||||
// since the one running now may not have them.
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
"internal/testenv"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
@@ -57,10 +57,7 @@ func TestGolden(t *testing.T) {
|
||||
// TODO(gri) remove extra pkg directory eventually
|
||||
goldenFile := filepath.Join("testdata", "src", "pkg", fi.Name(), "golden.txt")
|
||||
w := NewWalker(nil, "testdata/src/pkg")
|
||||
pkg, err := w.import_(fi.Name())
|
||||
if err != nil {
|
||||
t.Fatalf("import %s: %v", fi.Name(), err)
|
||||
}
|
||||
pkg, _ := w.import_(fi.Name())
|
||||
w.export(pkg)
|
||||
|
||||
if *updateGolden {
|
||||
@@ -80,7 +77,7 @@ func TestGolden(t *testing.T) {
|
||||
t.Fatalf("opening golden.txt for package %q: %v", fi.Name(), err)
|
||||
}
|
||||
wanted := strings.Split(string(bs), "\n")
|
||||
slices.Sort(wanted)
|
||||
sort.Strings(wanted)
|
||||
for _, feature := range wanted {
|
||||
if feature == "" {
|
||||
continue
|
||||
@@ -99,11 +96,6 @@ func TestGolden(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCompareAPI(t *testing.T) {
|
||||
if *flagCheck {
|
||||
// not worth repeating in -check
|
||||
t.Skip("skipping with -check set")
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
features, required, exception []string
|
||||
@@ -185,11 +177,6 @@ func TestCompareAPI(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSkipInternal(t *testing.T) {
|
||||
if *flagCheck {
|
||||
// not worth repeating in -check
|
||||
t.Skip("skipping with -check set")
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
pkg string
|
||||
want bool
|
||||
@@ -214,13 +201,7 @@ func BenchmarkAll(b *testing.B) {
|
||||
for _, context := range contexts {
|
||||
w := NewWalker(context, filepath.Join(testenv.GOROOT(b), "src"))
|
||||
for _, name := range w.stdPackages {
|
||||
pkg, err := w.import_(name)
|
||||
if _, nogo := err.(*build.NoGoError); nogo {
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
b.Fatalf("import %s (%s-%s): %v", name, context.GOOS, context.GOARCH, err)
|
||||
}
|
||||
pkg, _ := w.import_(name)
|
||||
w.export(pkg)
|
||||
}
|
||||
w.Features()
|
||||
@@ -258,7 +239,8 @@ func TestIssue21181(t *testing.T) {
|
||||
w := NewWalker(context, "testdata/src/issue21181")
|
||||
pkg, err := w.import_("p")
|
||||
if err != nil {
|
||||
t.Fatalf("import %s (%s-%s): %v", "p", context.GOOS, context.GOARCH, err)
|
||||
t.Fatalf("%s: (%s-%s) %s %v", err, context.GOOS, context.GOARCH,
|
||||
pkg.Name(), w.imported)
|
||||
}
|
||||
w.export(pkg)
|
||||
}
|
||||
@@ -304,20 +286,14 @@ func TestIssue41358(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestIssue64958(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping with -short")
|
||||
}
|
||||
if *flagCheck {
|
||||
// slow, not worth repeating in -check
|
||||
t.Skip("skipping with -check set")
|
||||
}
|
||||
testenv.MustHaveGoBuild(t)
|
||||
|
||||
defer func() {
|
||||
if x := recover(); x != nil {
|
||||
t.Errorf("expected no panic; recovered %v", x)
|
||||
}
|
||||
}()
|
||||
|
||||
testenv.MustHaveGoBuild(t)
|
||||
|
||||
for _, context := range contexts {
|
||||
w := NewWalker(context, "testdata/src/issue64958")
|
||||
pkg, err := w.importFrom("p", "", 0)
|
||||
|
||||
@@ -25,7 +25,7 @@ import (
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"slices"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -232,8 +232,8 @@ func compareAPI(w io.Writer, features, required, exception []string) (ok bool) {
|
||||
featureSet := set(features)
|
||||
exceptionSet := set(exception)
|
||||
|
||||
slices.Sort(features)
|
||||
slices.Sort(required)
|
||||
sort.Strings(features)
|
||||
sort.Strings(required)
|
||||
|
||||
take := func(sl *[]string) string {
|
||||
s := (*sl)[0]
|
||||
@@ -378,7 +378,7 @@ func (w *Walker) Features() (fs []string) {
|
||||
for f := range w.features {
|
||||
fs = append(fs, f)
|
||||
}
|
||||
slices.Sort(fs)
|
||||
sort.Strings(fs)
|
||||
return
|
||||
}
|
||||
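Many hunks in this file swap between sort.Strings and the generic slices.Sort. A minimal sketch, outside the diff, showing the two calls are interchangeable for a []string:

package main

import (
	"fmt"
	"slices"
	"sort"
)

func main() {
	a := []string{"c", "a", "b"}
	b := slices.Clone(a)

	slices.Sort(a)  // generic form (Go 1.21+), any ordered element type
	sort.Strings(b) // pre-generics equivalent, []string only

	fmt.Println(a) // [a b c]
	fmt.Println(b) // [a b c]
}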
|
||||
@@ -431,7 +431,7 @@ func tagKey(dir string, context *build.Context, tags []string) string {
|
||||
// an indirect imported package. See https://github.com/golang/go/issues/21181
|
||||
// for more detail.
|
||||
tags = append(tags, context.GOOS, context.GOARCH)
|
||||
slices.Sort(tags)
|
||||
sort.Strings(tags)
|
||||
|
||||
for _, tag := range tags {
|
||||
if ctags[tag] {
|
||||
@@ -535,7 +535,7 @@ func (w *Walker) loadImports() {
|
||||
}
|
||||
}
|
||||
|
||||
slices.Sort(stdPackages)
|
||||
sort.Strings(stdPackages)
|
||||
imports = listImports{
|
||||
stdPackages: stdPackages,
|
||||
importMap: importMap,
|
||||
@@ -717,7 +717,7 @@ func sortedMethodNames(typ *types.Interface) []string {
|
||||
for i := range list {
|
||||
list[i] = typ.Method(i).Name()
|
||||
}
|
||||
slices.Sort(list)
|
||||
sort.Strings(list)
|
||||
return list
|
||||
}
|
||||
|
||||
@@ -747,7 +747,7 @@ func (w *Walker) sortedEmbeddeds(typ *types.Interface) []string {
|
||||
list = append(list, buf.String())
|
||||
}
|
||||
}
|
||||
slices.Sort(list)
|
||||
sort.Strings(list)
|
||||
return list
|
||||
}
|
||||
|
||||
@@ -1019,7 +1019,7 @@ func (w *Walker) emitType(obj *types.TypeName) {
|
||||
|
||||
func (w *Walker) emitStructType(name string, typ *types.Struct) {
|
||||
typeStruct := fmt.Sprintf("type %s struct", name)
|
||||
w.emitf("%s", typeStruct)
|
||||
w.emitf(typeStruct)
|
||||
defer w.pushScope(typeStruct)()
|
||||
|
||||
for i := 0; i < typ.NumFields(); i++ {
|
||||
@@ -1058,7 +1058,7 @@ func (w *Walker) emitIfaceType(name string, typ *types.Interface) {
|
||||
if w.isDeprecated(m) {
|
||||
w.emitf("%s //deprecated", m.Name())
|
||||
}
|
||||
w.emitf("%s%s", m.Name(), w.signatureString(m.Signature()))
|
||||
w.emitf("%s%s", m.Name(), w.signatureString(m.Type().(*types.Signature)))
|
||||
}
|
||||
|
||||
if !complete {
|
||||
@@ -1083,12 +1083,12 @@ func (w *Walker) emitIfaceType(name string, typ *types.Interface) {
|
||||
return
|
||||
}
|
||||
|
||||
slices.Sort(methodNames)
|
||||
sort.Strings(methodNames)
|
||||
w.emitf("type %s interface { %s }", name, strings.Join(methodNames, ", "))
|
||||
}
|
||||
|
||||
func (w *Walker) emitFunc(f *types.Func) {
|
||||
sig := f.Signature()
|
||||
sig := f.Type().(*types.Signature)
|
||||
if sig.Recv() != nil {
|
||||
panic("method considered a regular function: " + f.String())
|
||||
}
|
||||
|
||||
src/cmd/api/testdata/src/pkg/p1/golden.txt (12 changes, vendored)
@@ -1,6 +1,6 @@
|
||||
pkg p1, const A //deprecated
|
||||
pkg p1, const A = 1
|
||||
pkg p1, const A ideal-int
|
||||
pkg p1, const A //deprecated
|
||||
pkg p1, const A64 = 1
|
||||
pkg p1, const A64 int64
|
||||
pkg p1, const AIsLowerA = 11
|
||||
@@ -25,8 +25,8 @@ pkg p1, func TakesFunc(func(int) int)
|
||||
pkg p1, method (*B) JustOnB()
|
||||
pkg p1, method (*B) OnBothTandBPtr()
|
||||
pkg p1, method (*Embedded) OnEmbedded()
|
||||
pkg p1, method (*S2) SMethod //deprecated
|
||||
pkg p1, method (*S2) SMethod(int8, int16, int64)
|
||||
pkg p1, method (*S2) SMethod //deprecated
|
||||
pkg p1, method (*T) JustOnT()
|
||||
pkg p1, method (*T) OnBothTandBPtr()
|
||||
pkg p1, method (B) OnBothTandBVal()
|
||||
@@ -53,8 +53,8 @@ pkg p1, type Error interface { Error, Temporary }
|
||||
pkg p1, type Error interface, Error() string
|
||||
pkg p1, type Error interface, Temporary() bool
|
||||
pkg p1, type FuncType func(int, int, string) (*B, error)
|
||||
pkg p1, type I interface, Get //deprecated
|
||||
pkg p1, type I interface, Get(string) int64
|
||||
pkg p1, type I interface, Get //deprecated
|
||||
pkg p1, type I interface, GetNamed(string) int64
|
||||
pkg p1, type I interface, Name() string
|
||||
pkg p1, type I interface, PackageTwoMeth()
|
||||
@@ -63,9 +63,9 @@ pkg p1, type I interface, unexported methods
|
||||
pkg p1, type MyInt int
|
||||
pkg p1, type Namer interface { Name }
|
||||
pkg p1, type Namer interface, Name() string
|
||||
pkg p1, type Private //deprecated
|
||||
pkg p1, type Private interface, X()
|
||||
pkg p1, type Private interface, unexported methods
|
||||
pkg p1, type Private //deprecated
|
||||
pkg p1, type Public interface { X, Y }
|
||||
pkg p1, type Public interface, X()
|
||||
pkg p1, type Public interface, Y()
|
||||
@@ -84,8 +84,8 @@ pkg p1, type TPtrExported struct
|
||||
pkg p1, type TPtrExported struct, embedded *Embedded
|
||||
pkg p1, type TPtrUnexported struct
|
||||
pkg p1, type Time struct
|
||||
pkg p1, type URL //deprecated
|
||||
pkg p1, type URL struct
|
||||
pkg p1, type URL //deprecated
|
||||
pkg p1, var Byte uint8
|
||||
pkg p1, var ByteConv []uint8
|
||||
pkg p1, var ByteFunc func(uint8) int32
|
||||
@@ -97,8 +97,8 @@ pkg p1, var StrConv string
|
||||
pkg p1, var V string
|
||||
pkg p1, var V1 uint64
|
||||
pkg p1, var V2 p2.Twoer
|
||||
pkg p1, var VError //deprecated
|
||||
pkg p1, var VError Error
|
||||
pkg p1, var VError //deprecated
|
||||
pkg p1, var X I
|
||||
pkg p1, var X0 int64
|
||||
pkg p1, var Y int
|
||||
|
||||
src/cmd/api/testdata/src/pkg/p2/golden.txt (5 changes, vendored)
@@ -1,7 +1,8 @@
|
||||
pkg p2, func F //deprecated
|
||||
pkg p2, func F() string
|
||||
pkg p2, func F //deprecated
|
||||
pkg p2, func G() Twoer
|
||||
pkg p2, func NewError(string) error
|
||||
pkg p2, type Twoer interface { PackageTwoMeth }
|
||||
pkg p2, type Twoer interface, PackageTwoMeth //deprecated
|
||||
pkg p2, type Twoer interface, PackageTwoMeth()
|
||||
pkg p2, type Twoer interface, PackageTwoMeth //deprecated
|
||||
|
||||
|
||||
src/cmd/api/testdata/src/pkg/p4/golden.txt (6 changes, vendored)
@@ -1,6 +1,6 @@
|
||||
pkg p4, func Clone //deprecated
|
||||
pkg p4, func Clone[$0 interface{ ~[]$1 }, $1 interface{}]($0) $0
|
||||
pkg p4, func NewPair[$0 interface{ M }, $1 interface{ ~int }]($0, $1) Pair[$0, $1]
|
||||
pkg p4, method (Pair[$0, $1]) First() $0
|
||||
pkg p4, method (Pair[$0, $1]) Second() $1
|
||||
pkg p4, method (Pair[$0, $1]) First() $0
|
||||
pkg p4, type Pair[$0 interface{ M }, $1 interface{ ~int }] struct
|
||||
pkg p4, func Clone[$0 interface{ ~[]$1 }, $1 interface{}]($0) $0
|
||||
pkg p4, func Clone //deprecated
|
||||
|
||||
@@ -92,8 +92,7 @@ func jumpX86(word string) bool {
|
||||
func jumpRISCV(word string) bool {
|
||||
switch word {
|
||||
case "BEQ", "BEQZ", "BGE", "BGEU", "BGEZ", "BGT", "BGTU", "BGTZ", "BLE", "BLEU", "BLEZ",
|
||||
"BLT", "BLTU", "BLTZ", "BNE", "BNEZ", "CALL", "CBEQZ", "CBNEZ", "CJ", "CJALR", "CJR",
|
||||
"JAL", "JALR", "JMP":
|
||||
"BLT", "BLTU", "BLTZ", "BNE", "BNEZ", "CALL", "JAL", "JALR", "JMP":
|
||||
return true
|
||||
}
|
||||
return false
|
||||
@@ -521,27 +520,15 @@ func archLoong64(linkArch *obj.LinkArch) *Arch {
|
||||
for i := loong64.REG_R0; i <= loong64.REG_R31; i++ {
|
||||
register[obj.Rconv(i)] = int16(i)
|
||||
}
|
||||
|
||||
for i := loong64.REG_F0; i <= loong64.REG_F31; i++ {
|
||||
register[obj.Rconv(i)] = int16(i)
|
||||
}
|
||||
|
||||
for i := loong64.REG_FCSR0; i <= loong64.REG_FCSR31; i++ {
|
||||
register[obj.Rconv(i)] = int16(i)
|
||||
}
|
||||
|
||||
for i := loong64.REG_FCC0; i <= loong64.REG_FCC31; i++ {
|
||||
register[obj.Rconv(i)] = int16(i)
|
||||
}
|
||||
|
||||
for i := loong64.REG_V0; i <= loong64.REG_V31; i++ {
|
||||
register[obj.Rconv(i)] = int16(i)
|
||||
}
|
||||
|
||||
for i := loong64.REG_X0; i <= loong64.REG_X31; i++ {
|
||||
register[obj.Rconv(i)] = int16(i)
|
||||
}
|
||||
|
||||
// Pseudo-registers.
|
||||
register["SB"] = RSB
|
||||
register["FP"] = RFP
|
||||
@@ -554,8 +541,6 @@ func archLoong64(linkArch *obj.LinkArch) *Arch {
|
||||
"FCSR": true,
|
||||
"FCC": true,
|
||||
"R": true,
|
||||
"V": true,
|
||||
"X": true,
|
||||
}
|
||||
|
||||
instructions := make(map[string]obj.As)
|
||||
@@ -601,10 +586,6 @@ func archRISCV64(shared bool) *Arch {
|
||||
name := fmt.Sprintf("F%d", i-riscv.REG_F0)
|
||||
register[name] = int16(i)
|
||||
}
|
||||
for i := riscv.REG_V0; i <= riscv.REG_V31; i++ {
|
||||
name := fmt.Sprintf("V%d", i-riscv.REG_V0)
|
||||
register[name] = int16(i)
|
||||
}
|
||||
|
||||
// General registers with ABI names.
|
||||
register["ZERO"] = riscv.REG_ZERO
|
||||
|
||||
@@ -101,7 +101,7 @@ func IsARMCMP(op obj.As) bool {
|
||||
// one of the STREX-like instructions that require special handling.
|
||||
func IsARMSTREX(op obj.As) bool {
|
||||
switch op {
|
||||
case arm.ASTREX, arm.ASTREXD, arm.ASTREXB, arm.ASWPW, arm.ASWPBU:
|
||||
case arm.ASTREX, arm.ASTREXD, arm.ASWPW, arm.ASWPBU:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
|
||||
@@ -59,10 +59,10 @@ func jumpArm64(word string) bool {
|
||||
|
||||
var arm64SpecialOperand map[string]arm64.SpecialOperand
|
||||
|
||||
// ARM64SpecialOperand returns the internal representation of a special operand.
|
||||
func ARM64SpecialOperand(name string) arm64.SpecialOperand {
|
||||
// GetARM64SpecialOperand returns the internal representation of a special operand.
|
||||
func GetARM64SpecialOperand(name string) arm64.SpecialOperand {
|
||||
if arm64SpecialOperand == nil {
|
||||
// Generate mapping when function is first called.
|
||||
// Generate the mapping automatically when the first time the function is called.
|
||||
arm64SpecialOperand = map[string]arm64.SpecialOperand{}
|
||||
for opd := arm64.SPOP_BEGIN; opd < arm64.SPOP_END; opd++ {
|
||||
arm64SpecialOperand[opd.String()] = opd
|
||||
@@ -195,6 +195,149 @@ func ARM64RegisterShift(reg, op, count int16) (int64, error) {
|
||||
return int64(reg&31)<<16 | int64(op)<<22 | int64(uint16(count)), nil
|
||||
}
|
||||
|
||||
// ARM64RegisterExtension constructs an ARM64 register with extension or arrangement.
|
||||
func ARM64RegisterExtension(a *obj.Addr, ext string, reg, num int16, isAmount, isIndex bool) error {
|
||||
Rnum := (reg & 31) + int16(num<<5)
|
||||
if isAmount {
|
||||
if num < 0 || num > 7 {
|
||||
return errors.New("index shift amount is out of range")
|
||||
}
|
||||
}
|
||||
if reg <= arm64.REG_R31 && reg >= arm64.REG_R0 {
|
||||
if !isAmount {
|
||||
return errors.New("invalid register extension")
|
||||
}
|
||||
switch ext {
|
||||
case "UXTB":
|
||||
if a.Type == obj.TYPE_MEM {
|
||||
return errors.New("invalid shift for the register offset addressing mode")
|
||||
}
|
||||
a.Reg = arm64.REG_UXTB + Rnum
|
||||
case "UXTH":
|
||||
if a.Type == obj.TYPE_MEM {
|
||||
return errors.New("invalid shift for the register offset addressing mode")
|
||||
}
|
||||
a.Reg = arm64.REG_UXTH + Rnum
|
||||
case "UXTW":
|
||||
// effective address of memory is a base register value and an offset register value.
|
||||
if a.Type == obj.TYPE_MEM {
|
||||
a.Index = arm64.REG_UXTW + Rnum
|
||||
} else {
|
||||
a.Reg = arm64.REG_UXTW + Rnum
|
||||
}
|
||||
case "UXTX":
|
||||
if a.Type == obj.TYPE_MEM {
|
||||
return errors.New("invalid shift for the register offset addressing mode")
|
||||
}
|
||||
a.Reg = arm64.REG_UXTX + Rnum
|
||||
case "SXTB":
|
||||
if a.Type == obj.TYPE_MEM {
|
||||
return errors.New("invalid shift for the register offset addressing mode")
|
||||
}
|
||||
a.Reg = arm64.REG_SXTB + Rnum
|
||||
case "SXTH":
|
||||
if a.Type == obj.TYPE_MEM {
|
||||
return errors.New("invalid shift for the register offset addressing mode")
|
||||
}
|
||||
a.Reg = arm64.REG_SXTH + Rnum
|
||||
case "SXTW":
|
||||
if a.Type == obj.TYPE_MEM {
|
||||
a.Index = arm64.REG_SXTW + Rnum
|
||||
} else {
|
||||
a.Reg = arm64.REG_SXTW + Rnum
|
||||
}
|
||||
case "SXTX":
|
||||
if a.Type == obj.TYPE_MEM {
|
||||
a.Index = arm64.REG_SXTX + Rnum
|
||||
} else {
|
||||
a.Reg = arm64.REG_SXTX + Rnum
|
||||
}
|
||||
case "LSL":
|
||||
a.Index = arm64.REG_LSL + Rnum
|
||||
default:
|
||||
return errors.New("unsupported general register extension type: " + ext)
|
||||
|
||||
}
|
||||
} else if reg <= arm64.REG_V31 && reg >= arm64.REG_V0 {
|
||||
switch ext {
|
||||
case "B8":
|
||||
if isIndex {
|
||||
return errors.New("invalid register extension")
|
||||
}
|
||||
a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_8B & 15) << 5)
|
||||
case "B16":
|
||||
if isIndex {
|
||||
return errors.New("invalid register extension")
|
||||
}
|
||||
a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_16B & 15) << 5)
|
||||
case "H4":
|
||||
if isIndex {
|
||||
return errors.New("invalid register extension")
|
||||
}
|
||||
a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_4H & 15) << 5)
|
||||
case "H8":
|
||||
if isIndex {
|
||||
return errors.New("invalid register extension")
|
||||
}
|
||||
a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_8H & 15) << 5)
|
||||
case "S2":
|
||||
if isIndex {
|
||||
return errors.New("invalid register extension")
|
||||
}
|
||||
a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_2S & 15) << 5)
|
||||
case "S4":
|
||||
if isIndex {
|
||||
return errors.New("invalid register extension")
|
||||
}
|
||||
a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_4S & 15) << 5)
|
||||
case "D1":
|
||||
if isIndex {
|
||||
return errors.New("invalid register extension")
|
||||
}
|
||||
a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_1D & 15) << 5)
|
||||
case "D2":
|
||||
if isIndex {
|
||||
return errors.New("invalid register extension")
|
||||
}
|
||||
a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_2D & 15) << 5)
|
||||
case "Q1":
|
||||
if isIndex {
|
||||
return errors.New("invalid register extension")
|
||||
}
|
||||
a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_1Q & 15) << 5)
|
||||
case "B":
|
||||
if !isIndex {
|
||||
return nil
|
||||
}
|
||||
a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_B & 15) << 5)
|
||||
a.Index = num
|
||||
case "H":
|
||||
if !isIndex {
|
||||
return nil
|
||||
}
|
||||
a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_H & 15) << 5)
|
||||
a.Index = num
|
||||
case "S":
|
||||
if !isIndex {
|
||||
return nil
|
||||
}
|
||||
a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_S & 15) << 5)
|
||||
a.Index = num
|
||||
case "D":
|
||||
if !isIndex {
|
||||
return nil
|
||||
}
|
||||
a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_D & 15) << 5)
|
||||
a.Index = num
|
||||
default:
|
||||
return errors.New("unsupported simd register extension type: " + ext)
|
||||
}
|
||||
} else {
|
||||
return errors.New("invalid register and extension combination")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ARM64RegisterArrangement constructs an ARM64 vector register arrangement.
|
||||
func ARM64RegisterArrangement(reg int16, name, arng string) (int64, error) {
|
||||
var curQ, curSize uint16
|
||||
|
||||
@@ -11,8 +11,6 @@ package arch
|
||||
import (
|
||||
"cmd/internal/obj"
|
||||
"cmd/internal/obj/loong64"
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
func jumpLoong64(word string) bool {
|
||||
@@ -23,6 +21,29 @@ func jumpLoong64(word string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// IsLoong64CMP reports whether the op (as defined by an loong64.A* constant) is
|
||||
// one of the CMP instructions that require special handling.
|
||||
func IsLoong64CMP(op obj.As) bool {
|
||||
switch op {
|
||||
case loong64.ACMPEQF, loong64.ACMPEQD, loong64.ACMPGEF, loong64.ACMPGED,
|
||||
loong64.ACMPGTF, loong64.ACMPGTD:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsLoong64MUL reports whether the op (as defined by an loong64.A* constant) is
|
||||
// one of the MUL/DIV/REM instructions that require special handling.
|
||||
func IsLoong64MUL(op obj.As) bool {
|
||||
switch op {
|
||||
case loong64.AMUL, loong64.AMULU, loong64.AMULV, loong64.AMULVU,
|
||||
loong64.ADIV, loong64.ADIVU, loong64.ADIVV, loong64.ADIVVU,
|
||||
loong64.AREM, loong64.AREMU, loong64.AREMV, loong64.AREMVU:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsLoong64RDTIME reports whether the op (as defined by an loong64.A*
|
||||
// constant) is one of the RDTIMELW/RDTIMEHW/RDTIMED instructions that
|
||||
// require special handling.
|
||||
@@ -34,94 +55,10 @@ func IsLoong64RDTIME(op obj.As) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func IsLoong64PRELD(op obj.As) bool {
|
||||
switch op {
|
||||
case loong64.APRELD, loong64.APRELDX:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func IsLoong64AMO(op obj.As) bool {
|
||||
return loong64.IsAtomicInst(op)
|
||||
}
|
||||
|
||||
var loong64ElemExtMap = map[string]int16{
|
||||
"B": loong64.ARNG_B,
|
||||
"H": loong64.ARNG_H,
|
||||
"W": loong64.ARNG_W,
|
||||
"V": loong64.ARNG_V,
|
||||
"BU": loong64.ARNG_BU,
|
||||
"HU": loong64.ARNG_HU,
|
||||
"WU": loong64.ARNG_WU,
|
||||
"VU": loong64.ARNG_VU,
|
||||
}
|
||||
|
||||
var loong64LsxArngExtMap = map[string]int16{
|
||||
"B16": loong64.ARNG_16B,
|
||||
"H8": loong64.ARNG_8H,
|
||||
"W4": loong64.ARNG_4W,
|
||||
"V2": loong64.ARNG_2V,
|
||||
}
|
||||
|
||||
var loong64LasxArngExtMap = map[string]int16{
|
||||
"B32": loong64.ARNG_32B,
|
||||
"H16": loong64.ARNG_16H,
|
||||
"W8": loong64.ARNG_8W,
|
||||
"V4": loong64.ARNG_4V,
|
||||
"Q2": loong64.ARNG_2Q,
|
||||
}
|
||||
|
||||
// Loong64RegisterExtension constructs an Loong64 register with extension or arrangement.
|
||||
func Loong64RegisterExtension(a *obj.Addr, ext string, reg, num int16, isAmount, isIndex bool) error {
|
||||
var ok bool
|
||||
var arng_type int16
|
||||
var simd_type int16
|
||||
|
||||
switch {
|
||||
case reg >= loong64.REG_V0 && reg <= loong64.REG_V31:
|
||||
simd_type = loong64.LSX
|
||||
case reg >= loong64.REG_X0 && reg <= loong64.REG_X31:
|
||||
simd_type = loong64.LASX
|
||||
default:
|
||||
return errors.New("Loong64 extension: invalid LSX/LASX register: " + fmt.Sprintf("%d", reg))
|
||||
}
|
||||
|
||||
if isIndex {
|
||||
arng_type, ok = loong64ElemExtMap[ext]
|
||||
if !ok {
|
||||
return errors.New("Loong64 extension: invalid LSX/LASX arrangement type: " + ext)
|
||||
}
|
||||
|
||||
a.Reg = loong64.REG_ELEM
|
||||
a.Reg += ((reg & loong64.EXT_REG_MASK) << loong64.EXT_REG_SHIFT)
|
||||
a.Reg += ((arng_type & loong64.EXT_TYPE_MASK) << loong64.EXT_TYPE_SHIFT)
|
||||
a.Reg += ((simd_type & loong64.EXT_SIMDTYPE_MASK) << loong64.EXT_SIMDTYPE_SHIFT)
|
||||
a.Index = num
|
||||
} else {
|
||||
switch simd_type {
|
||||
case loong64.LSX:
|
||||
arng_type, ok = loong64LsxArngExtMap[ext]
|
||||
if !ok {
|
||||
return errors.New("Loong64 extension: invalid LSX arrangement type: " + ext)
|
||||
}
|
||||
|
||||
case loong64.LASX:
|
||||
arng_type, ok = loong64LasxArngExtMap[ext]
|
||||
if !ok {
|
||||
return errors.New("Loong64 extension: invalid LASX arrangement type: " + ext)
|
||||
}
|
||||
}
|
||||
|
||||
a.Reg = loong64.REG_ARNG
|
||||
a.Reg += ((reg & loong64.EXT_REG_MASK) << loong64.EXT_REG_SHIFT)
|
||||
a.Reg += ((arng_type & loong64.EXT_TYPE_MASK) << loong64.EXT_TYPE_SHIFT)
|
||||
a.Reg += ((simd_type & loong64.EXT_SIMDTYPE_MASK) << loong64.EXT_SIMDTYPE_SHIFT)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func loong64RegisterNumber(name string, n int16) (int16, bool) {
|
||||
switch name {
|
||||
case "F":
|
||||
@@ -140,14 +77,6 @@ func loong64RegisterNumber(name string, n int16) (int16, bool) {
|
||||
if 0 <= n && n <= 31 {
|
||||
return loong64.REG_R0 + n, true
|
||||
}
|
||||
case "V":
|
||||
if 0 <= n && n <= 31 {
|
||||
return loong64.REG_V0 + n, true
|
||||
}
|
||||
case "X":
|
||||
if 0 <= n && n <= 31 {
|
||||
return loong64.REG_X0 + n, true
|
||||
}
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
@@ -25,7 +25,7 @@ func jumpPPC64(word string) bool {
|
||||
// one of the CMP instructions that require special handling.
|
||||
func IsPPC64CMP(op obj.As) bool {
|
||||
switch op {
|
||||
case ppc64.ACMP, ppc64.ACMPU, ppc64.ACMPW, ppc64.ACMPWU, ppc64.AFCMPO, ppc64.AFCMPU, ppc64.ADCMPO, ppc64.ADCMPU, ppc64.ADCMPOQ, ppc64.ADCMPUQ:
|
||||
case ppc64.ACMP, ppc64.ACMPU, ppc64.ACMPW, ppc64.ACMPWU, ppc64.AFCMPO, ppc64.AFCMPU:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
|
||||
@@ -11,11 +11,11 @@ package arch
|
||||
import (
|
||||
"cmd/internal/obj"
|
||||
"cmd/internal/obj/riscv"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// IsRISCV64AMO reports whether op is an AMO instruction that requires
|
||||
// special handling.
|
||||
// IsRISCV64AMO reports whether the op (as defined by a riscv.A*
|
||||
// constant) is one of the AMO instructions that requires special
|
||||
// handling.
|
||||
func IsRISCV64AMO(op obj.As) bool {
|
||||
switch op {
|
||||
case riscv.ASCW, riscv.ASCD, riscv.AAMOSWAPW, riscv.AAMOSWAPD, riscv.AAMOADDW, riscv.AAMOADDD,
|
||||
@@ -26,59 +26,3 @@ func IsRISCV64AMO(op obj.As) bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsRISCV64VTypeI reports whether op is a vtype immediate instruction that
|
||||
// requires special handling.
|
||||
func IsRISCV64VTypeI(op obj.As) bool {
|
||||
return op == riscv.AVSETVLI || op == riscv.AVSETIVLI
|
||||
}
|
||||
|
||||
// IsRISCV64CSRO reports whether the op is an instruction that uses
|
||||
// CSR symbolic names and whether that instruction expects a register
|
||||
// or an immediate source operand.
|
||||
func IsRISCV64CSRO(op obj.As) (imm bool, ok bool) {
|
||||
switch op {
|
||||
case riscv.ACSRRCI, riscv.ACSRRSI, riscv.ACSRRWI:
|
||||
imm = true
|
||||
fallthrough
|
||||
case riscv.ACSRRC, riscv.ACSRRS, riscv.ACSRRW:
|
||||
ok = true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var riscv64SpecialOperand map[string]riscv.SpecialOperand
|
||||
|
||||
// RISCV64SpecialOperand returns the internal representation of a special operand.
|
||||
func RISCV64SpecialOperand(name string) riscv.SpecialOperand {
|
||||
if riscv64SpecialOperand == nil {
|
||||
// Generate mapping when function is first called.
|
||||
riscv64SpecialOperand = map[string]riscv.SpecialOperand{}
|
||||
for opd := riscv.SPOP_RVV_BEGIN; opd < riscv.SPOP_RVV_END; opd++ {
|
||||
riscv64SpecialOperand[opd.String()] = opd
|
||||
}
|
||||
// Add the CSRs
|
||||
for csrCode, csrName := range riscv.CSRs {
|
||||
// The set of RVV special operand names and the set of CSR special operands
|
||||
// names are disjoint and so can safely share a single namespace. However,
|
||||
// it's possible that a future update to the CSRs in inst.go could introduce
|
||||
// a conflict. This check ensures that such a conflict does not go
|
||||
// unnoticed.
|
||||
if _, ok := riscv64SpecialOperand[csrName]; ok {
|
||||
panic(fmt.Sprintf("riscv64 special operand %q redefined", csrName))
|
||||
}
|
||||
riscv64SpecialOperand[csrName] = riscv.SpecialOperand(int(csrCode) + int(riscv.SPOP_CSR_BEGIN))
|
||||
}
|
||||
}
|
||||
if opd, ok := riscv64SpecialOperand[name]; ok {
|
||||
return opd
|
||||
}
|
||||
return riscv.SPOP_END
|
||||
}
|
||||
|
||||
// RISCV64ValidateVectorType reports whether the given configuration is a
|
||||
// valid vector type.
|
||||
func RISCV64ValidateVectorType(vsew, vlmul, vtail, vmask int64) error {
|
||||
_, err := riscv.EncodeVectorType(vsew, vlmul, vtail, vmask)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -248,7 +248,7 @@ func (p *Parser) asmData(operands [][]lex.Token) {
|
||||
case obj.TYPE_CONST:
|
||||
switch sz {
|
||||
case 1, 2, 4, 8:
|
||||
nameAddr.Sym.WriteInt(p.ctxt, nameAddr.Offset, sz, valueAddr.Offset)
|
||||
nameAddr.Sym.WriteInt(p.ctxt, nameAddr.Offset, int(sz), valueAddr.Offset)
|
||||
default:
|
||||
p.errorf("bad int size for DATA argument: %d", sz)
|
||||
}
|
||||
@@ -262,10 +262,10 @@ func (p *Parser) asmData(operands [][]lex.Token) {
|
||||
p.errorf("bad float size for DATA argument: %d", sz)
|
||||
}
|
||||
case obj.TYPE_SCONST:
|
||||
nameAddr.Sym.WriteString(p.ctxt, nameAddr.Offset, sz, valueAddr.Val.(string))
|
||||
nameAddr.Sym.WriteString(p.ctxt, nameAddr.Offset, int(sz), valueAddr.Val.(string))
|
||||
case obj.TYPE_ADDR:
|
||||
if sz == p.arch.PtrSize {
|
||||
nameAddr.Sym.WriteAddr(p.ctxt, nameAddr.Offset, sz, valueAddr.Sym, valueAddr.Offset)
|
||||
nameAddr.Sym.WriteAddr(p.ctxt, nameAddr.Offset, int(sz), valueAddr.Sym, valueAddr.Offset)
|
||||
} else {
|
||||
p.errorf("bad addr size for DATA argument: %d", sz)
|
||||
}
|
||||
@@ -353,7 +353,6 @@ func (p *Parser) asmPCAlign(operands [][]lex.Token) {
|
||||
prog := &obj.Prog{
|
||||
Ctxt: p.ctxt,
|
||||
As: obj.APCALIGN,
|
||||
Pos: p.pos(),
|
||||
From: key,
|
||||
}
|
||||
p.append(prog, "", true)
|
||||
@@ -643,6 +642,12 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) {
|
||||
break
|
||||
}
|
||||
} else if p.arch.Family == sys.Loong64 {
|
||||
if arch.IsLoong64CMP(op) {
|
||||
prog.From = a[0]
|
||||
prog.Reg = p.getRegister(prog, op, &a[1])
|
||||
break
|
||||
}
|
||||
|
||||
if arch.IsLoong64RDTIME(op) {
|
||||
// The Loong64 RDTIME family of instructions is a bit special,
|
||||
// in that both its register operands are outputs
|
||||
@@ -654,12 +659,6 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) {
|
||||
prog.RegTo2 = a[1].Reg
|
||||
break
|
||||
}
|
||||
|
||||
if arch.IsLoong64PRELD(op) {
|
||||
prog.From = a[0]
|
||||
prog.AddRestSource(a[1])
|
||||
break
|
||||
}
|
||||
}
|
||||
prog.From = a[0]
|
||||
prog.To = a[1]
|
||||
@@ -676,11 +675,6 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) {
|
||||
prog.From = a[0]
|
||||
prog.To = a[1]
|
||||
prog.RegTo2 = a[2].Reg
|
||||
|
||||
case arch.IsLoong64PRELD(op):
|
||||
prog.From = a[0]
|
||||
prog.AddRestSourceArgs([]obj.Addr{a[1], a[2]})
|
||||
|
||||
default:
|
||||
prog.From = a[0]
|
||||
prog.Reg = p.getRegister(prog, op, &a[1])
|
||||
@@ -782,21 +776,6 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) {
|
||||
prog.RegTo2 = a[2].Reg
|
||||
break
|
||||
}
|
||||
// RISCV64 instructions that reference CSRs with symbolic names.
|
||||
if isImm, ok := arch.IsRISCV64CSRO(op); ok {
|
||||
if a[0].Type != obj.TYPE_CONST && isImm {
|
||||
p.errorf("invalid value for first operand to %s instruction, must be a 5 bit unsigned immediate", op)
|
||||
return
|
||||
}
|
||||
if a[1].Type != obj.TYPE_SPECIAL {
|
||||
p.errorf("invalid value for second operand to %s instruction, must be a CSR name", op)
|
||||
return
|
||||
}
|
||||
prog.AddRestSourceArgs([]obj.Addr{a[1]})
|
||||
prog.From = a[0]
|
||||
prog.To = a[2]
|
||||
break
|
||||
}
|
||||
prog.From = a[0]
|
||||
prog.Reg = p.getRegister(prog, op, &a[1])
|
||||
prog.To = a[2]
|
||||
@@ -849,13 +828,6 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) {
|
||||
prog.To = a[3]
|
||||
break
|
||||
}
|
||||
if p.arch.Family == sys.Loong64 {
|
||||
prog.From = a[0]
|
||||
prog.Reg = p.getRegister(prog, op, &a[1])
|
||||
prog.AddRestSource(a[2])
|
||||
prog.To = a[3]
|
||||
break
|
||||
}
|
||||
if p.arch.Family == sys.PPC64 {
|
||||
prog.From = a[0]
|
||||
prog.To = a[3]
|
||||
@@ -941,19 +913,6 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) {
|
||||
prog.To = a[5]
|
||||
break
|
||||
}
|
||||
if p.arch.Family == sys.RISCV64 && arch.IsRISCV64VTypeI(op) {
|
||||
prog.From = a[0]
|
||||
vsew := p.getSpecial(prog, op, &a[1])
|
||||
vlmul := p.getSpecial(prog, op, &a[2])
|
||||
vtail := p.getSpecial(prog, op, &a[3])
|
||||
vmask := p.getSpecial(prog, op, &a[4])
|
||||
if err := arch.RISCV64ValidateVectorType(vsew, vlmul, vtail, vmask); err != nil {
|
||||
p.errorf("invalid vtype: %v", err)
|
||||
}
|
||||
prog.AddRestSourceArgs([]obj.Addr{a[1], a[2], a[3], a[4]})
|
||||
prog.To = a[5]
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
default:
|
||||
p.errorf("can't handle %s instruction with %d operands", op, len(a))
|
||||
@@ -989,6 +948,14 @@ func (p *Parser) getConstant(prog *obj.Prog, op obj.As, addr *obj.Addr) int64 {
|
||||
return addr.Offset
|
||||
}
|
||||
|
||||
// getImmediate checks that addr represents an immediate constant and returns its value.
|
||||
func (p *Parser) getImmediate(prog *obj.Prog, op obj.As, addr *obj.Addr) int64 {
|
||||
if addr.Type != obj.TYPE_CONST || addr.Name != 0 || addr.Reg != 0 || addr.Index != 0 {
|
||||
p.errorf("%s: expected immediate constant; found %s", op, obj.Dconv(prog, addr))
|
||||
}
|
||||
return addr.Offset
|
||||
}
|
||||
|
||||
// getRegister checks that addr represents a register and returns its value.
|
||||
func (p *Parser) getRegister(prog *obj.Prog, op obj.As, addr *obj.Addr) int16 {
|
||||
if addr.Type != obj.TYPE_REG || addr.Offset != 0 || addr.Name != 0 || addr.Index != 0 {
|
||||
@@ -996,11 +963,3 @@ func (p *Parser) getRegister(prog *obj.Prog, op obj.As, addr *obj.Addr) int16 {
|
||||
}
|
||||
return addr.Reg
|
||||
}
|
||||
|
||||
// getSpecial checks that addr represents a special operand and returns its value.
|
||||
func (p *Parser) getSpecial(prog *obj.Prog, op obj.As, addr *obj.Addr) int64 {
|
||||
if addr.Type != obj.TYPE_SPECIAL || addr.Name != 0 || addr.Reg != 0 || addr.Index != 0 {
|
||||
p.errorf("%s: expected special operand; found %s", op, obj.Dconv(prog, addr))
|
||||
}
|
||||
return addr.Offset
|
||||
}
|
||||
|
||||
@@ -38,7 +38,7 @@ func testEndToEnd(t *testing.T, goarch, file string) {
|
||||
ctxt.IsAsm = true
|
||||
defer ctxt.Bso.Flush()
|
||||
failed := false
|
||||
ctxt.DiagFunc = func(format string, args ...any) {
|
||||
ctxt.DiagFunc = func(format string, args ...interface{}) {
|
||||
failed = true
|
||||
t.Errorf(format, args...)
|
||||
}
|
||||
@@ -193,17 +193,12 @@ Diff:
|
||||
top := pList.Firstpc
|
||||
var text *obj.LSym
|
||||
ok = true
|
||||
ctxt.DiagFunc = func(format string, args ...any) {
|
||||
ctxt.DiagFunc = func(format string, args ...interface{}) {
|
||||
t.Errorf(format, args...)
|
||||
ok = false
|
||||
}
|
||||
obj.Flushplist(ctxt, pList, nil)
|
||||
|
||||
if !ok {
|
||||
// If we've encountered errors, the output is unlikely to be sane.
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
for p := top; p != nil; p = p.Link {
|
||||
if p.As == obj.ATEXT {
|
||||
text = p.From.Sym
|
||||
@@ -299,7 +294,7 @@ func testErrors(t *testing.T, goarch, file string, flags ...string) {
|
||||
failed := false
|
||||
var errBuf bytes.Buffer
|
||||
parser.errorWriter = &errBuf
|
||||
ctxt.DiagFunc = func(format string, args ...any) {
|
||||
ctxt.DiagFunc = func(format string, args ...interface{}) {
|
||||
failed = true
|
||||
s := fmt.Sprintf(format, args...)
|
||||
if !strings.HasSuffix(s, "\n") {
|
||||
@@ -470,16 +465,9 @@ func TestLOONG64Encoder(t *testing.T) {
|
||||
testEndToEnd(t, "loong64", "loong64enc1")
|
||||
testEndToEnd(t, "loong64", "loong64enc2")
|
||||
testEndToEnd(t, "loong64", "loong64enc3")
|
||||
testEndToEnd(t, "loong64", "loong64enc4")
|
||||
testEndToEnd(t, "loong64", "loong64enc5")
|
||||
testEndToEnd(t, "loong64", "loong64enc6")
|
||||
testEndToEnd(t, "loong64", "loong64")
|
||||
}
|
||||
|
||||
func TestLOONG64Errors(t *testing.T) {
|
||||
testErrors(t, "loong64", "loong64error")
|
||||
}
|
||||
|
||||
func TestPPC64EndToEnd(t *testing.T) {
|
||||
defer func(old int) { buildcfg.GOPPC64 = old }(buildcfg.GOPPC64)
|
||||
for _, goppc64 := range []int{8, 9, 10} {
|
||||
@@ -491,35 +479,12 @@ func TestPPC64EndToEnd(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func testRISCV64AllProfiles(t *testing.T, testFn func(t *testing.T)) {
|
||||
t.Helper()
|
||||
|
||||
defer func(orig int) { buildcfg.GORISCV64 = orig }(buildcfg.GORISCV64)
|
||||
|
||||
for _, goriscv64 := range []int{20, 22, 23} {
|
||||
t.Run(fmt.Sprintf("rva%vu64", goriscv64), func(t *testing.T) {
|
||||
buildcfg.GORISCV64 = goriscv64
|
||||
testFn(t)
|
||||
})
|
||||
}
|
||||
func TestRISCVEndToEnd(t *testing.T) {
|
||||
testEndToEnd(t, "riscv64", "riscv64")
|
||||
}
|
||||
|
||||
func TestRISCV64EndToEnd(t *testing.T) {
|
||||
testRISCV64AllProfiles(t, func(t *testing.T) {
|
||||
testEndToEnd(t, "riscv64", "riscv64")
|
||||
})
|
||||
}
|
||||
|
||||
func TestRISCV64Errors(t *testing.T) {
|
||||
testRISCV64AllProfiles(t, func(t *testing.T) {
|
||||
testErrors(t, "riscv64", "riscv64error")
|
||||
})
|
||||
}
|
||||
|
||||
func TestRISCV64Validation(t *testing.T) {
|
||||
testRISCV64AllProfiles(t, func(t *testing.T) {
|
||||
testErrors(t, "riscv64", "riscv64validation")
|
||||
})
|
||||
func TestRISCVErrors(t *testing.T) {
|
||||
testErrors(t, "riscv64", "riscv64error")
|
||||
}
|
||||
|
||||
func TestS390XEndToEnd(t *testing.T) {
|
||||
|
||||
@@ -21,7 +21,6 @@ import (
|
||||
"cmd/asm/internal/lex"
|
||||
"cmd/internal/obj"
|
||||
"cmd/internal/obj/arm64"
|
||||
"cmd/internal/obj/riscv"
|
||||
"cmd/internal/obj/x86"
|
||||
"cmd/internal/objabi"
|
||||
"cmd/internal/src"
|
||||
@@ -78,7 +77,7 @@ func NewParser(ctxt *obj.Link, ar *arch.Arch, lexer lex.TokenReader) *Parser {
|
||||
// and turn it into a recoverable panic.
|
||||
var panicOnError bool
|
||||
|
||||
func (p *Parser) errorf(format string, args ...any) {
|
||||
func (p *Parser) errorf(format string, args ...interface{}) {
|
||||
if panicOnError {
|
||||
panic(fmt.Errorf(format, args...))
|
||||
}
|
||||
@@ -90,7 +89,7 @@ func (p *Parser) errorf(format string, args ...any) {
|
||||
if p.lex != nil {
|
||||
// Put file and line information on head of message.
|
||||
format = "%s:%d: " + format + "\n"
|
||||
args = append([]any{p.lex.File(), p.lineNum}, args...)
|
||||
args = append([]interface{}{p.lex.File(), p.lineNum}, args...)
|
||||
}
|
||||
fmt.Fprintf(p.errorWriter, format, args...)
|
||||
p.errorCount++
|
||||
@@ -218,8 +217,8 @@ next:
|
||||
for {
|
||||
tok = p.nextToken()
|
||||
if len(operands) == 0 && len(items) == 0 {
|
||||
if p.arch.InFamily(sys.ARM, sys.ARM64, sys.AMD64, sys.I386, sys.Loong64, sys.RISCV64) && tok == '.' {
|
||||
// Suffixes: ARM conditionals, Loong64 vector instructions, RISCV rounding mode or x86 modifiers.
|
||||
if p.arch.InFamily(sys.ARM, sys.ARM64, sys.AMD64, sys.I386, sys.RISCV64) && tok == '.' {
|
||||
// Suffixes: ARM conditionals, RISCV rounding mode or x86 modifiers.
|
||||
tok = p.nextToken()
|
||||
str := p.lex.Text()
|
||||
if tok != scanner.Ident {
|
||||
@@ -399,21 +398,16 @@ func (p *Parser) operand(a *obj.Addr) {
|
||||
tok := p.next()
|
||||
name := tok.String()
|
||||
if tok.ScanToken == scanner.Ident && !p.atStartOfRegister(name) {
|
||||
// See if this is an architecture specific special operand.
|
||||
switch p.arch.Family {
|
||||
case sys.ARM64:
|
||||
if opd := arch.ARM64SpecialOperand(name); opd != arm64.SPOP_END {
|
||||
// arm64 special operands.
|
||||
if opd := arch.GetARM64SpecialOperand(name); opd != arm64.SPOP_END {
|
||||
a.Type = obj.TYPE_SPECIAL
|
||||
a.Offset = int64(opd)
|
||||
break
|
||||
}
|
||||
case sys.RISCV64:
|
||||
if opd := arch.RISCV64SpecialOperand(name); opd != riscv.SPOP_END {
|
||||
a.Type = obj.TYPE_SPECIAL
|
||||
a.Offset = int64(opd)
|
||||
}
|
||||
}
|
||||
|
||||
if a.Type != obj.TYPE_SPECIAL {
|
||||
fallthrough
|
||||
default:
|
||||
// We have a symbol. Parse $sym±offset(symkind)
|
||||
p.symbolReference(a, p.qualifySymbol(name), prefix)
|
||||
}
|
||||
@@ -576,13 +570,12 @@ func (p *Parser) atRegisterShift() bool {
|
||||
// atRegisterExtension reports whether we are at the start of an ARM64 extended register.
|
||||
// We have consumed the register or R prefix.
|
||||
func (p *Parser) atRegisterExtension() bool {
|
||||
switch p.arch.Family {
|
||||
case sys.ARM64, sys.Loong64:
|
||||
// R1.xxx
|
||||
return p.peek() == '.'
|
||||
default:
|
||||
// ARM64 only.
|
||||
if p.arch.Family != sys.ARM64 {
|
||||
return false
|
||||
}
|
||||
// R1.xxx
|
||||
return p.peek() == '.'
|
||||
}
|
||||
|
||||
// registerReference parses a register given either the name, R10, or a parenthesized form, SPR(10).
|
||||
@@ -715,7 +708,7 @@ func (p *Parser) registerShift(name string, prefix rune) int64 {
|
||||
if p.arch.Family == sys.ARM64 {
|
||||
off, err := arch.ARM64RegisterShift(r1, op, count)
|
||||
if err != nil {
|
||||
p.errorf("%v", err)
|
||||
p.errorf(err.Error())
|
||||
}
|
||||
return off
|
||||
} else {
|
||||
@@ -775,14 +768,9 @@ func (p *Parser) registerExtension(a *obj.Addr, name string, prefix rune) {

switch p.arch.Family {
case sys.ARM64:
err := arm64.ARM64RegisterExtension(a, ext, reg, num, isAmount, isIndex)
err := arch.ARM64RegisterExtension(a, ext, reg, num, isAmount, isIndex)
if err != nil {
p.errorf("%v", err)
}
case sys.Loong64:
err := arch.Loong64RegisterExtension(a, ext, reg, num, isAmount, isIndex)
if err != nil {
p.errorf("%v", err)
p.errorf(err.Error())
}
default:
p.errorf("register extension not supported on this architecture")
@@ -1129,7 +1117,7 @@ ListLoop:
ext := tok.String()
curArrangement, err := arch.ARM64RegisterArrangement(reg, name, ext)
if err != nil {
p.errorf("%v", err)
p.errorf(err.Error())
}
if firstReg == -1 {
// only record the first register and arrangement
@@ -1176,7 +1164,7 @@ ListLoop:
case sys.ARM64:
offset, err := arch.ARM64RegisterListOffset(firstReg, regCnt, arrangement)
if err != nil {
p.errorf("%v", err)
p.errorf(err.Error())
}
a.Offset = offset
default:

@@ -169,8 +169,3 @@ TEXT ·a34(SB), 0, $0-0
SHLXQ AX, CX, R15
ADDQ $1, R15
RET

// Ensure from3 get GOT-rewritten without errors.
TEXT ·a35(SB), 0, $0-0
VGF2P8AFFINEQB $0, runtime·writeBarrier(SB), Z1, Z1
RET

@@ -1059,7 +1059,5 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0
RDPID DX // f30fc7fa
RDPID R11 // f3410fc7fb

ENDBR64 // f30f1efa

// End of tests.
RET

src/cmd/asm/internal/asm/testdata/arm64.s (21 changes, vendored)
@@ -400,8 +400,6 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
MOVD $0x11110000, R1 // MOVD $286326784, R1 // 2122a2d2
MOVD $0xaaaa0000aaaa1111, R1 // MOVD $-6149102338357718767, R1 // 212282d24155b5f24155f5f2
MOVD $0x1111ffff1111aaaa, R1 // MOVD $1230045644216969898, R1 // a1aa8a922122a2f22122e2f2
MOVD $0xaaaaaaaaaaaaaaab, R1 // MOVD $-6148914691236517205, R1 // e1f301b2615595f2
MOVD $0x0ff019940ff00ff0, R1 // MOVD $1148446028692721648, R1 // e19f0cb28132c3f2
MOVD $0, R1 // e1031faa
MOVD $-1, R1 // 01008092
MOVD $0x210000, R0 // MOVD $2162688, R0 // 2004a0d2
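In these testdata lines the first comment repeats the instruction with the immediate in decimal and the second gives the expected encoding bytes. Two quick spot checks of the decimal forms from the hunk above (nothing assembler-specific, just constant arithmetic):

package main

import "fmt"

func main() {
	// MOVD $0x11110000, R1 is listed as MOVD $286326784, R1
	fmt.Println(0x11110000 == 286326784) // true
	// MOVD $0x210000, R0 is listed as MOVD $2162688, R0
	fmt.Println(0x210000 == 2162688) // true
}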
@@ -632,8 +630,6 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
FMOVS F1, 0x44332211(R2) // FMOVS F1, 1144201745(R2)
FMOVD F1, 0x1007000(R2) // FMOVD F1, 16805888(R2)
FMOVD F1, 0x44332211(R2) // FMOVD F1, 1144201745(R2)
FMOVQ F1, 0x1003000(R2) // FMOVQ F1, 16789504(R2)
FMOVQ F1, 0x44332211(R2) // FMOVQ F1, 1144201745(R2)

MOVB 0x1000000(R1), R2 // MOVB 16777216(R1), R2
MOVB 0x44332211(R1), R2 // MOVB 1144201745(R1), R2
@@ -647,8 +643,6 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
FMOVS 0x44332211(R1), F2 // FMOVS 1144201745(R1), F2
FMOVD 0x1000000(R1), F2 // FMOVD 16777216(R1), F2
FMOVD 0x44332211(R1), F2 // FMOVD 1144201745(R1), F2
FMOVQ 0x1000000(R1), F2 // FMOVQ 16777216(R1), F2
FMOVQ 0x44332211(R1), F2 // FMOVQ 1144201745(R1), F2

// shifted or extended register offset.
MOVD (R2)(R6.SXTW), R4 // 44c866f8
@@ -1783,7 +1777,6 @@ next:
MSR R17, ZCR_EL1 // 111218d5
SYS $32768, R1 // 018008d5
SYS $32768 // 1f8008d5
MSR $1, DIT // 5f4103d5

// TLBI instruction
TLBI VMALLE1IS // 1f8308d5
@@ -1894,18 +1887,4 @@ next:
DC CIGDVAC, R25 // b97e0bd5
DC CVAP, R26 // 3a7c0bd5
DC CVADP, R27 // 3b7d0bd5

// Branch Target Identification
BTI C // 5f2403d5
BTI J // 9f2403d5
BTI JC // df2403d5

// Pointer Authentication Codes (PAC)
PACIASP // 3f2303d5
AUTIASP // bf2303d5
PACIBSP // 7f2303d5
AUTIBSP // ff2303d5
AUTIA1716 // 9f2103d5
AUTIB1716 // df2103d5

END

@@ -420,12 +420,4 @@ TEXT errors(SB),$0
AESE V1.B16, V2.B8 // ERROR "invalid arrangement"
SHA256SU1 V1.S4, V2.B16, V3.S4 // ERROR "invalid arrangement"
SHA1H V1.B16, V2.B16 // ERROR "invalid operands"
BTI // ERROR "missing operand"
BTI PLDL1KEEP // ERROR "illegal argument"
PACIASP C // ERROR "illegal combination"
AUTIASP R2 // ERROR "illegal combination"
PACIBSP R0 // ERROR "illegal combination"
AUTIBSP C // ERROR "illegal combination"
AUTIA1716 $45 // ERROR "illegal combination"
AUTIB1716 R0 // ERROR "illegal combination"
RET

src/cmd/asm/internal/asm/testdata/armerror.s (2 changes, vendored)
@@ -260,7 +260,5 @@ TEXT errors(SB),$0
STREXD R0, (R2), R1 // ERROR "cannot use same register as both source and destination"
STREXD R0, (R2), R2 // ERROR "cannot use same register as both source and destination"
STREXD R1, (R4), R7 // ERROR "must be even"
STREXB R0, (R2), R0 // ERROR "cannot use same register as both source and destination"
STREXB R0, (R2), R2 // ERROR "cannot use same register as both source and destination"

END

src/cmd/asm/internal/asm/testdata/armv6.s (2 changes, vendored)
@@ -52,10 +52,8 @@ TEXT foo(SB), DUPOK|NOSPLIT, $0
MOVDF F4, F5 // c45bb7ee

LDREX (R8), R9 // 9f9f98e1
LDREXB (R11), R12 // 9fcfdbe1
LDREXD (R11), R12 // 9fcfbbe1
STREX R3, (R4), R5 // STREX (R4), R3, R5 // 935f84e1
STREXB R8, (R9), g // STREXB (R9), R8, g // 98afc9e1
STREXD R8, (R9), g // STREXD (R9), R8, g // 98afa9e1

CMPF F8, F9 // c89ab4ee10faf1ee

src/cmd/asm/internal/asm/testdata/loong64enc1.s (1208 changes, vendored)
File diff suppressed because it is too large.
src/cmd/asm/internal/asm/testdata/loong64enc2.s (59 changes, vendored)
@@ -5,6 +5,8 @@
#include "../../../../../runtime/textflag.h"

TEXT asmtest(SB),DUPOK|NOSPLIT,$0
MOVB R4, R5 // 85e04000a5e04800
MOVWU R4, R5 // 85804100a5804500
MOVW $74565, R4 // 4402001484148d03
MOVW $4097, R4 // 2400001484048003
MOVV $74565, R4 // 4402001484148d03
@@ -12,7 +14,7 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0
AND $-1, R4, R5 // 1efcbf0285f81400
AND $-1, R4 // 1efcbf0284f81400
MOVW $-1, F4 // 1efcbf02c4a71401
MOVW $1, F4 // 1e048003c4a71401
MOVW $1, F4 // 1e048002c4a71401
TEQ $4, R4, R5 // 8508005c04002a00
TEQ $4, R4 // 0408005c04002a00
TNE $4, R4, R5 // 8508005804002a00
@@ -21,10 +23,6 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0
ADD $4096, R4, R5 // 3e00001485781000
ADD $65536, R4 // 1e02001484781000
ADD $4096, R4 // 3e00001484781000
ADDW $65536, R4, R5 // 1e02001485781000
ADDW $4096, R4, R5 // 3e00001485781000
ADDW $65536, R4 // 1e02001484781000
ADDW $4096, R4 // 3e00001484781000
ADDV $65536, R4, R5 // 1e02001485f81000
ADDV $4096, R4, R5 // 3e00001485f81000
ADDV $65536, R4 // 1e02001484f81000
@@ -41,6 +39,10 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0
SGTU $4096, R4, R5 // 3e00001485f81200
SGTU $65536, R4 // 1e02001484f81200
SGTU $4096, R4 // 3e00001484f81200
ADDU $65536, R4, R5 // 1e02001485781000
ADDU $4096, R4, R5 // 3e00001485781000
ADDU $65536, R4 // 1e02001484781000
ADDU $4096, R4 // 3e00001484781000
ADDVU $65536, R4, R5 // 1e02001485f81000
ADDVU $4096, R4, R5 // 3e00001485f81000
ADDVU $65536, R4 // 1e02001484f81000
@@ -57,6 +59,7 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0
XOR $4096, R4 // 3e00001484f81500
XOR $-1, R4, R5 // 1efcbf0285f81500
XOR $-1, R4 // 1efcbf0284f81500
MOVH R4, R5 // 85c04000a5c04800

// relocation instructions
MOVW R4, name(SB) // 1e00001ac4038029
@@ -77,49 +80,3 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0
MOVH name(SB), R4 // 1e00001ac4034028
MOVHU R4, name(SB) // 1e00001ac4034029
MOVHU name(SB), R4 // 1e00001ac403402a

// MOVV C_DCON12_20S, r
MOVV $0x273fffff80000000, R4 // MOVV $2828260563841187840, R4 // 0400001584cc0903
MOVV $0xf73fffff80000000, R4 // MOVV $-630503949979353088, R4 // 0400001584cc3d03

// MOVV C_DCON20S_20, r
MOVV $0xfff800000f000000, R4 // MOVV $-2251799562027008, R4 // 04001e1404000017

// MOVV C_DCON12_12S, r
MOVV $0x273ffffffffff800, R4 // MOVV $2828260565988669440, R4 // 0400e00284cc0903
MOVV $0xf73ffffffffff800, R4 // MOVV $-630503947831871488, R4 // 0400e00284cc3d03

// MOVV C_DCON20S_12S, r
MOVV $0xfff80000fffff800, R4 // MOVV $-2251795518720000, R4 // 0400a00204000017
MOVV $0xfff8000000000000, R4 // MOVV $-2251799813685248, R4 // 0400800204000017

// MOVV C_DCON12_12U, r
MOVV $0x2730000000000800, R4 // MOVV $2823756966361303040, R4 // 0400a00384cc0903
MOVV $0xf730000000000800, R4 // MOVV $-635007547459237888, R4 // 0400a00384cc3d03

// MOVV C_DCON20S_12U, r
MOVV $0xfff8000000000800, R4 // MOVV $-2251799813683200, R4 // 0400a00304000017

// ADDV/AND C_DCON12_0, [r1], r2
ADDV $0x3210000000000000, R4 // ADDV $3607383301523767296, R4 // 1e840c0384f81000
ADDV $0x3210000000000000, R5, R4 // ADDV $3607383301523767296, R5, R4 // 1e840c03a4f81000
ADDV $0xc210000000000000, R4 // ADDV $-4463067230724161536, R4 // 1e84300384f81000
ADDV $0xc210000000000000, R5, R4 // ADDV $-4463067230724161536, R5, R4 // 1e843003a4f81000
AND $0x3210000000000000, R4 // AND $3607383301523767296, R4 // 1e840c0384f81400
AND $0x3210000000000000, R5, R4 // AND $3607383301523767296, R5, R4 // 1e840c03a4f81400
AND $0xc210000000000000, R4 // AND $-4463067230724161536, R4 // 1e84300384f81400
AND $0xc210000000000000, R5, R4 // AND $-4463067230724161536, R5, R4 // 1e843003a4f81400

// ADDV/AND C_UCON, [r1], r2
ADDV $0x43210000, R4 // ADDV $1126236160, R4 // 1e42861484f81000
ADDV $0x43210000, R5, R4 // ADDV $1126236160, R5, R4 // 1e428614a4f81000
ADDV $0xffffffffc3210000, R4 // ADDV $-1021247488, R4 // 1e42861584f81000
ADDV $0xffffffffc3210000, R5, R4 // ADDV $-1021247488, R5, R4 // 1e428615a4f81000
AND $0x43210000, R4 // AND $1126236160, R4 // 1e42861484f81400
AND $0x43210000, R5, R4 // AND $1126236160, R5, R4 // 1e428614a4f81400
AND $0xffffffffc3210000, R4 // AND $-1021247488, R4 // 1e42861584f81400
AND $0xffffffffc3210000, R5, R4 // AND $-1021247488, R5, R4 // 1e428615a4f81400

// AND C_ADDCON, [r1], r2
AND $0xfffffffffffffc21, R4 // AND $-991, R4 // 1e84b00284f81400
AND $0xfffffffffffffc21, R5, R4 // AND $-991, R5, R4 // 1e84b002a4f81400
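The C_DCON... labels grouping the cases above appear to be the assembler's operand classes for 64-bit constants: which of the four fields a LoongArch64 constant load can set (ORI for bits 0-11, LU12I.W for bits 12-31, LU32I.D for bits 32-51, LU52I.D for bits 52-63) are actually needed determines how many instructions the load costs. A hedged sketch of that 12/20/20/12 split; the helper and field names are ours, not the assembler's:

package main

import "fmt"

// split decomposes a 64-bit immediate into the four fields that the
// LoongArch64 constant-load sequence can target: ori (bits 0-11),
// lu12i (bits 12-31), lu32i (bits 32-51) and lu52i (bits 52-63).
func split(v uint64) (ori, lu12i, lu32i, lu52i uint64) {
	ori = v & 0xfff
	lu12i = (v >> 12) & 0xfffff
	lu32i = (v >> 32) & 0xfffff
	lu52i = v >> 52
	return
}

func main() {
	ori, lu12i, lu32i, lu52i := split(0x3210000000000000)
	fmt.Printf("%#x %#x %#x %#x\n", ori, lu12i, lu32i, lu52i) // 0x0 0x0 0x0 0x321
}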
src/cmd/asm/internal/asm/testdata/loong64enc3.s (102 changes, vendored)
@@ -11,16 +11,12 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0
MOVV $4096(R4), R5 // 3e000014de03800385f81000
ADD $74565, R4 // 5e020014de178d0384781000
ADD $4097, R4 // 3e000014de07800384781000
ADDW $74565, R4 // 5e020014de178d0384781000
ADDW $4097, R4 // 3e000014de07800384781000
ADDV $74565, R4 // 5e020014de178d0384f81000
ADDV $4097, R4 // 3e000014de07800384f81000
AND $74565, R4 // 5e020014de178d0384f81400
AND $4097, R4 // 3e000014de07800384f81400
ADD $74565, R4, R5 // 5e020014de178d0385781000
ADD $4097, R4, R5 // 3e000014de07800385781000
ADDW $74565, R4, R5 // 5e020014de178d0385781000
ADDW $4097, R4, R5 // 3e000014de07800385781000
ADDV $74565, R4, R5 // 5e020014de178d0385f81000
ADDV $4097, R4, R5 // 3e000014de07800385f81000
AND $74565, R4, R5 // 5e020014de178d0385f81400
@@ -46,10 +42,8 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0
MOVB R4, 4096(R5) // 3e000014de971000c4030029
MOVBU R4, 65536(R5) // 1e020014de971000c4030029
MOVBU R4, 4096(R5) // 3e000014de971000c4030029
SC R4, 65536(R5) // 1e040010de971000c4030021
SCV R4, 65536(R5) // 1e040010de971000c4030023
LL 65536(R5), R4 // 1e040010de971000c4030020
LLV 65536(R5), R4 // 1e040010de971000c4030022
SC R4, 65536(R5) // 1e020014de971000c4030021
SC R4, 4096(R5) // 3e000014de971000c4030021
MOVW y+65540(FP), R4 // 1e020014de8f1000c4338028
MOVWU y+65540(FP), R4 // 1e020014de8f1000c433802a
MOVV y+65540(FP), R4 // 1e020014de8f1000c433c028
@@ -70,20 +64,28 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0
MOVV 4096(R5), R4 // 3e000014de971000c403c028
MOVB 4096(R5), R4 // 3e000014de971000c4030028
MOVBU 4096(R5), R4 // 3e000014de971000c403002a
MOVW y+65540(FP), F4 // 1e020014de8f1000c433002b
MOVF y+65540(FP), F4 // 1e020014de8f1000c433002b
MOVD y+65540(FP), F4 // 1e020014de8f1000c433802b
MOVW y+4097(FP), F4 // 3e000014de8f1000c427002b
MOVF y+4097(FP), F4 // 3e000014de8f1000c427002b
MOVD y+4097(FP), F4 // 3e000014de8f1000c427802b
MOVW 65536(R5), F4 // 1e020014de971000c403002b
MOVF 65536(R5), F4 // 1e020014de971000c403002b
MOVD 65536(R5), F4 // 1e020014de971000c403802b
MOVW 4096(R5), F4 // 3e000014de971000c403002b
MOVF 4096(R5), F4 // 3e000014de971000c403002b
MOVD 4096(R5), F4 // 3e000014de971000c403802b
MOVW F4, result+65540(FP) // 1e020014de8f1000c433402b
MOVF F4, result+65540(FP) // 1e020014de8f1000c433402b
MOVD F4, result+65540(FP) // 1e020014de8f1000c433c02b
MOVW F4, result+4097(FP) // 3e000014de8f1000c427402b
MOVF F4, result+4097(FP) // 3e000014de8f1000c427402b
MOVD F4, result+4097(FP) // 3e000014de8f1000c427c02b
MOVW F4, 65536(R5) // 1e020014de971000c403402b
MOVF F4, 65536(R5) // 1e020014de971000c403402b
MOVD F4, 65536(R5) // 1e020014de971000c403c02b
MOVW F4, 4096(R5) // 3e000014de971000c403402b
MOVF F4, 4096(R5) // 3e000014de971000c403402b
MOVD F4, 4096(R5) // 3e000014de971000c403c02b

@@ -111,6 +113,10 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0
SGTU $74565, R4, R5 // 5e020014de178d0385f81200
SGTU $4097, R4 // 3e000014de07800384f81200
SGTU $4097, R4, R5 // 3e000014de07800385f81200
ADDU $74565, R4 // 5e020014de178d0384781000
ADDU $74565, R4, R5 // 5e020014de178d0385781000
ADDU $4097, R4 // 3e000014de07800384781000
ADDU $4097, R4, R5 // 3e000014de07800385781000
ADDVU $4097, R4 // 3e000014de07800384f81000
ADDVU $4097, R4, R5 // 3e000014de07800385f81000
ADDVU $74565, R4 // 5e020014de178d0384f81000
@@ -123,83 +129,3 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0
XOR $74565, R4, R5 // 5e020014de178d0385f81500
XOR $4097, R4 // 3e000014de07800384f81500
XOR $4097, R4, R5 // 3e000014de07800385f81500

MOVWP R5, -32768(R4) // 1efcff13de931000c5038025
MOVWP R5, 32768(R4) // 1e000010de931000c5038025
MOVWP R5, 65536(R4) // 1e040010de931000c5030025
MOVWP R5, 1048576(R4) // 1e400010de931000c5030025
MOVVP R5, -32768(R4) // 1efcff13de931000c5038027
MOVVP R5, 65536(R4) // 1e040010de931000c5030027
MOVVP R5, 1048576(R4) // 1e400010de931000c5030027
MOVWP -32768(R5), R4 // 1efcff13de971000c4038024
MOVWP 2229248(R5), R4 // 1e880010de971000c4030424
MOVWP -2145518592(R5), R4 // 1e740012de971000c403fc24
MOVVP -32768(R5), R4 // 1efcff13de971000c4038026
MOVVP 2229248(R5), R4 // 1e880010de971000c4030426
MOVVP -2145518592(R5), R4 // 1e740012de971000c403fc26

// MOVV C_DCON32_12S, r
MOVV $0x27312345fffff800, R4 // MOVV $2824077224892692480, R4 // 0400a002a468241684cc0903
MOVV $0xf7312345fffff800, R4 // MOVV $-634687288927848448, R4 // 0400a002a468241684cc3d03

// MOVV C_DCON32_0, r
MOVV $0x2731234500000000, R4 // MOVV $2824077220597727232, R4 // 04008002a468241684cc0903
MOVV $0xf731234500000000, R4 // MOVV $-634687293222813696, R4 // 04008002a468241684cc3d03

// MOVV C_DCON32_20, r
MOVV $0x2731234512345000, R4 // MOVV $2824077220903145472, R4 // a4682414a468241684cc0903
MOVV $0xf731234512345000, R4 // MOVV $-634687292917395456, R4 // a4682414a468241684cc3d03

// MOVV C_DCON12_32S, r
MOVV $0x273fffff80000800, R4 // MOVV $2828260563841189888, R4 // 040000158400a00384cc0903
MOVV $0xf73fffff80000800, R4 // MOVV $-630503949979351040, R4 // 040000158400a00384cc3d03

// MOVV C_DCON20S_32, r
MOVV $0xfff8000080000800, R4 // MOVV $-2251797666199552, R4 // 040000158400a00304000017

// MOVV C_DCON32_12U, r
MOVV $0x2731234500000800, R4 // MOVV $2824077220597729280, R4 // 0400a003a468241684cc0903
MOVV $0xf731234500000800, R4 // MOVV $-634687293222811648, R4 // 0400a003a468241684cc3d03

// ADDV/AND C_DCON12_20S, [r1], r2
ADDV $0x273fffff80000000, R4 // ADDV $2828260563841187840, R4 // 1e000015decf090384f81000
ADDV $0x273fffff80000000, R4, R5 // ADDV $2828260563841187840, R4, R5 // 1e000015decf090385f81000
AND $0x273fffff80000000, R4 // AND $2828260563841187840, R4 // 1e000015decf090384f81400
AND $0x273fffff80000000, R4, R5 // AND $2828260563841187840, R4, R5 // 1e000015decf090385f81400

// ADDV/AND C_DCON20S_20, [r1], r2
ADDV $0xfff800000f000000, R4 // ADDV $-2251799562027008, R4 // 1e001e141e00001784f81000
ADDV $0xfff800000f000000, R4, R5 // ADDV $-2251799562027008, R4, R5 // 1e001e141e00001785f81000
AND $0xfff800000f000000, R4 // AND $-2251799562027008, R4 // 1e001e141e00001784f81400
AND $0xfff800000f000000, R4, R5 // AND $-2251799562027008, R4, R5 // 1e001e141e00001785f81400

// ADDV/AND C_DCON12_12S, [r1], r2
ADDV $0x273ffffffffff800, R4 // ADDV $2828260565988669440, R4 // 1e00e002decf090384f81000
ADDV $0x273ffffffffff800, R4, R5 // ADDV $2828260565988669440, R4, R5 // 1e00e002decf090385f81000
AND $0x273ffffffffff800, R4 // AND $2828260565988669440, R4 // 1e00e002decf090384f81400
AND $0x273ffffffffff800, R4, R5 // AND $2828260565988669440, R4, R5 // 1e00e002decf090385f81400

// ADDV/AND C_DCON20S_12S, [r1], r2
ADDV $0xfff80000fffff800, R4 // ADDV $-2251795518720000, R4 // 1e00a0021e00001784f81000
ADDV $0xfff80000fffff800, R4, R5 // ADDV $-2251795518720000, R4, R5 // 1e00a0021e00001785f81000
AND $0xfff80000fffff800, R4 // AND $-2251795518720000, R4 // 1e00a0021e00001784f81400
AND $0xfff80000fffff800, R4, R5 // AND $-2251795518720000, R4, R5 // 1e00a0021e00001785f81400

// ADDV/AND C_DCON20S_0, [r1], r2
ADDV $0xfff8000000000000, R4 // ADDV $-2251799813685248, R4 // 1e0080021e00001784f81000
ADDV $0xfff8000000000000, R4, R5 // ADDV $-2251799813685248, R4, R5 // 1e0080021e00001785f81000
AND $0xfff8000000000000, R4 // AND $-2251799813685248, R4 // 1e0080021e00001784f81400
AND $0xfff8000000000000, R4, R5 // AND $-2251799813685248, R4, R5 // 1e0080021e00001785f81400

// ADDV/AND C_DCON12_12U, [r1], r2
ADDV $0x2730000000000800, R4 // ADDV $2823756966361303040, R4 // 1e00a003decf090384f81000
ADDV $0x2730000000000800, R4, R5 // ADDV $2823756966361303040, R4, R5 // 1e00a003decf090385f81000
AND $0x2730000000000800, R4 // AND $2823756966361303040, R4 // 1e00a003decf090384f81400
AND $0x2730000000000800, R4, R5 // AND $2823756966361303040, R4, R5 // 1e00a003decf090385f81400

// ADDV/AND C_DCON20S_12U, [r1], r2
ADDV $0xfff8000000000800, R4 // ADDV $-2251799813683200, R4 // 1e00a0031e00001784f81000
ADDV $0xfff8000000000800, R4, R5 // ADDV $-2251799813683200, R4, R5 // 1e00a0031e00001785f81000
AND $0xfff8000000000800, R4 // AND $-2251799813683200, R4 // 1e00a0031e00001784f81400
AND $0xfff8000000000800, R4, R5 // AND $-2251799813683200, R4, R5 // 1e00a0031e00001785f81400

src/cmd/asm/internal/asm/testdata/loong64enc4.s (42 changes, vendored)
@@ -1,42 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "../../../../../runtime/textflag.h"

TEXT asmtest(SB),DUPOK|NOSPLIT,$0
// ADDV/AND C_DCON32_12S, [r1], r2
ADDV $0x27312345fffff800, R4 // ADDV $2824077224892692480, R4 // 1e00a002be682416decf090384f81000
ADDV $0x27312345fffff800, R4, R5 // ADDV $2824077224892692480, R4, R5 // 1e00a002be682416decf090385f81000
AND $0x27312345fffff800, R4 // AND $2824077224892692480, R4 // 1e00a002be682416decf090384f81400
AND $0x27312345fffff800, R4, R5 // AND $2824077224892692480, R4, R5 // 1e00a002be682416decf090385f81400

// ADDV/AND C_DCON32_0, [r1], r2
ADDV $0x2731234500000000, R4 // ADDV $2824077220597727232, R4 // 1e008002be682416decf090384f81000
ADDV $0x2731234500000000, R4, R5 // ADDV $2824077220597727232, R4, R5 // 1e008002be682416decf090385f81000
AND $0x2731234500000000, R4 // AND $2824077220597727232, R4 // 1e008002be682416decf090384f81400
AND $0x2731234500000000, R4, R5 // AND $2824077220597727232, R4, R5 // 1e008002be682416decf090385f81400

// ADDV/AND C_DCON32_20, [r1], r2
ADDV $0x2731234512345000, R4 // ADDV $2824077220903145472, R4 // be682414be682416decf090384f81000
ADDV $0x2731234512345000, R4, R5 // ADDV $2824077220903145472, R4, R5 // be682414be682416decf090385f81000
AND $0x2731234512345000, R4 // AND $2824077220903145472, R4 // be682414be682416decf090384f81400
AND $0x2731234512345000, R4, R5 // AND $2824077220903145472, R4, R5 // be682414be682416decf090385f81400

// ADDV/AND C_DCON12_32S, [r1], r2
ADDV $0x273fffff80000800, R4 // ADDV $2828260563841189888, R4 // 1e000015de03a003decf090384f81000
ADDV $0x273fffff80000800, R4, R5 // ADDV $2828260563841189888, R4, R5 // 1e000015de03a003decf090385f81000
AND $0x273fffff80000800, R4 // AND $2828260563841189888, R4 // 1e000015de03a003decf090384f81400
AND $0x273fffff80000800, R4, R5 // AND $2828260563841189888, R4, R5 // 1e000015de03a003decf090385f81400

// ADDV/AND C_DCON20S_32, [r1], r2
ADDV $0xfff8000080000800, R4 // ADDV $-2251797666199552, R4 // 1e000015de03a0031e00001784f81000
ADDV $0xfff8000080000800, R4, R5 // ADDV $-2251797666199552, R4, R5 // 1e000015de03a0031e00001785f81000
AND $0xfff8000080000800, R4 // AND $-2251797666199552, R4 // 1e000015de03a0031e00001784f81400
AND $0xfff8000080000800, R4, R5 // AND $-2251797666199552, R4, R5 // 1e000015de03a0031e00001785f81400

// ADDV/AND C_DCON32_12U, [r1], r2
ADDV $0x2731234500000800, R4 // ADDV $2824077220597729280, R4 // 1e00a003be682416decf090384f81000
ADDV $0x2731234500000800, R4, R5 // ADDV $2824077220597729280, R4, R5 // 1e00a003be682416decf090385f81000
AND $0x2731234500000800, R4 // AND $2824077220597729280, R4 // 1e00a003be682416decf090384f81400
AND $0x2731234500000800, R4, R5 // AND $2824077220597729280, R4, R5 // 1e00a003be682416decf090385f81400
src/cmd/asm/internal/asm/testdata/loong64enc5.s (22 changes, vendored)
@@ -1,22 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "../../../../../runtime/textflag.h"

TEXT asmtest(SB),DUPOK|NOSPLIT,$0
// ADDV/AND C_DCON, [r1], r2
ADDV $0xfedcba9876543210, R4 // ADDV $-81985529216486896, R4 // 7ea8ec14de4388031e539717deb73f0384f81000
ADDV $0xfedcba9876543210, R5, R4 // ADDV $-81985529216486896, R5, R4 // 7ea8ec14de4388031e539717deb73f03a4f81000
ADDV $0x4edcba9876543210, R4 // ADDV $5682621993817747984, R4 // 7ea8ec14de4388031e539717deb7130384f81000
ADDV $0x4edcba9876543210, R5, R4 // ADDV $5682621993817747984, R5, R4 // 7ea8ec14de4388031e539717deb71303a4f81000
AND $0x4edcba9876543210, R4 // AND $5682621993817747984, R4 // 7ea8ec14de4388031e539717deb7130384f81400
AND $0x4edcba9876543210, R5, R4 // AND $5682621993817747984, R5, R4 // 7ea8ec14de4388031e539717deb71303a4f81400
AND $0xfedcba9876543210, R4 // AND $-81985529216486896, R4 // 7ea8ec14de4388031e539717deb73f0384f81400
AND $0xfedcba9876543210, R5, R4 // AND $-81985529216486896, R5, R4 // 7ea8ec14de4388031e539717deb73f03a4f81400

PRELDX 0(R7), $0x80001021, $0 // PRELDX (R7), $2147487777, $0 // 1e020014de0380031e000016de130003e0782c38
PRELDX -1(R7), $0x1021, $2 // PRELDX -1(R7), $4129, $2 // fe030014deffbf031e000016de030003e2782c38
PRELDX 8(R7), $0x80100800, $31 // PRELDX 8(R7), $2148534272, $31 // 1ee00714de238003fe1f0016de130003ff782c38
PRELDX 16(R7), $0x202040, $1 // PRELDX 16(R7), $2105408, $1 // 1e200014de4380033e000216de030003e1782c38
src/cmd/asm/internal/asm/testdata/loong64enc6.s (12 changes, vendored)
@@ -1,12 +0,0 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "../../../../../runtime/textflag.h"

TEXT asmtest(SB),DUPOK|NOSPLIT,$0
// MOVWP LOREG_64(Rx), Ry
MOVWP 81985529216486896(R4), R5 // 9e571315dec3b703feac6816de4b000384f8100085000025
MOVWP -81985529216486896(R4), R5 // 7ea8ec14de4388031e539717deb73f0384f8100085000025
MOVWP R4, 81985529216486896(R5) // 9e571315dec3b703feac6816de4b0003a5f81000a4000025
MOVWP R4, -81985529216486896(R5) // 7ea8ec14de4388031e539717deb73f03a5f81000a4000025
Some files were not shown because too many files have changed in this diff.