Mirror of https://github.com/golang/go.git, synced 2026-01-30 15:42:04 +03:00
Compare commits
39 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| | f58c78a577 | | |
| | b212ba6829 | | |
| | f9cb33c7c9 | | |
| | 4a842985bf | | |
| | f6f024f120 | | |
| | 18b970277e | | |
| | 91aa2f190a | | |
| | 8bdb0b235a | | |
| | 3a03ddf735 | | |
| | 644555d34e | | |
| | 11b64b428b | | |
| | 2ac3bdf378 | | |
| | 4925e0766f | | |
| | b18ba59aaf | | |
| | 04242ac88f | | |
| | c5c1d069da | | |
| | abc4f092ac | | |
| | e79c297fa8 | | |
| | 21a4e67ad5 | | |
| | 328cf2e8b2 | | |
| | fcce86c4cf | | |
| | 36c171763e | | |
| | 678b07d5e5 | | |
| | 14c9b1e00b | | |
| | c35f8a37d9 | | |
| | 47a57bc4f0 | | |
| | 2d97a87287 | | |
| | 3969694203 | | |
| | 1dd24caf08 | | |
| | ec5170397c | | |
| | fdd6dfd507 | | |
| | f7b9470992 | | |
| | 4397d66bdd | | |
| | eb5a7b5050 | | |
| | 72ab3ff68b | | |
| | c3ccb77d1e | | |
| | c3b47cb598 | | |
| | ddfd72f7d1 | | |
| | d6f4d9a2be | | |
1
AUTHORS
@@ -1479,7 +1479,6 @@ Zheng Dayu <davidzheng23@gmail.com>
Zhongtao Chen <chenzhongtao@126.com>
Zhou Peng <p@ctriple.cn>
Ziad Hatahet <hatahet@gmail.com>
Zizhao Zhang <btw515wolf2@gmail.com>
Zorion Arrizabalaga <zorionk@gmail.com>
Максим Федосеев <max.faceless.frei@gmail.com>
Роман Хавроненко <hagen1778@gmail.com>
@@ -1109,6 +1109,7 @@ Ian Lance Taylor <iant@golang.org>
Ian Leue <ian@appboy.com>
Ian Mckay <iann0036@gmail.com>
Ian Tay <iantay@google.com>
Ian Woolf <btw515wolf2@gmail.com>
Ian Zapolsky <ianzapolsky@gmail.com>
Ibrahim AshShohail <ibra.sho@gmail.com>
Icarus Sparry <golang@icarus.freeuk.com>
@@ -2748,7 +2749,6 @@ Zhongwei Yao <zhongwei.yao@arm.com>
Zhou Peng <p@ctriple.cn>
Ziad Hatahet <hatahet@gmail.com>
Ziheng Liu <lzhfromustc@gmail.com>
Zizhao Zhang <btw515wolf2@gmail.com>
Zorion Arrizabalaga <zorionk@gmail.com>
Zvonimir Pavlinovic <zpavlinovic@google.com>
Zyad A. Ali <zyad.ali.me@gmail.com>
@@ -492,7 +492,6 @@ pkg syscall (windows-amd64), type CertRevocationInfo struct, OidSpecificInfo uin
pkg syscall (windows-amd64), type CertSimpleChain struct, TrustListInfo uintptr
pkg syscall (windows-amd64), type RawSockaddrAny struct, Pad [96]int8
pkg testing, func MainStart(func(string, string) (bool, error), []InternalTest, []InternalBenchmark, []InternalExample) *M
pkg testing, func MainStart(testDeps, []InternalTest, []InternalBenchmark, []InternalExample) *M
pkg testing, func RegisterCover(Cover)
pkg text/scanner, const GoTokens = 1012
pkg text/template/parse, type DotNode bool
144
api/next.txt
@@ -1,144 +0,0 @@
|
||||
pkg syscall (darwin-amd64), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
|
||||
pkg syscall (darwin-amd64), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
|
||||
pkg syscall (darwin-amd64), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
|
||||
pkg syscall (darwin-amd64), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
|
||||
pkg syscall (darwin-amd64-cgo), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
|
||||
pkg syscall (darwin-amd64-cgo), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
|
||||
pkg syscall (darwin-amd64-cgo), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
|
||||
pkg syscall (darwin-amd64-cgo), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
|
||||
pkg syscall (freebsd-386), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
|
||||
pkg syscall (freebsd-386), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
|
||||
pkg syscall (freebsd-386), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
|
||||
pkg syscall (freebsd-386), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
|
||||
pkg syscall (freebsd-386-cgo), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
|
||||
pkg syscall (freebsd-386-cgo), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
|
||||
pkg syscall (freebsd-386-cgo), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
|
||||
pkg syscall (freebsd-386-cgo), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
|
||||
pkg syscall (freebsd-amd64), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
|
||||
pkg syscall (freebsd-amd64), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
|
||||
pkg syscall (freebsd-amd64), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
|
||||
pkg syscall (freebsd-amd64), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
|
||||
pkg syscall (freebsd-amd64-cgo), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
|
||||
pkg syscall (freebsd-amd64-cgo), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
|
||||
pkg syscall (freebsd-amd64-cgo), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
|
||||
pkg syscall (freebsd-amd64-cgo), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
|
||||
pkg syscall (freebsd-arm), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
|
||||
pkg syscall (freebsd-arm), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
|
||||
pkg syscall (freebsd-arm), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
|
||||
pkg syscall (freebsd-arm), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
|
||||
pkg syscall (freebsd-arm-cgo), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
|
||||
pkg syscall (freebsd-arm-cgo), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
|
||||
pkg syscall (freebsd-arm-cgo), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
|
||||
pkg syscall (freebsd-arm-cgo), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
|
||||
pkg syscall (linux-386), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
|
||||
pkg syscall (linux-386), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
|
||||
pkg syscall (linux-386), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
|
||||
pkg syscall (linux-386), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
|
||||
pkg syscall (linux-386-cgo), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
|
||||
pkg syscall (linux-386-cgo), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
|
||||
pkg syscall (linux-386-cgo), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
|
||||
pkg syscall (linux-386-cgo), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
|
||||
pkg syscall (linux-amd64), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
|
||||
pkg syscall (linux-amd64), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
|
||||
pkg syscall (linux-amd64), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
|
||||
pkg syscall (linux-amd64), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
|
||||
pkg syscall (linux-amd64-cgo), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
|
||||
pkg syscall (linux-amd64-cgo), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
|
||||
pkg syscall (linux-amd64-cgo), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
|
||||
pkg syscall (linux-amd64-cgo), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
|
||||
pkg syscall (linux-arm), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
|
||||
pkg syscall (linux-arm), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
|
||||
pkg syscall (linux-arm), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
|
||||
pkg syscall (linux-arm), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
|
||||
pkg syscall (linux-arm-cgo), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
|
||||
pkg syscall (linux-arm-cgo), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
|
||||
pkg syscall (linux-arm-cgo), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
|
||||
pkg syscall (linux-arm-cgo), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
|
||||
pkg syscall (netbsd-386), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
|
||||
pkg syscall (netbsd-386), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
|
||||
pkg syscall (netbsd-386), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
|
||||
pkg syscall (netbsd-386), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
|
||||
pkg syscall (netbsd-386-cgo), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
|
||||
pkg syscall (netbsd-386-cgo), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
|
||||
pkg syscall (netbsd-386-cgo), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
|
||||
pkg syscall (netbsd-386-cgo), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
|
||||
pkg syscall (netbsd-amd64), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
|
||||
pkg syscall (netbsd-amd64), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
|
||||
pkg syscall (netbsd-amd64), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
|
||||
pkg syscall (netbsd-amd64), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
|
||||
pkg syscall (netbsd-amd64-cgo), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
|
||||
pkg syscall (netbsd-amd64-cgo), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
|
||||
pkg syscall (netbsd-amd64-cgo), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
|
||||
pkg syscall (netbsd-amd64-cgo), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
|
||||
pkg syscall (netbsd-arm), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
|
||||
pkg syscall (netbsd-arm), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
|
||||
pkg syscall (netbsd-arm), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
|
||||
pkg syscall (netbsd-arm), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
|
||||
pkg syscall (netbsd-arm-cgo), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
|
||||
pkg syscall (netbsd-arm-cgo), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
|
||||
pkg syscall (netbsd-arm-cgo), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
|
||||
pkg syscall (netbsd-arm-cgo), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
|
||||
pkg syscall (netbsd-arm64), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
|
||||
pkg syscall (netbsd-arm64), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
|
||||
pkg syscall (netbsd-arm64), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
|
||||
pkg syscall (netbsd-arm64), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
|
||||
pkg syscall (netbsd-arm64-cgo), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
|
||||
pkg syscall (netbsd-arm64-cgo), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
|
||||
pkg syscall (netbsd-arm64-cgo), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
|
||||
pkg syscall (netbsd-arm64-cgo), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
|
||||
pkg syscall (openbsd-386), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
|
||||
pkg syscall (openbsd-386), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
|
||||
pkg syscall (openbsd-386), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
|
||||
pkg syscall (openbsd-386), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
|
||||
pkg syscall (openbsd-386-cgo), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
|
||||
pkg syscall (openbsd-386-cgo), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
|
||||
pkg syscall (openbsd-386-cgo), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
|
||||
pkg syscall (openbsd-386-cgo), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
|
||||
pkg syscall (openbsd-amd64), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
|
||||
pkg syscall (openbsd-amd64), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
|
||||
pkg syscall (openbsd-amd64), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
|
||||
pkg syscall (openbsd-amd64), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
|
||||
pkg syscall (openbsd-amd64-cgo), func RecvfromInet4(int, []uint8, int, *SockaddrInet4) (int, error)
|
||||
pkg syscall (openbsd-amd64-cgo), func RecvfromInet6(int, []uint8, int, *SockaddrInet6) (int, error)
|
||||
pkg syscall (openbsd-amd64-cgo), func SendtoInet4(int, []uint8, int, SockaddrInet4) error
|
||||
pkg syscall (openbsd-amd64-cgo), func SendtoInet6(int, []uint8, int, SockaddrInet6) error
|
||||
pkg syscall (windows-386), func WSASendtoInet4(Handle, *WSABuf, uint32, *uint32, uint32, SockaddrInet4, *Overlapped, *uint8) error
|
||||
pkg syscall (windows-386), func WSASendtoInet6(Handle, *WSABuf, uint32, *uint32, uint32, SockaddrInet6, *Overlapped, *uint8) error
|
||||
pkg syscall (windows-amd64), func WSASendtoInet4(Handle, *WSABuf, uint32, *uint32, uint32, SockaddrInet4, *Overlapped, *uint8) error
|
||||
pkg syscall (windows-amd64), func WSASendtoInet6(Handle, *WSABuf, uint32, *uint32, uint32, SockaddrInet6, *Overlapped, *uint8) error
|
||||
pkg testing, func Fuzz(func(*F)) FuzzResult
|
||||
pkg testing, func MainStart(testDeps, []InternalTest, []InternalBenchmark, []InternalFuzzTarget, []InternalExample) *M
|
||||
pkg testing, func RunFuzzTargets(func(string, string) (bool, error), []InternalFuzzTarget) bool
|
||||
pkg testing, func RunFuzzing(func(string, string) (bool, error), []InternalFuzzTarget) bool
|
||||
pkg testing, method (*B) Setenv(string, string)
|
||||
pkg testing, method (*F) Add(...interface{})
|
||||
pkg testing, method (*F) Cleanup(func())
|
||||
pkg testing, method (*F) Error(...interface{})
|
||||
pkg testing, method (*F) Errorf(string, ...interface{})
|
||||
pkg testing, method (*F) Fail()
|
||||
pkg testing, method (*F) FailNow()
|
||||
pkg testing, method (*F) Failed() bool
|
||||
pkg testing, method (*F) Fatal(...interface{})
|
||||
pkg testing, method (*F) Fatalf(string, ...interface{})
|
||||
pkg testing, method (*F) Fuzz(interface{})
|
||||
pkg testing, method (*F) Helper()
|
||||
pkg testing, method (*F) Log(...interface{})
|
||||
pkg testing, method (*F) Logf(string, ...interface{})
|
||||
pkg testing, method (*F) Name() string
|
||||
pkg testing, method (*F) Setenv(string, string)
|
||||
pkg testing, method (*F) Skip(...interface{})
|
||||
pkg testing, method (*F) SkipNow()
|
||||
pkg testing, method (*F) Skipf(string, ...interface{})
|
||||
pkg testing, method (*F) Skipped() bool
|
||||
pkg testing, method (*F) TempDir() string
|
||||
pkg testing, method (*T) Setenv(string, string)
|
||||
pkg testing, method (FuzzResult) String() string
|
||||
pkg testing, type F struct
|
||||
pkg testing, type FuzzResult struct
|
||||
pkg testing, type FuzzResult struct, Crasher entry
|
||||
pkg testing, type FuzzResult struct, Error error
|
||||
pkg testing, type FuzzResult struct, N int
|
||||
pkg testing, type FuzzResult struct, T time.Duration
|
||||
pkg testing, type InternalFuzzTarget struct
|
||||
pkg testing, type InternalFuzzTarget struct, Fn func(*F)
|
||||
pkg testing, type InternalFuzzTarget struct, Name string
|
||||
|
||||
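The api/next.txt entries removed above are the provisional dev.fuzz API for native fuzzing in the testing package (testing.F, F.Add, F.Fuzz, InternalFuzzTarget, FuzzResult). As a hedged sketch of how that API is meant to be used in a fuzz test — the Reverse helper and seed value below are illustrative, not part of this commit range:

```go
package reverse

import (
	"testing"
	"unicode/utf8"
)

// Reverse is an illustrative helper under test; it reverses a string rune by rune.
func Reverse(s string) string {
	r := []rune(s)
	for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 {
		r[i], r[j] = r[j], r[i]
	}
	return string(r)
}

// FuzzReverse exercises the testing.F API listed above: F.Add seeds the
// corpus and F.Fuzz registers the fuzz target.
func FuzzReverse(f *testing.F) {
	f.Add("hello, world")
	f.Fuzz(func(t *testing.T, s string) {
		rev := Reverse(s)
		if utf8.ValidString(s) && !utf8.ValidString(rev) {
			t.Errorf("Reverse produced invalid UTF-8 from %q", s)
		}
		if utf8.ValidString(s) && Reverse(rev) != s {
			t.Errorf("Reverse(Reverse(%q)) = %q, want %q", s, Reverse(rev), s)
		}
	})
}
```

With the Go 1.18 tooling this would be driven by something like `go test -fuzz=FuzzReverse`; the exact flags belong to the fuzzing design, not to the commits listed here.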
@@ -1,2 +1,2 @@
branch: dev.fuzz
parent-branch: master
branch: release-branch.go1.17
parent-branch: master
@@ -125,8 +125,8 @@ it is a distinct program, so there are some differences.
|
||||
One is in constant evaluation.
|
||||
Constant expressions in the assembler are parsed using Go's operator
|
||||
precedence, not the C-like precedence of the original.
|
||||
Thus <code>3&1<<2</code> is 4, not 0—it parses as <code>(3&1)<<2</code>
|
||||
not <code>3&(1<<2)</code>.
|
||||
Thus <code>3&1<<2</code> is 4, not 0—it parses as <code>(3&1)<<2</code>
|
||||
not <code>3&(1<<2)</code>.
|
||||
Also, constants are always evaluated as 64-bit unsigned integers.
|
||||
Thus <code>-2</code> is not the integer value minus two,
|
||||
but the unsigned 64-bit integer with the same bit pattern.
|
||||
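The precedence rule described here matches ordinary Go source as well: `&` and `<<` share a precedence level and associate left to right. A quick check in plain Go (not assembler) illustrates the same grouping:

```go
package main

import "fmt"

func main() {
	// In both Go and the Go assembler, & and << sit at the same precedence
	// level and bind left to right, so 3&1<<2 groups as (3&1)<<2.
	fmt.Println(3 & 1 << 2)   // 4
	fmt.Println((3 & 1) << 2) // 4
	fmt.Println(3 & (1 << 2)) // 0 — the C-like grouping mentioned above
}
```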
@@ -914,6 +914,8 @@ This assembler is used by GOARCH values ppc64 and ppc64le.
Reference: <a href="/pkg/cmd/internal/obj/ppc64">Go PPC64 Assembly Instructions Reference Manual</a>
</p>

</ul>

<h3 id="s390x">IBM z/Architecture, a.k.a. s390x</h3>

<p>
1240
doc/go1.17.html
Normal file
File diff suppressed because it is too large
114
doc/go1.18.html
@@ -1,114 +0,0 @@
|
||||
<!--{
|
||||
"Title": "Go 1.18 Release Notes",
|
||||
"Path": "/doc/go1.18"
|
||||
}-->
|
||||
|
||||
<!--
|
||||
NOTE: In this document and others in this directory, the convention is to
|
||||
set fixed-width phrases with non-fixed-width spaces, as in
|
||||
<code>hello</code> <code>world</code>.
|
||||
Do not send CLs removing the interior tags from such phrases.
|
||||
-->
|
||||
|
||||
<style>
|
||||
main ul li { margin: 0.5em 0; }
|
||||
</style>
|
||||
|
||||
<h2 id="introduction">DRAFT RELEASE NOTES — Introduction to Go 1.18</h2>
|
||||
|
||||
<p>
|
||||
<strong>
|
||||
Go 1.18 is not yet released. These are work-in-progress
|
||||
release notes. Go 1.18 is expected to be released in February 2022.
|
||||
</strong>
|
||||
</p>
|
||||
|
||||
<h2 id="language">Changes to the language</h2>
|
||||
|
||||
<p>
|
||||
TODO: complete this section
|
||||
</p>
|
||||
|
||||
<h2 id="ports">Ports</h2>
|
||||
|
||||
<p>
|
||||
TODO: complete this section, or delete if not needed
|
||||
</p>
|
||||
|
||||
<h2 id="tools">Tools</h2>
|
||||
|
||||
<p>
|
||||
TODO: complete this section, or delete if not needed
|
||||
</p>
|
||||
|
||||
<h3 id="go-command">Go command</h3>
|
||||
|
||||
<p>
|
||||
TODO: complete this section, or delete if not needed
|
||||
</p>
|
||||
|
||||
<h2 id="runtime">Runtime</h2>
|
||||
|
||||
<p>
|
||||
TODO: complete this section, or delete if not needed
|
||||
</p>
|
||||
|
||||
<h2 id="compiler">Compiler</h2>
|
||||
|
||||
<p>
|
||||
TODO: complete this section, or delete if not needed
|
||||
</p>
|
||||
|
||||
<h2 id="linker">Linker</h2>
|
||||
|
||||
<p>
|
||||
TODO: complete this section, or delete if not needed
|
||||
</p>
|
||||
|
||||
<h2 id="library">Core library</h2>
|
||||
|
||||
<p>
|
||||
TODO: complete this section
|
||||
</p>
|
||||
|
||||
<h3 id="minor_library_changes">Minor changes to the library</h3>
|
||||
|
||||
<p>
|
||||
As always, there are various minor changes and updates to the library,
|
||||
made with the Go 1 <a href="/doc/go1compat">promise of compatibility</a>
|
||||
in mind.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
TODO: complete this section
|
||||
</p>
|
||||
|
||||
<dl id="image/draw"><dt><a href="/pkg/image/draw/">image/draw</a></dt>
<dd>
<p><!-- CL 340049 -->
The <code>Draw</code> and <code>DrawMask</code> fallback implementations
(used when the arguments are not the most common image types) are now
faster when those arguments implement the optional
<a href="/pkg/image/draw/#RGBA64Image"><code>draw.RGBA64Image</code></a>
and <a href="/pkg/image/#RGBA64Image"><code>image.RGBA64Image</code></a>
interfaces that were added in Go 1.17.
</p>
</dd>
</dl><!-- image/draw -->
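A hedged sketch of the kind of call that benefits: neither *image.NRGBA64 nor *image.Gray16 is one of draw.Draw's special-cased destinations, so the copy below goes through the generic fallback, and both types implement the RGBA64 interfaces added in Go 1.17. The sizes and pixel values are arbitrary.

```go
package main

import (
	"image"
	"image/color"
	"image/draw"
)

func main() {
	// Both argument types implement image.RGBA64Image (and the destination
	// implements draw.RGBA64Image), so the Go 1.18 fallback path can use the
	// RGBA64 accessors and, per the release note, run noticeably faster.
	dst := image.NewNRGBA64(image.Rect(0, 0, 256, 256))
	src := image.NewGray16(image.Rect(0, 0, 256, 256))
	src.SetGray16(10, 10, color.Gray16{Y: 0xffff})

	draw.Draw(dst, dst.Bounds(), src, image.Point{}, draw.Src)
	_ = dst.NRGBA64At(10, 10)
}
```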
<dl id="syscall"><dt><a href="/pkg/syscall/">syscall</a></dt>
<dd>
<p><!-- CL 336550 -->
The new function <a href="/pkg/syscall/?GOOS=windows#SyscallN"><code>SyscallN</code></a>
has been introduced for Windows, allowing calls with an arbitrary number
of arguments. As a result,
<a href="/pkg/syscall/?GOOS=windows#Syscall"><code>Syscall</code></a>,
<a href="/pkg/syscall/?GOOS=windows#Syscall6"><code>Syscall6</code></a>,
<a href="/pkg/syscall/?GOOS=windows#Syscall9"><code>Syscall9</code></a>,
<a href="/pkg/syscall/?GOOS=windows#Syscall12"><code>Syscall12</code></a>,
<a href="/pkg/syscall/?GOOS=windows#Syscall15"><code>Syscall15</code></a>, and
<a href="/pkg/syscall/?GOOS=windows#Syscall18"><code>Syscall18</code></a> are
deprecated in favor of <a href="/pkg/syscall/?GOOS=windows#SyscallN"><code>SyscallN</code></a>.
</p>
</dd>
</dl><!-- syscall -->
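A minimal, Windows-only sketch of the new call pattern; the Beep API and its two arguments are just an illustration, not something this change prescribes:

```go
//go:build windows

package main

import "syscall"

func main() {
	kernel32 := syscall.NewLazyDLL("kernel32.dll")
	beep := kernel32.NewProc("Beep")

	// Before Go 1.18 this required picking Syscall/Syscall6/... by argument
	// count and padding unused slots with zeros; SyscallN takes a variadic
	// argument list instead.
	r1, _, err := syscall.SyscallN(beep.Addr(), uintptr(750), uintptr(300))
	if r1 == 0 {
		panic(err)
	}
}
```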
@@ -1,6 +1,6 @@
<!--{
"Title": "The Go Programming Language Specification",
"Subtitle": "Version of Sep 16, 2021",
"Subtitle": "Version of Jul 26, 2021",
"Path": "/ref/spec"
}-->
@@ -3000,18 +3000,6 @@ method value; the saved copy is then used as the receiver in any calls,
which may be executed later.
</p>

<pre>
type S struct { *T }
type T int
func (t T) M() { print(t) }

t := new(T)
s := S{T: t}
f := t.M // receiver *t is evaluated and stored in f
g := s.M // receiver *(s.T) is evaluated and stored in g
*t = 42 // does not affect stored receivers in f and g
</pre>

<p>
The type <code>T</code> may be an interface or non-interface type.
</p>
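A runnable sketch of the behavior the spec example above describes: the receiver is copied when the method value is created, so a later write through the pointer is not observed by that method value.

```go
package main

import "fmt"

type T int

func (t T) M() { fmt.Println(t) }

func main() {
	t := new(T)
	f := t.M // receiver *t (currently 0) is evaluated and saved now
	*t = 42  // does not affect the receiver already stored in f

	f()   // prints 0
	t.M() // prints 42: a fresh call evaluates *t again
}
```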
@@ -3614,7 +3602,7 @@ var i = 1<<s // 1 has type int
var j int32 = 1<<s // 1 has type int32; j == 0
var k = uint64(1<<s) // 1 has type uint64; k == 1<<33
var m int = 1.0<<s // 1.0 has type int; m == 1<<33
var n = 1.0<<s == j // 1.0 has type int32; n == true
var n = 1.0<<s == j // 1.0 has type int; n == true
var o = 1<<s == 2<<s // 1 and 2 have type int; o == false
var p = 1<<s == 1<<33 // 1 has type int; p == true
var u = 1.0<<s // illegal: 1.0 has type float64, cannot shift
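A small hedged demonstration of the rule these lines illustrate: the untyped constant on the left of the shift takes the type of the variable or conversion it feeds into, and because s is a variable the shift happens at run time (output assumes a 64-bit int).

```go
package main

import "fmt"

func main() {
	var s uint = 33

	var j int32 = 1 << s   // 1 becomes int32; shifting past 32 bits wraps to 0
	var k = uint64(1 << s) // 1 becomes uint64; k == 1<<33
	var m int = 1.0 << s   // 1.0 becomes int; m == 1<<33 on a 64-bit int

	fmt.Println(j, k, m) // 0 8589934592 8589934592
}
```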
@@ -4350,7 +4338,7 @@ t0 := (*[0]string)(t) // t0 == nil
t1 := (*[1]string)(t) // panics: len([1]string) > len(t)

u := make([]byte, 0)
u0 := (*[0]byte)(u) // u0 != nil
u0 = (*[0]byte)(u) // u0 != nil
</pre>

<h3 id="Constant_expressions">Constant expressions</h3>
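For context, this spec passage covers the slice-to-array-pointer conversion added in Go 1.17. A minimal runnable sketch of the two outcomes shown above:

```go
package main

import "fmt"

func main() {
	s := []string{"a", "b", "c"}

	// Converting a slice to *[N]string succeeds when N <= len(s); the
	// resulting pointer shares the slice's backing array.
	p := (*[2]string)(s)
	p[0] = "z"
	fmt.Println(s[0]) // "z"

	// A too-large array length panics at run time.
	defer func() { fmt.Println("recovered:", recover()) }()
	_ = (*[4]string)(s) // panics: len([4]string) > len(s)
}
```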
@@ -4561,8 +4549,9 @@ SimpleStmt = EmptyStmt | ExpressionStmt | SendStmt | IncDecStmt | Assignment | S
<h3 id="Terminating_statements">Terminating statements</h3>

<p>
A <i>terminating statement</i> interrupts the regular flow of control in
a <a href="#Blocks">block</a>. The following statements are terminating:
A <i>terminating statement</i> prevents execution of all statements that lexically
appear after it in the same <a href="#Blocks">block</a>. The following statements
are terminating:
</p>

<ol>
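The practical effect of this definition is that a function body whose final statement is terminating needs no trailing return. A brief illustration:

```go
package main

import "fmt"

// abs ends in an "if ... else" in which both branches are terminating
// (each ends in a return), so the whole if statement is terminating and
// no return is required after it.
func abs(x int) int {
	if x < 0 {
		return -x
	} else {
		return x
	}
}

func main() {
	fmt.Println(abs(-7)) // 7
}
```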
@@ -4,7 +4,7 @@ The IANA asserts that the database is in the public domain.

For more information, see
https://www.iana.org/time-zones
ftp://ftp.iana.org/tz/code/tz-link.html
https://datatracker.ietf.org/doc/html/rfc6557
ftp://ftp.iana.org/tz/code/tz-link.htm
http://tools.ietf.org/html/rfc6557

To rebuild the archive, read and run update.bash.
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build ignore
// +build ignore

// This program can be used as go_android_GOARCH_exec by the Go tool.
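This hunk and the many similar ones below drop the legacy `// +build` line and keep only the `//go:build` form introduced in Go 1.17. The two notations encode the same constraint; a hedged side-by-side sketch (the package name and constraint are made up for illustration):

```go
// The constraint written both ways; gofmt keeps the pair in sync, and once
// the minimum toolchain is new enough the // +build line can be deleted,
// which is exactly what these hunks do.

//go:build (linux && cgo) || (darwin && cgo)
// +build linux,cgo darwin,cgo

package cgodemo
```

In the old syntax a space means OR and a comma means AND, so `linux,cgo darwin,cgo` reads as (linux AND cgo) OR (darwin AND cgo), matching the boolean expression on the `//go:build` line.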
12
misc/cgo/errors/testdata/err2.go
vendored
@@ -91,18 +91,10 @@ func main() {

// issue 26745
_ = func(i int) int {
// typecheck reports at column 14 ('+'), but types2 reports at
// column 10 ('C').
// TODO(mdempsky): Investigate why, and see if types2 can be
// updated to match typecheck behavior.
return C.i + 1 // ERROR HERE: \b(10|14)\b
return C.i + 1 // ERROR HERE: 14
}
_ = func(i int) {
// typecheck reports at column 7 ('('), but types2 reports at
// column 8 ('i'). The types2 position is more correct, but
// updating typecheck here is fundamentally challenging because of
// IR limitations.
C.fi(i) // ERROR HERE: \b(7|8)\b
C.fi(i) // ERROR HERE: 7
}

C.fi = C.fi // ERROR HERE
@@ -2,7 +2,6 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build ignore
|
||||
// +build ignore
|
||||
|
||||
// Compute Fibonacci numbers with two goroutines
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build ignore
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build linux && freebsd && openbsd
|
||||
// +build linux,freebsd,openbsd
|
||||
|
||||
package cgotest
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
package cgotest
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build linux && cgo
|
||||
// +build linux,cgo
|
||||
|
||||
package cgotest
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
// Issue 18146: pthread_create failure during syscall.Exec.
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build darwin && cgo && !internal
|
||||
// +build darwin,cgo,!internal
|
||||
|
||||
package cgotest
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !darwin || !cgo || internal
|
||||
// +build !darwin !cgo internal
|
||||
|
||||
package cgotest
|
||||
|
||||
@@ -2,9 +2,7 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !windows && !static && (!darwin || (!internal_pie && !arm64))
|
||||
// +build !windows
|
||||
// +build !static
|
||||
// +build !windows,!static
|
||||
// +build !darwin !internal_pie,!arm64
|
||||
|
||||
// Excluded in darwin internal linking PIE mode, as dynamic export is not
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build windows || static || (darwin && internal_pie) || (darwin && arm64)
|
||||
// +build windows static darwin,internal_pie darwin,arm64
|
||||
|
||||
package cgotest
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !android
|
||||
// +build !android
|
||||
|
||||
// Test that pthread_cancel works as expected
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
package cgotest
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !android
|
||||
// +build !android
|
||||
|
||||
package cgotest
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !windows && !android
|
||||
// +build !windows,!android
|
||||
|
||||
// Test that the Go runtime still works if C code changes the signal stack.
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
package cgotest
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
package cgotest
|
||||
|
||||
9
misc/cgo/test/testdata/issue43639.go
vendored
@@ -1,9 +0,0 @@
|
||||
// Copyright 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cgotest
|
||||
|
||||
// Issue 43639: No runtime test needed, make sure package cgotest/issue43639 compiles well.
|
||||
|
||||
import _ "cgotest/issue43639"
|
||||
8
misc/cgo/test/testdata/issue43639/a.go
vendored
@@ -1,8 +0,0 @@
|
||||
// Copyright 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package issue43639
|
||||
|
||||
// #cgo CFLAGS: -W -Wall -Werror
|
||||
import "C"
|
||||
18
misc/cgo/testgodefs/testdata/issue48396.go
vendored
@@ -1,18 +0,0 @@
|
||||
// Copyright 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
//
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
/*
|
||||
// from <linux/kcm.h>
|
||||
struct issue48396 {
|
||||
int fd;
|
||||
int bpf_fd;
|
||||
};
|
||||
*/
|
||||
import "C"
|
||||
|
||||
type Issue48396 C.struct_issue48396
|
||||
3
misc/cgo/testgodefs/testdata/main.go
vendored
@@ -28,9 +28,6 @@ var v7 = S{}
|
||||
// Test that #define'd type is fully defined
|
||||
var _ = issue38649{X: 0}
|
||||
|
||||
// Test that prefixes do not cause duplicate field names.
|
||||
var _ = Issue48396{Fd: 1, Bpf_fd: 2}
|
||||
|
||||
func main() {
|
||||
pass := true
|
||||
|
||||
|
||||
@@ -25,7 +25,6 @@ var filePrefixes = []string{
|
||||
"issue37621",
|
||||
"issue38649",
|
||||
"issue39534",
|
||||
"issue48396",
|
||||
}
|
||||
|
||||
func TestGoDefs(t *testing.T) {
|
||||
|
||||
@@ -1070,3 +1070,11 @@ func TestIssue44031(t *testing.T) {
goCmd(t, "install", "-buildmode=shared", "-linkshared", "./issue44031/b")
goCmd(t, "run", "-linkshared", "./issue44031/main")
}

// Test that we can use a variable from a shared library that implements an
// interface defined in another shared library. A weak reference is used for
// the itab in the main process, which can cause an unreachable-method panic.
// See issue 47873.
func TestIssue47873(t *testing.T) {
goCmd(t, "install", "-buildmode=shared", "-linkshared", "./issue47837/a")
goCmd(t, "run", "-linkshared", "./issue47837/main")
}
@@ -4,7 +4,16 @@
|
||||
|
||||
package a
|
||||
|
||||
//go:noinline
|
||||
func F[T comparable](a, b T) bool {
|
||||
return a == b
|
||||
type A interface {
|
||||
M()
|
||||
}
|
||||
|
||||
//go:noinline
|
||||
func TheFuncWithArgA(a A) {
|
||||
a.M()
|
||||
}
|
||||
|
||||
type ImplA struct{}
|
||||
|
||||
//go:noinline
|
||||
func (A *ImplA) M() {}
|
||||
@@ -5,11 +5,10 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"b"
|
||||
"c"
|
||||
"testshared/issue47837/a"
|
||||
)
|
||||
|
||||
func main() {
|
||||
b.B()
|
||||
c.C()
|
||||
var vara a.ImplA
|
||||
a.TheFuncWithArgA(&vara)
|
||||
}
|
||||
@@ -2,7 +2,6 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !cgo
|
||||
// +build !cgo
|
||||
|
||||
package so_test
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build cgo
|
||||
// +build cgo
|
||||
|
||||
package so_test
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !cgo
|
||||
// +build !cgo
|
||||
|
||||
package so_test
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build cgo
|
||||
// +build cgo
|
||||
|
||||
package so_test
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
package cgotlstest
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build ignore
|
||||
// +build ignore
|
||||
|
||||
// detect attempts to autodetect the correct
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build explicit
|
||||
// +build explicit
|
||||
|
||||
// Package experiment_toolid_test verifies that GOEXPERIMENT settings built
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
"use strict";
|
||||
|
||||
(() => {
|
||||
// Map multiple JavaScript environments to a single common API,
|
||||
@@ -568,6 +567,13 @@
offset += 8;
});

// The linker guarantees global data starts from at least wasmMinDataAddr.
// Keep in sync with cmd/link/internal/ld/data.go:wasmMinDataAddr.
const wasmMinDataAddr = 4096 + 8192;
if (offset >= wasmMinDataAddr) {
throw new Error("total length of command line and environment variables exceeds limit");
}

this._inst.exports.run(argc, argv);
if (this.exited) {
this._resolveExitPromise();
@@ -316,10 +316,10 @@ func invertSparseEntries(src []sparseEntry, size int64) []sparseEntry {
// fileState tracks the number of logical (includes sparse holes) and physical
// (actual in tar archive) bytes remaining for the current file.
//
// Invariant: logicalRemaining >= physicalRemaining
// Invariant: LogicalRemaining >= PhysicalRemaining
type fileState interface {
logicalRemaining() int64
physicalRemaining() int64
LogicalRemaining() int64
PhysicalRemaining() int64
}

// allowedFormats determines which formats can be used.
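A small self-contained sketch of why that invariant holds (illustrative types only, not this package's internals): for a sparse file the logical size counts holes, while the physical size counts only the bytes actually stored in the archive, so the logical count can never be the smaller of the two.

```go
package main

import "fmt"

// entry is an illustrative data fragment of a sparse file: a run of real
// bytes stored in the archive at a given logical offset.
type entry struct{ offset, length int64 }

func main() {
	// A 20-byte logical file in which only two fragments hold real data;
	// everything else is a hole filled with zeros on extraction.
	data := []entry{{offset: 2, length: 3}, {offset: 10, length: 4}}
	const logicalSize = 20

	var physicalSize int64
	for _, e := range data {
		physicalSize += e.length // only data fragments occupy archive space
	}

	// LogicalRemaining starts at the logical size and PhysicalRemaining at
	// the physical size, so LogicalRemaining >= PhysicalRemaining throughout.
	fmt.Println(logicalSize, physicalSize) // 20 7
}
```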
@@ -413,22 +413,22 @@ func (h Header) allowedFormats() (format Format, paxHdrs map[string]string, err
|
||||
|
||||
// Check basic fields.
|
||||
var blk block
|
||||
v7 := blk.toV7()
|
||||
ustar := blk.toUSTAR()
|
||||
gnu := blk.toGNU()
|
||||
verifyString(h.Name, len(v7.name()), "Name", paxPath)
|
||||
verifyString(h.Linkname, len(v7.linkName()), "Linkname", paxLinkpath)
|
||||
verifyString(h.Uname, len(ustar.userName()), "Uname", paxUname)
|
||||
verifyString(h.Gname, len(ustar.groupName()), "Gname", paxGname)
|
||||
verifyNumeric(h.Mode, len(v7.mode()), "Mode", paxNone)
|
||||
verifyNumeric(int64(h.Uid), len(v7.uid()), "Uid", paxUid)
|
||||
verifyNumeric(int64(h.Gid), len(v7.gid()), "Gid", paxGid)
|
||||
verifyNumeric(h.Size, len(v7.size()), "Size", paxSize)
|
||||
verifyNumeric(h.Devmajor, len(ustar.devMajor()), "Devmajor", paxNone)
|
||||
verifyNumeric(h.Devminor, len(ustar.devMinor()), "Devminor", paxNone)
|
||||
verifyTime(h.ModTime, len(v7.modTime()), "ModTime", paxMtime)
|
||||
verifyTime(h.AccessTime, len(gnu.accessTime()), "AccessTime", paxAtime)
|
||||
verifyTime(h.ChangeTime, len(gnu.changeTime()), "ChangeTime", paxCtime)
|
||||
v7 := blk.V7()
|
||||
ustar := blk.USTAR()
|
||||
gnu := blk.GNU()
|
||||
verifyString(h.Name, len(v7.Name()), "Name", paxPath)
|
||||
verifyString(h.Linkname, len(v7.LinkName()), "Linkname", paxLinkpath)
|
||||
verifyString(h.Uname, len(ustar.UserName()), "Uname", paxUname)
|
||||
verifyString(h.Gname, len(ustar.GroupName()), "Gname", paxGname)
|
||||
verifyNumeric(h.Mode, len(v7.Mode()), "Mode", paxNone)
|
||||
verifyNumeric(int64(h.Uid), len(v7.UID()), "Uid", paxUid)
|
||||
verifyNumeric(int64(h.Gid), len(v7.GID()), "Gid", paxGid)
|
||||
verifyNumeric(h.Size, len(v7.Size()), "Size", paxSize)
|
||||
verifyNumeric(h.Devmajor, len(ustar.DevMajor()), "Devmajor", paxNone)
|
||||
verifyNumeric(h.Devminor, len(ustar.DevMinor()), "Devminor", paxNone)
|
||||
verifyTime(h.ModTime, len(v7.ModTime()), "ModTime", paxMtime)
|
||||
verifyTime(h.AccessTime, len(gnu.AccessTime()), "AccessTime", paxAtime)
|
||||
verifyTime(h.ChangeTime, len(gnu.ChangeTime()), "ChangeTime", paxCtime)
|
||||
|
||||
// Check for header-only types.
|
||||
var whyOnlyPAX, whyOnlyGNU string
|
||||
|
||||
@@ -156,28 +156,28 @@ var zeroBlock block
|
||||
type block [blockSize]byte
|
||||
|
||||
// Convert block to any number of formats.
|
||||
func (b *block) toV7() *headerV7 { return (*headerV7)(b) }
|
||||
func (b *block) toGNU() *headerGNU { return (*headerGNU)(b) }
|
||||
func (b *block) toSTAR() *headerSTAR { return (*headerSTAR)(b) }
|
||||
func (b *block) toUSTAR() *headerUSTAR { return (*headerUSTAR)(b) }
|
||||
func (b *block) toSparse() sparseArray { return sparseArray(b[:]) }
|
||||
func (b *block) V7() *headerV7 { return (*headerV7)(b) }
|
||||
func (b *block) GNU() *headerGNU { return (*headerGNU)(b) }
|
||||
func (b *block) STAR() *headerSTAR { return (*headerSTAR)(b) }
|
||||
func (b *block) USTAR() *headerUSTAR { return (*headerUSTAR)(b) }
|
||||
func (b *block) Sparse() sparseArray { return sparseArray(b[:]) }
|
||||
|
||||
// GetFormat checks that the block is a valid tar header based on the checksum.
|
||||
// It then attempts to guess the specific format based on magic values.
|
||||
// If the checksum fails, then FormatUnknown is returned.
|
||||
func (b *block) getFormat() Format {
|
||||
func (b *block) GetFormat() Format {
|
||||
// Verify checksum.
|
||||
var p parser
|
||||
value := p.parseOctal(b.toV7().chksum())
|
||||
chksum1, chksum2 := b.computeChecksum()
|
||||
value := p.parseOctal(b.V7().Chksum())
|
||||
chksum1, chksum2 := b.ComputeChecksum()
|
||||
if p.err != nil || (value != chksum1 && value != chksum2) {
|
||||
return FormatUnknown
|
||||
}
|
||||
|
||||
// Guess the magic values.
|
||||
magic := string(b.toUSTAR().magic())
|
||||
version := string(b.toUSTAR().version())
|
||||
trailer := string(b.toSTAR().trailer())
|
||||
magic := string(b.USTAR().Magic())
|
||||
version := string(b.USTAR().Version())
|
||||
trailer := string(b.STAR().Trailer())
|
||||
switch {
|
||||
case magic == magicUSTAR && trailer == trailerSTAR:
|
||||
return formatSTAR
|
||||
@@ -190,23 +190,23 @@ func (b *block) getFormat() Format {
|
||||
}
|
||||
}
|
||||
|
||||
// setFormat writes the magic values necessary for specified format
|
||||
// SetFormat writes the magic values necessary for specified format
|
||||
// and then updates the checksum accordingly.
|
||||
func (b *block) setFormat(format Format) {
|
||||
func (b *block) SetFormat(format Format) {
|
||||
// Set the magic values.
|
||||
switch {
|
||||
case format.has(formatV7):
|
||||
// Do nothing.
|
||||
case format.has(FormatGNU):
|
||||
copy(b.toGNU().magic(), magicGNU)
|
||||
copy(b.toGNU().version(), versionGNU)
|
||||
copy(b.GNU().Magic(), magicGNU)
|
||||
copy(b.GNU().Version(), versionGNU)
|
||||
case format.has(formatSTAR):
|
||||
copy(b.toSTAR().magic(), magicUSTAR)
|
||||
copy(b.toSTAR().version(), versionUSTAR)
|
||||
copy(b.toSTAR().trailer(), trailerSTAR)
|
||||
copy(b.STAR().Magic(), magicUSTAR)
|
||||
copy(b.STAR().Version(), versionUSTAR)
|
||||
copy(b.STAR().Trailer(), trailerSTAR)
|
||||
case format.has(FormatUSTAR | FormatPAX):
|
||||
copy(b.toUSTAR().magic(), magicUSTAR)
|
||||
copy(b.toUSTAR().version(), versionUSTAR)
|
||||
copy(b.USTAR().Magic(), magicUSTAR)
|
||||
copy(b.USTAR().Version(), versionUSTAR)
|
||||
default:
|
||||
panic("invalid format")
|
||||
}
|
||||
@@ -214,17 +214,17 @@ func (b *block) setFormat(format Format) {
|
||||
// Update checksum.
|
||||
// This field is special in that it is terminated by a NULL then space.
|
||||
var f formatter
|
||||
field := b.toV7().chksum()
|
||||
chksum, _ := b.computeChecksum() // Possible values are 256..128776
|
||||
field := b.V7().Chksum()
|
||||
chksum, _ := b.ComputeChecksum() // Possible values are 256..128776
|
||||
f.formatOctal(field[:7], chksum) // Never fails since 128776 < 262143
|
||||
field[7] = ' '
|
||||
}
|
||||
|
||||
// computeChecksum computes the checksum for the header block.
|
||||
// ComputeChecksum computes the checksum for the header block.
|
||||
// POSIX specifies a sum of the unsigned byte values, but the Sun tar used
|
||||
// signed byte values.
|
||||
// We compute and return both.
|
||||
func (b *block) computeChecksum() (unsigned, signed int64) {
|
||||
func (b *block) ComputeChecksum() (unsigned, signed int64) {
|
||||
for i, c := range b {
|
||||
if 148 <= i && i < 156 {
|
||||
c = ' ' // Treat the checksum field itself as all spaces.
|
||||
@@ -236,68 +236,68 @@ func (b *block) computeChecksum() (unsigned, signed int64) {
|
||||
}
|
||||
|
||||
// Reset clears the block with all zeros.
|
||||
func (b *block) reset() {
|
||||
func (b *block) Reset() {
|
||||
*b = block{}
|
||||
}
|
||||
|
||||
type headerV7 [blockSize]byte
|
||||
|
||||
func (h *headerV7) name() []byte { return h[000:][:100] }
|
||||
func (h *headerV7) mode() []byte { return h[100:][:8] }
|
||||
func (h *headerV7) uid() []byte { return h[108:][:8] }
|
||||
func (h *headerV7) gid() []byte { return h[116:][:8] }
|
||||
func (h *headerV7) size() []byte { return h[124:][:12] }
|
||||
func (h *headerV7) modTime() []byte { return h[136:][:12] }
|
||||
func (h *headerV7) chksum() []byte { return h[148:][:8] }
|
||||
func (h *headerV7) typeFlag() []byte { return h[156:][:1] }
|
||||
func (h *headerV7) linkName() []byte { return h[157:][:100] }
|
||||
func (h *headerV7) Name() []byte { return h[000:][:100] }
|
||||
func (h *headerV7) Mode() []byte { return h[100:][:8] }
|
||||
func (h *headerV7) UID() []byte { return h[108:][:8] }
|
||||
func (h *headerV7) GID() []byte { return h[116:][:8] }
|
||||
func (h *headerV7) Size() []byte { return h[124:][:12] }
|
||||
func (h *headerV7) ModTime() []byte { return h[136:][:12] }
|
||||
func (h *headerV7) Chksum() []byte { return h[148:][:8] }
|
||||
func (h *headerV7) TypeFlag() []byte { return h[156:][:1] }
|
||||
func (h *headerV7) LinkName() []byte { return h[157:][:100] }
|
||||
|
||||
type headerGNU [blockSize]byte
|
||||
|
||||
func (h *headerGNU) v7() *headerV7 { return (*headerV7)(h) }
|
||||
func (h *headerGNU) magic() []byte { return h[257:][:6] }
|
||||
func (h *headerGNU) version() []byte { return h[263:][:2] }
|
||||
func (h *headerGNU) userName() []byte { return h[265:][:32] }
|
||||
func (h *headerGNU) groupName() []byte { return h[297:][:32] }
|
||||
func (h *headerGNU) devMajor() []byte { return h[329:][:8] }
|
||||
func (h *headerGNU) devMinor() []byte { return h[337:][:8] }
|
||||
func (h *headerGNU) accessTime() []byte { return h[345:][:12] }
|
||||
func (h *headerGNU) changeTime() []byte { return h[357:][:12] }
|
||||
func (h *headerGNU) sparse() sparseArray { return sparseArray(h[386:][:24*4+1]) }
|
||||
func (h *headerGNU) realSize() []byte { return h[483:][:12] }
|
||||
func (h *headerGNU) V7() *headerV7 { return (*headerV7)(h) }
|
||||
func (h *headerGNU) Magic() []byte { return h[257:][:6] }
|
||||
func (h *headerGNU) Version() []byte { return h[263:][:2] }
|
||||
func (h *headerGNU) UserName() []byte { return h[265:][:32] }
|
||||
func (h *headerGNU) GroupName() []byte { return h[297:][:32] }
|
||||
func (h *headerGNU) DevMajor() []byte { return h[329:][:8] }
|
||||
func (h *headerGNU) DevMinor() []byte { return h[337:][:8] }
|
||||
func (h *headerGNU) AccessTime() []byte { return h[345:][:12] }
|
||||
func (h *headerGNU) ChangeTime() []byte { return h[357:][:12] }
|
||||
func (h *headerGNU) Sparse() sparseArray { return sparseArray(h[386:][:24*4+1]) }
|
||||
func (h *headerGNU) RealSize() []byte { return h[483:][:12] }
|
||||
|
||||
type headerSTAR [blockSize]byte
|
||||
|
||||
func (h *headerSTAR) v7() *headerV7 { return (*headerV7)(h) }
|
||||
func (h *headerSTAR) magic() []byte { return h[257:][:6] }
|
||||
func (h *headerSTAR) version() []byte { return h[263:][:2] }
|
||||
func (h *headerSTAR) userName() []byte { return h[265:][:32] }
|
||||
func (h *headerSTAR) groupName() []byte { return h[297:][:32] }
|
||||
func (h *headerSTAR) devMajor() []byte { return h[329:][:8] }
|
||||
func (h *headerSTAR) devMinor() []byte { return h[337:][:8] }
|
||||
func (h *headerSTAR) prefix() []byte { return h[345:][:131] }
|
||||
func (h *headerSTAR) accessTime() []byte { return h[476:][:12] }
|
||||
func (h *headerSTAR) changeTime() []byte { return h[488:][:12] }
|
||||
func (h *headerSTAR) trailer() []byte { return h[508:][:4] }
|
||||
func (h *headerSTAR) V7() *headerV7 { return (*headerV7)(h) }
|
||||
func (h *headerSTAR) Magic() []byte { return h[257:][:6] }
|
||||
func (h *headerSTAR) Version() []byte { return h[263:][:2] }
|
||||
func (h *headerSTAR) UserName() []byte { return h[265:][:32] }
|
||||
func (h *headerSTAR) GroupName() []byte { return h[297:][:32] }
|
||||
func (h *headerSTAR) DevMajor() []byte { return h[329:][:8] }
|
||||
func (h *headerSTAR) DevMinor() []byte { return h[337:][:8] }
|
||||
func (h *headerSTAR) Prefix() []byte { return h[345:][:131] }
|
||||
func (h *headerSTAR) AccessTime() []byte { return h[476:][:12] }
|
||||
func (h *headerSTAR) ChangeTime() []byte { return h[488:][:12] }
|
||||
func (h *headerSTAR) Trailer() []byte { return h[508:][:4] }
|
||||
|
||||
type headerUSTAR [blockSize]byte
|
||||
|
||||
func (h *headerUSTAR) v7() *headerV7 { return (*headerV7)(h) }
|
||||
func (h *headerUSTAR) magic() []byte { return h[257:][:6] }
|
||||
func (h *headerUSTAR) version() []byte { return h[263:][:2] }
|
||||
func (h *headerUSTAR) userName() []byte { return h[265:][:32] }
|
||||
func (h *headerUSTAR) groupName() []byte { return h[297:][:32] }
|
||||
func (h *headerUSTAR) devMajor() []byte { return h[329:][:8] }
|
||||
func (h *headerUSTAR) devMinor() []byte { return h[337:][:8] }
|
||||
func (h *headerUSTAR) prefix() []byte { return h[345:][:155] }
|
||||
func (h *headerUSTAR) V7() *headerV7 { return (*headerV7)(h) }
|
||||
func (h *headerUSTAR) Magic() []byte { return h[257:][:6] }
|
||||
func (h *headerUSTAR) Version() []byte { return h[263:][:2] }
|
||||
func (h *headerUSTAR) UserName() []byte { return h[265:][:32] }
|
||||
func (h *headerUSTAR) GroupName() []byte { return h[297:][:32] }
|
||||
func (h *headerUSTAR) DevMajor() []byte { return h[329:][:8] }
|
||||
func (h *headerUSTAR) DevMinor() []byte { return h[337:][:8] }
|
||||
func (h *headerUSTAR) Prefix() []byte { return h[345:][:155] }
|
||||
|
||||
type sparseArray []byte
|
||||
|
||||
func (s sparseArray) entry(i int) sparseElem { return sparseElem(s[i*24:]) }
|
||||
func (s sparseArray) isExtended() []byte { return s[24*s.maxEntries():][:1] }
|
||||
func (s sparseArray) maxEntries() int { return len(s) / 24 }
|
||||
func (s sparseArray) Entry(i int) sparseElem { return sparseElem(s[i*24:]) }
|
||||
func (s sparseArray) IsExtended() []byte { return s[24*s.MaxEntries():][:1] }
|
||||
func (s sparseArray) MaxEntries() int { return len(s) / 24 }
|
||||
|
||||
type sparseElem []byte
|
||||
|
||||
func (s sparseElem) offset() []byte { return s[00:][:12] }
|
||||
func (s sparseElem) length() []byte { return s[12:][:12] }
|
||||
func (s sparseElem) Offset() []byte { return s[00:][:12] }
|
||||
func (s sparseElem) Length() []byte { return s[12:][:12] }
|
||||
|
||||
@@ -65,7 +65,7 @@ func (tr *Reader) next() (*Header, error) {
|
||||
format := FormatUSTAR | FormatPAX | FormatGNU
|
||||
for {
|
||||
// Discard the remainder of the file and any padding.
|
||||
if err := discard(tr.r, tr.curr.physicalRemaining()); err != nil {
|
||||
if err := discard(tr.r, tr.curr.PhysicalRemaining()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, err := tryReadFull(tr.r, tr.blk[:tr.pad]); err != nil {
|
||||
@@ -355,7 +355,7 @@ func (tr *Reader) readHeader() (*Header, *block, error) {
|
||||
}
|
||||
|
||||
// Verify the header matches a known format.
|
||||
format := tr.blk.getFormat()
|
||||
format := tr.blk.GetFormat()
|
||||
if format == FormatUnknown {
|
||||
return nil, nil, ErrHeader
|
||||
}
|
||||
@@ -364,30 +364,30 @@ func (tr *Reader) readHeader() (*Header, *block, error) {
|
||||
hdr := new(Header)
|
||||
|
||||
// Unpack the V7 header.
|
||||
v7 := tr.blk.toV7()
|
||||
hdr.Typeflag = v7.typeFlag()[0]
|
||||
hdr.Name = p.parseString(v7.name())
|
||||
hdr.Linkname = p.parseString(v7.linkName())
|
||||
hdr.Size = p.parseNumeric(v7.size())
|
||||
hdr.Mode = p.parseNumeric(v7.mode())
|
||||
hdr.Uid = int(p.parseNumeric(v7.uid()))
|
||||
hdr.Gid = int(p.parseNumeric(v7.gid()))
|
||||
hdr.ModTime = time.Unix(p.parseNumeric(v7.modTime()), 0)
|
||||
v7 := tr.blk.V7()
|
||||
hdr.Typeflag = v7.TypeFlag()[0]
|
||||
hdr.Name = p.parseString(v7.Name())
|
||||
hdr.Linkname = p.parseString(v7.LinkName())
|
||||
hdr.Size = p.parseNumeric(v7.Size())
|
||||
hdr.Mode = p.parseNumeric(v7.Mode())
|
||||
hdr.Uid = int(p.parseNumeric(v7.UID()))
|
||||
hdr.Gid = int(p.parseNumeric(v7.GID()))
|
||||
hdr.ModTime = time.Unix(p.parseNumeric(v7.ModTime()), 0)
|
||||
|
||||
// Unpack format specific fields.
|
||||
if format > formatV7 {
|
||||
ustar := tr.blk.toUSTAR()
|
||||
hdr.Uname = p.parseString(ustar.userName())
|
||||
hdr.Gname = p.parseString(ustar.groupName())
|
||||
hdr.Devmajor = p.parseNumeric(ustar.devMajor())
|
||||
hdr.Devminor = p.parseNumeric(ustar.devMinor())
|
||||
ustar := tr.blk.USTAR()
|
||||
hdr.Uname = p.parseString(ustar.UserName())
|
||||
hdr.Gname = p.parseString(ustar.GroupName())
|
||||
hdr.Devmajor = p.parseNumeric(ustar.DevMajor())
|
||||
hdr.Devminor = p.parseNumeric(ustar.DevMinor())
|
||||
|
||||
var prefix string
|
||||
switch {
|
||||
case format.has(FormatUSTAR | FormatPAX):
|
||||
hdr.Format = format
|
||||
ustar := tr.blk.toUSTAR()
|
||||
prefix = p.parseString(ustar.prefix())
|
||||
ustar := tr.blk.USTAR()
|
||||
prefix = p.parseString(ustar.Prefix())
|
||||
|
||||
// For Format detection, check if block is properly formatted since
|
||||
// the parser is more liberal than what USTAR actually permits.
|
||||
@@ -396,23 +396,23 @@ func (tr *Reader) readHeader() (*Header, *block, error) {
|
||||
hdr.Format = FormatUnknown // Non-ASCII characters in block.
|
||||
}
|
||||
nul := func(b []byte) bool { return int(b[len(b)-1]) == 0 }
|
||||
if !(nul(v7.size()) && nul(v7.mode()) && nul(v7.uid()) && nul(v7.gid()) &&
|
||||
nul(v7.modTime()) && nul(ustar.devMajor()) && nul(ustar.devMinor())) {
|
||||
if !(nul(v7.Size()) && nul(v7.Mode()) && nul(v7.UID()) && nul(v7.GID()) &&
|
||||
nul(v7.ModTime()) && nul(ustar.DevMajor()) && nul(ustar.DevMinor())) {
|
||||
hdr.Format = FormatUnknown // Numeric fields must end in NUL
|
||||
}
|
||||
case format.has(formatSTAR):
|
||||
star := tr.blk.toSTAR()
|
||||
prefix = p.parseString(star.prefix())
|
||||
hdr.AccessTime = time.Unix(p.parseNumeric(star.accessTime()), 0)
|
||||
hdr.ChangeTime = time.Unix(p.parseNumeric(star.changeTime()), 0)
|
||||
star := tr.blk.STAR()
|
||||
prefix = p.parseString(star.Prefix())
|
||||
hdr.AccessTime = time.Unix(p.parseNumeric(star.AccessTime()), 0)
|
||||
hdr.ChangeTime = time.Unix(p.parseNumeric(star.ChangeTime()), 0)
|
||||
case format.has(FormatGNU):
|
||||
hdr.Format = format
|
||||
var p2 parser
|
||||
gnu := tr.blk.toGNU()
|
||||
if b := gnu.accessTime(); b[0] != 0 {
|
||||
gnu := tr.blk.GNU()
|
||||
if b := gnu.AccessTime(); b[0] != 0 {
|
||||
hdr.AccessTime = time.Unix(p2.parseNumeric(b), 0)
|
||||
}
|
||||
if b := gnu.changeTime(); b[0] != 0 {
|
||||
if b := gnu.ChangeTime(); b[0] != 0 {
|
||||
hdr.ChangeTime = time.Unix(p2.parseNumeric(b), 0)
|
||||
}
|
||||
|
||||
@@ -439,8 +439,8 @@ func (tr *Reader) readHeader() (*Header, *block, error) {
|
||||
// See https://golang.org/issues/21005
|
||||
if p2.err != nil {
|
||||
hdr.AccessTime, hdr.ChangeTime = time.Time{}, time.Time{}
|
||||
ustar := tr.blk.toUSTAR()
|
||||
if s := p.parseString(ustar.prefix()); isASCII(s) {
|
||||
ustar := tr.blk.USTAR()
|
||||
if s := p.parseString(ustar.Prefix()); isASCII(s) {
|
||||
prefix = s
|
||||
}
|
||||
hdr.Format = FormatUnknown // Buggy file is not GNU
|
||||
@@ -465,38 +465,38 @@ func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) (sparseDatas, err
|
||||
// Make sure that the input format is GNU.
|
||||
// Unfortunately, the STAR format also has a sparse header format that uses
|
||||
// the same type flag but has a completely different layout.
|
||||
if blk.getFormat() != FormatGNU {
|
||||
if blk.GetFormat() != FormatGNU {
|
||||
return nil, ErrHeader
|
||||
}
|
||||
hdr.Format.mayOnlyBe(FormatGNU)
|
||||
|
||||
var p parser
|
||||
hdr.Size = p.parseNumeric(blk.toGNU().realSize())
|
||||
hdr.Size = p.parseNumeric(blk.GNU().RealSize())
|
||||
if p.err != nil {
|
||||
return nil, p.err
|
||||
}
|
||||
s := blk.toGNU().sparse()
|
||||
spd := make(sparseDatas, 0, s.maxEntries())
|
||||
s := blk.GNU().Sparse()
|
||||
spd := make(sparseDatas, 0, s.MaxEntries())
|
||||
for {
|
||||
for i := 0; i < s.maxEntries(); i++ {
|
||||
for i := 0; i < s.MaxEntries(); i++ {
|
||||
// This termination condition is identical to GNU and BSD tar.
|
||||
if s.entry(i).offset()[0] == 0x00 {
|
||||
if s.Entry(i).Offset()[0] == 0x00 {
|
||||
break // Don't return, need to process extended headers (even if empty)
|
||||
}
|
||||
offset := p.parseNumeric(s.entry(i).offset())
|
||||
length := p.parseNumeric(s.entry(i).length())
|
||||
offset := p.parseNumeric(s.Entry(i).Offset())
|
||||
length := p.parseNumeric(s.Entry(i).Length())
|
||||
if p.err != nil {
|
||||
return nil, p.err
|
||||
}
|
||||
spd = append(spd, sparseEntry{Offset: offset, Length: length})
|
||||
}
|
||||
|
||||
if s.isExtended()[0] > 0 {
|
||||
if s.IsExtended()[0] > 0 {
|
||||
// There are more entries. Read an extension header and parse its entries.
|
||||
if _, err := mustReadFull(tr.r, blk[:]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s = blk.toSparse()
|
||||
s = blk.Sparse()
|
||||
continue
|
||||
}
|
||||
return spd, nil // Done
|
||||
@@ -678,13 +678,11 @@ func (fr *regFileReader) WriteTo(w io.Writer) (int64, error) {
|
||||
return io.Copy(w, struct{ io.Reader }{fr})
|
||||
}
|
||||
|
||||
// logicalRemaining implements fileState.logicalRemaining.
|
||||
func (fr regFileReader) logicalRemaining() int64 {
|
||||
func (fr regFileReader) LogicalRemaining() int64 {
|
||||
return fr.nb
|
||||
}
|
||||
|
||||
// logicalRemaining implements fileState.physicalRemaining.
|
||||
func (fr regFileReader) physicalRemaining() int64 {
|
||||
func (fr regFileReader) PhysicalRemaining() int64 {
|
||||
return fr.nb
|
||||
}
|
||||
|
||||
@@ -696,9 +694,9 @@ type sparseFileReader struct {
|
||||
}
|
||||
|
||||
func (sr *sparseFileReader) Read(b []byte) (n int, err error) {
|
||||
finished := int64(len(b)) >= sr.logicalRemaining()
|
||||
finished := int64(len(b)) >= sr.LogicalRemaining()
|
||||
if finished {
|
||||
b = b[:sr.logicalRemaining()]
|
||||
b = b[:sr.LogicalRemaining()]
|
||||
}
|
||||
|
||||
b0 := b
|
||||
@@ -726,7 +724,7 @@ func (sr *sparseFileReader) Read(b []byte) (n int, err error) {
|
||||
return n, errMissData // Less data in dense file than sparse file
|
||||
case err != nil:
|
||||
return n, err
|
||||
case sr.logicalRemaining() == 0 && sr.physicalRemaining() > 0:
|
||||
case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0:
|
||||
return n, errUnrefData // More data in dense file than sparse file
|
||||
case finished:
|
||||
return n, io.EOF
|
||||
@@ -748,7 +746,7 @@ func (sr *sparseFileReader) WriteTo(w io.Writer) (n int64, err error) {
|
||||
|
||||
var writeLastByte bool
|
||||
pos0 := sr.pos
|
||||
for sr.logicalRemaining() > 0 && !writeLastByte && err == nil {
|
||||
for sr.LogicalRemaining() > 0 && !writeLastByte && err == nil {
|
||||
var nf int64 // Size of fragment
|
||||
holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset()
|
||||
if sr.pos < holeStart { // In a data fragment
|
||||
@@ -756,7 +754,7 @@ func (sr *sparseFileReader) WriteTo(w io.Writer) (n int64, err error) {
|
||||
nf, err = io.CopyN(ws, sr.fr, nf)
|
||||
} else { // In a hole fragment
|
||||
nf = holeEnd - sr.pos
|
||||
if sr.physicalRemaining() == 0 {
|
||||
if sr.PhysicalRemaining() == 0 {
|
||||
writeLastByte = true
|
||||
nf--
|
||||
}
|
||||
@@ -781,18 +779,18 @@ func (sr *sparseFileReader) WriteTo(w io.Writer) (n int64, err error) {
|
||||
return n, errMissData // Less data in dense file than sparse file
|
||||
case err != nil:
|
||||
return n, err
|
||||
case sr.logicalRemaining() == 0 && sr.physicalRemaining() > 0:
|
||||
case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0:
|
||||
return n, errUnrefData // More data in dense file than sparse file
|
||||
default:
|
||||
return n, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (sr sparseFileReader) logicalRemaining() int64 {
|
||||
func (sr sparseFileReader) LogicalRemaining() int64 {
|
||||
return sr.sp[len(sr.sp)-1].endOffset() - sr.pos
|
||||
}
|
||||
func (sr sparseFileReader) physicalRemaining() int64 {
|
||||
return sr.fr.physicalRemaining()
|
||||
func (sr sparseFileReader) PhysicalRemaining() int64 {
|
||||
return sr.fr.PhysicalRemaining()
|
||||
}
|
||||
|
||||
type zeroReader struct{}
|
||||
|
||||
@@ -1021,12 +1021,12 @@ func TestParsePAX(t *testing.T) {
|
||||
|
||||
func TestReadOldGNUSparseMap(t *testing.T) {
|
||||
populateSparseMap := func(sa sparseArray, sps []string) []string {
|
||||
for i := 0; len(sps) > 0 && i < sa.maxEntries(); i++ {
|
||||
copy(sa.entry(i), sps[0])
|
||||
for i := 0; len(sps) > 0 && i < sa.MaxEntries(); i++ {
|
||||
copy(sa.Entry(i), sps[0])
|
||||
sps = sps[1:]
|
||||
}
|
||||
if len(sps) > 0 {
|
||||
copy(sa.isExtended(), "\x80")
|
||||
copy(sa.IsExtended(), "\x80")
|
||||
}
|
||||
return sps
|
||||
}
|
||||
@@ -1034,19 +1034,19 @@ func TestReadOldGNUSparseMap(t *testing.T) {
|
||||
makeInput := func(format Format, size string, sps ...string) (out []byte) {
|
||||
// Write the initial GNU header.
|
||||
var blk block
|
||||
gnu := blk.toGNU()
|
||||
sparse := gnu.sparse()
|
||||
copy(gnu.realSize(), size)
|
||||
gnu := blk.GNU()
|
||||
sparse := gnu.Sparse()
|
||||
copy(gnu.RealSize(), size)
|
||||
sps = populateSparseMap(sparse, sps)
|
||||
if format != FormatUnknown {
|
||||
blk.setFormat(format)
|
||||
blk.SetFormat(format)
|
||||
}
|
||||
out = append(out, blk[:]...)
|
||||
|
||||
// Write extended sparse blocks.
|
||||
for len(sps) > 0 {
|
||||
var blk block
|
||||
sps = populateSparseMap(blk.toSparse(), sps)
|
||||
sps = populateSparseMap(blk.Sparse(), sps)
|
||||
out = append(out, blk[:]...)
|
||||
}
|
||||
return out
|
||||
@@ -1359,7 +1359,7 @@ func TestFileReader(t *testing.T) {
|
||||
wantCnt int64
|
||||
wantErr error
|
||||
}
|
||||
testRemaining struct { // logicalRemaining() == wantLCnt, physicalRemaining() == wantPCnt
|
||||
testRemaining struct { // LogicalRemaining() == wantLCnt, PhysicalRemaining() == wantPCnt
|
||||
wantLCnt int64
|
||||
wantPCnt int64
|
||||
}
|
||||
@@ -1596,11 +1596,11 @@ func TestFileReader(t *testing.T) {
|
||||
t.Errorf("test %d.%d, expected %d more operations", i, j, len(f.ops))
|
||||
}
|
||||
case testRemaining:
|
||||
if got := fr.logicalRemaining(); got != tf.wantLCnt {
|
||||
t.Errorf("test %d.%d, logicalRemaining() = %d, want %d", i, j, got, tf.wantLCnt)
|
||||
if got := fr.LogicalRemaining(); got != tf.wantLCnt {
|
||||
t.Errorf("test %d.%d, LogicalRemaining() = %d, want %d", i, j, got, tf.wantLCnt)
|
||||
}
|
||||
if got := fr.physicalRemaining(); got != tf.wantPCnt {
|
||||
t.Errorf("test %d.%d, physicalRemaining() = %d, want %d", i, j, got, tf.wantPCnt)
|
||||
if got := fr.PhysicalRemaining(); got != tf.wantPCnt {
|
||||
t.Errorf("test %d.%d, PhysicalRemaining() = %d, want %d", i, j, got, tf.wantPCnt)
|
||||
}
|
||||
default:
|
||||
t.Fatalf("test %d.%d, unknown test operation: %T", i, j, tf)
|
||||
|
||||
@@ -50,7 +50,7 @@ func (tw *Writer) Flush() error {
|
||||
if tw.err != nil {
|
||||
return tw.err
|
||||
}
|
||||
if nb := tw.curr.logicalRemaining(); nb > 0 {
|
||||
if nb := tw.curr.LogicalRemaining(); nb > 0 {
|
||||
return fmt.Errorf("archive/tar: missed writing %d bytes", nb)
|
||||
}
|
||||
if _, tw.err = tw.w.Write(zeroBlock[:tw.pad]); tw.err != nil {
|
||||
@@ -117,8 +117,8 @@ func (tw *Writer) writeUSTARHeader(hdr *Header) error {
|
||||
// Pack the main header.
|
||||
var f formatter
|
||||
blk := tw.templateV7Plus(hdr, f.formatString, f.formatOctal)
|
||||
f.formatString(blk.toUSTAR().prefix(), namePrefix)
|
||||
blk.setFormat(FormatUSTAR)
|
||||
f.formatString(blk.USTAR().Prefix(), namePrefix)
|
||||
blk.SetFormat(FormatUSTAR)
|
||||
if f.err != nil {
|
||||
return f.err // Should never happen since header is validated
|
||||
}
|
||||
@@ -208,7 +208,7 @@ func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error {
|
||||
var f formatter // Ignore errors since they are expected
|
||||
fmtStr := func(b []byte, s string) { f.formatString(b, toASCII(s)) }
|
||||
blk := tw.templateV7Plus(hdr, fmtStr, f.formatOctal)
|
||||
blk.setFormat(FormatPAX)
|
||||
blk.SetFormat(FormatPAX)
|
||||
if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -250,10 +250,10 @@ func (tw *Writer) writeGNUHeader(hdr *Header) error {
|
||||
var spb []byte
|
||||
blk := tw.templateV7Plus(hdr, f.formatString, f.formatNumeric)
|
||||
if !hdr.AccessTime.IsZero() {
|
||||
f.formatNumeric(blk.toGNU().accessTime(), hdr.AccessTime.Unix())
|
||||
f.formatNumeric(blk.GNU().AccessTime(), hdr.AccessTime.Unix())
|
||||
}
|
||||
if !hdr.ChangeTime.IsZero() {
|
||||
f.formatNumeric(blk.toGNU().changeTime(), hdr.ChangeTime.Unix())
|
||||
f.formatNumeric(blk.GNU().ChangeTime(), hdr.ChangeTime.Unix())
|
||||
}
|
||||
// TODO(dsnet): Re-enable this when adding sparse support.
|
||||
// See https://golang.org/issue/22735
|
||||
@@ -293,7 +293,7 @@ func (tw *Writer) writeGNUHeader(hdr *Header) error {
|
||||
f.formatNumeric(blk.GNU().RealSize(), realSize)
|
||||
}
|
||||
*/
|
||||
blk.setFormat(FormatGNU)
|
||||
blk.SetFormat(FormatGNU)
|
||||
if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -321,28 +321,28 @@ type (
|
||||
// The block returned is only valid until the next call to
|
||||
// templateV7Plus or writeRawFile.
|
||||
func (tw *Writer) templateV7Plus(hdr *Header, fmtStr stringFormatter, fmtNum numberFormatter) *block {
|
||||
tw.blk.reset()
|
||||
tw.blk.Reset()
|
||||
|
||||
modTime := hdr.ModTime
|
||||
if modTime.IsZero() {
|
||||
modTime = time.Unix(0, 0)
|
||||
}
|
||||
|
||||
v7 := tw.blk.toV7()
|
||||
v7.typeFlag()[0] = hdr.Typeflag
|
||||
fmtStr(v7.name(), hdr.Name)
|
||||
fmtStr(v7.linkName(), hdr.Linkname)
|
||||
fmtNum(v7.mode(), hdr.Mode)
|
||||
fmtNum(v7.uid(), int64(hdr.Uid))
|
||||
fmtNum(v7.gid(), int64(hdr.Gid))
|
||||
fmtNum(v7.size(), hdr.Size)
|
||||
fmtNum(v7.modTime(), modTime.Unix())
|
||||
v7 := tw.blk.V7()
|
||||
v7.TypeFlag()[0] = hdr.Typeflag
|
||||
fmtStr(v7.Name(), hdr.Name)
|
||||
fmtStr(v7.LinkName(), hdr.Linkname)
|
||||
fmtNum(v7.Mode(), hdr.Mode)
|
||||
fmtNum(v7.UID(), int64(hdr.Uid))
|
||||
fmtNum(v7.GID(), int64(hdr.Gid))
|
||||
fmtNum(v7.Size(), hdr.Size)
|
||||
fmtNum(v7.ModTime(), modTime.Unix())
|
||||
|
||||
ustar := tw.blk.toUSTAR()
|
||||
fmtStr(ustar.userName(), hdr.Uname)
|
||||
fmtStr(ustar.groupName(), hdr.Gname)
|
||||
fmtNum(ustar.devMajor(), hdr.Devmajor)
|
||||
fmtNum(ustar.devMinor(), hdr.Devminor)
|
||||
ustar := tw.blk.USTAR()
|
||||
fmtStr(ustar.UserName(), hdr.Uname)
|
||||
fmtStr(ustar.GroupName(), hdr.Gname)
|
||||
fmtNum(ustar.DevMajor(), hdr.Devmajor)
|
||||
fmtNum(ustar.DevMinor(), hdr.Devminor)
|
||||
|
||||
return &tw.blk
|
||||
}
|
||||
@@ -351,7 +351,7 @@ func (tw *Writer) templateV7Plus(hdr *Header, fmtStr stringFormatter, fmtNum num
|
||||
// It uses format to encode the header format and will write data as the body.
|
||||
// It uses default values for all of the other fields (as BSD and GNU tar does).
|
||||
func (tw *Writer) writeRawFile(name, data string, flag byte, format Format) error {
|
||||
tw.blk.reset()
|
||||
tw.blk.Reset()
|
||||
|
||||
// Best effort for the filename.
|
||||
name = toASCII(name)
|
||||
@@ -361,15 +361,15 @@ func (tw *Writer) writeRawFile(name, data string, flag byte, format Format) erro
|
||||
name = strings.TrimRight(name, "/")
|
||||
|
||||
var f formatter
|
||||
v7 := tw.blk.toV7()
|
||||
v7.typeFlag()[0] = flag
|
||||
f.formatString(v7.name(), name)
|
||||
f.formatOctal(v7.mode(), 0)
|
||||
f.formatOctal(v7.uid(), 0)
|
||||
f.formatOctal(v7.gid(), 0)
|
||||
f.formatOctal(v7.size(), int64(len(data))) // Must be < 8GiB
|
||||
f.formatOctal(v7.modTime(), 0)
|
||||
tw.blk.setFormat(format)
|
||||
v7 := tw.blk.V7()
|
||||
v7.TypeFlag()[0] = flag
|
||||
f.formatString(v7.Name(), name)
|
||||
f.formatOctal(v7.Mode(), 0)
|
||||
f.formatOctal(v7.UID(), 0)
|
||||
f.formatOctal(v7.GID(), 0)
|
||||
f.formatOctal(v7.Size(), int64(len(data))) // Must be < 8GiB
|
||||
f.formatOctal(v7.ModTime(), 0)
|
||||
tw.blk.SetFormat(format)
|
||||
if f.err != nil {
|
||||
return f.err // Only occurs if size condition is violated
|
||||
}
|
||||
@@ -511,13 +511,10 @@ func (fw *regFileWriter) ReadFrom(r io.Reader) (int64, error) {
|
||||
return io.Copy(struct{ io.Writer }{fw}, r)
|
||||
}
|
||||
|
||||
// logicalRemaining implements fileState.logicalRemaining.
|
||||
func (fw regFileWriter) logicalRemaining() int64 {
|
||||
func (fw regFileWriter) LogicalRemaining() int64 {
|
||||
return fw.nb
|
||||
}
|
||||
|
||||
// logicalRemaining implements fileState.physicalRemaining.
|
||||
func (fw regFileWriter) physicalRemaining() int64 {
|
||||
func (fw regFileWriter) PhysicalRemaining() int64 {
|
||||
return fw.nb
|
||||
}
|
||||
|
||||
@@ -529,9 +526,9 @@ type sparseFileWriter struct {
|
||||
}
|
||||
|
||||
func (sw *sparseFileWriter) Write(b []byte) (n int, err error) {
|
||||
overwrite := int64(len(b)) > sw.logicalRemaining()
|
||||
overwrite := int64(len(b)) > sw.LogicalRemaining()
|
||||
if overwrite {
|
||||
b = b[:sw.logicalRemaining()]
|
||||
b = b[:sw.LogicalRemaining()]
|
||||
}
|
||||
|
||||
b0 := b
|
||||
@@ -559,7 +556,7 @@ func (sw *sparseFileWriter) Write(b []byte) (n int, err error) {
|
||||
return n, errMissData // Not possible; implies bug in validation logic
|
||||
case err != nil:
|
||||
return n, err
|
||||
case sw.logicalRemaining() == 0 && sw.physicalRemaining() > 0:
|
||||
case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0:
|
||||
return n, errUnrefData // Not possible; implies bug in validation logic
|
||||
case overwrite:
|
||||
return n, ErrWriteTooLong
|
||||
@@ -581,12 +578,12 @@ func (sw *sparseFileWriter) ReadFrom(r io.Reader) (n int64, err error) {
|
||||
|
||||
var readLastByte bool
|
||||
pos0 := sw.pos
|
||||
for sw.logicalRemaining() > 0 && !readLastByte && err == nil {
|
||||
for sw.LogicalRemaining() > 0 && !readLastByte && err == nil {
|
||||
var nf int64 // Size of fragment
|
||||
dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset()
|
||||
if sw.pos < dataStart { // In a hole fragment
|
||||
nf = dataStart - sw.pos
|
||||
if sw.physicalRemaining() == 0 {
|
||||
if sw.PhysicalRemaining() == 0 {
|
||||
readLastByte = true
|
||||
nf--
|
||||
}
|
||||
@@ -616,18 +613,18 @@ func (sw *sparseFileWriter) ReadFrom(r io.Reader) (n int64, err error) {
|
||||
return n, errMissData // Not possible; implies bug in validation logic
|
||||
case err != nil:
|
||||
return n, err
|
||||
case sw.logicalRemaining() == 0 && sw.physicalRemaining() > 0:
|
||||
case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0:
|
||||
return n, errUnrefData // Not possible; implies bug in validation logic
|
||||
default:
|
||||
return n, ensureEOF(rs)
|
||||
}
|
||||
}
|
||||
|
||||
func (sw sparseFileWriter) logicalRemaining() int64 {
|
||||
func (sw sparseFileWriter) LogicalRemaining() int64 {
|
||||
return sw.sp[len(sw.sp)-1].endOffset() - sw.pos
|
||||
}
|
||||
func (sw sparseFileWriter) physicalRemaining() int64 {
|
||||
return sw.fw.physicalRemaining()
|
||||
func (sw sparseFileWriter) PhysicalRemaining() int64 {
|
||||
return sw.fw.PhysicalRemaining()
|
||||
}
|
||||
|
||||
// zeroWriter may only be written with NULs, otherwise it returns errWriteHole.
|
||||
|
||||
@@ -987,11 +987,11 @@ func TestIssue12594(t *testing.T) {
|
||||
// The prefix field should never appear in the GNU format.
|
||||
var blk block
|
||||
copy(blk[:], b.Bytes())
|
||||
prefix := string(blk.toUSTAR().prefix())
|
||||
prefix := string(blk.USTAR().Prefix())
|
||||
if i := strings.IndexByte(prefix, 0); i >= 0 {
|
||||
prefix = prefix[:i] // Truncate at the NUL terminator
|
||||
}
|
||||
if blk.getFormat() == FormatGNU && len(prefix) > 0 && strings.HasPrefix(name, prefix) {
|
||||
if blk.GetFormat() == FormatGNU && len(prefix) > 0 && strings.HasPrefix(name, prefix) {
|
||||
t.Errorf("test %d, found prefix in GNU format: %s", i, prefix)
|
||||
}
|
||||
|
||||
@@ -1029,7 +1029,7 @@ func TestFileWriter(t *testing.T) {
|
||||
wantCnt int64
|
||||
wantErr error
|
||||
}
|
||||
testRemaining struct { // logicalRemaining() == wantLCnt, physicalRemaining() == wantPCnt
|
||||
testRemaining struct { // LogicalRemaining() == wantLCnt, PhysicalRemaining() == wantPCnt
|
||||
wantLCnt int64
|
||||
wantPCnt int64
|
||||
}
|
||||
@@ -1292,11 +1292,11 @@ func TestFileWriter(t *testing.T) {
|
||||
t.Errorf("test %d.%d, expected %d more operations", i, j, len(f.ops))
|
||||
}
|
||||
case testRemaining:
|
||||
if got := fw.logicalRemaining(); got != tf.wantLCnt {
|
||||
t.Errorf("test %d.%d, logicalRemaining() = %d, want %d", i, j, got, tf.wantLCnt)
|
||||
if got := fw.LogicalRemaining(); got != tf.wantLCnt {
|
||||
t.Errorf("test %d.%d, LogicalRemaining() = %d, want %d", i, j, got, tf.wantLCnt)
|
||||
}
|
||||
if got := fw.physicalRemaining(); got != tf.wantPCnt {
|
||||
t.Errorf("test %d.%d, physicalRemaining() = %d, want %d", i, j, got, tf.wantPCnt)
|
||||
if got := fw.PhysicalRemaining(); got != tf.wantPCnt {
|
||||
t.Errorf("test %d.%d, PhysicalRemaining() = %d, want %d", i, j, got, tf.wantPCnt)
|
||||
}
|
||||
default:
|
||||
t.Fatalf("test %d.%d, unknown test operation: %T", i, j, tf)
|
||||
|
||||
@@ -741,6 +741,9 @@ func (r *Reader) initFileList() {
for _, file := range r.File {
isDir := len(file.Name) > 0 && file.Name[len(file.Name)-1] == '/'
name := toValidName(file.Name)
if name == "" {
continue
}
for dir := path.Dir(name); dir != "."; dir = path.Dir(dir) {
dirs[dir] = true
}
@@ -782,8 +785,11 @@ func fileEntryLess(x, y string) bool {
func (r *Reader) Open(name string) (fs.File, error) {
r.initFileList()

if !fs.ValidPath(name) {
return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrInvalid}
}
e := r.openLookup(name)
if e == nil || !fs.ValidPath(name) {
if e == nil {
return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist}
}
if e.isDir {
@@ -797,7 +803,7 @@ func (r *Reader) Open(name string) (fs.File, error) {
}

func split(name string) (dir, elem string, isDir bool) {
if name[len(name)-1] == '/' {
if len(name) > 0 && name[len(name)-1] == '/' {
isDir = true
name = name[:len(name)-1]
}
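Not part of the change above: a minimal usage sketch of the behaviour this hunk establishes, assuming a hypothetical archive named example.zip that contains test.txt. Names that are not valid io/fs paths are rejected by Open with fs.ErrInvalid before any lookup.

package main

import (
	"archive/zip"
	"errors"
	"fmt"
	"io/fs"
	"log"
)

func main() {
	rc, err := zip.OpenReader("example.zip") // hypothetical archive
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	// A valid fs path behaves as before.
	if f, err := rc.Open("test.txt"); err == nil {
		f.Close()
	}

	// An invalid fs path (leading slash, "..", backslashes) is rejected up front.
	_, err = rc.Open("/test.txt")
	fmt.Println(errors.Is(err, fs.ErrInvalid)) // expected: true with this change
}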
||||
@@ -13,6 +13,7 @@ import (
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
@@ -1202,6 +1203,15 @@ func TestCVE202127919(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Errorf("Error reading file: %v", err)
|
||||
}
|
||||
if len(r.File) != 1 {
|
||||
t.Fatalf("No entries in the file list")
|
||||
}
|
||||
if r.File[0].Name != "../test.txt" {
|
||||
t.Errorf("Unexpected entry name: %s", r.File[0].Name)
|
||||
}
|
||||
if _, err := r.File[0].Open(); err != nil {
|
||||
t.Errorf("Error opening file: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadDataDescriptor(t *testing.T) {
|
||||
@@ -1402,3 +1412,121 @@ func TestCVE202139293(t *testing.T) {
|
||||
t.Fatalf("unexpected error, got: %v, want: %v", err, ErrFormat)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCVE202141772(t *testing.T) {
|
||||
// Archive contains a file whose name is exclusively made up of '/', '\'
|
||||
// characters, or "../", "..\" paths, which would previously cause a panic.
|
||||
//
|
||||
// Length Method Size Cmpr Date Time CRC-32 Name
|
||||
// -------- ------ ------- ---- ---------- ----- -------- ----
|
||||
// 0 Stored 0 0% 08-05-2021 18:32 00000000 /
|
||||
// 0 Stored 0 0% 09-14-2021 12:59 00000000 //
|
||||
// 0 Stored 0 0% 09-14-2021 12:59 00000000 \
|
||||
// 11 Stored 11 0% 09-14-2021 13:04 0d4a1185 /test.txt
|
||||
// -------- ------- --- -------
|
||||
// 11 11 0% 4 files
|
||||
data := []byte{
|
||||
0x50, 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x00, 0x08,
|
||||
0x00, 0x00, 0x06, 0x94, 0x05, 0x53, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x2f, 0x50,
|
||||
0x4b, 0x03, 0x04, 0x0a, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x78, 0x67, 0x2e, 0x53, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x02, 0x00, 0x00, 0x00, 0x2f, 0x2f, 0x50,
|
||||
0x4b, 0x03, 0x04, 0x0a, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x78, 0x67, 0x2e, 0x53, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x01, 0x00, 0x00, 0x00, 0x5c, 0x50, 0x4b,
|
||||
0x03, 0x04, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x91, 0x68, 0x2e, 0x53, 0x85, 0x11, 0x4a, 0x0d,
|
||||
0x0b, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00,
|
||||
0x09, 0x00, 0x00, 0x00, 0x2f, 0x74, 0x65, 0x73,
|
||||
0x74, 0x2e, 0x74, 0x78, 0x74, 0x68, 0x65, 0x6c,
|
||||
0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64,
|
||||
0x50, 0x4b, 0x01, 0x02, 0x14, 0x03, 0x0a, 0x00,
|
||||
0x00, 0x08, 0x00, 0x00, 0x06, 0x94, 0x05, 0x53,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00,
|
||||
0xed, 0x41, 0x00, 0x00, 0x00, 0x00, 0x2f, 0x50,
|
||||
0x4b, 0x01, 0x02, 0x3f, 0x00, 0x0a, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x78, 0x67, 0x2e, 0x53, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x02, 0x00, 0x24, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00,
|
||||
0x00, 0x1f, 0x00, 0x00, 0x00, 0x2f, 0x2f, 0x0a,
|
||||
0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x18, 0x00, 0x93, 0x98, 0x25, 0x57, 0x25,
|
||||
0xa9, 0xd7, 0x01, 0x93, 0x98, 0x25, 0x57, 0x25,
|
||||
0xa9, 0xd7, 0x01, 0x93, 0x98, 0x25, 0x57, 0x25,
|
||||
0xa9, 0xd7, 0x01, 0x50, 0x4b, 0x01, 0x02, 0x3f,
|
||||
0x00, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78,
|
||||
0x67, 0x2e, 0x53, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x20, 0x00, 0x00, 0x00, 0x3f, 0x00, 0x00,
|
||||
0x00, 0x5c, 0x0a, 0x00, 0x20, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x01, 0x00, 0x18, 0x00, 0x93, 0x98,
|
||||
0x25, 0x57, 0x25, 0xa9, 0xd7, 0x01, 0x93, 0x98,
|
||||
0x25, 0x57, 0x25, 0xa9, 0xd7, 0x01, 0x93, 0x98,
|
||||
0x25, 0x57, 0x25, 0xa9, 0xd7, 0x01, 0x50, 0x4b,
|
||||
0x01, 0x02, 0x3f, 0x00, 0x0a, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x91, 0x68, 0x2e, 0x53, 0x85, 0x11,
|
||||
0x4a, 0x0d, 0x0b, 0x00, 0x00, 0x00, 0x0b, 0x00,
|
||||
0x00, 0x00, 0x09, 0x00, 0x24, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
|
||||
0x5e, 0x00, 0x00, 0x00, 0x2f, 0x74, 0x65, 0x73,
|
||||
0x74, 0x2e, 0x74, 0x78, 0x74, 0x0a, 0x00, 0x20,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x18,
|
||||
0x00, 0xa9, 0x80, 0x51, 0x01, 0x26, 0xa9, 0xd7,
|
||||
0x01, 0x31, 0xd1, 0x57, 0x01, 0x26, 0xa9, 0xd7,
|
||||
0x01, 0xdf, 0x48, 0x85, 0xf9, 0x25, 0xa9, 0xd7,
|
||||
0x01, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00,
|
||||
0x00, 0x04, 0x00, 0x04, 0x00, 0x31, 0x01, 0x00,
|
||||
0x00, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
}
|
||||
r, err := NewReader(bytes.NewReader([]byte(data)), int64(len(data)))
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading the archive: %v", err)
|
||||
}
|
||||
entryNames := []string{`/`, `//`, `\`, `/test.txt`}
|
||||
var names []string
|
||||
for _, f := range r.File {
|
||||
names = append(names, f.Name)
|
||||
if _, err := f.Open(); err != nil {
|
||||
t.Errorf("Error opening %q: %v", f.Name, err)
|
||||
}
|
||||
if _, err := r.Open(f.Name); err == nil {
|
||||
t.Errorf("Opening %q with fs.FS API succeeded", f.Name)
|
||||
}
|
||||
}
|
||||
if !reflect.DeepEqual(names, entryNames) {
|
||||
t.Errorf("Unexpected file entries: %q", names)
|
||||
}
|
||||
if _, err := r.Open(""); err == nil {
|
||||
t.Errorf("Opening %q with fs.FS API succeeded", "")
|
||||
}
|
||||
if _, err := r.Open("test.txt"); err != nil {
|
||||
t.Errorf("Error opening %q with fs.FS API: %v", "test.txt", err)
|
||||
}
|
||||
dirEntries, err := fs.ReadDir(r, ".")
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading the root directory: %v", err)
|
||||
}
|
||||
if len(dirEntries) != 1 || dirEntries[0].Name() != "test.txt" {
|
||||
t.Errorf("Unexpected directory entries")
|
||||
for _, dirEntry := range dirEntries {
|
||||
_, err := r.Open(dirEntry.Name())
|
||||
t.Logf("%q (Open error: %v)", dirEntry.Name(), err)
|
||||
}
|
||||
t.FailNow()
|
||||
}
|
||||
info, err := dirEntries[0].Info()
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading info entry: %v", err)
|
||||
}
|
||||
if name := info.Name(); name != "test.txt" {
|
||||
t.Errorf("Inconsistent name in info entry: %v", name)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -68,12 +68,7 @@ func (b *Reader) Size() int { return len(b.buf) }

// Reset discards any buffered data, resets all state, and switches
// the buffered reader to read from r.
// Calling Reset on the zero value of Reader initializes the internal buffer
// to the default size.
func (b *Reader) Reset(r io.Reader) {
if b.buf == nil {
b.buf = make([]byte, defaultBufSize)
}
b.reset(b.buf, r)
}

@@ -595,12 +590,7 @@ func (b *Writer) Size() int { return len(b.buf) }

// Reset discards any unflushed buffered data, clears any error, and
// resets b to write its output to w.
// Calling Reset on the zero value of Writer initializes the internal buffer
// to the default size.
func (b *Writer) Reset(w io.Writer) {
if b.buf == nil {
b.buf = make([]byte, defaultBufSize)
}
b.err = nil
b.n = 0
b.wr = w
@@ -633,14 +623,6 @@ func (b *Writer) Flush() error {
// Available returns how many bytes are unused in the buffer.
func (b *Writer) Available() int { return len(b.buf) - b.n }

// AvailableBuffer returns an empty buffer with b.Available() capacity.
// This buffer is intended to be appended to and
// passed to an immediately succeeding Write call.
// The buffer is only valid until the next write operation on b.
func (b *Writer) AvailableBuffer() []byte {
return b.buf[b.n:][:0]
}

// Buffered returns the number of bytes that have been written into the current buffer.
func (b *Writer) Buffered() int { return b.n }

||||
@@ -10,8 +10,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"testing/iotest"
|
||||
@@ -610,37 +608,6 @@ func TestWriter(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriterAppend(t *testing.T) {
|
||||
got := new(bytes.Buffer)
|
||||
var want []byte
|
||||
rn := rand.New(rand.NewSource(0))
|
||||
w := NewWriterSize(got, 64)
|
||||
for i := 0; i < 100; i++ {
|
||||
// Obtain a buffer to append to.
|
||||
b := w.AvailableBuffer()
|
||||
if w.Available() != cap(b) {
|
||||
t.Fatalf("Available() = %v, want %v", w.Available(), cap(b))
|
||||
}
|
||||
|
||||
// While not recommended, it is valid to append to a shifted buffer.
|
||||
// This forces Write to copy the the input.
|
||||
if rn.Intn(8) == 0 && cap(b) > 0 {
|
||||
b = b[1:1:cap(b)]
|
||||
}
|
||||
|
||||
// Append a random integer of varying width.
|
||||
n := int64(rn.Intn(1 << rn.Intn(30)))
|
||||
want = append(strconv.AppendInt(want, n, 10), ' ')
|
||||
b = append(strconv.AppendInt(b, n, 10), ' ')
|
||||
w.Write(b)
|
||||
}
|
||||
w.Flush()
|
||||
|
||||
if !bytes.Equal(got.Bytes(), want) {
|
||||
t.Errorf("output mismatch:\ngot %s\nwant %s", got.Bytes(), want)
|
||||
}
|
||||
}
|
||||
|
||||
// Check that write errors are returned properly.
|
||||
|
||||
type errorWriterTest struct {
|
||||
@@ -1345,7 +1312,6 @@ func TestReaderReset(t *testing.T) {
|
||||
if string(buf) != "foo" {
|
||||
t.Errorf("buf = %q; want foo", buf)
|
||||
}
|
||||
|
||||
r.Reset(strings.NewReader("bar bar"))
|
||||
all, err := io.ReadAll(r)
|
||||
if err != nil {
|
||||
@@ -1354,23 +1320,12 @@ func TestReaderReset(t *testing.T) {
|
||||
if string(all) != "bar bar" {
|
||||
t.Errorf("ReadAll = %q; want bar bar", all)
|
||||
}
|
||||
|
||||
*r = Reader{} // zero out the Reader
|
||||
r.Reset(strings.NewReader("bar bar"))
|
||||
all, err = io.ReadAll(r)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if string(all) != "bar bar" {
|
||||
t.Errorf("ReadAll = %q; want bar bar", all)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriterReset(t *testing.T) {
|
||||
var buf1, buf2, buf3 bytes.Buffer
|
||||
var buf1, buf2 bytes.Buffer
|
||||
w := NewWriter(&buf1)
|
||||
w.WriteString("foo")
|
||||
|
||||
w.Reset(&buf2) // and not flushed
|
||||
w.WriteString("bar")
|
||||
w.Flush()
|
||||
@@ -1380,17 +1335,6 @@ func TestWriterReset(t *testing.T) {
|
||||
if buf2.String() != "bar" {
|
||||
t.Errorf("buf2 = %q; want bar", buf2.String())
|
||||
}
|
||||
|
||||
*w = Writer{} // zero out the Writer
|
||||
w.Reset(&buf3) // and not flushed
|
||||
w.WriteString("bar")
|
||||
w.Flush()
|
||||
if buf1.String() != "" {
|
||||
t.Errorf("buf1 = %q; want empty", buf1.String())
|
||||
}
|
||||
if buf3.String() != "bar" {
|
||||
t.Errorf("buf3 = %q; want bar", buf3.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestReaderDiscard(t *testing.T) {
|
||||
|
||||
@@ -20,18 +20,6 @@ func ExampleWriter() {
// Output: Hello, world!
}

func ExampleWriter_AvailableBuffer() {
w := bufio.NewWriter(os.Stdout)
for _, i := range []int64{1, 2, 3, 4} {
b := w.AvailableBuffer()
b = strconv.AppendInt(b, i, 10)
b = append(b, ' ')
w.Write(b)
}
w.Flush()
// Output: 1 2 3 4
}

// The simplest use of a Scanner, to read standard input as a set of lines.
func ExampleScanner_lines() {
scanner := bufio.NewScanner(os.Stdin)

||||
@@ -888,6 +888,11 @@ func (as *asciiSet) contains(c byte) bool {
}

func makeCutsetFunc(cutset string) func(r rune) bool {
if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
return func(r rune) bool {
return r == rune(cutset[0])
}
}
if as, isASCII := makeASCIISet(cutset); isASCII {
return func(r rune) bool {
return r < utf8.RuneSelf && as.contains(byte(r))
@@ -906,44 +911,21 @@ func makeCutsetFunc(cutset string) func(r rune) bool {
// Trim returns a subslice of s by slicing off all leading and
// trailing UTF-8-encoded code points contained in cutset.
func Trim(s []byte, cutset string) []byte {
if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
return trimLeftByte(trimRightByte(s, cutset[0]), cutset[0])
}
return TrimFunc(s, makeCutsetFunc(cutset))
}

// TrimLeft returns a subslice of s by slicing off all leading
// UTF-8-encoded code points contained in cutset.
func TrimLeft(s []byte, cutset string) []byte {
if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
return trimLeftByte(s, cutset[0])
}
return TrimLeftFunc(s, makeCutsetFunc(cutset))
}

func trimLeftByte(s []byte, c byte) []byte {
for len(s) > 0 && s[0] == c {
s = s[1:]
}
return s
}

// TrimRight returns a subslice of s by slicing off all trailing
// UTF-8-encoded code points that are contained in cutset.
func TrimRight(s []byte, cutset string) []byte {
if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
return trimRightByte(s, cutset[0])
}
return TrimRightFunc(s, makeCutsetFunc(cutset))
}

func trimRightByte(s []byte, c byte) []byte {
for len(s) > 0 && s[len(s)-1] == c {
s = s[:len(s)-1]
}
return s
}

// TrimSpace returns a subslice of s by slicing off all leading and
// trailing white space, as defined by Unicode.
func TrimSpace(s []byte) []byte {

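Not part of the diff: a minimal usage sketch of the single-byte cutset case that Trim, TrimLeft and TrimRight special-case in the hunk above. The input string is made up for illustration; expected results are noted in comments.

package main

import (
	"bytes"
	"fmt"
)

func main() {
	b := []byte("xxhello worldxx")
	// A one-byte ASCII cutset takes the dedicated single-byte path
	// (trimLeftByte/trimRightByte on the old side of the hunk).
	fmt.Printf("%q\n", bytes.Trim(b, "x"))      // "hello world"
	fmt.Printf("%q\n", bytes.TrimLeft(b, "x"))  // "hello worldxx"
	fmt.Printf("%q\n", bytes.TrimRight(b, "x")) // "xxhello world"
}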
||||
@@ -1251,9 +1251,7 @@ var trimTests = []TrimTest{
|
||||
{"TrimLeft", "abba", "ab", ""},
|
||||
{"TrimRight", "abba", "ab", ""},
|
||||
{"TrimLeft", "abba", "a", "bba"},
|
||||
{"TrimLeft", "abba", "b", "abba"},
|
||||
{"TrimRight", "abba", "a", "abb"},
|
||||
{"TrimRight", "abba", "b", "abba"},
|
||||
{"Trim", "<tag>", "<>", "tag"},
|
||||
{"Trim", "* listitem", " *", "listitem"},
|
||||
{"Trim", `"quote"`, `"`, "quote"},
|
||||
@@ -1965,13 +1963,6 @@ func BenchmarkTrimASCII(b *testing.B) {
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkTrimByte(b *testing.B) {
|
||||
x := []byte(" the quick brown fox ")
|
||||
for i := 0; i < b.N; i++ {
|
||||
Trim(x, " ")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkIndexPeriodic(b *testing.B) {
|
||||
key := []byte{1, 1}
|
||||
for _, skip := range [...]int{2, 4, 8, 16, 32, 64} {
|
||||
|
||||
@@ -165,21 +165,27 @@ func ARM64RegisterExtension(a *obj.Addr, ext string, reg, num int16, isAmount, i
|
||||
}
|
||||
}
|
||||
if reg <= arm64.REG_R31 && reg >= arm64.REG_R0 {
|
||||
if !isAmount {
|
||||
return errors.New("invalid register extension")
|
||||
}
|
||||
switch ext {
|
||||
case "UXTB":
|
||||
if !isAmount {
|
||||
return errors.New("invalid register extension")
|
||||
}
|
||||
if a.Type == obj.TYPE_MEM {
|
||||
return errors.New("invalid shift for the register offset addressing mode")
|
||||
}
|
||||
a.Reg = arm64.REG_UXTB + Rnum
|
||||
case "UXTH":
|
||||
if !isAmount {
|
||||
return errors.New("invalid register extension")
|
||||
}
|
||||
if a.Type == obj.TYPE_MEM {
|
||||
return errors.New("invalid shift for the register offset addressing mode")
|
||||
}
|
||||
a.Reg = arm64.REG_UXTH + Rnum
|
||||
case "UXTW":
|
||||
if !isAmount {
|
||||
return errors.New("invalid register extension")
|
||||
}
|
||||
// effective address of memory is a base register value and an offset register value.
|
||||
if a.Type == obj.TYPE_MEM {
|
||||
a.Index = arm64.REG_UXTW + Rnum
|
||||
@@ -187,33 +193,48 @@ func ARM64RegisterExtension(a *obj.Addr, ext string, reg, num int16, isAmount, i
|
||||
a.Reg = arm64.REG_UXTW + Rnum
|
||||
}
|
||||
case "UXTX":
|
||||
if !isAmount {
|
||||
return errors.New("invalid register extension")
|
||||
}
|
||||
if a.Type == obj.TYPE_MEM {
|
||||
return errors.New("invalid shift for the register offset addressing mode")
|
||||
}
|
||||
a.Reg = arm64.REG_UXTX + Rnum
|
||||
case "SXTB":
|
||||
if a.Type == obj.TYPE_MEM {
|
||||
return errors.New("invalid shift for the register offset addressing mode")
|
||||
if !isAmount {
|
||||
return errors.New("invalid register extension")
|
||||
}
|
||||
a.Reg = arm64.REG_SXTB + Rnum
|
||||
case "SXTH":
|
||||
if !isAmount {
|
||||
return errors.New("invalid register extension")
|
||||
}
|
||||
if a.Type == obj.TYPE_MEM {
|
||||
return errors.New("invalid shift for the register offset addressing mode")
|
||||
}
|
||||
a.Reg = arm64.REG_SXTH + Rnum
|
||||
case "SXTW":
|
||||
if !isAmount {
|
||||
return errors.New("invalid register extension")
|
||||
}
|
||||
if a.Type == obj.TYPE_MEM {
|
||||
a.Index = arm64.REG_SXTW + Rnum
|
||||
} else {
|
||||
a.Reg = arm64.REG_SXTW + Rnum
|
||||
}
|
||||
case "SXTX":
|
||||
if !isAmount {
|
||||
return errors.New("invalid register extension")
|
||||
}
|
||||
if a.Type == obj.TYPE_MEM {
|
||||
a.Index = arm64.REG_SXTX + Rnum
|
||||
} else {
|
||||
a.Reg = arm64.REG_SXTX + Rnum
|
||||
}
|
||||
case "LSL":
|
||||
if !isAmount {
|
||||
return errors.New("invalid register extension")
|
||||
}
|
||||
a.Index = arm64.REG_LSL + Rnum
|
||||
default:
|
||||
return errors.New("unsupported general register extension type: " + ext)
|
||||
|
||||
@@ -793,13 +793,6 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) {
return
}
}
if p.arch.Family == sys.RISCV64 {
prog.From = a[0]
prog.Reg = p.getRegister(prog, op, &a[1])
prog.SetRestArgs([]obj.Addr{a[2]})
prog.To = a[3]
break
}
if p.arch.Family == sys.S390X {
if a[1].Type != obj.TYPE_REG {
p.errorf("second operand must be a register in %s instruction", op)
|
||||
src/cmd/asm/internal/asm/testdata/arm64.s (vendored, 14 changed lines)
@@ -334,8 +334,6 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
|
||||
EONW $0x6006000060060, R5 // EONW $1689262177517664, R5 // 1b0c8052db00a072a5003b4a
|
||||
ORNW $0x6006000060060, R5 // ORNW $1689262177517664, R5 // 1b0c8052db00a072a5003b2a
|
||||
BICSW $0x6006000060060, R5 // BICSW $1689262177517664, R5 // 1b0c8052db00a072a5003b6a
|
||||
AND $1, ZR // fb0340b2ff031b8a
|
||||
ANDW $1, ZR // fb030032ff031b0a
|
||||
// TODO: this could have better encoding
|
||||
ANDW $-1, R10 // 1b0080124a011b0a
|
||||
AND $8, R0, RSP // 1f007d92
|
||||
@@ -371,9 +369,9 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
|
||||
MOVD $-1, R1 // 01008092
|
||||
MOVD $0x210000, R0 // MOVD $2162688, R0 // 2004a0d2
|
||||
MOVD $0xffffffffffffaaaa, R1 // MOVD $-21846, R1 // a1aa8a92
|
||||
MOVW $1, ZR // 3f008052
|
||||
MOVW $1, ZR
|
||||
MOVW $1, R1
|
||||
MOVD $1, ZR // 3f0080d2
|
||||
MOVD $1, ZR
|
||||
MOVD $1, R1
|
||||
MOVK $1, R1
|
||||
MOVD $0x1000100010001000, RSP // MOVD $1152939097061330944, RSP // ff8304b2
|
||||
@@ -388,10 +386,10 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
|
||||
VMOVQ $0x8040201008040202, $0x7040201008040201, V20 // VMOVQ $-9205322385119247870, $8088500183983456769, V20
|
||||
|
||||
// mov(to/from sp)
|
||||
MOVD $0x1002(RSP), R1 // MOVD $4098(RSP), R1 // e107409121080091
|
||||
MOVD $0x1708(RSP), RSP // MOVD $5896(RSP), RSP // ff074091ff231c91
|
||||
MOVD $0x2001(R7), R1 // MOVD $8193(R7), R1 // e108409121040091
|
||||
MOVD $0xffffff(R7), R1 // MOVD $16777215(R7), R1 // e1fc7f9121fc3f91
|
||||
MOVD $0x1002(RSP), R1 // MOVD $4098(RSP), R1 // fb074091610b0091
|
||||
MOVD $0x1708(RSP), RSP // MOVD $5896(RSP), RSP // fb0740917f231c91
|
||||
MOVD $0x2001(R7), R1 // MOVD $8193(R7), R1 // fb08409161070091
|
||||
MOVD $0xffffff(R7), R1 // MOVD $16777215(R7), R1 // fbfc7f9161ff3f91
|
||||
MOVD $-0x1(R7), R1 // MOVD $-1(R7), R1 // e10400d1
|
||||
MOVD $-0x30(R7), R1 // MOVD $-48(R7), R1 // e1c000d1
|
||||
MOVD $-0x708(R7), R1 // MOVD $-1800(R7), R1 // e1201cd1
|
||||
|
||||
src/cmd/asm/internal/asm/testdata/arm64error.s (vendored, 19 changed lines)
@@ -3,7 +3,7 @@
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
TEXT errors(SB),$0
|
||||
AND $1, RSP // ERROR "illegal source register"
|
||||
AND $1, RSP // ERROR "illegal combination"
|
||||
ANDS $1, R0, RSP // ERROR "illegal combination"
|
||||
ADDSW R7->32, R14, R13 // ERROR "shift amount out of range 0 to 31"
|
||||
ADD R1.UXTB<<5, R2, R3 // ERROR "shift amount out of range 0 to 4"
|
||||
@@ -406,12 +406,12 @@ TEXT errors(SB),$0
|
||||
VBIF V0.D2, V1.D2, V2.D2 // ERROR "invalid arrangement"
|
||||
VUADDW V9.B8, V12.H8, V14.B8 // ERROR "invalid arrangement"
|
||||
VUADDW2 V9.B8, V12.S4, V14.S4 // ERROR "operand mismatch"
|
||||
VUMAX V1.D2, V2.D2, V3.D2 // ERROR "invalid arrangement"
|
||||
VUMIN V1.D2, V2.D2, V3.D2 // ERROR "invalid arrangement"
|
||||
VUMAX V1.D2, V2.D2, V3.D2 // ERROR "invalid arrangement"
|
||||
VUMIN V1.D2, V2.D2, V3.D2 // ERROR "invalid arrangement"
|
||||
VUMAX V1.B8, V2.B8, V3.B16 // ERROR "operand mismatch"
|
||||
VUMIN V1.H4, V2.S4, V3.H4 // ERROR "operand mismatch"
|
||||
VSLI $64, V7.D2, V8.D2 // ERROR "shift out of range"
|
||||
VUSRA $0, V7.D2, V8.D2 // ERROR "shift out of range"
|
||||
VUSRA $0, V7.D2, V8.D2 // ERROR "shift out of range"
|
||||
CASPD (R3, R4), (R2), (R8, R9) // ERROR "source register pair must start from even register"
|
||||
CASPD (R2, R3), (R2), (R9, R10) // ERROR "destination register pair must start from even register"
|
||||
CASPD (R2, R4), (R2), (R8, R9) // ERROR "source register pair must be contiguous"
|
||||
@@ -419,15 +419,4 @@ TEXT errors(SB),$0
|
||||
ADD R1>>2, RSP, R3 // ERROR "illegal combination"
|
||||
ADDS R2<<3, R3, RSP // ERROR "unexpected SP reference"
|
||||
CMP R1<<5, RSP // ERROR "the left shift amount out of range 0 to 4"
|
||||
MOVD.P y+8(FP), R1 // ERROR "illegal combination"
|
||||
MOVD.W x-8(SP), R1 // ERROR "illegal combination"
|
||||
LDP.P x+8(FP), (R0, R1) // ERROR "illegal combination"
|
||||
LDP.W x+8(SP), (R0, R1) // ERROR "illegal combination"
|
||||
ADD $0x1234567, R27, R3 // ERROR "cannot use REGTMP as source"
|
||||
ADD $0x3fffffffc000, R27, R5 // ERROR "cannot use REGTMP as source"
|
||||
AND $0x22220000, R27, R4 // ERROR "cannot use REGTMP as source"
|
||||
ANDW $0x6006000060060, R27, R5 // ERROR "cannot use REGTMP as source"
|
||||
STP (R3, R4), 0x1234567(R27) // ERROR "REGTMP used in large offset store"
|
||||
LDP 0x1234567(R27), (R3, R4) // ERROR "REGTMP used in large offset load"
|
||||
STP (R26, R27), 700(R2) // ERROR "cannot use REGTMP as source"
|
||||
RET
|
||||
|
||||
src/cmd/asm/internal/asm/testdata/ppc64.s (vendored, 15 changed lines)
@@ -649,8 +649,6 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0
|
||||
LXVB16X (R3)(R4), VS1 // 7c241ed8
|
||||
LXVW4X (R3)(R4), VS1 // 7c241e18
|
||||
LXV 16(R3), VS1 // f4230011
|
||||
LXV 16(R3), VS33 // f4230019
|
||||
LXV 16(R3), V1 // f4230019
|
||||
LXVL R3, R4, VS1 // 7c23221a
|
||||
LXVLL R3, R4, VS1 // 7c23225a
|
||||
LXVX R3, R4, VS1 // 7c232218
|
||||
@@ -670,13 +668,8 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0
|
||||
MTFPRD R3, F0 // 7c030166
|
||||
MFVRD V0, R3 // 7c030067
|
||||
MFVSRLD VS63,R4 // 7fe40267
|
||||
MFVSRLD V31,R4 // 7fe40267
|
||||
MFVSRWZ VS33,R4 // 7c2400e7
|
||||
MFVSRWZ V1,R4 // 7c2400e7
|
||||
MTVSRD R3, VS1 // 7c230166
|
||||
MTVSRDD R3, R4, VS1 // 7c232366
|
||||
MTVSRDD R3, R4, VS33 // 7c232367
|
||||
MTVSRDD R3, R4, V1 // 7c232367
|
||||
MTVRD R3, V13 // 7da30167
|
||||
MTVSRWA R4, VS31 // 7fe401a6
|
||||
MTVSRWS R4, VS32 // 7c040327
|
||||
@@ -685,8 +678,6 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0
|
||||
XXBRW VS1, VS2 // f04f0f6c
|
||||
XXBRH VS2, VS3 // f067176c
|
||||
XXLAND VS1, VS2, VS3 // f0611410
|
||||
XXLAND V1, V2, V3 // f0611417
|
||||
XXLAND VS33, VS34, VS35 // f0611417
|
||||
XXLANDC VS1, VS2, VS3 // f0611450
|
||||
XXLEQV VS0, VS1, VS2 // f0400dd0
|
||||
XXLNAND VS0, VS1, VS2 // f0400d90
|
||||
@@ -696,17 +687,11 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0
|
||||
XXLORQ VS1, VS2, VS3 // f0611490
|
||||
XXLXOR VS1, VS2, VS3 // f06114d0
|
||||
XXSEL VS1, VS2, VS3, VS4 // f08110f0
|
||||
XXSEL VS33, VS34, VS35, VS36 // f08110ff
|
||||
XXSEL V1, V2, V3, V4 // f08110ff
|
||||
XXMRGHW VS1, VS2, VS3 // f0611090
|
||||
XXMRGLW VS1, VS2, VS3 // f0611190
|
||||
XXSPLTW VS1, $1, VS2 // f0410a90
|
||||
XXSPLTW VS33, $1, VS34 // f0410a93
|
||||
XXSPLTW V1, $1, V2 // f0410a93
|
||||
XXPERM VS1, VS2, VS3 // f06110d0
|
||||
XXSLDWI VS1, VS2, $1, VS3 // f0611110
|
||||
XXSLDWI V1, V2, $1, V3 // f0611117
|
||||
XXSLDWI VS33, VS34, $1, VS35 // f0611117
|
||||
XSCVDPSP VS1, VS2 // f0400c24
|
||||
XVCVDPSP VS1, VS2 // f0400e24
|
||||
XSCVSXDDP VS1, VS2 // f0400de0
|
||||
|
||||
src/cmd/asm/internal/asm/testdata/riscv64.s (vendored, 91 changed lines)
@@ -10,35 +10,20 @@ start:
|
||||
|
||||
// 2.4: Integer Computational Instructions
|
||||
|
||||
ADDI $2047, X5 // 9382f27f
|
||||
ADDI $-2048, X5 // 93820280
|
||||
ADDI $2048, X5 // 9382024093820240
|
||||
ADDI $-2049, X5 // 938202c09382f2bf
|
||||
ADDI $4094, X5 // 9382f27f9382f27f
|
||||
ADDI $-4096, X5 // 9382028093820280
|
||||
ADDI $4095, X5 // b71f00009b8fffffb382f201
|
||||
ADDI $-4097, X5 // b7ffffff9b8fffffb382f201
|
||||
ADDI $2047, X5, X6 // 1383f27f
|
||||
ADDI $-2048, X5, X6 // 13830280
|
||||
ADDI $2048, X5, X6 // 1383024013030340
|
||||
ADDI $-2049, X5, X6 // 138302c01303f3bf
|
||||
ADDI $4094, X5, X6 // 1383f27f1303f37f
|
||||
ADDI $-4096, X5, X6 // 1383028013030380
|
||||
ADDI $4095, X5, X6 // b71f00009b8fffff3383f201
|
||||
ADDI $-4097, X5, X6 // b7ffffff9b8fffff3383f201
|
||||
ADDI $2047, X5 // 9382f27f
|
||||
ADDI $-2048, X5 // 93820280
|
||||
|
||||
SLTI $55, X5, X7 // 93a37203
|
||||
SLTIU $55, X5, X7 // 93b37203
|
||||
|
||||
ANDI $1, X5, X6 // 13f31200
|
||||
ANDI $1, X5 // 93f21200
|
||||
ANDI $2048, X5 // b71f00009b8f0f80b3f2f201
|
||||
ORI $1, X5, X6 // 13e31200
|
||||
ORI $1, X5 // 93e21200
|
||||
ORI $2048, X5 // b71f00009b8f0f80b3e2f201
|
||||
XORI $1, X5, X6 // 13c31200
|
||||
XORI $1, X5 // 93c21200
|
||||
XORI $2048, X5 // b71f00009b8f0f80b3c2f201
|
||||
|
||||
SLLI $1, X5, X6 // 13931200
|
||||
SLLI $1, X5 // 93921200
|
||||
@@ -101,15 +86,20 @@ start:
|
||||
SRA $1, X5 // 93d21240
|
||||
|
||||
// 2.5: Control Transfer Instructions
|
||||
JAL X5, 2(PC) // ef028000
|
||||
|
||||
// These jumps and branches get printed as a jump or branch
|
||||
// to 2 because they transfer control to the second instruction
|
||||
// in the function (the first instruction being an invisible
|
||||
// stack pointer adjustment).
|
||||
JAL X5, start // JAL X5, 2 // eff25ff0
|
||||
JALR X6, (X5) // 67830200
|
||||
JALR X6, 4(X5) // 67834200
|
||||
BEQ X5, X6, 2(PC) // 63846200
|
||||
BNE X5, X6, 2(PC) // 63946200
|
||||
BLT X5, X6, 2(PC) // 63c46200
|
||||
BLTU X5, X6, 2(PC) // 63e46200
|
||||
BGE X5, X6, 2(PC) // 63d46200
|
||||
BGEU X5, X6, 2(PC) // 63f46200
|
||||
BEQ X5, X6, start // BEQ X5, X6, 2 // e38c62ee
|
||||
BNE X5, X6, start // BNE X5, X6, 2 // e39a62ee
|
||||
BLT X5, X6, start // BLT X5, X6, 2 // e3c862ee
|
||||
BLTU X5, X6, start // BLTU X5, X6, 2 // e3e662ee
|
||||
BGE X5, X6, start // BGE X5, X6, 2 // e3d462ee
|
||||
BGEU X5, X6, start // BGEU X5, X6, 2 // e3f262ee
|
||||
|
||||
// 2.6: Load and Store Instructions
|
||||
LW (X5), X6 // 03a30200
|
||||
@@ -229,10 +219,6 @@ start:
|
||||
FMVSX X5, F0 // 538002f0
|
||||
FMVXW F0, X5 // d30200e0
|
||||
FMVWX X5, F0 // 538002f0
|
||||
FMADDS F1, F2, F3, F4 // 43822018
|
||||
FMSUBS F1, F2, F3, F4 // 47822018
|
||||
FNMSUBS F1, F2, F3, F4 // 4b822018
|
||||
FNMADDS F1, F2, F3, F4 // 4f822018
|
||||
|
||||
// 11.8: Single-Precision Floating-Point Compare Instructions
|
||||
FEQS F0, F1, X7 // d3a300a0
|
||||
@@ -273,10 +259,6 @@ start:
|
||||
FSGNJXD F1, F0, F2 // 53211022
|
||||
FMVXD F0, X5 // d30200e2
|
||||
FMVDX X5, F0 // 538002f2
|
||||
FMADDD F1, F2, F3, F4 // 4382201a
|
||||
FMSUBD F1, F2, F3, F4 // 4782201a
|
||||
FNMSUBD F1, F2, F3, F4 // 4b82201a
|
||||
FNMADDD F1, F2, F3, F4 // 4f82201a
|
||||
|
||||
// 12.6: Double-Precision Floating-Point Classify Instruction
|
||||
FCLASSD F0, X5 // d31200e2
|
||||
@@ -295,17 +277,11 @@ start:
|
||||
|
||||
// MOV pseudo-instructions
|
||||
MOV X5, X6 // 13830200
|
||||
MOV $2047, X5 // 9302f07f
|
||||
MOV $-2048, X5 // 93020080
|
||||
MOV $2048, X5 // b71200009b820280
|
||||
MOV $-2049, X5 // b7f2ffff9b82f27f
|
||||
MOV $4096, X5 // b7120000
|
||||
MOV $2147479552, X5 // b7f2ff7f
|
||||
MOV $2147483647, X5 // b70200809b82f2ff
|
||||
MOV $-2147483647, X5 // b70200809b821200
|
||||
MOV $2047, X5 // 9b02f07f
|
||||
MOV $-2048, X5 // 9b020080
|
||||
|
||||
// Converted to load of symbol (AUIPC + LD)
|
||||
MOV $4294967296, X5 // 9702000083b20200
|
||||
// Converted to load of symbol.
|
||||
MOV $4294967296, X5 // 97020000
|
||||
|
||||
MOV (X5), X6 // 03b30200
|
||||
MOV 4(X5), X6 // 03b34200
|
||||
@@ -349,11 +325,10 @@ start:
|
||||
NEGW X5 // bb025040
|
||||
NEGW X5, X6 // 3b035040
|
||||
|
||||
// This jumps to the second instruction in the function (the
|
||||
// first instruction is an invisible stack pointer adjustment).
|
||||
JMP start // JMP 2
|
||||
|
||||
JMP 2(PC) // 6f008000
|
||||
// These jumps can get printed as jumps to 2 because they go to the
|
||||
// second instruction in the function (the first instruction is an
|
||||
// invisible stack pointer adjustment).
|
||||
JMP start // JMP 2 // 6ff01fc2
|
||||
JMP (X5) // 67800200
|
||||
JMP 4(X5) // 67804200
|
||||
|
||||
@@ -366,28 +341,26 @@ start:
|
||||
JMP asmtest(SB) // 970f0000
|
||||
|
||||
// Branch pseudo-instructions
|
||||
BEQZ X5, 2(PC) // 63840200
|
||||
BGEZ X5, 2(PC) // 63d40200
|
||||
BGT X5, X6, 2(PC) // 63445300
|
||||
BGTU X5, X6, 2(PC) // 63645300
|
||||
BGTZ X5, 2(PC) // 63445000
|
||||
BLE X5, X6, 2(PC) // 63545300
|
||||
BLEU X5, X6, 2(PC) // 63745300
|
||||
BLEZ X5, 2(PC) // 63545000
|
||||
BLTZ X5, 2(PC) // 63c40200
|
||||
BNEZ X5, 2(PC) // 63940200
|
||||
BEQZ X5, start // BEQZ X5, 2 // e38202c0
|
||||
BGEZ X5, start // BGEZ X5, 2 // e3d002c0
|
||||
BGT X5, X6, start // BGT X5, X6, 2 // e34e53be
|
||||
BGTU X5, X6, start // BGTU X5, X6, 2 // e36c53be
|
||||
BGTZ X5, start // BGTZ X5, 2 // e34a50be
|
||||
BLE X5, X6, start // BLE X5, X6, 2 // e35853be
|
||||
BLEU X5, X6, start // BLEU X5, X6, 2 // e37653be
|
||||
BLEZ X5, start // BLEZ X5, 2 // e35450be
|
||||
BLTZ X5, start // BLTZ X5, 2 // e3c202be
|
||||
BNEZ X5, start // BNEZ X5, 2 // e39002be
|
||||
|
||||
// Set pseudo-instructions
|
||||
SEQZ X15, X15 // 93b71700
|
||||
SNEZ X15, X15 // b337f000
|
||||
|
||||
// F extension
|
||||
FABSS F0, F1 // d3200020
|
||||
FNEGS F0, F1 // d3100020
|
||||
FNES F0, F1, X7 // d3a300a093c31300
|
||||
|
||||
// D extension
|
||||
FABSD F0, F1 // d3200022
|
||||
FNEGD F0, F1 // d3100022
|
||||
FNED F0, F1, X5 // d3a200a293c21200
|
||||
FLTD F0, F1, X5 // d39200a2
|
||||
|
||||
src/cmd/asm/internal/asm/testdata/riscv64error.s (vendored, 12 changed lines)
@@ -3,14 +3,6 @@
// license that can be found in the LICENSE file.

TEXT errors(SB),$0
MOV $errors(SB), (X5) // ERROR "address load must target register"
MOV $8(SP), (X5) // ERROR "address load must target register"
MOVB $8(SP), X5 // ERROR "unsupported address load"
MOVH $8(SP), X5 // ERROR "unsupported address load"
MOVW $8(SP), X5 // ERROR "unsupported address load"
MOVF $8(SP), X5 // ERROR "unsupported address load"
MOV $1234, 0(SP) // ERROR "constant load must target register"
MOV $1234, 8(SP) // ERROR "constant load must target register"
MOV $0, 0(SP) // ERROR "constant load must target register"
MOV $0, 8(SP) // ERROR "constant load must target register"
MOV $1234, 0(SP) // ERROR "constant load must target register"
@@ -19,8 +11,4 @@ TEXT errors(SB),$0
MOVH $1, X5 // ERROR "unsupported constant load"
MOVW $1, X5 // ERROR "unsupported constant load"
MOVF $1, X5 // ERROR "unsupported constant load"
MOVBU X5, (X6) // ERROR "unsupported unsigned store"
MOVHU X5, (X6) // ERROR "unsupported unsigned store"
MOVWU X5, (X6) // ERROR "unsupported unsigned store"

RET
||||
|
||||
@@ -23,13 +23,10 @@ import (
|
||||
"internal/xcoff"
|
||||
"math"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"cmd/internal/str"
|
||||
)
|
||||
|
||||
var debugDefine = flag.Bool("debug-define", false, "print relevant #defines")
|
||||
@@ -385,7 +382,7 @@ func (p *Package) guessKinds(f *File) []*Name {
|
||||
stderr = p.gccErrors(b.Bytes())
|
||||
}
|
||||
if stderr == "" {
|
||||
fatalf("%s produced no output\non input:\n%s", gccBaseCmd[0], b.Bytes())
|
||||
fatalf("%s produced no output\non input:\n%s", p.gccBaseCmd()[0], b.Bytes())
|
||||
}
|
||||
|
||||
completed := false
|
||||
@@ -460,7 +457,7 @@ func (p *Package) guessKinds(f *File) []*Name {
|
||||
}
|
||||
|
||||
if !completed {
|
||||
fatalf("%s did not produce error at completed:1\non input:\n%s\nfull error output:\n%s", gccBaseCmd[0], b.Bytes(), stderr)
|
||||
fatalf("%s did not produce error at completed:1\non input:\n%s\nfull error output:\n%s", p.gccBaseCmd()[0], b.Bytes(), stderr)
|
||||
}
|
||||
|
||||
for i, n := range names {
|
||||
@@ -491,7 +488,7 @@ func (p *Package) guessKinds(f *File) []*Name {
|
||||
// to users debugging preamble mistakes. See issue 8442.
|
||||
preambleErrors := p.gccErrors([]byte(f.Preamble))
|
||||
if len(preambleErrors) > 0 {
|
||||
error_(token.NoPos, "\n%s errors for preamble:\n%s", gccBaseCmd[0], preambleErrors)
|
||||
error_(token.NoPos, "\n%s errors for preamble:\n%s", p.gccBaseCmd()[0], preambleErrors)
|
||||
}
|
||||
|
||||
fatalf("unresolved names")
|
||||
@@ -1548,37 +1545,20 @@ func gofmtPos(n ast.Expr, pos token.Pos) string {
|
||||
return fmt.Sprintf("/*line :%d:%d*/%s", p.Line, p.Column, s)
|
||||
}
|
||||
|
||||
// checkGCCBaseCmd returns the start of the compiler command line.
|
||||
// gccBaseCmd returns the start of the compiler command line.
|
||||
// It uses $CC if set, or else $GCC, or else the compiler recorded
|
||||
// during the initial build as defaultCC.
|
||||
// defaultCC is defined in zdefaultcc.go, written by cmd/dist.
|
||||
//
|
||||
// The compiler command line is split into arguments on whitespace. Quotes
|
||||
// are understood, so arguments may contain whitespace.
|
||||
//
|
||||
// checkGCCBaseCmd confirms that the compiler exists in PATH, returning
|
||||
// an error if it does not.
|
||||
func checkGCCBaseCmd() ([]string, error) {
|
||||
func (p *Package) gccBaseCmd() []string {
|
||||
// Use $CC if set, since that's what the build uses.
|
||||
value := os.Getenv("CC")
|
||||
if value == "" {
|
||||
// Try $GCC if set, since that's what we used to use.
|
||||
value = os.Getenv("GCC")
|
||||
if ret := strings.Fields(os.Getenv("CC")); len(ret) > 0 {
|
||||
return ret
|
||||
}
|
||||
if value == "" {
|
||||
value = defaultCC(goos, goarch)
|
||||
// Try $GCC if set, since that's what we used to use.
|
||||
if ret := strings.Fields(os.Getenv("GCC")); len(ret) > 0 {
|
||||
return ret
|
||||
}
|
||||
args, err := str.SplitQuotedFields(value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(args) == 0 {
|
||||
return nil, errors.New("CC not set and no default found")
|
||||
}
|
||||
if _, err := exec.LookPath(args[0]); err != nil {
|
||||
return nil, fmt.Errorf("C compiler %q not found: %v", args[0], err)
|
||||
}
|
||||
return args[:len(args):len(args)], nil
|
||||
return strings.Fields(defaultCC(goos, goarch))
|
||||
}
|
||||
|
||||
// gccMachine returns the gcc -m flag to use, either "-m32", "-m64" or "-marm".
|
||||
@@ -1624,7 +1604,7 @@ func gccTmp() string {
|
||||
// gccCmd returns the gcc command line to use for compiling
|
||||
// the input.
|
||||
func (p *Package) gccCmd() []string {
|
||||
c := append(gccBaseCmd,
|
||||
c := append(p.gccBaseCmd(),
|
||||
"-w", // no warnings
|
||||
"-Wno-error", // warnings are not errors
|
||||
"-o"+gccTmp(), // write object to tmp
|
||||
@@ -2025,7 +2005,7 @@ func (p *Package) gccDebug(stdin []byte, nnames int) (d *dwarf.Data, ints []int6
|
||||
// #defines that gcc encountered while processing the input
|
||||
// and its included files.
|
||||
func (p *Package) gccDefines(stdin []byte) string {
|
||||
base := append(gccBaseCmd, "-E", "-dM", "-xc")
|
||||
base := append(p.gccBaseCmd(), "-E", "-dM", "-xc")
|
||||
base = append(base, p.gccMachine()...)
|
||||
stdout, _ := runGcc(stdin, append(append(base, p.GccOptions...), "-"))
|
||||
return stdout
|
||||
@@ -3030,31 +3010,6 @@ func upper(s string) string {
|
||||
// so that all fields are exported.
|
||||
func godefsFields(fld []*ast.Field) {
|
||||
prefix := fieldPrefix(fld)
|
||||
|
||||
// Issue 48396: check for duplicate field names.
|
||||
if prefix != "" {
|
||||
names := make(map[string]bool)
|
||||
fldLoop:
|
||||
for _, f := range fld {
|
||||
for _, n := range f.Names {
|
||||
name := n.Name
|
||||
if name == "_" {
|
||||
continue
|
||||
}
|
||||
if name != prefix {
|
||||
name = strings.TrimPrefix(n.Name, prefix)
|
||||
}
|
||||
name = upper(name)
|
||||
if names[name] {
|
||||
// Field name conflict: don't remove prefix.
|
||||
prefix = ""
|
||||
break fldLoop
|
||||
}
|
||||
names[name] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
npad := 0
|
||||
for _, f := range fld {
|
||||
for _, n := range f.Names {
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
@@ -247,7 +248,6 @@ var importSyscall = flag.Bool("import_syscall", true, "import syscall in generat
|
||||
var trimpath = flag.String("trimpath", "", "applies supplied rewrites or trims prefixes to recorded source file paths")
|
||||
|
||||
var goarch, goos, gomips, gomips64 string
|
||||
var gccBaseCmd []string
|
||||
|
||||
func main() {
|
||||
objabi.AddVersionFlag() // -V
|
||||
@@ -305,10 +305,10 @@ func main() {
|
||||
p := newPackage(args[:i])
|
||||
|
||||
// We need a C compiler to be available. Check this.
|
||||
var err error
|
||||
gccBaseCmd, err = checkGCCBaseCmd()
|
||||
gccName := p.gccBaseCmd()[0]
|
||||
_, err := exec.LookPath(gccName)
|
||||
if err != nil {
|
||||
fatalf("%v", err)
|
||||
fatalf("C compiler %q not found: %v", gccName, err)
|
||||
os.Exit(2)
|
||||
}
|
||||
|
||||
|
||||
@@ -59,9 +59,9 @@ func (p *Package) writeDefs() {
	// Write C main file for using gcc to resolve imports.
	fmt.Fprintf(fm, "int main() { return 0; }\n")
	if *importRuntimeCgo {
		fmt.Fprintf(fm, "void crosscall2(void(*fn)(void*) __attribute__((unused)), void *a __attribute__((unused)), int c __attribute__((unused)), __SIZE_TYPE__ ctxt __attribute__((unused))) { }\n")
		fmt.Fprintf(fm, "void crosscall2(void(*fn)(void*), void *a, int c, __SIZE_TYPE__ ctxt) { }\n")
		fmt.Fprintf(fm, "__SIZE_TYPE__ _cgo_wait_runtime_init_done(void) { return 0; }\n")
		fmt.Fprintf(fm, "void _cgo_release_context(__SIZE_TYPE__ ctxt __attribute__((unused))) { }\n")
		fmt.Fprintf(fm, "void _cgo_release_context(__SIZE_TYPE__ ctxt) { }\n")
		fmt.Fprintf(fm, "char* _cgo_topofstack(void) { return (char*)0; }\n")
	} else {
		// If we're not importing runtime/cgo, we *are* runtime/cgo,
@@ -70,8 +70,8 @@ func (p *Package) writeDefs() {
		fmt.Fprintf(fm, "__SIZE_TYPE__ _cgo_wait_runtime_init_done(void);\n")
		fmt.Fprintf(fm, "void _cgo_release_context(__SIZE_TYPE__);\n")
	}
	fmt.Fprintf(fm, "void _cgo_allocate(void *a __attribute__((unused)), int c __attribute__((unused))) { }\n")
	fmt.Fprintf(fm, "void _cgo_panic(void *a __attribute__((unused)), int c __attribute__((unused))) { }\n")
	fmt.Fprintf(fm, "void _cgo_allocate(void *a, int c) { }\n")
	fmt.Fprintf(fm, "void _cgo_panic(void *a, int c) { }\n")
	fmt.Fprintf(fm, "void _cgo_reginit(void) { }\n")

	// Write second Go output: definitions of _C_xxx.
@@ -1054,10 +1054,9 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {

	fmt.Fprintf(fm, "void _cgoexp%s_%s(void* p){}\n", cPrefix, exp.ExpName)

	fmt.Fprintf(fgo2, "\t")

	if gccResult != "void" {
		// Write results back to frame.
		fmt.Fprintf(fgo2, "\t")
		forFieldList(fntype.Results,
			func(i int, aname string, atype ast.Expr) {
				if i > 0 {
@@ -1459,10 +1458,10 @@ const gccProlog = `
  (have a negative array count) and an inscrutable error will come
  out of the compiler and hopefully mention "name".
*/
#define __cgo_compile_assert_eq(x, y, name) typedef char name[(x-y)*(x-y)*-2UL+1UL];
#define __cgo_compile_assert_eq(x, y, name) typedef char name[(x-y)*(x-y)*-2+1];

/* Check at compile time that the sizes we use match our expectations. */
#define __cgo_size_assert(t, n) __cgo_compile_assert_eq(sizeof(t), (size_t)n, _cgo_sizeof_##t##_is_not_##n)
#define __cgo_size_assert(t, n) __cgo_compile_assert_eq(sizeof(t), n, _cgo_sizeof_##t##_is_not_##n)

__cgo_size_assert(char, 1)
__cgo_size_assert(short, 2)

@@ -505,227 +505,6 @@ control bits specified by the ELF AMD64 ABI.

The x87 floating-point control word is not used by Go on amd64.

### arm64 architecture

The arm64 architecture uses R0 – R15 for integer arguments and results.

It uses F0 – F15 for floating-point arguments and results.

*Rationale*: 16 integer registers and 16 floating-point registers are
more than enough for passing arguments and results for practically all
functions (see Appendix). While there are more registers available,
using more registers provides little benefit. Additionally, it will add
overhead on code paths where the number of arguments is not statically
known (e.g. reflect call), and will consume more stack space when there
is only limited stack space available to fit in the nosplit limit.

Registers R16 and R17 are permanent scratch registers. They are also
used as scratch registers by the linker (Go linker and external
linker) in trampolines.

Register R18 is reserved and never used. It is reserved for the OS
on some platforms (e.g. macOS).

Registers R19 – R25 are permanent scratch registers. In addition,
R27 is a permanent scratch register used by the assembler when
expanding instructions.

Floating-point registers F16 – F31 are also permanent scratch
registers.

Special-purpose registers are as follows:

| Register | Call meaning | Return meaning | Body meaning |
| --- | --- | --- | --- |
| RSP | Stack pointer | Same | Same |
| R30 | Link register | Same | Scratch (non-leaf functions) |
| R29 | Frame pointer | Same | Same |
| R28 | Current goroutine | Same | Same |
| R27 | Scratch | Scratch | Scratch |
| R26 | Closure context pointer | Scratch | Scratch |
| R18 | Reserved (not used) | Same | Same |
| ZR | Zero value | Same | Same |

*Rationale*: These register meanings are compatible with Go’s
stack-based calling convention.

*Rationale*: The link register, R30, holds the function return
address at the function entry. For functions that have frames
(including most non-leaf functions), R30 is saved to stack in the
function prologue and restored in the epilogue. Within the function
body, R30 can be used as a scratch register.

*Implementation note*: Registers with fixed meaning at calls but not
in function bodies must be initialized by "injected" calls such as
signal-based panics.

#### Stack layout

The stack pointer, RSP, grows down and is always aligned to 16 bytes.

*Rationale*: The arm64 architecture requires the stack pointer to be
16-byte aligned.

A function's stack frame, after the frame is created, is laid out as
follows:

    +------------------------------+
    | ... locals ...               |
    | ... outgoing arguments ...   |
    | return PC                    | ← RSP points to
    | frame pointer on entry       |
    +------------------------------+ ↓ lower addresses

The "return PC" is loaded to the link register, R30, as part of the
arm64 `CALL` operation.

On entry, a function subtracts from RSP to open its stack frame, and
saves the values of R30 and R29 at the bottom of the frame.
Specifically, R30 is saved at 0(RSP) and R29 is saved at -8(RSP),
after RSP is updated.

A leaf function that does not require any stack space may omit the
saved R30 and R29.

The Go ABI's use of R29 as a frame pointer register is compatible with
the arm64 architecture's requirements, so Go can inter-operate with
platform debuggers and profilers.

This stack layout is used by both register-based (ABIInternal) and
stack-based (ABI0) calling conventions.

#### Flags

The arithmetic status flags (NZCV) are treated like scratch registers
and not preserved across calls.
All other bits in PSTATE are system flags and are not modified by Go.

The floating-point status register (FPSR) is treated like a scratch
register and not preserved across calls.

At calls, the floating-point control register (FPCR) bits are always
set as follows:

| Flag | Bit | Value | Meaning |
| --- | --- | --- | --- |
| DN | 25 | 0 | Propagate NaN operands |
| FZ | 24 | 0 | Do not flush to zero |
| RC | 23/22 | 0 (RN) | Round to nearest, choose even if tied |
| IDE | 15 | 0 | Denormal operations trap disabled |
| IXE | 12 | 0 | Inexact trap disabled |
| UFE | 11 | 0 | Underflow trap disabled |
| OFE | 10 | 0 | Overflow trap disabled |
| DZE | 9 | 0 | Divide-by-zero trap disabled |
| IOE | 8 | 0 | Invalid operations trap disabled |
| NEP | 2 | 0 | Scalar operations do not affect higher elements in vector registers |
| AH | 1 | 0 | No alternate handling of de-normal inputs |
| FIZ | 0 | 0 | Do not zero de-normals |

*Rationale*: Having a fixed FPCR control configuration allows Go
functions to use floating-point and vector (SIMD) operations without
modifying or saving the FPCR.
Functions are allowed to modify it between calls (as long as they
restore it), but as of this writing Go code never does.

### ppc64 architecture

The ppc64 architecture uses R3 – R10 and R14 – R17 for integer arguments
and results.

It uses F1 – F12 for floating-point arguments and results.

Register R31 is a permanent scratch register in Go.

Special-purpose registers used within Go generated code and Go
assembly code are as follows:

| Register | Call meaning | Return meaning | Body meaning |
| --- | --- | --- | --- |
| R0 | Zero value | Same | Same |
| R1 | Stack pointer | Same | Same |
| R2 | TOC register | Same | Same |
| R11 | Closure context pointer | Scratch | Scratch |
| R12 | Function address on indirect calls | Scratch | Scratch |
| R13 | TLS pointer | Same | Same |
| R20,R21 | Scratch | Scratch | Used by duffcopy, duffzero |
| R30 | Current goroutine | Same | Same |
| R31 | Scratch | Scratch | Scratch |
| LR | Link register | Link register | Scratch |

*Rationale*: These register meanings are compatible with Go’s
stack-based calling convention.

The link register, LR, holds the function return
address at the function entry and is set to the correct return
address before exiting the function. It is also used
in some cases as the function address when doing an indirect call.

The register R2 contains the address of the TOC (table of contents), which
contains data or code addresses used when generating position-independent
code. Non-Go code generated when using cgo contains TOC-relative addresses
which depend on R2 holding a valid TOC. Go code compiled with -shared or
-dynlink initializes and maintains R2 and uses it in some cases for
function calls; Go code compiled without these options does not modify R2.

When making a function call, R12 contains the function address for use by the
code to generate R2 at the beginning of the function. R12 can be used for
other purposes within the body of the function, such as trampoline generation.

R20 and R21 are used in duffcopy and duffzero, which could be generated
before arguments are saved, so they should not be used for register arguments.

The Count register CTR can be used as the call target for some branch instructions.
It holds the return address when preemption has occurred.

On PPC64, when a float32 is loaded it becomes a float64 in the register, which is
different from other platforms; this needs to be recognized by the internal
implementation of reflection so that float32 arguments are passed correctly.

Registers R18 - R29 and F13 - F31 are considered scratch registers.

#### Stack layout

The stack pointer, R1, grows down and is aligned to 8 bytes in Go, but changed
to 16 bytes when calling cgo.

A function's stack frame, after the frame is created, is laid out as
follows:

    +------------------------------+
    | ... locals ...               |
    | ... outgoing arguments ...   |
    | 24  TOC register R2 save     | When compiled with -shared/-dynlink
    | 16  Unused in Go             | Not used in Go
    |  8  CR save                  | nonvolatile CR fields
    |  0  return PC                | ← R1 points to
    +------------------------------+ ↓ lower addresses

The "return PC" is loaded to the link register, LR, as part of the
ppc64 `BL` operations.

On entry, a non-leaf function subtracts its stack frame size from R1 to
create its stack frame, and saves the value of LR at the bottom of the frame.

A leaf function that does not require any stack space does not modify R1 and
does not save LR.

*NOTE*: We might need to save the frame pointer on the stack as
in the PPC64 ELF v2 ABI so Go can inter-operate with platform debuggers
and profilers.

This stack layout is used by both register-based (ABIInternal) and
stack-based (ABI0) calling conventions.

#### Flags

The condition register consists of 8 condition code register fields
CR0-CR7. Go generated code only sets and uses CR0, commonly set by
compare functions and used to determine the target of a conditional
branch. The generated code does not set or use CR1-CR7.

The floating point status and control register (FPSCR) is initialized
to 0 by the kernel at startup of the Go program and not changed by
the Go generated code.

## Future directions

### Spill path improvements

@@ -144,7 +144,7 @@ func (pa *ABIParamAssignment) RegisterTypesAndOffsets() ([]*types.Type, []int64)
|
||||
}
|
||||
|
||||
func appendParamTypes(rts []*types.Type, t *types.Type) []*types.Type {
|
||||
w := t.Size()
|
||||
w := t.Width
|
||||
if w == 0 {
|
||||
return rts
|
||||
}
|
||||
@@ -193,12 +193,12 @@ func appendParamTypes(rts []*types.Type, t *types.Type) []*types.Type {
|
||||
// to input offsets, and returns the longer slice and the next unused offset.
|
||||
func appendParamOffsets(offsets []int64, at int64, t *types.Type) ([]int64, int64) {
|
||||
at = align(at, t)
|
||||
w := t.Size()
|
||||
w := t.Width
|
||||
if w == 0 {
|
||||
return offsets, at
|
||||
}
|
||||
if t.IsScalar() || t.IsPtrShaped() {
|
||||
if t.IsComplex() || int(t.Size()) > types.RegSize { // complex and *int64 on 32-bit
|
||||
if t.IsComplex() || int(t.Width) > types.RegSize { // complex and *int64 on 32-bit
|
||||
s := w / 2
|
||||
return append(offsets, at, at+s), at + w
|
||||
} else {
|
||||
@@ -214,7 +214,7 @@ func appendParamOffsets(offsets []int64, at int64, t *types.Type) ([]int64, int6
|
||||
case types.TSTRUCT:
|
||||
for i, f := range t.FieldSlice() {
|
||||
offsets, at = appendParamOffsets(offsets, at, f.Type)
|
||||
if f.Type.Size() == 0 && i == t.NumFields()-1 {
|
||||
if f.Type.Width == 0 && i == t.NumFields()-1 {
|
||||
at++ // last field has zero width
|
||||
}
|
||||
}
|
||||
@@ -531,7 +531,7 @@ type assignState struct {
|
||||
|
||||
// align returns a rounded up to t's alignment
|
||||
func align(a int64, t *types.Type) int64 {
|
||||
return alignTo(a, int(uint8(t.Alignment())))
|
||||
return alignTo(a, int(t.Align))
|
||||
}
|
||||
|
||||
// alignTo returns a rounded up to t, where t must be 0 or a power of 2.
|
||||
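As an aside, the power-of-two rounding that the `align`/`alignTo` helpers in the hunk above rely on is conventionally done with a mask. A minimal, self-contained Go sketch (illustrative only; the example values and the standalone packaging are not taken from this diff):

```go
package main

import "fmt"

// alignTo rounds a up to a multiple of t, where t must be 0 or a power of 2,
// mirroring the behavior documented for the compiler's alignTo helper above.
func alignTo(a int64, t int) int64 {
	if t == 0 {
		return a
	}
	return (a + int64(t) - 1) &^ (int64(t) - 1)
}

func main() {
	fmt.Println(alignTo(13, 8)) // 16: rounded up to the next multiple of 8
	fmt.Println(alignTo(16, 8)) // 16: already aligned
	fmt.Println(alignTo(5, 0))  // 5: t == 0 means no alignment requirement
}
```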
@@ -546,7 +546,7 @@ func alignTo(a int64, t int) int64 {
|
||||
// specified type.
|
||||
func (state *assignState) stackSlot(t *types.Type) int64 {
|
||||
rv := align(state.stackOffset, t)
|
||||
state.stackOffset = rv + t.Size()
|
||||
state.stackOffset = rv + t.Width
|
||||
return rv
|
||||
}
|
||||
|
||||
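The `stackSlot` hunk above is essentially a bump allocator: align the running stack offset for the type, hand back that slot, and advance the offset by the type's size. A hedged toy sketch of that idea (names and packaging are illustrative, not the compiler's API):

```go
package main

import "fmt"

// stackSlots is a toy bump allocator in the spirit of assignState.stackSlot:
// each value is placed at its alignment boundary and the running offset then
// advances by the value's size.
type stackSlots struct {
	offset int64
}

// alloc returns the offset of a new slot for a value with the given size and
// power-of-two alignment.
func (s *stackSlots) alloc(size, align int64) int64 {
	rv := (s.offset + align - 1) &^ (align - 1) // align the current offset
	s.offset = rv + size                        // reserve size bytes
	return rv
}

func main() {
	var s stackSlots
	fmt.Println(s.alloc(1, 1)) // 0
	fmt.Println(s.alloc(8, 8)) // 8 (offset 1 rounded up to 8)
	fmt.Println(s.alloc(4, 4)) // 16
}
```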
@@ -554,7 +554,7 @@ func (state *assignState) stackSlot(t *types.Type) int64 {
|
||||
// that we've just determined to be register-assignable. The number of registers
|
||||
// needed is assumed to be stored in state.pUsed.
|
||||
func (state *assignState) allocateRegs(regs []RegIndex, t *types.Type) []RegIndex {
|
||||
if t.Size() == 0 {
|
||||
if t.Width == 0 {
|
||||
return regs
|
||||
}
|
||||
ri := state.rUsed.intRegs
|
||||
@@ -647,7 +647,7 @@ func (state *assignState) floatUsed() int {
|
||||
// can register allocate, FALSE otherwise (and updates state
|
||||
// accordingly).
|
||||
func (state *assignState) regassignIntegral(t *types.Type) bool {
|
||||
regsNeeded := int(types.Rnd(t.Size(), int64(types.PtrSize)) / int64(types.PtrSize))
|
||||
regsNeeded := int(types.Rnd(t.Width, int64(types.PtrSize)) / int64(types.PtrSize))
|
||||
if t.IsComplex() {
|
||||
regsNeeded = 2
|
||||
}
|
||||
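The `regsNeeded` line in the hunk above rounds the type's size up to a multiple of the pointer size and divides by it (with complex values pinned to two registers). A small sketch of that arithmetic, hedged: the helper names here are illustrative, not the compiler's actual API.

```go
package main

import "fmt"

// rnd rounds size up to a multiple of align (a power of 2), in the spirit of
// the types.Rnd call used in the hunk above.
func rnd(size, align int64) int64 {
	return (size + align - 1) &^ (align - 1)
}

// regsNeeded reports how many pointer-sized registers a value of the given
// size occupies, matching the shape of the regassignIntegral computation.
func regsNeeded(size, ptrSize int64) int {
	return int(rnd(size, ptrSize) / ptrSize)
}

func main() {
	fmt.Println(regsNeeded(1, 8))  // 1
	fmt.Println(regsNeeded(12, 8)) // 2
	fmt.Println(regsNeeded(16, 8)) // 2
}
```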
@@ -722,17 +722,14 @@ func setup() {
|
||||
types.NewField(nxp, fname("len"), ui),
|
||||
types.NewField(nxp, fname("cap"), ui),
|
||||
})
|
||||
types.CalcStructSize(synthSlice)
|
||||
synthString = types.NewStruct(types.NoPkg, []*types.Field{
|
||||
types.NewField(nxp, fname("data"), unsp),
|
||||
types.NewField(nxp, fname("len"), ui),
|
||||
})
|
||||
types.CalcStructSize(synthString)
|
||||
synthIface = types.NewStruct(types.NoPkg, []*types.Field{
|
||||
types.NewField(nxp, fname("f1"), unsp),
|
||||
types.NewField(nxp, fname("f2"), unsp),
|
||||
})
|
||||
types.CalcStructSize(synthIface)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -767,10 +764,10 @@ func (state *assignState) regassign(pt *types.Type) bool {
|
||||
// ABIParamResultInfo held in 'state'.
|
||||
func (state *assignState) assignParamOrReturn(pt *types.Type, n types.Object, isReturn bool) ABIParamAssignment {
|
||||
state.pUsed = RegAmounts{}
|
||||
if pt.Size() == types.BADWIDTH {
|
||||
if pt.Width == types.BADWIDTH {
|
||||
base.Fatalf("should never happen")
|
||||
panic("unreachable")
|
||||
} else if pt.Size() == 0 {
|
||||
} else if pt.Width == 0 {
|
||||
return state.stackAllocate(pt, n)
|
||||
} else if state.regassign(pt) {
|
||||
return state.regAllocate(pt, n, isReturn)
|
||||
|
||||
@@ -18,10 +18,11 @@ func Init(arch *ssagen.ArchInfo) {
|
||||
|
||||
arch.ZeroRange = zerorange
|
||||
arch.Ginsnop = ginsnop
|
||||
arch.Ginsnopdefer = ginsnop
|
||||
|
||||
arch.SSAMarkMoves = ssaMarkMoves
|
||||
arch.SSAGenValue = ssaGenValue
|
||||
arch.SSAGenBlock = ssaGenBlock
|
||||
arch.LoadRegResult = loadRegResult
|
||||
arch.LoadRegResults = loadRegResults
|
||||
arch.SpillArgReg = spillArgReg
|
||||
}
|
||||
|
||||
@@ -57,6 +57,7 @@ func dzDI(b int64) int64 {
|
||||
func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
|
||||
const (
|
||||
r13 = 1 << iota // if R13 is already zeroed.
|
||||
x15 // if X15 is already zeroed. Note: in new ABI, X15 is always zero.
|
||||
)
|
||||
|
||||
if cnt == 0 {
|
||||
@@ -84,6 +85,11 @@ func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.
|
||||
}
|
||||
p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_R13, 0, obj.TYPE_MEM, x86.REG_SP, off)
|
||||
} else if !isPlan9 && cnt <= int64(8*types.RegSize) {
|
||||
if !buildcfg.Experiment.RegabiG && *state&x15 == 0 {
|
||||
p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_REG, x86.REG_X15, 0)
|
||||
*state |= x15
|
||||
}
|
||||
|
||||
for i := int64(0); i < cnt/16; i++ {
|
||||
p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16)
|
||||
}
|
||||
@@ -92,6 +98,10 @@ func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.
|
||||
p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
|
||||
}
|
||||
} else if !isPlan9 && (cnt <= int64(128*types.RegSize)) {
|
||||
if !buildcfg.Experiment.RegabiG && *state&x15 == 0 {
|
||||
p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_REG, x86.REG_X15, 0)
|
||||
*state |= x15
|
||||
}
|
||||
// Save DI to r12. With the amd64 Go register abi, DI can contain
|
||||
// an incoming parameter, whereas R12 is always scratch.
|
||||
p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_DI, 0, obj.TYPE_REG, x86.REG_R12, 0)
|
||||
|
||||
@@ -822,13 +822,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
|
||||
p.To.Type = obj.TYPE_MEM
|
||||
p.To.Reg = v.Args[0].Reg()
|
||||
ssagen.AddAux2(&p.To, v, sc.Off64())
|
||||
case ssa.OpAMD64MOVOstoreconst:
|
||||
sc := v.AuxValAndOff()
|
||||
if sc.Val() != 0 {
|
||||
v.Fatalf("MOVO for non zero constants not implemented: %s", v.LongString())
|
||||
}
|
||||
|
||||
if s.ABI != obj.ABIInternal {
|
||||
case ssa.OpAMD64MOVOstorezero:
|
||||
if !buildcfg.Experiment.RegabiG || s.ABI != obj.ABIInternal {
|
||||
// zero X15 manually
|
||||
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
|
||||
}
|
||||
@@ -837,8 +832,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
|
||||
p.From.Reg = x86.REG_X15
|
||||
p.To.Type = obj.TYPE_MEM
|
||||
p.To.Reg = v.Args[0].Reg()
|
||||
ssagen.AddAux2(&p.To, v, sc.Off64())
|
||||
|
||||
ssagen.AddAux(&p.To, v)
|
||||
case ssa.OpAMD64MOVQstoreconstidx1, ssa.OpAMD64MOVQstoreconstidx8, ssa.OpAMD64MOVLstoreconstidx1, ssa.OpAMD64MOVLstoreconstidx4, ssa.OpAMD64MOVWstoreconstidx1, ssa.OpAMD64MOVWstoreconstidx2, ssa.OpAMD64MOVBstoreconstidx1,
|
||||
ssa.OpAMD64ADDLconstmodifyidx1, ssa.OpAMD64ADDLconstmodifyidx4, ssa.OpAMD64ADDLconstmodifyidx8, ssa.OpAMD64ADDQconstmodifyidx1, ssa.OpAMD64ADDQconstmodifyidx8,
|
||||
ssa.OpAMD64ANDLconstmodifyidx1, ssa.OpAMD64ANDLconstmodifyidx4, ssa.OpAMD64ANDLconstmodifyidx8, ssa.OpAMD64ANDQconstmodifyidx1, ssa.OpAMD64ANDQconstmodifyidx8,
|
||||
@@ -920,7 +914,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
|
||||
p.To.Type = obj.TYPE_REG
|
||||
p.To.Reg = v.Reg()
|
||||
case ssa.OpAMD64DUFFZERO:
|
||||
if s.ABI != obj.ABIInternal {
|
||||
if !buildcfg.Experiment.RegabiG || s.ABI != obj.ABIInternal {
|
||||
// zero X15 manually
|
||||
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
|
||||
}
|
||||
@@ -1003,30 +997,22 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
|
||||
// Closure pointer is DX.
|
||||
ssagen.CheckLoweredGetClosurePtr(v)
|
||||
case ssa.OpAMD64LoweredGetG:
|
||||
if s.ABI == obj.ABIInternal {
|
||||
if buildcfg.Experiment.RegabiG && s.ABI == obj.ABIInternal {
|
||||
v.Fatalf("LoweredGetG should not appear in ABIInternal")
|
||||
}
|
||||
r := v.Reg()
|
||||
getgFromTLS(s, r)
|
||||
case ssa.OpAMD64CALLstatic, ssa.OpAMD64CALLtail:
|
||||
if s.ABI == obj.ABI0 && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABIInternal {
|
||||
case ssa.OpAMD64CALLstatic:
|
||||
if buildcfg.Experiment.RegabiG && s.ABI == obj.ABI0 && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABIInternal {
|
||||
// zeroing X15 when entering ABIInternal from ABI0
|
||||
if buildcfg.GOOS != "plan9" { // do not use SSE on Plan 9
|
||||
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
|
||||
}
|
||||
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
|
||||
// set G register from TLS
|
||||
getgFromTLS(s, x86.REG_R14)
|
||||
}
|
||||
if v.Op == ssa.OpAMD64CALLtail {
|
||||
s.TailCall(v)
|
||||
break
|
||||
}
|
||||
s.Call(v)
|
||||
if s.ABI == obj.ABIInternal && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABI0 {
|
||||
if buildcfg.Experiment.RegabiG && s.ABI == obj.ABIInternal && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABI0 {
|
||||
// zeroing X15 when entering ABIInternal from ABI0
|
||||
if buildcfg.GOOS != "plan9" { // do not use SSE on Plan 9
|
||||
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
|
||||
}
|
||||
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
|
||||
// set G register from TLS
|
||||
getgFromTLS(s, x86.REG_R14)
|
||||
}
|
||||
@@ -1235,10 +1221,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
|
||||
p.To.Type = obj.TYPE_MEM
|
||||
p.To.Reg = v.Args[0].Reg()
|
||||
ssagen.AddAux(&p.To, v)
|
||||
case ssa.OpAMD64PrefetchT0, ssa.OpAMD64PrefetchNTA:
|
||||
p := s.Prog(v.Op.Asm())
|
||||
p.From.Type = obj.TYPE_MEM
|
||||
p.From.Reg = v.Args[0].Reg()
|
||||
case ssa.OpClobber:
|
||||
p := s.Prog(x86.AMOVL)
|
||||
p.From.Type = obj.TYPE_CONST
|
||||
@@ -1318,9 +1300,20 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
|
||||
p.To.Type = obj.TYPE_BRANCH
|
||||
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
|
||||
}
|
||||
case ssa.BlockExit, ssa.BlockRetJmp:
|
||||
case ssa.BlockExit:
|
||||
case ssa.BlockRet:
|
||||
s.Prog(obj.ARET)
|
||||
case ssa.BlockRetJmp:
|
||||
if buildcfg.Experiment.RegabiG && s.ABI == obj.ABI0 && b.Aux.(*obj.LSym).ABI() == obj.ABIInternal {
|
||||
// zeroing X15 when entering ABIInternal from ABI0
|
||||
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
|
||||
// set G register from TLS
|
||||
getgFromTLS(s, x86.REG_R14)
|
||||
}
|
||||
p := s.Prog(obj.ARET)
|
||||
p.To.Type = obj.TYPE_MEM
|
||||
p.To.Name = obj.NAME_EXTERN
|
||||
p.To.Sym = b.Aux.(*obj.LSym)
|
||||
|
||||
case ssa.BlockAMD64EQF:
|
||||
s.CombJump(b, next, &eqfJumps)
|
||||
@@ -1355,15 +1348,20 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
|
||||
}
|
||||
}
|
||||
|
||||
func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
|
||||
p := s.Prog(loadByType(t))
|
||||
p.From.Type = obj.TYPE_MEM
|
||||
p.From.Name = obj.NAME_AUTO
|
||||
p.From.Sym = n.Linksym()
|
||||
p.From.Offset = n.FrameOffset() + off
|
||||
p.To.Type = obj.TYPE_REG
|
||||
p.To.Reg = reg
|
||||
return p
|
||||
func loadRegResults(s *ssagen.State, f *ssa.Func) {
|
||||
for _, o := range f.OwnAux.ABIInfo().OutParams() {
|
||||
n := o.Name.(*ir.Name)
|
||||
rts, offs := o.RegisterTypesAndOffsets()
|
||||
for i := range o.Registers {
|
||||
p := s.Prog(loadByType(rts[i]))
|
||||
p.From.Type = obj.TYPE_MEM
|
||||
p.From.Name = obj.NAME_AUTO
|
||||
p.From.Sym = n.Linksym()
|
||||
p.From.Offset = n.FrameOffset() + offs[i]
|
||||
p.To.Type = obj.TYPE_REG
|
||||
p.To.Reg = ssa.ObjRegForAbiReg(o.Registers[i], f.Config)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
|
||||
|
||||
@@ -18,6 +18,7 @@ func Init(arch *ssagen.ArchInfo) {
|
||||
arch.SoftFloat = buildcfg.GOARM == 5
|
||||
arch.ZeroRange = zerorange
|
||||
arch.Ginsnop = ginsnop
|
||||
arch.Ginsnopdefer = ginsnop
|
||||
|
||||
arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
|
||||
arch.SSAGenValue = ssaGenValue
|
||||
|
||||
@@ -696,8 +696,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
|
||||
p.To.Reg = v.Reg()
|
||||
case ssa.OpARMCALLstatic, ssa.OpARMCALLclosure, ssa.OpARMCALLinter:
|
||||
s.Call(v)
|
||||
case ssa.OpARMCALLtail:
|
||||
s.TailCall(v)
|
||||
case ssa.OpARMCALLudiv:
|
||||
p := s.Prog(obj.ACALL)
|
||||
p.To.Type = obj.TYPE_MEM
|
||||
@@ -938,11 +936,17 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
|
||||
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
|
||||
}
|
||||
|
||||
case ssa.BlockExit, ssa.BlockRetJmp:
|
||||
case ssa.BlockExit:
|
||||
|
||||
case ssa.BlockRet:
|
||||
s.Prog(obj.ARET)
|
||||
|
||||
case ssa.BlockRetJmp:
|
||||
p := s.Prog(obj.ARET)
|
||||
p.To.Type = obj.TYPE_MEM
|
||||
p.To.Name = obj.NAME_EXTERN
|
||||
p.To.Sym = b.Aux.(*obj.LSym)
|
||||
|
||||
case ssa.BlockARMEQ, ssa.BlockARMNE,
|
||||
ssa.BlockARMLT, ssa.BlockARMGE,
|
||||
ssa.BlockARMLE, ssa.BlockARMGT,
|
||||
|
||||
@@ -18,10 +18,9 @@ func Init(arch *ssagen.ArchInfo) {
|
||||
arch.PadFrame = padframe
|
||||
arch.ZeroRange = zerorange
|
||||
arch.Ginsnop = ginsnop
|
||||
arch.Ginsnopdefer = ginsnop
|
||||
|
||||
arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
|
||||
arch.SSAGenValue = ssaGenValue
|
||||
arch.SSAGenBlock = ssaGenBlock
|
||||
arch.LoadRegResult = loadRegResult
|
||||
arch.SpillArgReg = spillArgReg
|
||||
}
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
"cmd/compile/internal/base"
|
||||
"cmd/compile/internal/ir"
|
||||
"cmd/compile/internal/logopt"
|
||||
"cmd/compile/internal/objw"
|
||||
"cmd/compile/internal/ssa"
|
||||
"cmd/compile/internal/ssagen"
|
||||
"cmd/compile/internal/types"
|
||||
@@ -162,18 +161,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
|
||||
p.From.Type = obj.TYPE_REG
|
||||
p.From.Reg = v.Args[0].Reg()
|
||||
ssagen.AddrAuto(&p.To, v)
|
||||
case ssa.OpArgIntReg, ssa.OpArgFloatReg:
|
||||
// The assembler needs to wrap the entry safepoint/stack growth code with spill/unspill
|
||||
// The loop only runs once.
|
||||
for _, a := range v.Block.Func.RegArgs {
|
||||
// Pass the spill/unspill information along to the assembler, offset by size of
|
||||
// the saved LR slot.
|
||||
addr := ssagen.SpillSlotAddr(a, arm64.REGSP, base.Ctxt.FixedFrameSize())
|
||||
s.FuncInfo().AddSpill(
|
||||
obj.RegSpill{Reg: a.Reg, Addr: addr, Unspill: loadByType(a.Type), Spill: storeByType(a.Type)})
|
||||
}
|
||||
v.Block.Func.RegArgs = nil
|
||||
ssagen.CheckArgReg(v)
|
||||
case ssa.OpARM64ADD,
|
||||
ssa.OpARM64SUB,
|
||||
ssa.OpARM64AND,
|
||||
@@ -315,8 +302,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
|
||||
genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm64.SHIFT_LR, v.AuxInt)
|
||||
case ssa.OpARM64MVNshiftRA, ssa.OpARM64NEGshiftRA:
|
||||
genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm64.SHIFT_AR, v.AuxInt)
|
||||
case ssa.OpARM64MVNshiftRO:
|
||||
genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm64.SHIFT_ROR, v.AuxInt)
|
||||
case ssa.OpARM64ADDshiftLL,
|
||||
ssa.OpARM64SUBshiftLL,
|
||||
ssa.OpARM64ANDshiftLL,
|
||||
@@ -344,13 +329,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
|
||||
ssa.OpARM64ORNshiftRA,
|
||||
ssa.OpARM64BICshiftRA:
|
||||
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_AR, v.AuxInt)
|
||||
case ssa.OpARM64ANDshiftRO,
|
||||
ssa.OpARM64ORshiftRO,
|
||||
ssa.OpARM64XORshiftRO,
|
||||
ssa.OpARM64EONshiftRO,
|
||||
ssa.OpARM64ORNshiftRO,
|
||||
ssa.OpARM64BICshiftRO:
|
||||
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_ROR, v.AuxInt)
|
||||
case ssa.OpARM64MOVDconst:
|
||||
p := s.Prog(v.Op.Asm())
|
||||
p.From.Type = obj.TYPE_CONST
|
||||
@@ -398,8 +376,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
|
||||
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_LR, v.AuxInt)
|
||||
case ssa.OpARM64CMPshiftRA, ssa.OpARM64CMNshiftRA, ssa.OpARM64TSTshiftRA:
|
||||
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_AR, v.AuxInt)
|
||||
case ssa.OpARM64TSTshiftRO:
|
||||
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_ROR, v.AuxInt)
|
||||
case ssa.OpARM64MOVDaddr:
|
||||
p := s.Prog(arm64.AMOVD)
|
||||
p.From.Type = obj.TYPE_ADDR
|
||||
@@ -1057,8 +1033,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
|
||||
p4.To.SetTarget(p)
|
||||
case ssa.OpARM64CALLstatic, ssa.OpARM64CALLclosure, ssa.OpARM64CALLinter:
|
||||
s.Call(v)
|
||||
case ssa.OpARM64CALLtail:
|
||||
s.TailCall(v)
|
||||
case ssa.OpARM64LoweredWB:
|
||||
p := s.Prog(obj.ACALL)
|
||||
p.To.Type = obj.TYPE_MEM
|
||||
@@ -1108,12 +1082,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
|
||||
p.From.Reg = condBits[v.Op]
|
||||
p.To.Type = obj.TYPE_REG
|
||||
p.To.Reg = v.Reg()
|
||||
case ssa.OpARM64PRFM:
|
||||
p := s.Prog(v.Op.Asm())
|
||||
p.From.Type = obj.TYPE_MEM
|
||||
p.From.Reg = v.Args[0].Reg()
|
||||
p.To.Type = obj.TYPE_CONST
|
||||
p.To.Offset = v.AuxInt
|
||||
case ssa.OpARM64LoweredGetClosurePtr:
|
||||
// Closure pointer is R26 (arm64.REGCTXT).
|
||||
ssagen.CheckLoweredGetClosurePtr(v)
|
||||
@@ -1133,34 +1101,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
|
||||
v.Fatalf("FlagConstant op should never make it to codegen %v", v.LongString())
|
||||
case ssa.OpARM64InvertFlags:
|
||||
v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
|
||||
case ssa.OpClobber:
|
||||
// MOVW $0xdeaddead, REGTMP
|
||||
// MOVW REGTMP, (slot)
|
||||
// MOVW REGTMP, 4(slot)
|
||||
p := s.Prog(arm64.AMOVW)
|
||||
p.From.Type = obj.TYPE_CONST
|
||||
p.From.Offset = 0xdeaddead
|
||||
p.To.Type = obj.TYPE_REG
|
||||
p.To.Reg = arm64.REGTMP
|
||||
p = s.Prog(arm64.AMOVW)
|
||||
p.From.Type = obj.TYPE_REG
|
||||
p.From.Reg = arm64.REGTMP
|
||||
p.To.Type = obj.TYPE_MEM
|
||||
p.To.Reg = arm64.REGSP
|
||||
ssagen.AddAux(&p.To, v)
|
||||
p = s.Prog(arm64.AMOVW)
|
||||
p.From.Type = obj.TYPE_REG
|
||||
p.From.Reg = arm64.REGTMP
|
||||
p.To.Type = obj.TYPE_MEM
|
||||
p.To.Reg = arm64.REGSP
|
||||
ssagen.AddAux2(&p.To, v, v.AuxInt+4)
|
||||
case ssa.OpClobberReg:
|
||||
x := uint64(0xdeaddeaddeaddead)
|
||||
p := s.Prog(arm64.AMOVD)
|
||||
p.From.Type = obj.TYPE_CONST
|
||||
p.From.Offset = int64(x)
|
||||
p.To.Type = obj.TYPE_REG
|
||||
p.To.Reg = v.Reg()
|
||||
case ssa.OpClobber, ssa.OpClobberReg:
|
||||
// TODO: implement for clobberdead experiment. Nop is ok for now.
|
||||
default:
|
||||
v.Fatalf("genValue not implemented: %s", v.LongString())
|
||||
}
|
||||
@@ -1254,11 +1196,17 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
|
||||
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
|
||||
}
|
||||
|
||||
case ssa.BlockExit, ssa.BlockRetJmp:
|
||||
case ssa.BlockExit:
|
||||
|
||||
case ssa.BlockRet:
|
||||
s.Prog(obj.ARET)
|
||||
|
||||
case ssa.BlockRetJmp:
|
||||
p := s.Prog(obj.ARET)
|
||||
p.To.Type = obj.TYPE_MEM
|
||||
p.To.Name = obj.NAME_EXTERN
|
||||
p.To.Sym = b.Aux.(*obj.LSym)
|
||||
|
||||
case ssa.BlockARM64EQ, ssa.BlockARM64NE,
|
||||
ssa.BlockARM64LT, ssa.BlockARM64GE,
|
||||
ssa.BlockARM64LE, ssa.BlockARM64GT,
|
||||
@@ -1318,22 +1266,3 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
|
||||
b.Fatalf("branch not implemented: %s", b.LongString())
|
||||
}
|
||||
}
|
||||
|
||||
func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
|
||||
p := s.Prog(loadByType(t))
|
||||
p.From.Type = obj.TYPE_MEM
|
||||
p.From.Name = obj.NAME_AUTO
|
||||
p.From.Sym = n.Linksym()
|
||||
p.From.Offset = n.FrameOffset() + off
|
||||
p.To.Type = obj.TYPE_REG
|
||||
p.To.Reg = reg
|
||||
return p
|
||||
}
|
||||
|
||||
func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
|
||||
p = pp.Append(p, storeByType(t), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off)
|
||||
p.To.Name = obj.NAME_PARAM
|
||||
p.To.Sym = n.Linksym()
|
||||
p.Pos = p.Pos.WithNotStmt()
|
||||
return p
|
||||
}
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
// Copyright 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !compiler_bootstrap
|
||||
// +build !compiler_bootstrap
|
||||
|
||||
package base
|
||||
|
||||
// CompilerBootstrap reports whether the current compiler binary was
|
||||
// built with -tags=compiler_bootstrap.
|
||||
const CompilerBootstrap = false
|
||||
@@ -1,12 +0,0 @@
|
||||
// Copyright 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build compiler_bootstrap
|
||||
// +build compiler_bootstrap
|
||||
|
||||
package base
|
||||
|
||||
// CompilerBootstrap reports whether the current compiler binary was
|
||||
// built with -tags=compiler_bootstrap.
|
||||
const CompilerBootstrap = true
|
||||
@@ -44,11 +44,8 @@ type DebugFlags struct {
|
||||
Panic int `help:"show all compiler panics"`
|
||||
Slice int `help:"print information about slice compilation"`
|
||||
SoftFloat int `help:"force compiler to emit soft-float code"`
|
||||
SyncFrames int `help:"how many writer stack frames to include at sync points in unified export data"`
|
||||
TypeAssert int `help:"print information about type assertion inlining"`
|
||||
TypecheckInl int `help:"eager typechecking of inline function bodies"`
|
||||
Unified int `help:"enable unified IR construction"`
|
||||
UnifiedQuirks int `help:"enable unified IR construction's quirks mode"`
|
||||
WB int `help:"print information about write barriers"`
|
||||
ABIWrap int `help:"print information about ABI wrapper generation"`
|
||||
|
||||
|
||||
@@ -140,7 +140,6 @@ type CmdFlags struct {
|
||||
|
||||
// ParseFlags parses the command-line flags into Flag.
|
||||
func ParseFlags() {
|
||||
Flag.G = 3
|
||||
Flag.I = addImportDir
|
||||
|
||||
Flag.LowerC = 1
|
||||
@@ -160,11 +159,7 @@ func ParseFlags() {
|
||||
Flag.LinkShared = &Ctxt.Flag_linkshared
|
||||
Flag.Shared = &Ctxt.Flag_shared
|
||||
Flag.WB = true
|
||||
|
||||
Debug.InlFuncsWithClosures = 1
|
||||
if buildcfg.Experiment.Unified {
|
||||
Debug.Unified = 1
|
||||
}
|
||||
|
||||
Debug.Checkptr = -1 // so we can tell whether it is set explicitly
|
||||
|
||||
|
||||
@@ -233,27 +233,6 @@ func FatalfAt(pos src.XPos, format string, args ...interface{}) {
|
||||
ErrorExit()
|
||||
}
|
||||
|
||||
// Assert reports "assertion failed" with Fatalf, unless b is true.
|
||||
func Assert(b bool) {
|
||||
if !b {
|
||||
Fatalf("assertion failed")
|
||||
}
|
||||
}
|
||||
|
||||
// Assertf reports a fatal error with Fatalf, unless b is true.
|
||||
func Assertf(b bool, format string, args ...interface{}) {
|
||||
if !b {
|
||||
Fatalf(format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
// AssertfAt reports a fatal error with FatalfAt, unless b is true.
|
||||
func AssertfAt(b bool, pos src.XPos, format string, args ...interface{}) {
|
||||
if !b {
|
||||
FatalfAt(pos, format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
// hcrash crashes the compiler when -h is set, to find out where a message is generated.
|
||||
func hcrash() {
|
||||
if Flag.LowerH != 0 {
|
||||
|
||||
@@ -38,7 +38,6 @@ func Func(fn *ir.Func) {
|
||||
}
|
||||
}
|
||||
|
||||
ir.VisitList(fn.Body, markHiddenClosureDead)
|
||||
fn.Body = []ir.Node{ir.NewBlockStmt(base.Pos, nil)}
|
||||
}
|
||||
|
||||
@@ -63,11 +62,9 @@ func stmts(nn *ir.Nodes) {
|
||||
if ir.IsConst(n.Cond, constant.Bool) {
|
||||
var body ir.Nodes
|
||||
if ir.BoolVal(n.Cond) {
|
||||
ir.VisitList(n.Else, markHiddenClosureDead)
|
||||
n.Else = ir.Nodes{}
|
||||
body = n.Body
|
||||
} else {
|
||||
ir.VisitList(n.Body, markHiddenClosureDead)
|
||||
n.Body = ir.Nodes{}
|
||||
body = n.Else
|
||||
}
|
||||
@@ -117,7 +114,6 @@ func stmts(nn *ir.Nodes) {
|
||||
}
|
||||
|
||||
if cut {
|
||||
ir.VisitList((*nn)[i+1:len(*nn)], markHiddenClosureDead)
|
||||
*nn = (*nn)[:i+1]
|
||||
break
|
||||
}
|
||||
@@ -154,13 +150,3 @@ func expr(n ir.Node) ir.Node {
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func markHiddenClosureDead(n ir.Node) {
|
||||
if n.Op() != ir.OCLOSURE {
|
||||
return
|
||||
}
|
||||
clo := n.(*ir.ClosureExpr)
|
||||
if clo.Func.IsHiddenClosure() {
|
||||
clo.Func.SetIsDeadcodeClosure(true)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -214,10 +214,9 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir
|
||||
Type: base.Ctxt.Lookup(typename),
|
||||
DeclFile: declpos.RelFilename(),
|
||||
DeclLine: declpos.RelLine(),
|
||||
DeclCol: declpos.RelCol(),
|
||||
DeclCol: declpos.Col(),
|
||||
InlIndex: int32(inlIndex),
|
||||
ChildIndex: -1,
|
||||
DictIndex: n.DictIndex,
|
||||
})
|
||||
// Record go type of n to ensure that it gets emitted by the linker.
|
||||
fnsym.Func().RecordAutoType(reflectdata.TypeLinksym(n.Type()))
|
||||
@@ -372,10 +371,9 @@ func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var {
|
||||
Type: base.Ctxt.Lookup(typename),
|
||||
DeclFile: declpos.RelFilename(),
|
||||
DeclLine: declpos.RelLine(),
|
||||
DeclCol: declpos.RelCol(),
|
||||
DeclCol: declpos.Col(),
|
||||
InlIndex: int32(inlIndex),
|
||||
ChildIndex: -1,
|
||||
DictIndex: n.DictIndex,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -477,10 +475,9 @@ func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var
|
||||
StackOffset: ssagen.StackOffset(debug.Slots[debug.VarSlots[varID][0]]),
|
||||
DeclFile: declpos.RelFilename(),
|
||||
DeclLine: declpos.RelLine(),
|
||||
DeclCol: declpos.RelCol(),
|
||||
DeclCol: declpos.Col(),
|
||||
InlIndex: int32(inlIndex),
|
||||
ChildIndex: -1,
|
||||
DictIndex: n.DictIndex,
|
||||
}
|
||||
list := debug.LocationLists[varID]
|
||||
if len(list) != 0 {
|
||||
|
||||
@@ -244,7 +244,7 @@ func makePreinlineDclMap(fnsym *obj.LSym) map[varPos]int {
|
||||
DeclName: unversion(n.Sym().Name),
|
||||
DeclFile: pos.RelFilename(),
|
||||
DeclLine: pos.RelLine(),
|
||||
DeclCol: pos.RelCol(),
|
||||
DeclCol: pos.Col(),
|
||||
}
|
||||
if _, found := m[vp]; found {
|
||||
// We can see collisions (variables with the same name/file/line/col) in obfuscated or machine-generated code -- see issue 44378 for an example. Skip duplicates in such cases, since it is unlikely that a human will be debugging such code.
|
||||
|
||||
@@ -1,120 +0,0 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package escape
|
||||
|
||||
import (
|
||||
"cmd/compile/internal/base"
|
||||
"cmd/compile/internal/ir"
|
||||
)
|
||||
|
||||
// addr evaluates an addressable expression n and returns a hole
|
||||
// that represents storing into the represented location.
|
||||
func (e *escape) addr(n ir.Node) hole {
|
||||
if n == nil || ir.IsBlank(n) {
|
||||
// Can happen in select case, range, maybe others.
|
||||
return e.discardHole()
|
||||
}
|
||||
|
||||
k := e.heapHole()
|
||||
|
||||
switch n.Op() {
|
||||
default:
|
||||
base.Fatalf("unexpected addr: %v", n)
|
||||
case ir.ONAME:
|
||||
n := n.(*ir.Name)
|
||||
if n.Class == ir.PEXTERN {
|
||||
break
|
||||
}
|
||||
k = e.oldLoc(n).asHole()
|
||||
case ir.OLINKSYMOFFSET:
|
||||
break
|
||||
case ir.ODOT:
|
||||
n := n.(*ir.SelectorExpr)
|
||||
k = e.addr(n.X)
|
||||
case ir.OINDEX:
|
||||
n := n.(*ir.IndexExpr)
|
||||
e.discard(n.Index)
|
||||
if n.X.Type().IsArray() {
|
||||
k = e.addr(n.X)
|
||||
} else {
|
||||
e.discard(n.X)
|
||||
}
|
||||
case ir.ODEREF, ir.ODOTPTR:
|
||||
e.discard(n)
|
||||
case ir.OINDEXMAP:
|
||||
n := n.(*ir.IndexExpr)
|
||||
e.discard(n.X)
|
||||
e.assignHeap(n.Index, "key of map put", n)
|
||||
}
|
||||
|
||||
return k
|
||||
}
|
||||
|
||||
func (e *escape) addrs(l ir.Nodes) []hole {
|
||||
var ks []hole
|
||||
for _, n := range l {
|
||||
ks = append(ks, e.addr(n))
|
||||
}
|
||||
return ks
|
||||
}
|
||||
|
||||
func (e *escape) assignHeap(src ir.Node, why string, where ir.Node) {
|
||||
e.expr(e.heapHole().note(where, why), src)
|
||||
}
|
||||
|
||||
// assignList evaluates the assignment dsts... = srcs....
|
||||
func (e *escape) assignList(dsts, srcs []ir.Node, why string, where ir.Node) {
|
||||
ks := e.addrs(dsts)
|
||||
for i, k := range ks {
|
||||
var src ir.Node
|
||||
if i < len(srcs) {
|
||||
src = srcs[i]
|
||||
}
|
||||
|
||||
if dst := dsts[i]; dst != nil {
|
||||
// Detect implicit conversion of uintptr to unsafe.Pointer when
|
||||
// storing into reflect.{Slice,String}Header.
|
||||
if dst.Op() == ir.ODOTPTR && ir.IsReflectHeaderDataField(dst) {
|
||||
e.unsafeValue(e.heapHole().note(where, why), src)
|
||||
continue
|
||||
}
|
||||
|
||||
// Filter out some no-op assignments for escape analysis.
|
||||
if src != nil && isSelfAssign(dst, src) {
|
||||
if base.Flag.LowerM != 0 {
|
||||
base.WarnfAt(where.Pos(), "%v ignoring self-assignment in %v", e.curfn, where)
|
||||
}
|
||||
k = e.discardHole()
|
||||
}
|
||||
}
|
||||
|
||||
e.expr(k.note(where, why), src)
|
||||
}
|
||||
|
||||
e.reassigned(ks, where)
|
||||
}
|
||||
|
||||
// reassigned marks the locations associated with the given holes as
|
||||
// reassigned, unless the location represents a variable declared and
|
||||
// assigned exactly once by where.
|
||||
func (e *escape) reassigned(ks []hole, where ir.Node) {
|
||||
if as, ok := where.(*ir.AssignStmt); ok && as.Op() == ir.OAS && as.Y == nil {
|
||||
if dst, ok := as.X.(*ir.Name); ok && dst.Op() == ir.ONAME && dst.Defn == nil {
|
||||
// Zero-value assignment for variable declared without an
|
||||
// explicit initial value. Assume this is its initialization
|
||||
// statement.
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
for _, k := range ks {
|
||||
loc := k.dst
|
||||
// Variables declared by range statements are assigned on every iteration.
|
||||
if n, ok := loc.n.(*ir.Name); ok && n.Defn == where && where.Op() != ir.ORANGE {
|
||||
continue
|
||||
}
|
||||
loc.reassigned = true
|
||||
}
|
||||
}
|
||||
@@ -1,428 +0,0 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package escape
|
||||
|
||||
import (
|
||||
"cmd/compile/internal/base"
|
||||
"cmd/compile/internal/ir"
|
||||
"cmd/compile/internal/typecheck"
|
||||
"cmd/compile/internal/types"
|
||||
"cmd/internal/src"
|
||||
)
|
||||
|
||||
// call evaluates a call expression, including builtin calls. ks
// should contain the holes representing where the function callee's
// results flow.
|
||||
func (e *escape) call(ks []hole, call ir.Node) {
|
||||
var init ir.Nodes
|
||||
e.callCommon(ks, call, &init, nil)
|
||||
if len(init) != 0 {
|
||||
call.(*ir.CallExpr).PtrInit().Append(init...)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *escape) callCommon(ks []hole, call ir.Node, init *ir.Nodes, wrapper *ir.Func) {
|
||||
|
||||
// argumentPragma handles escape analysis of argument *argp to the
|
||||
// given hole. If the function callee is known, pragma is the
|
||||
// function's pragma flags; otherwise 0.
|
||||
argumentFunc := func(fn *ir.Name, k hole, argp *ir.Node) {
|
||||
e.rewriteArgument(argp, init, call, fn, wrapper)
|
||||
|
||||
e.expr(k.note(call, "call parameter"), *argp)
|
||||
}
|
||||
|
||||
argument := func(k hole, argp *ir.Node) {
|
||||
argumentFunc(nil, k, argp)
|
||||
}
|
||||
|
||||
switch call.Op() {
|
||||
default:
|
||||
ir.Dump("esc", call)
|
||||
base.Fatalf("unexpected call op: %v", call.Op())
|
||||
|
||||
case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
|
||||
call := call.(*ir.CallExpr)
|
||||
typecheck.FixVariadicCall(call)
|
||||
typecheck.FixMethodCall(call)
|
||||
|
||||
// Pick out the function callee, if statically known.
|
||||
//
|
||||
// TODO(mdempsky): Change fn from *ir.Name to *ir.Func, but some
|
||||
// functions (e.g., runtime builtins, method wrappers, generated
|
||||
// eq/hash functions) don't have it set. Investigate whether
|
||||
// that's a concern.
|
||||
var fn *ir.Name
|
||||
switch call.Op() {
|
||||
case ir.OCALLFUNC:
|
||||
// If we have a direct call to a closure (not just one we were
|
||||
// able to statically resolve with ir.StaticValue), mark it as
|
||||
// such so batch.outlives can optimize the flow results.
|
||||
if call.X.Op() == ir.OCLOSURE {
|
||||
call.X.(*ir.ClosureExpr).Func.SetClosureCalled(true)
|
||||
}
|
||||
|
||||
switch v := ir.StaticValue(call.X); v.Op() {
|
||||
case ir.ONAME:
|
||||
if v := v.(*ir.Name); v.Class == ir.PFUNC {
|
||||
fn = v
|
||||
}
|
||||
case ir.OCLOSURE:
|
||||
fn = v.(*ir.ClosureExpr).Func.Nname
|
||||
case ir.OMETHEXPR:
|
||||
fn = ir.MethodExprName(v)
|
||||
}
|
||||
case ir.OCALLMETH:
|
||||
base.FatalfAt(call.Pos(), "OCALLMETH missed by typecheck")
|
||||
}
|
||||
|
||||
fntype := call.X.Type()
|
||||
if fn != nil {
|
||||
fntype = fn.Type()
|
||||
}
|
||||
|
||||
if ks != nil && fn != nil && e.inMutualBatch(fn) {
|
||||
for i, result := range fn.Type().Results().FieldSlice() {
|
||||
e.expr(ks[i], ir.AsNode(result.Nname))
|
||||
}
|
||||
}
|
||||
|
||||
var recvp *ir.Node
|
||||
if call.Op() == ir.OCALLFUNC {
|
||||
// Evaluate callee function expression.
|
||||
//
|
||||
// Note: We use argument and not argumentFunc, because while
|
||||
// call.X here may be an argument to runtime.{new,defer}proc,
|
||||
// it's not an argument to fn itself.
|
||||
argument(e.discardHole(), &call.X)
|
||||
} else {
|
||||
recvp = &call.X.(*ir.SelectorExpr).X
|
||||
}
|
||||
|
||||
args := call.Args
|
||||
if recv := fntype.Recv(); recv != nil {
|
||||
if recvp == nil {
|
||||
// Function call using method expression. Receiver argument is
|
||||
// at the front of the regular arguments list.
|
||||
recvp = &args[0]
|
||||
args = args[1:]
|
||||
}
|
||||
|
||||
argumentFunc(fn, e.tagHole(ks, fn, recv), recvp)
|
||||
}
|
||||
|
||||
for i, param := range fntype.Params().FieldSlice() {
|
||||
argumentFunc(fn, e.tagHole(ks, fn, param), &args[i])
|
||||
}
|
||||
|
||||
case ir.OINLCALL:
|
||||
call := call.(*ir.InlinedCallExpr)
|
||||
e.stmts(call.Body)
|
||||
for i, result := range call.ReturnVars {
|
||||
k := e.discardHole()
|
||||
if ks != nil {
|
||||
k = ks[i]
|
||||
}
|
||||
e.expr(k, result)
|
||||
}
|
||||
|
||||
case ir.OAPPEND:
|
||||
call := call.(*ir.CallExpr)
|
||||
args := call.Args
|
||||
|
||||
// Appendee slice may flow directly to the result, if
|
||||
// it has enough capacity. Alternatively, a new heap
|
||||
// slice might be allocated, and all slice elements
|
||||
// might flow to heap.
|
||||
appendeeK := ks[0]
|
||||
if args[0].Type().Elem().HasPointers() {
|
||||
appendeeK = e.teeHole(appendeeK, e.heapHole().deref(call, "appendee slice"))
|
||||
}
|
||||
argument(appendeeK, &args[0])
|
||||
|
||||
if call.IsDDD {
|
||||
appendedK := e.discardHole()
|
||||
if args[1].Type().IsSlice() && args[1].Type().Elem().HasPointers() {
|
||||
appendedK = e.heapHole().deref(call, "appended slice...")
|
||||
}
|
||||
argument(appendedK, &args[1])
|
||||
} else {
|
||||
for i := 1; i < len(args); i++ {
|
||||
argument(e.heapHole(), &args[i])
|
||||
}
|
||||
}
|
||||
|
||||
case ir.OCOPY:
|
||||
call := call.(*ir.BinaryExpr)
|
||||
argument(e.discardHole(), &call.X)
|
||||
|
||||
copiedK := e.discardHole()
|
||||
if call.Y.Type().IsSlice() && call.Y.Type().Elem().HasPointers() {
|
||||
copiedK = e.heapHole().deref(call, "copied slice")
|
||||
}
|
||||
argument(copiedK, &call.Y)
|
||||
|
||||
case ir.OPANIC:
|
||||
call := call.(*ir.UnaryExpr)
|
||||
argument(e.heapHole(), &call.X)
|
||||
|
||||
case ir.OCOMPLEX:
|
||||
call := call.(*ir.BinaryExpr)
|
||||
argument(e.discardHole(), &call.X)
|
||||
argument(e.discardHole(), &call.Y)
|
||||
|
||||
case ir.ODELETE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
|
||||
call := call.(*ir.CallExpr)
|
||||
fixRecoverCall(call)
|
||||
for i := range call.Args {
|
||||
argument(e.discardHole(), &call.Args[i])
|
||||
}
|
||||
|
||||
case ir.OLEN, ir.OCAP, ir.OREAL, ir.OIMAG, ir.OCLOSE:
|
||||
call := call.(*ir.UnaryExpr)
|
||||
argument(e.discardHole(), &call.X)
|
||||
|
||||
case ir.OUNSAFEADD, ir.OUNSAFESLICE:
|
||||
call := call.(*ir.BinaryExpr)
|
||||
argument(ks[0], &call.X)
|
||||
argument(e.discardHole(), &call.Y)
|
||||
}
|
||||
}
|
||||
|
||||
// goDeferStmt analyzes a "go" or "defer" statement.
|
||||
//
|
||||
// In the process, it also normalizes the statement to always use a
|
||||
// simple function call with no arguments and no results. For example,
|
||||
// it rewrites:
|
||||
//
|
||||
// defer f(x, y)
|
||||
//
|
||||
// into:
|
||||
//
|
||||
// x1, y1 := x, y
|
||||
// defer func() { f(x1, y1) }()
|
||||
func (e *escape) goDeferStmt(n *ir.GoDeferStmt) {
|
||||
k := e.heapHole()
|
||||
if n.Op() == ir.ODEFER && e.loopDepth == 1 {
|
||||
// Top-level defer arguments don't escape to the heap,
|
||||
// but they do need to last until they're invoked.
|
||||
k = e.later(e.discardHole())
|
||||
|
||||
// force stack allocation of defer record, unless
|
||||
// open-coded defers are used (see ssa.go)
|
||||
n.SetEsc(ir.EscNever)
|
||||
}
|
||||
|
||||
call := n.Call
|
||||
|
||||
init := n.PtrInit()
|
||||
init.Append(ir.TakeInit(call)...)
|
||||
e.stmts(*init)
|
||||
|
||||
// If the function is already a zero argument/result function call,
|
||||
// just escape analyze it normally.
|
||||
if call, ok := call.(*ir.CallExpr); ok && call.Op() == ir.OCALLFUNC {
|
||||
if sig := call.X.Type(); sig.NumParams()+sig.NumResults() == 0 {
|
||||
if clo, ok := call.X.(*ir.ClosureExpr); ok && n.Op() == ir.OGO {
|
||||
clo.IsGoWrap = true
|
||||
}
|
||||
e.expr(k, call.X)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Create a new no-argument function that we'll hand off to defer.
|
||||
fn := ir.NewClosureFunc(n.Pos(), true)
|
||||
fn.SetWrapper(true)
|
||||
fn.Nname.SetType(types.NewSignature(types.LocalPkg, nil, nil, nil, nil))
|
||||
fn.Body = []ir.Node{call}
|
||||
|
||||
clo := fn.OClosure
|
||||
if n.Op() == ir.OGO {
|
||||
clo.IsGoWrap = true
|
||||
}
|
||||
|
||||
e.callCommon(nil, call, init, fn)
|
||||
e.closures = append(e.closures, closure{e.spill(k, clo), clo})
|
||||
|
||||
// Create new top level call to closure.
|
||||
n.Call = ir.NewCallExpr(call.Pos(), ir.OCALL, clo, nil)
|
||||
ir.WithFunc(e.curfn, func() {
|
||||
typecheck.Stmt(n.Call)
|
||||
})
|
||||
}
|
||||
|
||||
// rewriteArgument rewrites the argument *argp of the given call expression.
|
||||
// fn is the static callee function, if known.
|
||||
// wrapper is the go/defer wrapper function for call, if any.
|
||||
func (e *escape) rewriteArgument(argp *ir.Node, init *ir.Nodes, call ir.Node, fn *ir.Name, wrapper *ir.Func) {
|
||||
var pragma ir.PragmaFlag
|
||||
if fn != nil && fn.Func != nil {
|
||||
pragma = fn.Func.Pragma
|
||||
}
|
||||
|
||||
// unsafeUintptr rewrites "uintptr(ptr)" arguments to syscall-like
|
||||
// functions, so that ptr is kept alive and/or escaped as
|
||||
// appropriate. unsafeUintptr also reports whether it modified arg0.
|
||||
unsafeUintptr := func(arg0 ir.Node) bool {
|
||||
if pragma&(ir.UintptrKeepAlive|ir.UintptrEscapes) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// If the argument is really a pointer being converted to uintptr,
|
||||
// arrange for the pointer to be kept alive until the call returns,
|
||||
// by copying it into a temp and marking that temp
|
||||
// still alive when we pop the temp stack.
|
||||
if arg0.Op() != ir.OCONVNOP || !arg0.Type().IsUintptr() {
|
||||
return false
|
||||
}
|
||||
arg := arg0.(*ir.ConvExpr)
|
||||
|
||||
if !arg.X.Type().IsUnsafePtr() {
|
||||
return false
|
||||
}
|
||||
|
||||
// Create and declare a new pointer-typed temp variable.
|
||||
tmp := e.wrapExpr(arg.Pos(), &arg.X, init, call, wrapper)
|
||||
|
||||
if pragma&ir.UintptrEscapes != 0 {
|
||||
e.flow(e.heapHole().note(arg, "//go:uintptrescapes"), e.oldLoc(tmp))
|
||||
}
|
||||
|
||||
if pragma&ir.UintptrKeepAlive != 0 {
|
||||
call := call.(*ir.CallExpr)
|
||||
|
||||
// SSA implements CallExpr.KeepAlive using OpVarLive, which
|
||||
// doesn't support PAUTOHEAP variables. I tried changing it to
|
||||
// use OpKeepAlive, but that ran into issues of its own.
|
||||
// For now, the easy solution is to explicitly copy to (yet
|
||||
// another) new temporary variable.
|
||||
keep := tmp
|
||||
if keep.Class == ir.PAUTOHEAP {
|
||||
keep = e.copyExpr(arg.Pos(), tmp, call.PtrInit(), wrapper, false)
|
||||
}
|
||||
|
||||
keep.SetAddrtaken(true) // ensure SSA keeps the tmp variable
|
||||
call.KeepAlive = append(call.KeepAlive, keep)
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
visit := func(pos src.XPos, argp *ir.Node) {
|
||||
// Optimize a few common constant expressions. By leaving these
|
||||
// untouched in the call expression, we let the wrapper handle
|
||||
// evaluating them, rather than taking up closure context space.
|
||||
switch arg := *argp; arg.Op() {
|
||||
case ir.OLITERAL, ir.ONIL, ir.OMETHEXPR:
|
||||
return
|
||||
case ir.ONAME:
|
||||
if arg.(*ir.Name).Class == ir.PFUNC {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if unsafeUintptr(*argp) {
|
||||
return
|
||||
}
|
||||
|
||||
if wrapper != nil {
|
||||
e.wrapExpr(pos, argp, init, call, wrapper)
|
||||
}
|
||||
}
|
||||
|
||||
// Peel away any slice lits.
|
||||
if arg := *argp; arg.Op() == ir.OSLICELIT {
|
||||
list := arg.(*ir.CompLitExpr).List
|
||||
for i := range list {
|
||||
visit(arg.Pos(), &list[i])
|
||||
}
|
||||
} else {
|
||||
visit(call.Pos(), argp)
|
||||
}
|
||||
}
|
||||
|
||||
// wrapExpr replaces *exprp with a temporary variable copy. If wrapper
|
||||
// is non-nil, the variable will be captured for use within that
|
||||
// function.
|
||||
func (e *escape) wrapExpr(pos src.XPos, exprp *ir.Node, init *ir.Nodes, call ir.Node, wrapper *ir.Func) *ir.Name {
|
||||
tmp := e.copyExpr(pos, *exprp, init, e.curfn, true)
|
||||
|
||||
if wrapper != nil {
|
||||
// Currently for "defer i.M()" if i is nil it panics at the point
|
||||
// of defer statement, not when deferred function is called. We
|
||||
// need to do the nil check outside of the wrapper.
|
||||
if call.Op() == ir.OCALLINTER && exprp == &call.(*ir.CallExpr).X.(*ir.SelectorExpr).X {
|
||||
check := ir.NewUnaryExpr(pos, ir.OCHECKNIL, ir.NewUnaryExpr(pos, ir.OITAB, tmp))
|
||||
init.Append(typecheck.Stmt(check))
|
||||
}
|
||||
|
||||
e.oldLoc(tmp).captured = true
|
||||
|
||||
tmp = ir.NewClosureVar(pos, wrapper, tmp)
|
||||
}
|
||||
|
||||
*exprp = tmp
|
||||
return tmp
|
||||
}
|
||||
|
||||
// copyExpr creates and returns a new temporary variable within fn;
|
||||
// appends statements to init to declare and initialize it to expr;
|
||||
// and escape analyzes the data flow if analyze is true.
|
||||
func (e *escape) copyExpr(pos src.XPos, expr ir.Node, init *ir.Nodes, fn *ir.Func, analyze bool) *ir.Name {
|
||||
if ir.HasUniquePos(expr) {
|
||||
pos = expr.Pos()
|
||||
}
|
||||
|
||||
tmp := typecheck.TempAt(pos, fn, expr.Type())
|
||||
|
||||
stmts := []ir.Node{
|
||||
ir.NewDecl(pos, ir.ODCL, tmp),
|
||||
ir.NewAssignStmt(pos, tmp, expr),
|
||||
}
|
||||
typecheck.Stmts(stmts)
|
||||
init.Append(stmts...)
|
||||
|
||||
if analyze {
|
||||
e.newLoc(tmp, false)
|
||||
e.stmts(stmts)
|
||||
}
|
||||
|
||||
return tmp
|
||||
}
|
||||
|
||||
// tagHole returns a hole for evaluating an argument passed to param.
|
||||
// ks should contain the holes representing where the function
|
||||
// callee's results flows. fn is the statically-known callee function,
|
||||
// if any.
|
||||
func (e *escape) tagHole(ks []hole, fn *ir.Name, param *types.Field) hole {
|
||||
// If this is a dynamic call, we can't rely on param.Note.
|
||||
if fn == nil {
|
||||
return e.heapHole()
|
||||
}
|
||||
|
||||
if e.inMutualBatch(fn) {
|
||||
return e.addr(ir.AsNode(param.Nname))
|
||||
}
|
||||
|
||||
// Call to previously tagged function.
|
||||
|
||||
var tagKs []hole
|
||||
|
||||
esc := parseLeaks(param.Note)
|
||||
if x := esc.Heap(); x >= 0 {
|
||||
tagKs = append(tagKs, e.heapHole().shift(x))
|
||||
}
|
||||
|
||||
if ks != nil {
|
||||
for i := 0; i < numEscResults; i++ {
|
||||
if x := esc.Result(i); x >= 0 {
|
||||
tagKs = append(tagKs, ks[i].shift(x))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return e.teeHole(tagKs...)
|
||||
}
|
||||
@@ -1,37 +0,0 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package escape

import (
	"cmd/compile/internal/base"
	"cmd/compile/internal/ir"
	"cmd/compile/internal/typecheck"
	"cmd/compile/internal/types"
)

// TODO(mdempsky): Desugaring doesn't belong during escape analysis,
// but for now it's the most convenient place for some rewrites.

// fixRecoverCall rewrites an ORECOVER call into ORECOVERFP,
// adding an explicit frame pointer argument.
// If call is not an ORECOVER call, it's left unmodified.
func fixRecoverCall(call *ir.CallExpr) {
	if call.Op() != ir.ORECOVER {
		return
	}

	pos := call.Pos()

	// FP is equal to caller's SP plus FixedFrameSize().
	var fp ir.Node = ir.NewCallExpr(pos, ir.OGETCALLERSP, nil, nil)
	if off := base.Ctxt.FixedFrameSize(); off != 0 {
		fp = ir.NewBinaryExpr(fp.Pos(), ir.OADD, fp, ir.NewInt(off))
	}
	// TODO(mdempsky): Replace *int32 with unsafe.Pointer, without upsetting checkptr.
	fp = ir.NewConvExpr(pos, ir.OCONVNOP, types.NewPtr(types.Types[types.TINT32]), fp)

	call.SetOp(ir.ORECOVERFP)
	call.Args = []ir.Node{typecheck.Expr(fp)}
}
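
For orientation, fixRecoverCall above only changes how an ordinary recover() call is lowered; nothing changes at the source level. A minimal example of the construct it targets, with the rewrite described in comments (this is plain user code, not compiler code):

package main

import "fmt"

func safeDiv(a, b int) (q int, err error) {
	defer func() {
		// The compiler rewrites this recover() into ORECOVERFP, adding the
		// caller's frame pointer (caller SP + FixedFrameSize) as an explicit
		// argument so the runtime can tell which frame is recovering.
		if r := recover(); r != nil {
			err = fmt.Errorf("recovered: %v", r)
		}
	}()
	return a / b, nil
}

func main() {
	fmt.Println(safeDiv(1, 0)) // 0 recovered: runtime error: integer divide by zero
}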
File diff suppressed because it is too large
@@ -1,335 +0,0 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package escape
|
||||
|
||||
import (
|
||||
"cmd/compile/internal/base"
|
||||
"cmd/compile/internal/ir"
|
||||
"cmd/compile/internal/types"
|
||||
)
|
||||
|
||||
// expr models evaluating an expression n and flowing the result into
|
||||
// hole k.
|
||||
func (e *escape) expr(k hole, n ir.Node) {
|
||||
if n == nil {
|
||||
return
|
||||
}
|
||||
e.stmts(n.Init())
|
||||
e.exprSkipInit(k, n)
|
||||
}
|
||||
|
||||
func (e *escape) exprSkipInit(k hole, n ir.Node) {
|
||||
if n == nil {
|
||||
return
|
||||
}
|
||||
|
||||
lno := ir.SetPos(n)
|
||||
defer func() {
|
||||
base.Pos = lno
|
||||
}()
|
||||
|
||||
if k.derefs >= 0 && !n.Type().IsUntyped() && !n.Type().HasPointers() {
|
||||
k.dst = &e.blankLoc
|
||||
}
|
||||
|
||||
switch n.Op() {
|
||||
default:
|
||||
base.Fatalf("unexpected expr: %s %v", n.Op().String(), n)
|
||||
|
||||
case ir.OLITERAL, ir.ONIL, ir.OGETG, ir.OGETCALLERPC, ir.OGETCALLERSP, ir.OTYPE, ir.OMETHEXPR, ir.OLINKSYMOFFSET:
|
||||
// nop
|
||||
|
||||
case ir.ONAME:
|
||||
n := n.(*ir.Name)
|
||||
if n.Class == ir.PFUNC || n.Class == ir.PEXTERN {
|
||||
return
|
||||
}
|
||||
e.flow(k, e.oldLoc(n))
|
||||
|
||||
case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT:
|
||||
n := n.(*ir.UnaryExpr)
|
||||
e.discard(n.X)
|
||||
case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
|
||||
n := n.(*ir.BinaryExpr)
|
||||
e.discard(n.X)
|
||||
e.discard(n.Y)
|
||||
case ir.OANDAND, ir.OOROR:
|
||||
n := n.(*ir.LogicalExpr)
|
||||
e.discard(n.X)
|
||||
e.discard(n.Y)
|
||||
case ir.OADDR:
|
||||
n := n.(*ir.AddrExpr)
|
||||
e.expr(k.addr(n, "address-of"), n.X) // "address-of"
|
||||
case ir.ODEREF:
|
||||
n := n.(*ir.StarExpr)
|
||||
e.expr(k.deref(n, "indirection"), n.X) // "indirection"
|
||||
case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER:
|
||||
n := n.(*ir.SelectorExpr)
|
||||
e.expr(k.note(n, "dot"), n.X)
|
||||
case ir.ODOTPTR:
|
||||
n := n.(*ir.SelectorExpr)
|
||||
e.expr(k.deref(n, "dot of pointer"), n.X) // "dot of pointer"
|
||||
case ir.ODOTTYPE, ir.ODOTTYPE2:
|
||||
n := n.(*ir.TypeAssertExpr)
|
||||
e.expr(k.dotType(n.Type(), n, "dot"), n.X)
|
||||
case ir.ODYNAMICDOTTYPE, ir.ODYNAMICDOTTYPE2:
|
||||
n := n.(*ir.DynamicTypeAssertExpr)
|
||||
e.expr(k.dotType(n.Type(), n, "dot"), n.X)
|
||||
// n.T doesn't need to be tracked; it always points to read-only storage.
|
||||
case ir.OINDEX:
|
||||
n := n.(*ir.IndexExpr)
|
||||
if n.X.Type().IsArray() {
|
||||
e.expr(k.note(n, "fixed-array-index-of"), n.X)
|
||||
} else {
|
||||
// TODO(mdempsky): Fix why reason text.
|
||||
e.expr(k.deref(n, "dot of pointer"), n.X)
|
||||
}
|
||||
e.discard(n.Index)
|
||||
case ir.OINDEXMAP:
|
||||
n := n.(*ir.IndexExpr)
|
||||
e.discard(n.X)
|
||||
e.discard(n.Index)
|
||||
case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR:
|
||||
n := n.(*ir.SliceExpr)
|
||||
e.expr(k.note(n, "slice"), n.X)
|
||||
e.discard(n.Low)
|
||||
e.discard(n.High)
|
||||
e.discard(n.Max)
|
||||
|
||||
case ir.OCONV, ir.OCONVNOP:
|
||||
n := n.(*ir.ConvExpr)
|
||||
if ir.ShouldCheckPtr(e.curfn, 2) && n.Type().IsUnsafePtr() && n.X.Type().IsPtr() {
|
||||
// When -d=checkptr=2 is enabled, treat
|
||||
// conversions to unsafe.Pointer as an
|
||||
// escaping operation. This allows better
|
||||
// runtime instrumentation, since we can more
|
||||
// easily detect object boundaries on the heap
|
||||
// than the stack.
|
||||
e.assignHeap(n.X, "conversion to unsafe.Pointer", n)
|
||||
} else if n.Type().IsUnsafePtr() && n.X.Type().IsUintptr() {
|
||||
e.unsafeValue(k, n.X)
|
||||
} else {
|
||||
e.expr(k, n.X)
|
||||
}
|
||||
case ir.OCONVIFACE, ir.OCONVIDATA:
|
||||
n := n.(*ir.ConvExpr)
|
||||
if !n.X.Type().IsInterface() && !types.IsDirectIface(n.X.Type()) {
|
||||
k = e.spill(k, n)
|
||||
}
|
||||
e.expr(k.note(n, "interface-converted"), n.X)
|
||||
case ir.OEFACE:
|
||||
n := n.(*ir.BinaryExpr)
|
||||
// Note: n.X is not needed because it can never point to memory that might escape.
|
||||
e.expr(k, n.Y)
|
||||
case ir.OIDATA, ir.OSPTR:
|
||||
n := n.(*ir.UnaryExpr)
|
||||
e.expr(k, n.X)
|
||||
case ir.OSLICE2ARRPTR:
|
||||
// the slice pointer flows directly to the result
|
||||
n := n.(*ir.ConvExpr)
|
||||
e.expr(k, n.X)
|
||||
case ir.ORECV:
|
||||
n := n.(*ir.UnaryExpr)
|
||||
e.discard(n.X)
|
||||
|
||||
case ir.OCALLMETH, ir.OCALLFUNC, ir.OCALLINTER, ir.OINLCALL, ir.OLEN, ir.OCAP, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCOPY, ir.ORECOVER, ir.OUNSAFEADD, ir.OUNSAFESLICE:
|
||||
e.call([]hole{k}, n)
|
||||
|
||||
case ir.ONEW:
|
||||
n := n.(*ir.UnaryExpr)
|
||||
e.spill(k, n)
|
||||
|
||||
case ir.OMAKESLICE:
|
||||
n := n.(*ir.MakeExpr)
|
||||
e.spill(k, n)
|
||||
e.discard(n.Len)
|
||||
e.discard(n.Cap)
|
||||
case ir.OMAKECHAN:
|
||||
n := n.(*ir.MakeExpr)
|
||||
e.discard(n.Len)
|
||||
case ir.OMAKEMAP:
|
||||
n := n.(*ir.MakeExpr)
|
||||
e.spill(k, n)
|
||||
e.discard(n.Len)
|
||||
|
||||
case ir.OMETHVALUE:
|
||||
// Flow the receiver argument to both the closure and
|
||||
// to the receiver parameter.
|
||||
|
||||
n := n.(*ir.SelectorExpr)
|
||||
closureK := e.spill(k, n)
|
||||
|
||||
m := n.Selection
|
||||
|
||||
// We don't know how the method value will be called
|
||||
// later, so conservatively assume the result
|
||||
// parameters all flow to the heap.
|
||||
//
|
||||
// TODO(mdempsky): Change ks into a callback, so that
|
||||
// we don't have to create this slice?
|
||||
var ks []hole
|
||||
for i := m.Type.NumResults(); i > 0; i-- {
|
||||
ks = append(ks, e.heapHole())
|
||||
}
|
||||
name, _ := m.Nname.(*ir.Name)
|
||||
paramK := e.tagHole(ks, name, m.Type.Recv())
|
||||
|
||||
e.expr(e.teeHole(paramK, closureK), n.X)
|
||||
|
||||
case ir.OPTRLIT:
|
||||
n := n.(*ir.AddrExpr)
|
||||
e.expr(e.spill(k, n), n.X)
|
||||
|
||||
case ir.OARRAYLIT:
|
||||
n := n.(*ir.CompLitExpr)
|
||||
for _, elt := range n.List {
|
||||
if elt.Op() == ir.OKEY {
|
||||
elt = elt.(*ir.KeyExpr).Value
|
||||
}
|
||||
e.expr(k.note(n, "array literal element"), elt)
|
||||
}
|
||||
|
||||
case ir.OSLICELIT:
|
||||
n := n.(*ir.CompLitExpr)
|
||||
k = e.spill(k, n)
|
||||
|
||||
for _, elt := range n.List {
|
||||
if elt.Op() == ir.OKEY {
|
||||
elt = elt.(*ir.KeyExpr).Value
|
||||
}
|
||||
e.expr(k.note(n, "slice-literal-element"), elt)
|
||||
}
|
||||
|
||||
case ir.OSTRUCTLIT:
|
||||
n := n.(*ir.CompLitExpr)
|
||||
for _, elt := range n.List {
|
||||
e.expr(k.note(n, "struct literal element"), elt.(*ir.StructKeyExpr).Value)
|
||||
}
|
||||
|
||||
case ir.OMAPLIT:
|
||||
n := n.(*ir.CompLitExpr)
|
||||
e.spill(k, n)
|
||||
|
||||
// Map keys and values are always stored in the heap.
|
||||
for _, elt := range n.List {
|
||||
elt := elt.(*ir.KeyExpr)
|
||||
e.assignHeap(elt.Key, "map literal key", n)
|
||||
e.assignHeap(elt.Value, "map literal value", n)
|
||||
}
|
||||
|
||||
case ir.OCLOSURE:
|
||||
n := n.(*ir.ClosureExpr)
|
||||
k = e.spill(k, n)
|
||||
e.closures = append(e.closures, closure{k, n})
|
||||
|
||||
if fn := n.Func; fn.IsHiddenClosure() {
|
||||
for _, cv := range fn.ClosureVars {
|
||||
if loc := e.oldLoc(cv); !loc.captured {
|
||||
loc.captured = true
|
||||
|
||||
// Ignore reassignments to the variable in straightline code
|
||||
// preceding the first capture by a closure.
|
||||
if loc.loopDepth == e.loopDepth {
|
||||
loc.reassigned = false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, n := range fn.Dcl {
|
||||
// Add locations for local variables of the
|
||||
// closure, if needed, in case we're not including
|
||||
// the closure func in the batch for escape
|
||||
// analysis (happens for escape analysis called
|
||||
// from reflectdata.methodWrapper)
|
||||
if n.Op() == ir.ONAME && n.Opt == nil {
|
||||
e.with(fn).newLoc(n, false)
|
||||
}
|
||||
}
|
||||
e.walkFunc(fn)
|
||||
}
|
||||
|
||||
case ir.ORUNES2STR, ir.OBYTES2STR, ir.OSTR2RUNES, ir.OSTR2BYTES, ir.ORUNESTR:
|
||||
n := n.(*ir.ConvExpr)
|
||||
e.spill(k, n)
|
||||
e.discard(n.X)
|
||||
|
||||
case ir.OADDSTR:
|
||||
n := n.(*ir.AddStringExpr)
|
||||
e.spill(k, n)
|
||||
|
||||
// Arguments of OADDSTR never escape;
|
||||
// runtime.concatstrings makes sure of that.
|
||||
e.discards(n.List)
|
||||
|
||||
case ir.ODYNAMICTYPE:
|
||||
// Nothing to do - argument is a *runtime._type (+ maybe a *runtime.itab) pointing to static data section
|
||||
}
|
||||
}
|
||||
|
||||
// unsafeValue evaluates a uintptr-typed arithmetic expression looking
|
||||
// for conversions from an unsafe.Pointer.
|
||||
func (e *escape) unsafeValue(k hole, n ir.Node) {
|
||||
if n.Type().Kind() != types.TUINTPTR {
|
||||
base.Fatalf("unexpected type %v for %v", n.Type(), n)
|
||||
}
|
||||
if k.addrtaken {
|
||||
base.Fatalf("unexpected addrtaken")
|
||||
}
|
||||
|
||||
e.stmts(n.Init())
|
||||
|
||||
switch n.Op() {
|
||||
case ir.OCONV, ir.OCONVNOP:
|
||||
n := n.(*ir.ConvExpr)
|
||||
if n.X.Type().IsUnsafePtr() {
|
||||
e.expr(k, n.X)
|
||||
} else {
|
||||
e.discard(n.X)
|
||||
}
|
||||
case ir.ODOTPTR:
|
||||
n := n.(*ir.SelectorExpr)
|
||||
if ir.IsReflectHeaderDataField(n) {
|
||||
e.expr(k.deref(n, "reflect.Header.Data"), n.X)
|
||||
} else {
|
||||
e.discard(n.X)
|
||||
}
|
||||
case ir.OPLUS, ir.ONEG, ir.OBITNOT:
|
||||
n := n.(*ir.UnaryExpr)
|
||||
e.unsafeValue(k, n.X)
|
||||
case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OAND, ir.OANDNOT:
|
||||
n := n.(*ir.BinaryExpr)
|
||||
e.unsafeValue(k, n.X)
|
||||
e.unsafeValue(k, n.Y)
|
||||
case ir.OLSH, ir.ORSH:
|
||||
n := n.(*ir.BinaryExpr)
|
||||
e.unsafeValue(k, n.X)
|
||||
// RHS need not be uintptr-typed (#32959) and can't meaningfully
|
||||
// flow pointers anyway.
|
||||
e.discard(n.Y)
|
||||
default:
|
||||
e.exprSkipInit(e.discardHole(), n)
|
||||
}
|
||||
}
|
||||
|
||||
// discard evaluates an expression n for side-effects, but discards
|
||||
// its value.
|
||||
func (e *escape) discard(n ir.Node) {
|
||||
e.expr(e.discardHole(), n)
|
||||
}
|
||||
|
||||
func (e *escape) discards(l ir.Nodes) {
|
||||
for _, n := range l {
|
||||
e.discard(n)
|
||||
}
|
||||
}
|
||||
|
||||
// spill allocates a new location associated with expression n, flows
|
||||
// its address to k, and returns a hole that flows values to it. It's
|
||||
// intended for use with most expressions that allocate storage.
|
||||
func (e *escape) spill(k hole, n ir.Node) hole {
|
||||
loc := e.newLoc(n, true)
|
||||
e.flow(k.addr(n, "spill"), loc)
|
||||
return loc.asHole()
|
||||
}
|
||||
@@ -1,324 +0,0 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package escape
|
||||
|
||||
import (
|
||||
"cmd/compile/internal/base"
|
||||
"cmd/compile/internal/ir"
|
||||
"cmd/compile/internal/logopt"
|
||||
"cmd/compile/internal/types"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Below we implement the methods for walking the AST and recording
|
||||
// data flow edges. Note that because a sub-expression might have
|
||||
// side-effects, it's important to always visit the entire AST.
|
||||
//
|
||||
// For example, write either:
|
||||
//
|
||||
// if x {
|
||||
// e.discard(n.Left)
|
||||
// } else {
|
||||
// e.value(k, n.Left)
|
||||
// }
|
||||
//
|
||||
// or
|
||||
//
|
||||
// if x {
|
||||
// k = e.discardHole()
|
||||
// }
|
||||
// e.value(k, n.Left)
|
||||
//
|
||||
// Do NOT write:
|
||||
//
|
||||
// // BAD: possibly loses side-effects within n.Left
|
||||
// if !x {
|
||||
// e.value(k, n.Left)
|
||||
// }
|
||||
|
||||
// An location represents an abstract location that stores a Go
|
||||
// variable.
|
||||
type location struct {
|
||||
n ir.Node // represented variable or expression, if any
|
||||
curfn *ir.Func // enclosing function
|
||||
edges []edge // incoming edges
|
||||
loopDepth int // loopDepth at declaration
|
||||
|
||||
// resultIndex records the tuple index (starting at 1) for
|
||||
// PPARAMOUT variables within their function's result type.
|
||||
// For non-PPARAMOUT variables it's 0.
|
||||
resultIndex int
|
||||
|
||||
// derefs and walkgen are used during walkOne to track the
|
||||
// minimal dereferences from the walk root.
|
||||
derefs int // >= -1
|
||||
walkgen uint32
|
||||
|
||||
// dst and dstEdgeindex track the next immediate assignment
|
||||
// destination location during walkone, along with the index
|
||||
// of the edge pointing back to this location.
|
||||
dst *location
|
||||
dstEdgeIdx int
|
||||
|
||||
// queued is used by walkAll to track whether this location is
|
||||
// in the walk queue.
|
||||
queued bool
|
||||
|
||||
// escapes reports whether the represented variable's address
|
||||
// escapes; that is, whether the variable must be heap
|
||||
// allocated.
|
||||
escapes bool
|
||||
|
||||
// transient reports whether the represented expression's
|
||||
// address does not outlive the statement; that is, whether
|
||||
// its storage can be immediately reused.
|
||||
transient bool
|
||||
|
||||
// paramEsc records the represented parameter's leak set.
|
||||
paramEsc leaks
|
||||
|
||||
captured bool // has a closure captured this variable?
|
||||
reassigned bool // has this variable been reassigned?
|
||||
addrtaken bool // has this variable's address been taken?
|
||||
}
|
||||
|
||||
// An edge represents an assignment edge between two Go variables.
|
||||
type edge struct {
|
||||
src *location
|
||||
derefs int // >= -1
|
||||
notes *note
|
||||
}
|
||||
|
||||
func (l *location) asHole() hole {
|
||||
return hole{dst: l}
|
||||
}
|
||||
|
||||
// leak records that parameter l leaks to sink.
|
||||
func (l *location) leakTo(sink *location, derefs int) {
|
||||
// If sink is a result parameter that doesn't escape (#44614)
|
||||
// and we can fit return bits into the escape analysis tag,
|
||||
// then record as a result leak.
|
||||
if !sink.escapes && sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn {
|
||||
ri := sink.resultIndex - 1
|
||||
if ri < numEscResults {
|
||||
// Leak to result parameter.
|
||||
l.paramEsc.AddResult(ri, derefs)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Otherwise, record as heap leak.
|
||||
l.paramEsc.AddHeap(derefs)
|
||||
}
|
||||
|
||||
func (l *location) isName(c ir.Class) bool {
|
||||
return l.n != nil && l.n.Op() == ir.ONAME && l.n.(*ir.Name).Class == c
|
||||
}
|
||||
|
||||
// A hole represents a context for evaluation of a Go
|
||||
// expression. E.g., when evaluating p in "x = **p", we'd have a hole
|
||||
// with dst==x and derefs==2.
|
||||
type hole struct {
|
||||
dst *location
|
||||
derefs int // >= -1
|
||||
notes *note
|
||||
|
||||
// addrtaken indicates whether this context is taking the address of
|
||||
// the expression, independent of whether the address will actually
|
||||
// be stored into a variable.
|
||||
addrtaken bool
|
||||
}
|
||||
|
||||
type note struct {
|
||||
next *note
|
||||
where ir.Node
|
||||
why string
|
||||
}
|
||||
|
||||
func (k hole) note(where ir.Node, why string) hole {
|
||||
if where == nil || why == "" {
|
||||
base.Fatalf("note: missing where/why")
|
||||
}
|
||||
if base.Flag.LowerM >= 2 || logopt.Enabled() {
|
||||
k.notes = ¬e{
|
||||
next: k.notes,
|
||||
where: where,
|
||||
why: why,
|
||||
}
|
||||
}
|
||||
return k
|
||||
}
|
||||
|
||||
func (k hole) shift(delta int) hole {
|
||||
k.derefs += delta
|
||||
if k.derefs < -1 {
|
||||
base.Fatalf("derefs underflow: %v", k.derefs)
|
||||
}
|
||||
k.addrtaken = delta < 0
|
||||
return k
|
||||
}
|
||||
|
||||
func (k hole) deref(where ir.Node, why string) hole { return k.shift(1).note(where, why) }
|
||||
func (k hole) addr(where ir.Node, why string) hole { return k.shift(-1).note(where, why) }
|
||||
|
||||
func (k hole) dotType(t *types.Type, where ir.Node, why string) hole {
|
||||
if !t.IsInterface() && !types.IsDirectIface(t) {
|
||||
k = k.shift(1)
|
||||
}
|
||||
return k.note(where, why)
|
||||
}
|
||||
|
||||
func (b *batch) flow(k hole, src *location) {
|
||||
if k.addrtaken {
|
||||
src.addrtaken = true
|
||||
}
|
||||
|
||||
dst := k.dst
|
||||
if dst == &b.blankLoc {
|
||||
return
|
||||
}
|
||||
if dst == src && k.derefs >= 0 { // dst = dst, dst = *dst, ...
|
||||
return
|
||||
}
|
||||
if dst.escapes && k.derefs < 0 { // dst = &src
|
||||
if base.Flag.LowerM >= 2 || logopt.Enabled() {
|
||||
pos := base.FmtPos(src.n.Pos())
|
||||
if base.Flag.LowerM >= 2 {
|
||||
fmt.Printf("%s: %v escapes to heap:\n", pos, src.n)
|
||||
}
|
||||
explanation := b.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{})
|
||||
if logopt.Enabled() {
|
||||
var e_curfn *ir.Func // TODO(mdempsky): Fix.
|
||||
logopt.LogOpt(src.n.Pos(), "escapes", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", src.n), explanation)
|
||||
}
|
||||
|
||||
}
|
||||
src.escapes = true
|
||||
return
|
||||
}
|
||||
|
||||
// TODO(mdempsky): Deduplicate edges?
|
||||
dst.edges = append(dst.edges, edge{src: src, derefs: k.derefs, notes: k.notes})
|
||||
}
|
||||
|
||||
func (b *batch) heapHole() hole { return b.heapLoc.asHole() }
|
||||
func (b *batch) discardHole() hole { return b.blankLoc.asHole() }
|
||||
|
||||
func (b *batch) oldLoc(n *ir.Name) *location {
|
||||
if n.Canonical().Opt == nil {
|
||||
base.Fatalf("%v has no location", n)
|
||||
}
|
||||
return n.Canonical().Opt.(*location)
|
||||
}
|
||||
|
||||
func (e *escape) newLoc(n ir.Node, transient bool) *location {
|
||||
if e.curfn == nil {
|
||||
base.Fatalf("e.curfn isn't set")
|
||||
}
|
||||
if n != nil && n.Type() != nil && n.Type().NotInHeap() {
|
||||
base.ErrorfAt(n.Pos(), "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type())
|
||||
}
|
||||
|
||||
if n != nil && n.Op() == ir.ONAME {
|
||||
if canon := n.(*ir.Name).Canonical(); n != canon {
|
||||
base.Fatalf("newLoc on non-canonical %v (canonical is %v)", n, canon)
|
||||
}
|
||||
}
|
||||
loc := &location{
|
||||
n: n,
|
||||
curfn: e.curfn,
|
||||
loopDepth: e.loopDepth,
|
||||
transient: transient,
|
||||
}
|
||||
e.allLocs = append(e.allLocs, loc)
|
||||
if n != nil {
|
||||
if n.Op() == ir.ONAME {
|
||||
n := n.(*ir.Name)
|
||||
if n.Class == ir.PPARAM && n.Curfn == nil {
|
||||
// ok; hidden parameter
|
||||
} else if n.Curfn != e.curfn {
|
||||
base.Fatalf("curfn mismatch: %v != %v for %v", n.Curfn, e.curfn, n)
|
||||
}
|
||||
|
||||
if n.Opt != nil {
|
||||
base.Fatalf("%v already has a location", n)
|
||||
}
|
||||
n.Opt = loc
|
||||
}
|
||||
}
|
||||
return loc
|
||||
}
|
||||
|
||||
// teeHole returns a new hole that flows into each hole of ks,
|
||||
// similar to the Unix tee(1) command.
|
||||
func (e *escape) teeHole(ks ...hole) hole {
|
||||
if len(ks) == 0 {
|
||||
return e.discardHole()
|
||||
}
|
||||
if len(ks) == 1 {
|
||||
return ks[0]
|
||||
}
|
||||
// TODO(mdempsky): Optimize if there's only one non-discard hole?
|
||||
|
||||
// Given holes "l1 = _", "l2 = **_", "l3 = *_", ..., create a
|
||||
// new temporary location ltmp, wire it into place, and return
|
||||
// a hole for "ltmp = _".
|
||||
loc := e.newLoc(nil, true)
|
||||
for _, k := range ks {
|
||||
// N.B., "p = &q" and "p = &tmp; tmp = q" are not
|
||||
// semantically equivalent. To combine holes like "l1
|
||||
// = _" and "l2 = &_", we'd need to wire them as "l1 =
|
||||
// *ltmp" and "l2 = ltmp" and return "ltmp = &_"
|
||||
// instead.
|
||||
if k.derefs < 0 {
|
||||
base.Fatalf("teeHole: negative derefs")
|
||||
}
|
||||
|
||||
e.flow(k, loc)
|
||||
}
|
||||
return loc.asHole()
|
||||
}
|
||||
|
||||
// later returns a new hole that flows into k, but some time later.
|
||||
// Its main effect is to prevent immediate reuse of temporary
|
||||
// variables introduced during Order.
|
||||
func (e *escape) later(k hole) hole {
|
||||
loc := e.newLoc(nil, false)
|
||||
e.flow(k, loc)
|
||||
return loc.asHole()
|
||||
}
|
||||
|
||||
// Fmt is called from node printing to print information about escape analysis results.
|
||||
func Fmt(n ir.Node) string {
|
||||
text := ""
|
||||
switch n.Esc() {
|
||||
case ir.EscUnknown:
|
||||
break
|
||||
|
||||
case ir.EscHeap:
|
||||
text = "esc(h)"
|
||||
|
||||
case ir.EscNone:
|
||||
text = "esc(no)"
|
||||
|
||||
case ir.EscNever:
|
||||
text = "esc(N)"
|
||||
|
||||
default:
|
||||
text = fmt.Sprintf("esc(%d)", n.Esc())
|
||||
}
|
||||
|
||||
if n.Op() == ir.ONAME {
|
||||
n := n.(*ir.Name)
|
||||
if loc, ok := n.Opt.(*location); ok && loc.loopDepth != 0 {
|
||||
if text != "" {
|
||||
text += " "
|
||||
}
|
||||
text += fmt.Sprintf("ld(%d)", loc.loopDepth)
|
||||
}
|
||||
}
|
||||
|
||||
return text
|
||||
}
|
||||
@@ -1,106 +0,0 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package escape
|
||||
|
||||
import (
|
||||
"cmd/compile/internal/base"
|
||||
"math"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const numEscResults = 7
|
||||
|
||||
// An leaks represents a set of assignment flows from a parameter
|
||||
// to the heap or to any of its function's (first numEscResults)
|
||||
// result parameters.
|
||||
type leaks [1 + numEscResults]uint8
|
||||
|
||||
// Empty reports whether l is an empty set (i.e., no assignment flows).
|
||||
func (l leaks) Empty() bool { return l == leaks{} }
|
||||
|
||||
// Heap returns the minimum deref count of any assignment flow from l
|
||||
// to the heap. If no such flows exist, Heap returns -1.
|
||||
func (l leaks) Heap() int { return l.get(0) }
|
||||
|
||||
// Result returns the minimum deref count of any assignment flow from
|
||||
// l to its function's i'th result parameter. If no such flows exist,
|
||||
// Result returns -1.
|
||||
func (l leaks) Result(i int) int { return l.get(1 + i) }
|
||||
|
||||
// AddHeap adds an assignment flow from l to the heap.
|
||||
func (l *leaks) AddHeap(derefs int) { l.add(0, derefs) }
|
||||
|
||||
// AddResult adds an assignment flow from l to its function's i'th
|
||||
// result parameter.
|
||||
func (l *leaks) AddResult(i, derefs int) { l.add(1+i, derefs) }
|
||||
|
||||
func (l *leaks) setResult(i, derefs int) { l.set(1+i, derefs) }
|
||||
|
||||
func (l leaks) get(i int) int { return int(l[i]) - 1 }
|
||||
|
||||
func (l *leaks) add(i, derefs int) {
|
||||
if old := l.get(i); old < 0 || derefs < old {
|
||||
l.set(i, derefs)
|
||||
}
|
||||
}
|
||||
|
||||
func (l *leaks) set(i, derefs int) {
|
||||
v := derefs + 1
|
||||
if v < 0 {
|
||||
base.Fatalf("invalid derefs count: %v", derefs)
|
||||
}
|
||||
if v > math.MaxUint8 {
|
||||
v = math.MaxUint8
|
||||
}
|
||||
|
||||
l[i] = uint8(v)
|
||||
}
|
||||
|
||||
// Optimize removes result flow paths that are equal in length or
|
||||
// longer than the shortest heap flow path.
|
||||
func (l *leaks) Optimize() {
|
||||
// If we have a path to the heap, then there's no use in
|
||||
// keeping equal or longer paths elsewhere.
|
||||
if x := l.Heap(); x >= 0 {
|
||||
for i := 0; i < numEscResults; i++ {
|
||||
if l.Result(i) >= x {
|
||||
l.setResult(i, -1)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var leakTagCache = map[leaks]string{}
|
||||
|
||||
// Encode converts l into a binary string for export data.
|
||||
func (l leaks) Encode() string {
|
||||
if l.Heap() == 0 {
|
||||
// Space optimization: empty string encodes more
|
||||
// efficiently in export data.
|
||||
return ""
|
||||
}
|
||||
if s, ok := leakTagCache[l]; ok {
|
||||
return s
|
||||
}
|
||||
|
||||
n := len(l)
|
||||
for n > 0 && l[n-1] == 0 {
|
||||
n--
|
||||
}
|
||||
s := "esc:" + string(l[:n])
|
||||
leakTagCache[l] = s
|
||||
return s
|
||||
}
|
||||
|
||||
// parseLeaks parses a binary string representing a leaks
|
||||
func parseLeaks(s string) leaks {
|
||||
var l leaks
|
||||
if !strings.HasPrefix(s, "esc:") {
|
||||
l.AddHeap(0)
|
||||
return l
|
||||
}
|
||||
copy(l[:], s[4:])
|
||||
return l
|
||||
}
|
||||
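
As a standalone illustration of the tag format produced by Encode above (a simplified model, not the compiler's own package): each slot stores the minimum deref count plus one, with 0 meaning "no flow", and the exported tag is "esc:" followed by the raw bytes with trailing zeros trimmed.

package main

import "fmt"

const numEscResults = 7

// leaks mirrors the representation above: slot 0 is the heap, slots 1..7
// are the first seven result parameters, and each byte stores the minimum
// deref count plus one (0 = no flow).
type leaks [1 + numEscResults]uint8

func (l *leaks) add(i, derefs int) {
	if old := int(l[i]) - 1; old < 0 || derefs < old {
		l[i] = uint8(derefs + 1)
	}
}

func (l leaks) encode() string {
	n := len(l)
	for n > 0 && l[n-1] == 0 {
		n--
	}
	return "esc:" + string(l[:n])
}

func main() {
	var l leaks
	l.add(0, 1) // flows to the heap after one dereference
	l.add(1, 0) // flows directly to the first result
	fmt.Printf("%q\n", l.encode()) // "esc:\x02\x01"
}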
@@ -1,289 +0,0 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package escape
|
||||
|
||||
import (
|
||||
"cmd/compile/internal/base"
|
||||
"cmd/compile/internal/ir"
|
||||
"cmd/compile/internal/logopt"
|
||||
"cmd/internal/src"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// walkAll computes the minimal dereferences between all pairs of
|
||||
// locations.
|
||||
func (b *batch) walkAll() {
|
||||
// We use a work queue to keep track of locations that we need
|
||||
// to visit, and repeatedly walk until we reach a fixed point.
|
||||
//
|
||||
// We walk once from each location (including the heap), and
|
||||
// then re-enqueue each location on its transition from
|
||||
// transient->!transient and !escapes->escapes, which can each
|
||||
// happen at most once. So we take Θ(len(e.allLocs)) walks.
|
||||
|
||||
// LIFO queue, has enough room for e.allLocs and e.heapLoc.
|
||||
todo := make([]*location, 0, len(b.allLocs)+1)
|
||||
enqueue := func(loc *location) {
|
||||
if !loc.queued {
|
||||
todo = append(todo, loc)
|
||||
loc.queued = true
|
||||
}
|
||||
}
|
||||
|
||||
for _, loc := range b.allLocs {
|
||||
enqueue(loc)
|
||||
}
|
||||
enqueue(&b.heapLoc)
|
||||
|
||||
var walkgen uint32
|
||||
for len(todo) > 0 {
|
||||
root := todo[len(todo)-1]
|
||||
todo = todo[:len(todo)-1]
|
||||
root.queued = false
|
||||
|
||||
walkgen++
|
||||
b.walkOne(root, walkgen, enqueue)
|
||||
}
|
||||
}
|
||||
|
||||
// walkOne computes the minimal number of dereferences from root to
|
||||
// all other locations.
|
||||
func (b *batch) walkOne(root *location, walkgen uint32, enqueue func(*location)) {
|
||||
// The data flow graph has negative edges (from addressing
|
||||
// operations), so we use the Bellman-Ford algorithm. However,
|
||||
// we don't have to worry about infinite negative cycles since
|
||||
// we bound intermediate dereference counts to 0.
|
||||
|
||||
root.walkgen = walkgen
|
||||
root.derefs = 0
|
||||
root.dst = nil
|
||||
|
||||
todo := []*location{root} // LIFO queue
|
||||
for len(todo) > 0 {
|
||||
l := todo[len(todo)-1]
|
||||
todo = todo[:len(todo)-1]
|
||||
|
||||
derefs := l.derefs
|
||||
|
||||
// If l.derefs < 0, then l's address flows to root.
|
||||
addressOf := derefs < 0
|
||||
if addressOf {
|
||||
// For a flow path like "root = &l; l = x",
|
||||
// l's address flows to root, but x's does
|
||||
// not. We recognize this by lower bounding
|
||||
// derefs at 0.
|
||||
derefs = 0
|
||||
|
||||
// If l's address flows to a non-transient
|
||||
// location, then l can't be transiently
|
||||
// allocated.
|
||||
if !root.transient && l.transient {
|
||||
l.transient = false
|
||||
enqueue(l)
|
||||
}
|
||||
}
|
||||
|
||||
if b.outlives(root, l) {
|
||||
// l's value flows to root. If l is a function
|
||||
// parameter and root is the heap or a
|
||||
// corresponding result parameter, then record
|
||||
// that value flow for tagging the function
|
||||
// later.
|
||||
if l.isName(ir.PPARAM) {
|
||||
if (logopt.Enabled() || base.Flag.LowerM >= 2) && !l.escapes {
|
||||
if base.Flag.LowerM >= 2 {
|
||||
fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos()), l.n, b.explainLoc(root), derefs)
|
||||
}
|
||||
explanation := b.explainPath(root, l)
|
||||
if logopt.Enabled() {
|
||||
var e_curfn *ir.Func // TODO(mdempsky): Fix.
|
||||
logopt.LogOpt(l.n.Pos(), "leak", "escape", ir.FuncName(e_curfn),
|
||||
fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, b.explainLoc(root), derefs), explanation)
|
||||
}
|
||||
}
|
||||
l.leakTo(root, derefs)
|
||||
}
|
||||
|
||||
// If l's address flows somewhere that
|
||||
// outlives it, then l needs to be heap
|
||||
// allocated.
|
||||
if addressOf && !l.escapes {
|
||||
if logopt.Enabled() || base.Flag.LowerM >= 2 {
|
||||
if base.Flag.LowerM >= 2 {
|
||||
fmt.Printf("%s: %v escapes to heap:\n", base.FmtPos(l.n.Pos()), l.n)
|
||||
}
|
||||
explanation := b.explainPath(root, l)
|
||||
if logopt.Enabled() {
|
||||
var e_curfn *ir.Func // TODO(mdempsky): Fix.
|
||||
logopt.LogOpt(l.n.Pos(), "escape", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", l.n), explanation)
|
||||
}
|
||||
}
|
||||
l.escapes = true
|
||||
enqueue(l)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
for i, edge := range l.edges {
|
||||
if edge.src.escapes {
|
||||
continue
|
||||
}
|
||||
d := derefs + edge.derefs
|
||||
if edge.src.walkgen != walkgen || edge.src.derefs > d {
|
||||
edge.src.walkgen = walkgen
|
||||
edge.src.derefs = d
|
||||
edge.src.dst = l
|
||||
edge.src.dstEdgeIdx = i
|
||||
todo = append(todo, edge.src)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// explainPath prints an explanation of how src flows to the walk root.
|
||||
func (b *batch) explainPath(root, src *location) []*logopt.LoggedOpt {
|
||||
visited := make(map[*location]bool)
|
||||
pos := base.FmtPos(src.n.Pos())
|
||||
var explanation []*logopt.LoggedOpt
|
||||
for {
|
||||
// Prevent infinite loop.
|
||||
if visited[src] {
|
||||
if base.Flag.LowerM >= 2 {
|
||||
fmt.Printf("%s: warning: truncated explanation due to assignment cycle; see golang.org/issue/35518\n", pos)
|
||||
}
|
||||
break
|
||||
}
|
||||
visited[src] = true
|
||||
dst := src.dst
|
||||
edge := &dst.edges[src.dstEdgeIdx]
|
||||
if edge.src != src {
|
||||
base.Fatalf("path inconsistency: %v != %v", edge.src, src)
|
||||
}
|
||||
|
||||
explanation = b.explainFlow(pos, dst, src, edge.derefs, edge.notes, explanation)
|
||||
|
||||
if dst == root {
|
||||
break
|
||||
}
|
||||
src = dst
|
||||
}
|
||||
|
||||
return explanation
|
||||
}
|
||||
|
||||
func (b *batch) explainFlow(pos string, dst, srcloc *location, derefs int, notes *note, explanation []*logopt.LoggedOpt) []*logopt.LoggedOpt {
|
||||
ops := "&"
|
||||
if derefs >= 0 {
|
||||
ops = strings.Repeat("*", derefs)
|
||||
}
|
||||
print := base.Flag.LowerM >= 2
|
||||
|
||||
flow := fmt.Sprintf(" flow: %s = %s%v:", b.explainLoc(dst), ops, b.explainLoc(srcloc))
|
||||
if print {
|
||||
fmt.Printf("%s:%s\n", pos, flow)
|
||||
}
|
||||
if logopt.Enabled() {
|
||||
var epos src.XPos
|
||||
if notes != nil {
|
||||
epos = notes.where.Pos()
|
||||
} else if srcloc != nil && srcloc.n != nil {
|
||||
epos = srcloc.n.Pos()
|
||||
}
|
||||
var e_curfn *ir.Func // TODO(mdempsky): Fix.
|
||||
explanation = append(explanation, logopt.NewLoggedOpt(epos, "escflow", "escape", ir.FuncName(e_curfn), flow))
|
||||
}
|
||||
|
||||
for note := notes; note != nil; note = note.next {
|
||||
if print {
|
||||
fmt.Printf("%s: from %v (%v) at %s\n", pos, note.where, note.why, base.FmtPos(note.where.Pos()))
|
||||
}
|
||||
if logopt.Enabled() {
|
||||
var e_curfn *ir.Func // TODO(mdempsky): Fix.
|
||||
explanation = append(explanation, logopt.NewLoggedOpt(note.where.Pos(), "escflow", "escape", ir.FuncName(e_curfn),
|
||||
fmt.Sprintf(" from %v (%v)", note.where, note.why)))
|
||||
}
|
||||
}
|
||||
return explanation
|
||||
}
|
||||
|
||||
func (b *batch) explainLoc(l *location) string {
|
||||
if l == &b.heapLoc {
|
||||
return "{heap}"
|
||||
}
|
||||
if l.n == nil {
|
||||
// TODO(mdempsky): Omit entirely.
|
||||
return "{temp}"
|
||||
}
|
||||
if l.n.Op() == ir.ONAME {
|
||||
return fmt.Sprintf("%v", l.n)
|
||||
}
|
||||
return fmt.Sprintf("{storage for %v}", l.n)
|
||||
}
|
||||
|
||||
// outlives reports whether values stored in l may survive beyond
|
||||
// other's lifetime if stack allocated.
|
||||
func (b *batch) outlives(l, other *location) bool {
|
||||
// The heap outlives everything.
|
||||
if l.escapes {
|
||||
return true
|
||||
}
|
||||
|
||||
// We don't know what callers do with returned values, so
|
||||
// pessimistically we need to assume they flow to the heap and
|
||||
// outlive everything too.
|
||||
if l.isName(ir.PPARAMOUT) {
|
||||
// Exception: Directly called closures can return
|
||||
// locations allocated outside of them without forcing
|
||||
// them to the heap. For example:
|
||||
//
|
||||
// var u int // okay to stack allocate
|
||||
// *(func() *int { return &u }()) = 42
|
||||
if containsClosure(other.curfn, l.curfn) && l.curfn.ClosureCalled() {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// If l and other are within the same function, then l
|
||||
// outlives other if it was declared outside other's loop
|
||||
// scope. For example:
|
||||
//
|
||||
// var l *int
|
||||
// for {
|
||||
// l = new(int)
|
||||
// }
|
||||
if l.curfn == other.curfn && l.loopDepth < other.loopDepth {
|
||||
return true
|
||||
}
|
||||
|
||||
// If other is declared within a child closure of where l is
|
||||
// declared, then l outlives it. For example:
|
||||
//
|
||||
// var l *int
|
||||
// func() {
|
||||
// l = new(int)
|
||||
// }
|
||||
if containsClosure(l.curfn, other.curfn) {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// containsClosure reports whether c is a closure contained within f.
|
||||
func containsClosure(f, c *ir.Func) bool {
|
||||
// Common case.
|
||||
if f == c {
|
||||
return false
|
||||
}
|
||||
|
||||
// Closures within function Foo are named like "Foo.funcN..."
|
||||
// TODO(mdempsky): Better way to recognize this.
|
||||
fn := f.Sym().Name
|
||||
cn := c.Sym().Name
|
||||
return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.'
|
||||
}
|
||||
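
A hedged note on containsClosure above: it leans on the compiler's naming scheme for closures, where a function literal defined inside Foo is compiled as a function whose symbol name starts with "Foo." (e.g. Foo.func1). A sketch of the shape it recognizes; the exact generated names are an implementation detail and may differ across versions.

package p

func Foo() func() int {
	x := 0
	// Compiled roughly as p.Foo.func1, so containsClosure(Foo, <this closure>)
	// reports true via the "Foo." prefix check.
	return func() int { x++; return x }
}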
@@ -1,208 +0,0 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package escape
|
||||
|
||||
import (
|
||||
"cmd/compile/internal/base"
|
||||
"cmd/compile/internal/ir"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// stmt evaluates a single Go statement.
|
||||
func (e *escape) stmt(n ir.Node) {
|
||||
if n == nil {
|
||||
return
|
||||
}
|
||||
|
||||
lno := ir.SetPos(n)
|
||||
defer func() {
|
||||
base.Pos = lno
|
||||
}()
|
||||
|
||||
if base.Flag.LowerM > 2 {
|
||||
fmt.Printf("%v:[%d] %v stmt: %v\n", base.FmtPos(base.Pos), e.loopDepth, e.curfn, n)
|
||||
}
|
||||
|
||||
e.stmts(n.Init())
|
||||
|
||||
switch n.Op() {
|
||||
default:
|
||||
base.Fatalf("unexpected stmt: %v", n)
|
||||
|
||||
case ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL, ir.OINLMARK:
|
||||
// nop
|
||||
|
||||
case ir.OBREAK, ir.OCONTINUE, ir.OGOTO:
|
||||
// TODO(mdempsky): Handle dead code?
|
||||
|
||||
case ir.OBLOCK:
|
||||
n := n.(*ir.BlockStmt)
|
||||
e.stmts(n.List)
|
||||
|
||||
case ir.ODCL:
|
||||
// Record loop depth at declaration.
|
||||
n := n.(*ir.Decl)
|
||||
if !ir.IsBlank(n.X) {
|
||||
e.dcl(n.X)
|
||||
}
|
||||
|
||||
case ir.OLABEL:
|
||||
n := n.(*ir.LabelStmt)
|
||||
switch e.labels[n.Label] {
|
||||
case nonlooping:
|
||||
if base.Flag.LowerM > 2 {
|
||||
fmt.Printf("%v:%v non-looping label\n", base.FmtPos(base.Pos), n)
|
||||
}
|
||||
case looping:
|
||||
if base.Flag.LowerM > 2 {
|
||||
fmt.Printf("%v: %v looping label\n", base.FmtPos(base.Pos), n)
|
||||
}
|
||||
e.loopDepth++
|
||||
default:
|
||||
base.Fatalf("label missing tag")
|
||||
}
|
||||
delete(e.labels, n.Label)
|
||||
|
||||
case ir.OIF:
|
||||
n := n.(*ir.IfStmt)
|
||||
e.discard(n.Cond)
|
||||
e.block(n.Body)
|
||||
e.block(n.Else)
|
||||
|
||||
case ir.OFOR, ir.OFORUNTIL:
|
||||
n := n.(*ir.ForStmt)
|
||||
e.loopDepth++
|
||||
e.discard(n.Cond)
|
||||
e.stmt(n.Post)
|
||||
e.block(n.Body)
|
||||
e.loopDepth--
|
||||
|
||||
case ir.ORANGE:
|
||||
// for Key, Value = range X { Body }
|
||||
n := n.(*ir.RangeStmt)
|
||||
|
||||
// X is evaluated outside the loop.
|
||||
tmp := e.newLoc(nil, false)
|
||||
e.expr(tmp.asHole(), n.X)
|
||||
|
||||
e.loopDepth++
|
||||
ks := e.addrs([]ir.Node{n.Key, n.Value})
|
||||
if n.X.Type().IsArray() {
|
||||
e.flow(ks[1].note(n, "range"), tmp)
|
||||
} else {
|
||||
e.flow(ks[1].deref(n, "range-deref"), tmp)
|
||||
}
|
||||
e.reassigned(ks, n)
|
||||
|
||||
e.block(n.Body)
|
||||
e.loopDepth--
|
||||
|
||||
case ir.OSWITCH:
|
||||
n := n.(*ir.SwitchStmt)
|
||||
|
||||
if guard, ok := n.Tag.(*ir.TypeSwitchGuard); ok {
|
||||
var ks []hole
|
||||
if guard.Tag != nil {
|
||||
for _, cas := range n.Cases {
|
||||
cv := cas.Var
|
||||
k := e.dcl(cv) // type switch variables have no ODCL.
|
||||
if cv.Type().HasPointers() {
|
||||
ks = append(ks, k.dotType(cv.Type(), cas, "switch case"))
|
||||
}
|
||||
}
|
||||
}
|
||||
e.expr(e.teeHole(ks...), n.Tag.(*ir.TypeSwitchGuard).X)
|
||||
} else {
|
||||
e.discard(n.Tag)
|
||||
}
|
||||
|
||||
for _, cas := range n.Cases {
|
||||
e.discards(cas.List)
|
||||
e.block(cas.Body)
|
||||
}
|
||||
|
||||
case ir.OSELECT:
|
||||
n := n.(*ir.SelectStmt)
|
||||
for _, cas := range n.Cases {
|
||||
e.stmt(cas.Comm)
|
||||
e.block(cas.Body)
|
||||
}
|
||||
case ir.ORECV:
|
||||
// TODO(mdempsky): Consider e.discard(n.Left).
|
||||
n := n.(*ir.UnaryExpr)
|
||||
e.exprSkipInit(e.discardHole(), n) // already visited n.Ninit
|
||||
case ir.OSEND:
|
||||
n := n.(*ir.SendStmt)
|
||||
e.discard(n.Chan)
|
||||
e.assignHeap(n.Value, "send", n)
|
||||
|
||||
case ir.OAS:
|
||||
n := n.(*ir.AssignStmt)
|
||||
e.assignList([]ir.Node{n.X}, []ir.Node{n.Y}, "assign", n)
|
||||
case ir.OASOP:
|
||||
n := n.(*ir.AssignOpStmt)
|
||||
// TODO(mdempsky): Worry about OLSH/ORSH?
|
||||
e.assignList([]ir.Node{n.X}, []ir.Node{n.Y}, "assign", n)
|
||||
case ir.OAS2:
|
||||
n := n.(*ir.AssignListStmt)
|
||||
e.assignList(n.Lhs, n.Rhs, "assign-pair", n)
|
||||
|
||||
case ir.OAS2DOTTYPE: // v, ok = x.(type)
|
||||
n := n.(*ir.AssignListStmt)
|
||||
e.assignList(n.Lhs, n.Rhs, "assign-pair-dot-type", n)
|
||||
case ir.OAS2MAPR: // v, ok = m[k]
|
||||
n := n.(*ir.AssignListStmt)
|
||||
e.assignList(n.Lhs, n.Rhs, "assign-pair-mapr", n)
|
||||
case ir.OAS2RECV, ir.OSELRECV2: // v, ok = <-ch
|
||||
n := n.(*ir.AssignListStmt)
|
||||
e.assignList(n.Lhs, n.Rhs, "assign-pair-receive", n)
|
||||
|
||||
case ir.OAS2FUNC:
|
||||
n := n.(*ir.AssignListStmt)
|
||||
e.stmts(n.Rhs[0].Init())
|
||||
ks := e.addrs(n.Lhs)
|
||||
e.call(ks, n.Rhs[0])
|
||||
e.reassigned(ks, n)
|
||||
case ir.ORETURN:
|
||||
n := n.(*ir.ReturnStmt)
|
||||
results := e.curfn.Type().Results().FieldSlice()
|
||||
dsts := make([]ir.Node, len(results))
|
||||
for i, res := range results {
|
||||
dsts[i] = res.Nname.(*ir.Name)
|
||||
}
|
||||
e.assignList(dsts, n.Results, "return", n)
|
||||
case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OINLCALL, ir.OCLOSE, ir.OCOPY, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
|
||||
e.call(nil, n)
|
||||
case ir.OGO, ir.ODEFER:
|
||||
n := n.(*ir.GoDeferStmt)
|
||||
e.goDeferStmt(n)
|
||||
|
||||
case ir.OTAILCALL:
|
||||
n := n.(*ir.TailCallStmt)
|
||||
e.call(nil, n.Call)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *escape) stmts(l ir.Nodes) {
|
||||
for _, n := range l {
|
||||
e.stmt(n)
|
||||
}
|
||||
}
|
||||
|
||||
// block is like stmts, but preserves loopDepth.
|
||||
func (e *escape) block(l ir.Nodes) {
|
||||
old := e.loopDepth
|
||||
e.stmts(l)
|
||||
e.loopDepth = old
|
||||
}
|
||||
|
||||
func (e *escape) dcl(n *ir.Name) hole {
|
||||
if n.Curfn != e.curfn || n.IsClosureVar() {
|
||||
base.Fatalf("bad declaration of %v", n)
|
||||
}
|
||||
loc := e.oldLoc(n)
|
||||
loc.loopDepth = e.loopDepth
|
||||
return loc.asHole()
|
||||
}
|
||||
@@ -1,215 +0,0 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package escape
|
||||
|
||||
import (
|
||||
"cmd/compile/internal/ir"
|
||||
"cmd/compile/internal/typecheck"
|
||||
)
|
||||
|
||||
func isSliceSelfAssign(dst, src ir.Node) bool {
|
||||
// Detect the following special case.
|
||||
//
|
||||
// func (b *Buffer) Foo() {
|
||||
// n, m := ...
|
||||
// b.buf = b.buf[n:m]
|
||||
// }
|
||||
//
|
||||
// This assignment is a no-op for escape analysis,
|
||||
// it does not store any new pointers into b that were not already there.
|
||||
// However, without this special case b will escape, because we assign to OIND/ODOTPTR.
|
||||
// Here we assume that the statement will not contain calls,
|
||||
// that is, that order will move any calls to init.
|
||||
// Otherwise base ONAME value could change between the moments
|
||||
// when we evaluate it for dst and for src.
|
||||
|
||||
// dst is ONAME dereference.
|
||||
var dstX ir.Node
|
||||
switch dst.Op() {
|
||||
default:
|
||||
return false
|
||||
case ir.ODEREF:
|
||||
dst := dst.(*ir.StarExpr)
|
||||
dstX = dst.X
|
||||
case ir.ODOTPTR:
|
||||
dst := dst.(*ir.SelectorExpr)
|
||||
dstX = dst.X
|
||||
}
|
||||
if dstX.Op() != ir.ONAME {
|
||||
return false
|
||||
}
|
||||
// src is a slice operation.
|
||||
switch src.Op() {
|
||||
case ir.OSLICE, ir.OSLICE3, ir.OSLICESTR:
|
||||
// OK.
|
||||
case ir.OSLICEARR, ir.OSLICE3ARR:
|
||||
// Since arrays are embedded into containing object,
|
||||
// slice of non-pointer array will introduce a new pointer into b that was not already there
|
||||
// (pointer to b itself). After such assignment, if b contents escape,
|
||||
// b escapes as well. If we ignore such OSLICEARR, we will conclude
|
||||
// that b does not escape when b contents do.
|
||||
//
|
||||
// Pointer to an array is OK since it's not stored inside b directly.
|
||||
// For slicing an array (not pointer to array), there is an implicit OADDR.
|
||||
// We check that to determine non-pointer array slicing.
|
||||
src := src.(*ir.SliceExpr)
|
||||
if src.X.Op() == ir.OADDR {
|
||||
return false
|
||||
}
|
||||
default:
|
||||
return false
|
||||
}
|
||||
// slice is applied to ONAME dereference.
|
||||
var baseX ir.Node
|
||||
switch base := src.(*ir.SliceExpr).X; base.Op() {
|
||||
default:
|
||||
return false
|
||||
case ir.ODEREF:
|
||||
base := base.(*ir.StarExpr)
|
||||
baseX = base.X
|
||||
case ir.ODOTPTR:
|
||||
base := base.(*ir.SelectorExpr)
|
||||
baseX = base.X
|
||||
}
|
||||
if baseX.Op() != ir.ONAME {
|
||||
return false
|
||||
}
|
||||
// dst and src reference the same base ONAME.
|
||||
return dstX.(*ir.Name) == baseX.(*ir.Name)
|
||||
}
|
||||
|
||||
// isSelfAssign reports whether assignment from src to dst can
|
||||
// be ignored by the escape analysis as it's effectively a self-assignment.
|
||||
func isSelfAssign(dst, src ir.Node) bool {
|
||||
if isSliceSelfAssign(dst, src) {
|
||||
return true
|
||||
}
|
||||
|
||||
// Detect trivial assignments that assign back to the same object.
|
||||
//
|
||||
// It covers these cases:
|
||||
// val.x = val.y
|
||||
// val.x[i] = val.y[j]
|
||||
// val.x1.x2 = val.x1.y2
|
||||
// ... etc
|
||||
//
|
||||
// These assignments do not change assigned object lifetime.
|
||||
|
||||
if dst == nil || src == nil || dst.Op() != src.Op() {
|
||||
return false
|
||||
}
|
||||
|
||||
// The expression prefix must be both "safe" and identical.
|
||||
switch dst.Op() {
|
||||
case ir.ODOT, ir.ODOTPTR:
|
||||
// Safe trailing accessors that are permitted to differ.
|
||||
dst := dst.(*ir.SelectorExpr)
|
||||
src := src.(*ir.SelectorExpr)
|
||||
return ir.SameSafeExpr(dst.X, src.X)
|
||||
case ir.OINDEX:
|
||||
dst := dst.(*ir.IndexExpr)
|
||||
src := src.(*ir.IndexExpr)
|
||||
if mayAffectMemory(dst.Index) || mayAffectMemory(src.Index) {
|
||||
return false
|
||||
}
|
||||
return ir.SameSafeExpr(dst.X, src.X)
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// mayAffectMemory reports whether evaluation of n may affect the program's
|
||||
// memory state. If the expression can't affect memory state, then it can be
|
||||
// safely ignored by the escape analysis.
|
||||
func mayAffectMemory(n ir.Node) bool {
|
||||
// We may want to use a list of "memory safe" ops instead of generally
|
||||
// "side-effect free", which would include all calls and other ops that can
|
||||
// allocate or change global state. For now, it's safer to start with the latter.
|
||||
//
|
||||
// We're ignoring things like division by zero, index out of range,
|
||||
// and nil pointer dereference here.
|
||||
|
||||
// TODO(rsc): It seems like it should be possible to replace this with
|
||||
// an ir.Any looking for any op that's not the ones in the case statement.
|
||||
// But that produces changes in the compiled output detected by buildall.
|
||||
switch n.Op() {
|
||||
case ir.ONAME, ir.OLITERAL, ir.ONIL:
|
||||
return false
|
||||
|
||||
case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD:
|
||||
n := n.(*ir.BinaryExpr)
|
||||
return mayAffectMemory(n.X) || mayAffectMemory(n.Y)
|
||||
|
||||
case ir.OINDEX:
|
||||
n := n.(*ir.IndexExpr)
|
||||
return mayAffectMemory(n.X) || mayAffectMemory(n.Index)
|
||||
|
||||
case ir.OCONVNOP, ir.OCONV:
|
||||
n := n.(*ir.ConvExpr)
|
||||
return mayAffectMemory(n.X)
|
||||
|
||||
case ir.OLEN, ir.OCAP, ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
|
||||
n := n.(*ir.UnaryExpr)
|
||||
return mayAffectMemory(n.X)
|
||||
|
||||
case ir.ODOT, ir.ODOTPTR:
|
||||
n := n.(*ir.SelectorExpr)
|
||||
return mayAffectMemory(n.X)
|
||||
|
||||
case ir.ODEREF:
|
||||
n := n.(*ir.StarExpr)
|
||||
return mayAffectMemory(n.X)
|
||||
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// HeapAllocReason returns the reason the given Node must be heap
|
||||
// allocated, or the empty string if it doesn't.
|
||||
func HeapAllocReason(n ir.Node) string {
|
||||
if n == nil || n.Type() == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Parameters are always passed via the stack.
|
||||
if n.Op() == ir.ONAME {
|
||||
n := n.(*ir.Name)
|
||||
if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT {
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
if n.Type().Size() > ir.MaxStackVarSize {
|
||||
return "too large for stack"
|
||||
}
|
||||
|
||||
if (n.Op() == ir.ONEW || n.Op() == ir.OPTRLIT) && n.Type().Elem().Size() > ir.MaxImplicitStackVarSize {
|
||||
return "too large for stack"
|
||||
}
|
||||
|
||||
if n.Op() == ir.OCLOSURE && typecheck.ClosureType(n.(*ir.ClosureExpr)).Size() > ir.MaxImplicitStackVarSize {
|
||||
return "too large for stack"
|
||||
}
|
||||
if n.Op() == ir.OMETHVALUE && typecheck.MethodValueType(n.(*ir.SelectorExpr)).Size() > ir.MaxImplicitStackVarSize {
|
||||
return "too large for stack"
|
||||
}
|
||||
|
||||
if n.Op() == ir.OMAKESLICE {
|
||||
n := n.(*ir.MakeExpr)
|
||||
r := n.Cap
|
||||
if r == nil {
|
||||
r = n.Len
|
||||
}
|
||||
if !ir.IsSmallIntConst(r) {
|
||||
return "non-constant size"
|
||||
}
|
||||
if t := n.Type(); t.Elem().Size() != 0 && ir.Int64Val(r) > ir.MaxImplicitStackVarSize/t.Elem().Size() {
|
||||
return "too large for stack"
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
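
To make HeapAllocReason above concrete, here is a small sketch of values that trip its checks; the exact thresholds (ir.MaxStackVarSize, ir.MaxImplicitStackVarSize) are compiler constants and may change between releases.

package p

func sizes(n int) {
	var big [16 << 20]byte // likely "too large for stack" under the explicit-variable limit
	_ = big

	fixed := make([]byte, 64) // small constant size: eligible to stay on the stack
	_ = fixed

	dynamic := make([]byte, n) // "non-constant size": always heap-allocated
	_ = dynamic
}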
@@ -5,16 +5,46 @@
|
||||
package gc
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/constant"
|
||||
|
||||
"cmd/compile/internal/base"
|
||||
"cmd/compile/internal/inline"
|
||||
"cmd/compile/internal/ir"
|
||||
"cmd/compile/internal/typecheck"
|
||||
"cmd/compile/internal/types"
|
||||
"cmd/internal/bio"
|
||||
"fmt"
|
||||
"go/constant"
|
||||
)
|
||||
|
||||
func exportf(bout *bio.Writer, format string, args ...interface{}) {
|
||||
fmt.Fprintf(bout, format, args...)
|
||||
if base.Debug.Export != 0 {
|
||||
fmt.Printf(format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
func dumpexport(bout *bio.Writer) {
|
||||
p := &exporter{marked: make(map[*types.Type]bool)}
|
||||
for _, n := range typecheck.Target.Exports {
|
||||
// Must catch it here rather than Export(), because the type can be
|
||||
// not fully set (still TFORW) when Export() is called.
|
||||
if n.Type() != nil && n.Type().HasTParam() {
|
||||
base.Fatalf("Cannot (yet) export a generic type: %v", n)
|
||||
}
|
||||
p.markObject(n)
|
||||
}
|
||||
|
||||
// The linker also looks for the $$ marker - use char after $$ to distinguish format.
|
||||
exportf(bout, "\n$$B\n") // indicate binary export format
|
||||
off := bout.Offset()
|
||||
typecheck.WriteExports(bout.Writer)
|
||||
size := bout.Offset() - off
|
||||
exportf(bout, "\n$$\n")
|
||||
|
||||
if base.Debug.Export != 0 {
|
||||
fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", base.Ctxt.Pkgpath, size)
|
||||
}
|
||||
}
|
||||
|
||||
func dumpasmhdr() {
|
||||
b, err := bio.Create(base.Flag.AsmHdr)
|
||||
if err != nil {
|
||||
@@ -38,7 +68,7 @@ func dumpasmhdr() {
|
||||
if !t.IsStruct() || t.StructType().Map != nil || t.IsFuncArgStruct() {
|
||||
break
|
||||
}
|
||||
fmt.Fprintf(b, "#define %s__size %d\n", n.Sym().Name, int(t.Size()))
|
||||
fmt.Fprintf(b, "#define %s__size %d\n", n.Sym().Name, int(t.Width))
|
||||
for _, f := range t.Fields().Slice() {
|
||||
if !f.Sym.IsBlank() {
|
||||
fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym().Name, f.Sym.Name, int(f.Offset))
|
||||
@@ -49,3 +79,83 @@ func dumpasmhdr() {
|
||||
|
||||
b.Close()
|
||||
}
|
||||
|
||||
type exporter struct {
|
||||
marked map[*types.Type]bool // types already seen by markType
|
||||
}
|
||||
|
||||
// markObject visits a reachable object.
|
||||
func (p *exporter) markObject(n ir.Node) {
|
||||
if n.Op() == ir.ONAME {
|
||||
n := n.(*ir.Name)
|
||||
if n.Class == ir.PFUNC {
|
||||
inline.Inline_Flood(n, typecheck.Export)
|
||||
}
|
||||
}
|
||||
|
||||
p.markType(n.Type())
|
||||
}
|
||||
|
||||
// markType recursively visits types reachable from t to identify
|
||||
// functions whose inline bodies may be needed.
|
||||
func (p *exporter) markType(t *types.Type) {
|
||||
if p.marked[t] {
|
||||
return
|
||||
}
|
||||
p.marked[t] = true
|
||||
|
||||
// If this is a named type, mark all of its associated
|
||||
// methods. Skip interface types because t.Methods contains
|
||||
// only their unexpanded method set (i.e., exclusive of
|
||||
// interface embeddings), and the switch statement below
|
||||
// handles their full method set.
|
||||
if t.Sym() != nil && t.Kind() != types.TINTER {
|
||||
for _, m := range t.Methods().Slice() {
|
||||
if types.IsExported(m.Sym.Name) {
|
||||
p.markObject(ir.AsNode(m.Nname))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Recursively mark any types that can be produced given a
|
||||
// value of type t: dereferencing a pointer; indexing or
|
||||
// iterating over an array, slice, or map; receiving from a
|
||||
// channel; accessing a struct field or interface method; or
|
||||
// calling a function.
|
||||
//
|
||||
// Notably, we don't mark function parameter types, because
|
||||
// the user already needs some way to construct values of
|
||||
// those types.
|
||||
switch t.Kind() {
|
||||
case types.TPTR, types.TARRAY, types.TSLICE:
|
||||
p.markType(t.Elem())
|
||||
|
||||
case types.TCHAN:
|
||||
if t.ChanDir().CanRecv() {
|
||||
p.markType(t.Elem())
|
||||
}
|
||||
|
||||
case types.TMAP:
|
||||
p.markType(t.Key())
|
||||
p.markType(t.Elem())
|
||||
|
||||
case types.TSTRUCT:
|
||||
for _, f := range t.FieldSlice() {
|
||||
if types.IsExported(f.Sym.Name) || f.Embedded != 0 {
|
||||
p.markType(f.Type)
|
||||
}
|
||||
}
|
||||
|
||||
case types.TFUNC:
|
||||
for _, f := range t.Results().FieldSlice() {
|
||||
p.markType(f.Type)
|
||||
}
|
||||
|
||||
case types.TINTER:
|
||||
for _, f := range t.AllMethods().Slice() {
|
||||
if types.IsExported(f.Sym.Name) {
|
||||
p.markType(f.Type)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -32,7 +32,6 @@ import (
|
||||
"log"
|
||||
"os"
|
||||
"runtime"
|
||||
"sort"
|
||||
)
|
||||
|
||||
func hidePanic() {
|
||||
@@ -84,7 +83,7 @@ func Main(archInit func(*ssagen.ArchInfo)) {
|
||||
types.BuiltinPkg.Prefix = "go.builtin" // not go%2ebuiltin
|
||||
|
||||
// pseudo-package, accessed by import "unsafe"
|
||||
types.UnsafePkg = types.NewPkg("unsafe", "unsafe")
|
||||
ir.Pkgs.Unsafe = types.NewPkg("unsafe", "unsafe")
|
||||
|
||||
// Pseudo-package that contains the compiler's builtin
|
||||
// declarations for package runtime. These are declared in a
|
||||
@@ -160,6 +159,9 @@ func Main(archInit func(*ssagen.ArchInfo)) {
|
||||
dwarf.EnableLogging(base.Debug.DwarfInl != 0)
|
||||
}
|
||||
if base.Debug.SoftFloat != 0 {
|
||||
if buildcfg.Experiment.RegabiArgs {
|
||||
log.Fatalf("softfloat mode with GOEXPERIMENT=regabiargs not implemented ")
|
||||
}
|
||||
ssagen.Arch.SoftFloat = true
|
||||
}
|
||||
|
||||
@@ -179,40 +181,21 @@ func Main(archInit func(*ssagen.ArchInfo)) {
|
||||
|
||||
typecheck.Target = new(ir.Package)
|
||||
|
||||
typecheck.NeedITab = func(t, iface *types.Type) { reflectdata.ITabAddr(t, iface) }
|
||||
typecheck.NeedRuntimeType = reflectdata.NeedRuntimeType // TODO(rsc): TypeSym for lock?
|
||||
|
||||
base.AutogeneratedPos = makePos(src.NewFileBase("<autogenerated>", "<autogenerated>"), 1, 0)
|
||||
|
||||
typecheck.InitUniverse()
|
||||
typecheck.InitRuntime()
|
||||
|
||||
// Parse and typecheck input.
|
||||
noder.LoadPackage(flag.Args())
|
||||
|
||||
dwarfgen.RecordPackageName()
|
||||
|
||||
// Prepare for backend processing. This must happen before pkginit,
|
||||
// because it generates itabs for initializing global variables.
|
||||
ssagen.InitConfig()
|
||||
|
||||
// Create "init" function for package-scope variable initialization
|
||||
// statements, if any.
|
||||
//
|
||||
// Note: This needs to happen early, before any optimizations. The
|
||||
// Go spec defines a precise order than initialization should be
|
||||
// carried out in, and even mundane optimizations like dead code
|
||||
// removal can skew the results (e.g., #43444).
|
||||
pkginit.MakeInit()
|
||||
|
||||
// Stability quirk: sort top-level declarations, so we're not
|
||||
// sensitive to the order that functions are added. In particular,
|
||||
// the order that noder+typecheck add function closures is very
|
||||
// subtle, and not important to reproduce.
|
||||
if base.Debug.UnifiedQuirks != 0 {
|
||||
s := typecheck.Target.Decls
|
||||
sort.SliceStable(s, func(i, j int) bool {
|
||||
return s[i].Pos().Before(s[j].Pos())
|
||||
})
|
||||
// Build init task.
|
||||
if initTask := pkginit.Task(); initTask != nil {
|
||||
typecheck.Export(initTask)
|
||||
}
|
||||
|
||||
// Eliminate some obviously dead code.
|
||||
@@ -245,7 +228,6 @@ func Main(archInit func(*ssagen.ArchInfo)) {
|
||||
if base.Flag.LowerL != 0 {
|
||||
inline.InlinePackage()
|
||||
}
|
||||
noder.MakeWrappers(typecheck.Target) // must happen after inlining
|
||||
|
||||
// Devirtualize.
|
||||
for _, n := range typecheck.Target.Decls {
|
||||
@@ -255,11 +237,6 @@ func Main(archInit func(*ssagen.ArchInfo)) {
|
||||
}
|
||||
ir.CurFunc = nil
|
||||
|
||||
// Build init task, if needed.
|
||||
if initTask := pkginit.Task(); initTask != nil {
|
||||
typecheck.Export(initTask)
|
||||
}
|
||||
|
||||
// Generate ABI wrappers. Must happen before escape analysis
|
||||
// and doesn't benefit from dead-coding or inlining.
|
||||
symABIs.GenABIWrappers()
|
||||
@@ -275,11 +252,6 @@ func Main(archInit func(*ssagen.ArchInfo)) {
|
||||
base.Timer.Start("fe", "escapes")
|
||||
escape.Funcs(typecheck.Target.Decls)
|
||||
|
||||
// TODO(mdempsky): This is a hack. We need a proper, global work
|
||||
// queue for scheduling function compilation so components don't
|
||||
// need to adjust their behavior depending on when they're called.
|
||||
reflectdata.AfterGlobalEscapeAnalysis = true
|
||||
|
||||
// Collect information for go:nowritebarrierrec
|
||||
// checking. This must happen before transforming closures during Walk
|
||||
// We'll do the final check after write barriers are
|
||||
@@ -288,7 +260,17 @@ func Main(archInit func(*ssagen.ArchInfo)) {
|
||||
ssagen.EnableNoWriteBarrierRecCheck()
|
||||
}
|
||||
|
||||
// Prepare for SSA compilation.
|
||||
// This must be before CompileITabs, because CompileITabs
|
||||
// can trigger function compilation.
|
||||
typecheck.InitRuntime()
|
||||
ssagen.InitConfig()
|
||||
|
||||
// Just before compilation, compile itabs found on
|
||||
// the right side of OCONVIFACE so that methods
|
||||
// can be de-virtualized during compilation.
|
||||
ir.CurFunc = nil
|
||||
reflectdata.CompileITabs()
|
||||
|
||||
// Compile top level functions.
|
||||
// Don't use range--walk can add functions to Target.Decls.
|
||||
@@ -296,10 +278,6 @@ func Main(archInit func(*ssagen.ArchInfo)) {
|
||||
fcount := int64(0)
|
||||
for i := 0; i < len(typecheck.Target.Decls); i++ {
|
||||
if fn, ok := typecheck.Target.Decls[i].(*ir.Func); ok {
|
||||
// Don't try compiling dead hidden closure.
|
||||
if fn.IsDeadcodeClosure() {
|
||||
continue
|
||||
}
|
||||
enqueueFunc(fn)
|
||||
fcount++
|
||||
}
|
||||
|
||||
@@ -7,7 +7,6 @@ package gc
|
||||
import (
|
||||
"cmd/compile/internal/base"
|
||||
"cmd/compile/internal/ir"
|
||||
"cmd/compile/internal/noder"
|
||||
"cmd/compile/internal/objw"
|
||||
"cmd/compile/internal/reflectdata"
|
||||
"cmd/compile/internal/staticdata"
|
||||
@@ -104,7 +103,7 @@ func finishArchiveEntry(bout *bio.Writer, start int64, name string) {
|
||||
|
||||
func dumpCompilerObj(bout *bio.Writer) {
|
||||
printObjHeader(bout)
|
||||
noder.WriteExports(bout)
|
||||
dumpexport(bout)
|
||||
}
|
||||
|
||||
func dumpdata() {
|
||||
@@ -117,7 +116,7 @@ func dumpdata() {
|
||||
addsignats(typecheck.Target.Externs)
|
||||
reflectdata.WriteRuntimeTypes()
|
||||
reflectdata.WriteTabs()
|
||||
numPTabs := reflectdata.CountPTabs()
|
||||
numPTabs, numITabs := reflectdata.CountTabs()
|
||||
reflectdata.WriteImportStrings()
|
||||
reflectdata.WriteBasicTypes()
|
||||
dumpembeds()
|
||||
@@ -158,10 +157,13 @@ func dumpdata() {
|
||||
if numExports != len(typecheck.Target.Exports) {
|
||||
base.Fatalf("Target.Exports changed after compile functions loop")
|
||||
}
|
||||
newNumPTabs := reflectdata.CountPTabs()
|
||||
newNumPTabs, newNumITabs := reflectdata.CountTabs()
|
||||
if newNumPTabs != numPTabs {
|
||||
base.Fatalf("ptabs changed after compile functions loop")
|
||||
}
|
||||
if newNumITabs != numITabs {
|
||||
base.Fatalf("itabs changed after compile functions loop")
|
||||
}
|
||||
}
|
||||
|
||||
func dumpLinkerObj(bout *bio.Writer) {
|
||||
@@ -274,7 +276,7 @@ func ggloblnod(nam *ir.Name) {
|
||||
if nam.Type() != nil && !nam.Type().HasPointers() {
|
||||
flags |= obj.NOPTR
|
||||
}
|
||||
base.Ctxt.Globl(s, nam.Type().Size(), flags)
|
||||
base.Ctxt.Globl(s, nam.Type().Width, flags)
|
||||
if nam.LibfuzzerExtraCounter() {
|
||||
s.Type = objabi.SLIBFUZZER_EXTRA_COUNTER
|
||||
}
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
// UNREVIEWED
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
// UNREVIEWED
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
@@ -155,7 +156,7 @@ func Import(packages map[string]*types2.Package, path, srcDir string, lookup fun
|
||||
// binary export format starts with a 'c', 'd', or 'v'
|
||||
// (from "version"). Select appropriate importer.
|
||||
if len(data) > 0 && data[0] == 'i' {
|
||||
pkg, err = ImportData(packages, string(data[1:]), id)
|
||||
_, pkg, err = iImportData(packages, data[1:], id)
|
||||
} else {
|
||||
err = fmt.Errorf("import %q: old binary export format no longer supported (recompile library)", path)
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff