Compare commits


25 Commits

Author SHA1 Message Date
Carlos Amedee
3979fb9af9 [release-branch.go1.16] go1.16.2
Change-Id: I0513a9731e0e0d57bfeda0ffab1c5787edc916f8
Reviewed-on: https://go-review.googlesource.com/c/go/+/300969
Run-TryBot: Carlos Amedee <carlos@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Dmitri Shuralyov <dmitshur@golang.org>
Trust: Carlos Amedee <carlos@golang.org>
2021-03-11 17:08:05 +00:00
Jay Conrod
5993fbbd48 [release-branch.go1.16] cmd/go: clarify errors for commands run outside a module
The new error message tells the user what was wrong (no go.mod found)
and directs them to 'go help modules', which links to tutorials.

Includes test fix from CL 298794
Fixes #44746

Change-Id: I98f31fec4a8757eb1792b45491519da4c552cb0f
Reviewed-on: https://go-review.googlesource.com/c/go/+/298650
Trust: Jay Conrod <jayconrod@google.com>
Run-TryBot: Jay Conrod <jayconrod@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Bryan C. Mills <bcmills@google.com>
(cherry picked from commit b87e9b9f68)
Reviewed-on: https://go-review.googlesource.com/c/go/+/298929
2021-03-10 21:27:07 +00:00
Jay Conrod
6e04188440 [release-branch.go1.16] cmd/go: don't report missing std import errors for tidy and vendor
'go mod tidy' and 'go mod vendor' normally report errors when a
package can't be imported, even if the import appears in a file that
wouldn't be compiled by the current version of Go. These errors are
common for packages introduced in higher versions of Go, like "embed"
in 1.16.

This change causes 'go mod tidy' and 'go mod vendor' to ignore
missing package errors if the import path appears to come from the
standard library because it lacks a dot in the first path element.

Fixes #44793
Updates #27063

Change-Id: I61d6443e77ab95fd8c0d1514f57ef4c8885a77cc
Reviewed-on: https://go-review.googlesource.com/c/go/+/298749
Trust: Jay Conrod <jayconrod@google.com>
Run-TryBot: Jay Conrod <jayconrod@google.com>
Reviewed-by: Bryan C. Mills <bcmills@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
(cherry picked from commit 56d52e6611)
Reviewed-on: https://go-review.googlesource.com/c/go/+/298949
2021-03-10 21:25:35 +00:00
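
A minimal sketch of the dot heuristic described above — illustrative only, not the actual cmd/go code: an import path whose first element contains no dot cannot be fetched from a module host, so it is assumed to be standard library.

package main

import (
	"fmt"
	"strings"
)

// looksLikeStdlib reports whether the first element of an import path
// lacks a dot -- the signal used to suppress missing-package errors
// for imports such as "embed" on older versions of Go.
func looksLikeStdlib(importPath string) bool {
	elem := importPath
	if i := strings.Index(importPath, "/"); i >= 0 {
		elem = importPath[:i]
	}
	return !strings.Contains(elem, ".")
}

func main() {
	fmt.Println(looksLikeStdlib("embed"))            // true: treated as standard library
	fmt.Println(looksLikeStdlib("golang.org/x/mod")) // false: fetchable module path
}
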
Katie Hockman
b5c1b5aa07 [release-branch.go1.16] all: merge release-branch.go1.16-security into release-branch.go1.16
Change-Id: Icc8775f559b0125eae94ce4ffd4dcb4e7146a500
2021-03-10 11:55:14 -05:00
Alexander Rakoczy
e9e0473681 [release-branch.go1.16-security] go1.16.1
Change-Id: I4999d72caf9462554a2a6f1d761244cafec34718
Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1014541
Reviewed-by: Katie Hockman <katiehockman@google.com>
2021-03-10 14:25:03 +00:00
Roland Shoemaker
634d28d78c [release-branch.go1.16-security] archive/zip: fix panic in Reader.Open
When operating on a Zip file that contains a file prefixed with "../",
Open(...) would cause a panic in toValidName when attempting to strip
the prefixed path components.

Fixes CVE-2021-27919

Change-Id: Ic755d8126cb0897e2cbbdacf572439c38dde7b35
Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1004761
Reviewed-by: Filippo Valsorda <valsorda@google.com>
Reviewed-by: Russ Cox <rsc@google.com>
Reviewed-by: Katie Hockman <katiehockman@google.com>
(cherry picked from commit ce22003b26eaf8e4a690757f699aae7062d41472)
Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1013753
Reviewed-by: Roland Shoemaker <bracewell@google.com>
2021-03-09 17:55:16 +00:00
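
The corrected helper is small enough to sketch in full. This is a simplified, self-contained version of archive/zip's toValidName (the corresponding hunk appears in the file diffs further down), not the verbatim source:

package main

import (
	"fmt"
	"path"
	"strings"
)

// toValidName strips leading "/" and "../" components. The loop must
// test the already-trimmed p; the buggy version tested the original
// name, so the condition stayed true while p kept shrinking and the
// slice expression eventually panicked.
func toValidName(name string) string {
	p := path.Clean(name)
	if strings.HasPrefix(p, "/") {
		p = p[len("/"):]
	}
	for strings.HasPrefix(p, "../") { // fixed: was strings.HasPrefix(name, "../")
		p = p[len("../"):]
	}
	return p
}

func main() {
	fmt.Println(toValidName("../test.txt")) // "test.txt", no panic
}
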
Katie Hockman
d86e53e896 [release-branch.go1.16-security] encoding/xml: prevent infinite loop while decoding
This change properly handles a TokenReader which
returns an EOF in the middle of an open XML
element.

Thanks to Sam Whited for reporting this.

Fixes CVE-2021-27918

Change-Id: Id02a3f3def4a1b415fa2d9a8e3b373eb6cb0f433
Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1004594
Reviewed-by: Russ Cox <rsc@google.com>
Reviewed-by: Roland Shoemaker <bracewell@google.com>
Reviewed-by: Filippo Valsorda <valsorda@google.com>
(cherry picked from commit e7ce1f6746223ec7b4caa3b1ece25d9be3864710)
Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1014235
2021-03-09 17:55:05 +00:00
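
The failure mode is easy to reproduce in miniature. eofMidElement below is a hypothetical TokenReader written for illustration; it emits one start element and then io.EOF, the mid-element EOF described above. On patched versions the decoder stops with an error instead of spinning:

package main

import (
	"encoding/xml"
	"fmt"
	"io"
)

// eofMidElement returns <open> and then EOF while the element is
// still unclosed.
type eofMidElement struct{ sent bool }

func (r *eofMidElement) Token() (xml.Token, error) {
	if !r.sent {
		r.sent = true
		return xml.StartElement{Name: xml.Name{Local: "open"}}, nil
	}
	return nil, io.EOF
}

func main() {
	d := xml.NewTokenDecoder(&eofMidElement{})
	for {
		tok, err := d.Token()
		if err != nil {
			fmt.Println("stopped with:", err) // io.EOF or a syntax error, not a hang
			return
		}
		fmt.Printf("token: %#v\n", tok)
	}
}
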
Jay Conrod
3068d55c2f [release-branch.go1.16] cmd: upgrade golang.org/x/mod to relax import path check
This incorporates CL 298009, which allows leading dots in import path
elements but not module path elements. Also added a test.

Fixes #44647
Updates #34992

Change-Id: I2d5faabd8f7b23a7943d3f3ccb6707ab5dc2ce3c
Reviewed-on: https://go-review.googlesource.com/c/go/+/297530
Trust: Jay Conrod <jayconrod@google.com>
Run-TryBot: Jay Conrod <jayconrod@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Bryan C. Mills <bcmills@google.com>
(cherry picked from commit 97bdac03ae)
Reviewed-on: https://go-review.googlesource.com/c/go/+/297912
Reviewed-by: Dmitri Shuralyov <dmitshur@golang.org>
2021-03-03 17:59:29 +00:00
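
The relaxed rule is observable through golang.org/x/mod's exported checks; a small demonstration, assuming the upgraded x/mod is on the module path:

package main

import (
	"fmt"

	"golang.org/x/mod/module"
)

func main() {
	// Leading dot in an import path element: allowed after CL 298009.
	fmt.Println(module.CheckImportPath("example.com/pkg/.hidden"))
	// Leading dot in a module path element: still rejected.
	fmt.Println(module.CheckPath("example.com/.hidden"))
}
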
Jay Conrod
a9ba734e4d [release-branch.go1.16] cmd/go/internal/modload: don't query when fixing canonical versions
If a canonical version is passed to fixVersion when loading the main
go.mod and that version doesn't match the module path's major version
suffix, don't call Query.

Query doesn't return a useful error in this case when the path is
malformed, for example, when it doesn't have a dot in the first path
element. It's better to report the major version mismatch error.

Fixes #44496

Change-Id: I97b1f64aee894fa0db6fb637aa03a51357ee782c
Reviewed-on: https://go-review.googlesource.com/c/go/+/296590
Trust: Jay Conrod <jayconrod@google.com>
Run-TryBot: Jay Conrod <jayconrod@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Bryan C. Mills <bcmills@google.com>
(cherry picked from commit 5fafc0bbd4)
Reviewed-on: https://go-review.googlesource.com/c/go/+/297989
Reviewed-by: Dmitri Shuralyov <dmitshur@golang.org>
2021-03-03 17:58:41 +00:00
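
The mismatch itself can be shown with x/mod's exported module.Check rather than cmd/go's internal fixVersion; this illustrates the rule, not the code path the CL touches:

package main

import (
	"fmt"

	"golang.org/x/mod/module"
)

func main() {
	// Mismatch: a v2 version with no /v2 suffix on the path. No network
	// query is needed to report this.
	fmt.Println(module.Check("example.com/m", "v2.0.0"))
	// OK: the suffix matches the major version.
	fmt.Println(module.Check("example.com/m/v2", "v2.0.0"))
}
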
Jay Conrod
047ca22916 [release-branch.go1.16] cmd: upgrade golang.org/x/mod to fix go.mod parser
modfile.Parse passed an empty string to the VersionFixer for the
module path. This caused errors for v2+ versions.

For #44496

Change-Id: I13b86b6ecf6815c4bc9a96ec0668284c9228c205
Reviewed-on: https://go-review.googlesource.com/c/go/+/296131
Trust: Jay Conrod <jayconrod@google.com>
Run-TryBot: Jay Conrod <jayconrod@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Bryan C. Mills <bcmills@google.com>
(cherry picked from commit bcac57f89c)
Reviewed-on: https://go-review.googlesource.com/c/go/+/297990
Reviewed-by: Dmitri Shuralyov <dmitshur@golang.org>
2021-03-03 17:58:27 +00:00
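
A minimal sketch of the hook in question, assuming the upgraded golang.org/x/mod: the VersionFixer receives a module path and a version for each version token the parser encounters. For the retract line below, that path is the module's own path; per the message above, the parser previously handed the fixer an empty string, which broke version handling for v2+ modules.

package main

import (
	"fmt"

	"golang.org/x/mod/modfile"
)

func main() {
	data := []byte("module example.com/m/v2\n\ngo 1.16\n\nretract v2.0.0\n")
	fix := func(path, version string) (string, error) {
		fmt.Printf("fix called with path=%q version=%q\n", path, version)
		return version, nil
	}
	f, err := modfile.Parse("go.mod", data, fix)
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Module.Mod.Path)
}
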
Jordan Liggitt
2b7243a62f [release-branch.go1.16] cmd/go: add missing newline to retraction warning message
Updates #44674
Fixes #44676

Change-Id: Icbdb79084bf7bd2f52cc0a53abcc1ec6f0c4a1bf
Reviewed-on: https://go-review.googlesource.com/c/go/+/297350
Reviewed-by: Bryan C. Mills <bcmills@google.com>
Reviewed-by: Jay Conrod <jayconrod@google.com>
Run-TryBot: Bryan C. Mills <bcmills@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Trust: Jay Conrod <jayconrod@google.com>
(cherry picked from commit 87beecd6df)
Reviewed-on: https://go-review.googlesource.com/c/go/+/297633
Trust: Bryan C. Mills <bcmills@google.com>
2021-03-01 23:40:01 +00:00
Cherry Zhang
a9547ad8ad [release-branch.go1.16] cmd/link: handle types as converted to interface when dynlink
When using plugins, a type (or rather a value of it) may be passed to
a plugin and converted to an interface there, or vice versa. We need to
treat the type as potentially converted to an interface, and retain
its methods.

Updates #44586.
Fixes #44638.

Change-Id: I80dd35e68baedaa852a317543ccd78d94628d13b
Reviewed-on: https://go-review.googlesource.com/c/go/+/296709
Trust: Cherry Zhang <cherryyz@google.com>
Run-TryBot: Cherry Zhang <cherryyz@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Than McIntosh <thanm@google.com>
(cherry picked from commit a655208c9e)
Reviewed-on: https://go-review.googlesource.com/c/go/+/296910
2021-03-01 22:31:33 +00:00
Matthew Dempsky
292abd96ae [release-branch.go1.16] cmd/compile: fix escape analysis of heap-allocated results
One of escape analysis's responsibilities is to summarize whether/how
each function parameter flows to the heap so we can correctly
incorporate those flows into callers' escape analysis data flow
graphs.

As an optimization, we separately record when parameters flow to
result parameters, so that we can more precisely analyze parameter
flows based on how the results are used at the call site. However, if
a named result parameter itself needs to be heap allocated, this
optimization isn't safe and the parameter needs to be recorded as
flowing to heap rather than flowing to result.

Escape analysis used to get this correct because it conservatively
rewalked the data-flow graph multiple times. So even though it would
incorrectly record the result parameter flow, it would separately find
a flow to the heap. However, CL 196811 (specifically, case 3)
optimized the walking logic to reduce unnecessary rewalks, which caused
us to stop finding the extra heap flow.

This CL fixes the issue by correcting location.leakTo to be sensitive
to sink.escapes and not record result-flows when the result parameter
escapes to the heap.

Fixes #44659.

Change-Id: I48742ed35a6cab591094e2d23a439e205bd65c50
Reviewed-on: https://go-review.googlesource.com/c/go/+/297289
Trust: Matthew Dempsky <mdempsky@google.com>
Run-TryBot: Matthew Dempsky <mdempsky@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
Reviewed-on: https://go-review.googlesource.com/c/go/+/297290
2021-03-01 22:02:31 +00:00
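
A hypothetical shape of affected code (not the exact reproducer from #44659): a parameter flows to a named result that is itself heap-allocated, so recording only "flows to result" under-approximates the escape.

package main

var sink *int

func f(p *int) (r *int) {
	defer func() { sink = r }() // r is captured, so r itself lives on the heap
	r = p                       // p flows to r, and therefore to the heap
	return r
}

func main() {
	x := 42
	_ = f(&x) // even with the result discarded, x must escape via p -> r -> sink
	println(*sink)
}
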
Ian Lance Taylor
88f91b709e [release-branch.go1.16] time: correct unusual extension string cases
This fixes two uncommon cases.

First, the tzdata code permits time zone offsets up to 24 * 7 hours,
although the POSIX TZ parsing does not. The tzdata code uses this to
specify a day of the week in some cases.

Second, we incorrectly rejected a negative time offset specifying when
a time zone change comes into effect.

For #44385
Fixes #44618

Change-Id: I5f2efc1d385e9bfa974a0de3fa81e7a94b827602
Reviewed-on: https://go-review.googlesource.com/c/go/+/296392
Trust: Ian Lance Taylor <iant@golang.org>
Run-TryBot: Ian Lance Taylor <iant@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Tobias Klauser <tobias.klauser@gmail.com>
(cherry picked from commit d9fd38e68b)
Reviewed-on: https://go-review.googlesource.com/c/go/+/297230
2021-03-01 21:51:11 +00:00
Jason A. Donenfeld
4fd2617cd8 [release-branch.go1.16] syscall: do not overflow key memory in GetQueuedCompletionStatus
The third argument to GetQueuedCompletionStatus is a pointer to a
uintptr, not a uint32. Users of this function have therefore been
corrupting their memory every time they used it. Either that memory
corruption was silent (dangerous), or their programs didn't work, so
they chose a different API to use.

This fixes the problem by passing through an intermediate buffer.

Updates #44538.
Fixes #44593.

Change-Id: Icacd71f705b36e41e52bd8c4d74898559a27522f
Reviewed-on: https://go-review.googlesource.com/c/go/+/296150
Trust: Jason A. Donenfeld <Jason@zx2c4.com>
Run-TryBot: Jason A. Donenfeld <Jason@zx2c4.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Bryan C. Mills <bcmills@google.com>
2021-03-01 21:33:08 +00:00
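
The shape of the fix can be illustrated portably. rawGetStatus below is a hypothetical stand-in for the pointer-sized Win32 signature (ULONG_PTR *lpCompletionKey), not the syscall package's API; the point is the intermediate buffer:

package main

import "fmt"

// rawGetStatus simulates the kernel writing a full pointer-sized
// completion key.
func rawGetStatus(key *uintptr) {
	*key = ^uintptr(0)
}

// getStatus keeps a legacy *uint32 parameter but stages the key in an
// intermediate uintptr, as the CL above does, so the raw call can
// never write past a 4-byte destination.
func getStatus(key *uint32) {
	var k uintptr
	rawGetStatus(&k)
	*key = uint32(k)
}

func main() {
	var key uint32
	getStatus(&key)
	fmt.Printf("key = %#x\n", key) // truncated, but adjacent memory is intact
}
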
Bryan C. Mills
e0bd146a13 [release-branch.go1.16] cmd/go: fix version validation in 'go mod edit -exclude'
The fix is to pull in CL 295931 from the x/mod repo.

Updates #44497
Fixes #44498

Change-Id: I008b58d0f4bb48c09d4f1e6ed31d11a714f87dc0
Reviewed-on: https://go-review.googlesource.com/c/go/+/295150
Trust: Bryan C. Mills <bcmills@google.com>
Run-TryBot: Bryan C. Mills <bcmills@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Jay Conrod <jayconrod@google.com>
Reviewed-by: Michael Matloob <matloob@golang.org>
(cherry picked from commit 691ac806d2)
Reviewed-on: https://go-review.googlesource.com/c/go/+/295930
2021-03-01 20:40:43 +00:00
Cuong Manh Le
ca9cd629fb [release-branch.go1.16] cmd/compile: fix mishandling of unsafe-uintptr arguments with call method in go/defer
In CL 253457, we applied the same fix for direct function calls. But
for method calls, the receiver argument also needs to be passed through
the wrapper function; we were not doing that, so the compiler crashed
on the code in #44415.

It would be nicer to rewrite OCALLMETHOD to a normal OCALLFUNC, but
that is left for a future CL. Passing the receiver argument to the
wrapper function is easier to backport to the go1.16 branch.

Fixes #44464

Change-Id: I03607a64429042c6066ce673931db9769deb3124
Reviewed-on: https://go-review.googlesource.com/c/go/+/296490
Trust: Cuong Manh Le <cuong.manhle.vn@gmail.com>
Run-TryBot: Cuong Manh Le <cuong.manhle.vn@gmail.com>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-on: https://go-review.googlesource.com/c/go/+/296769
Trust: Bryan C. Mills <bcmills@google.com>
2021-03-01 20:38:01 +00:00
Than McIntosh
18e5d75ffb [release-branch.go1.16] cmd/compile: fix panic in DWARF-gen handling obfuscated code
DWARF generation uses variable source positions (file/line/col) as a
way to uniquely identify locals and parameters, as part of the process
of matching up post-optimization variables with the corresponding
pre-optimization versions (since the DWARF needs to be in terms of the
original source constructs).

This strategy can run into problems when compiling obfuscated or
machine-generated code, where you can in some circumstances wind up
with two local variables that appear to have the same name, file,
line, and column. This patch changes DWARF generation to skip over
such duplicates as opposed to issuing a fatal error (if an
obfuscation tool is in use, it is unlikely that a human being will be
able to make much sense of DWARF info in any case).

Fixes #44433.

Change-Id: I198022d184701aa9ec3dce42c005d29b72d2e321
Reviewed-on: https://go-review.googlesource.com/c/go/+/294289
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
Reviewed-by: Jeremy Faller <jeremy@golang.org>
Reviewed-by: Cherry Zhang <cherryyz@google.com>
Trust: Than McIntosh <thanm@google.com>
Run-TryBot: Than McIntosh <thanm@google.com>
(cherry picked from commit e78e04ce39)
Reviewed-on: https://go-review.googlesource.com/c/go/+/294789
2021-03-01 20:13:56 +00:00
Matthew Dempsky
ddeae6b248 [release-branch.go1.16] cmd/compile: declare inlined result params early for empty returns
The code for delayed declaration of inlined result parameters only
handles non-empty return statements. This is generally okay, because
we already early declare if there are any (non-blank) named result
parameters.

But if a user writes a function with only blank result parameters and
with exactly one return statement, which is empty, then they could end
up hitting the dreaded "Value live at entry" ICE.

This CL fixes the issue by ensuring we always early declare inlined
result parameters if there are any empty return statements.

Fixes #44358.

Change-Id: I315f3853be436452883b1ce31da1bdffdf24d506
Reviewed-on: https://go-review.googlesource.com/c/go/+/293293
TryBot-Result: Go Bot <gobot@golang.org>
Trust: Matthew Dempsky <mdempsky@google.com>
Run-TryBot: Matthew Dempsky <mdempsky@google.com>
Reviewed-by: Cuong Manh Le <cuong.manhle.vn@gmail.com>
Reviewed-by: David Chase <drchase@google.com>
Reviewed-on: https://go-review.googlesource.com/c/go/+/296569
2021-03-01 18:01:46 +00:00
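
An illustrative function of the exact shape described above: only blank result parameters and a single, empty return statement. With the fix, the inlined results are declared up front and the ICE cannot occur.

package main

func f() (_ int, _ error) {
	return // empty return; the blank results are implicitly zeroed
}

func main() {
	n, err := f()
	println(n, err == nil)
}
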
Ian Lance Taylor
b7e0eb49d8 [release-branch.go1.16] syscall: add explicit ios build tag
This permits analysis of the syscall package by tools built with
older versions of Go that do not recognize ios as a GOOS.

For #44459
Fixes #44462

Change-Id: I79cec2ffe0dbcbc2dc45a385e556dc9e62033125
Reviewed-on: https://go-review.googlesource.com/c/go/+/294634
Trust: Ian Lance Taylor <iant@golang.org>
Run-TryBot: Ian Lance Taylor <iant@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
(cherry picked from commit 03d36d8198)
Reviewed-on: https://go-review.googlesource.com/c/go/+/294635
Reviewed-by: Cherry Zhang <cherryyz@google.com>
2021-02-25 22:48:20 +00:00
Ian Lance Taylor
0b8c416688 [release-branch.go1.16] README: pull gopher image from website
Fixes breakage accidentally introduced by https://golang.org/cl/291711.

Fixes #44402.
Updates #44295.

Change-Id: I76f3e5577d1d24027d4ed2a725b5b749ab2d059c
Reviewed-on: https://go-review.googlesource.com/c/go/+/292629
Trust: Ian Lance Taylor <iant@golang.org>
Run-TryBot: Ian Lance Taylor <iant@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
(cherry picked from commit 6f3da9d2f6)
Reviewed-on: https://go-review.googlesource.com/c/go/+/293843
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Ian Lance Taylor <iant@golang.org>
Trust: Dmitri Shuralyov <dmitshur@golang.org>
Run-TryBot: Dmitri Shuralyov <dmitshur@golang.org>
2021-02-25 21:27:25 +00:00
Ian Lance Taylor
1a7e9af153 [release-branch.go1.16] runtime/cgo: add cast in C code to avoid C compiler warning
For #44340
Fixes #44346

Change-Id: Id80dd1f44a988b653933732afcc8e49a826affc4
Reviewed-on: https://go-review.googlesource.com/c/go/+/293209
Reviewed-by: Andrew G. Morgan <agm@google.com>
Reviewed-by: Bryan C. Mills <bcmills@google.com>
Trust: Ian Lance Taylor <iant@golang.org>
Run-TryBot: Ian Lance Taylor <iant@golang.org>
(cherry picked from commit 07ef313525)
Reviewed-on: https://go-review.googlesource.com/c/go/+/293411
TryBot-Result: Go Bot <gobot@golang.org>
2021-02-18 03:06:52 +00:00
Alexander Rakoczy
f21be2fdc6 [release-branch.go1.16] go1.16
Change-Id: I4c1350e0cb74ebfde5832973979e02997476d16c
Reviewed-on: https://go-review.googlesource.com/c/go/+/292609
TryBot-Result: Go Bot <gobot@golang.org>
Trust: Alexander Rakoczy <alex@golang.org>
Run-TryBot: Alexander Rakoczy <alex@golang.org>
Reviewed-by: Dmitri Shuralyov <dmitshur@golang.org>
Reviewed-by: Carlos Amedee <carlos@golang.org>
2021-02-16 18:08:40 +00:00
Alexander Rakoczy
e34168e634 [release-branch.go1.16] all: merge master into release-branch.go1.16
1004a7cb31 runtime/metrics: update documentation to current interface
6530f2617f doc/go1.16: remove draft notice
353e111455 doc/go1.16: fix mismatched id attribute
f0d23c9dbb internal/poll: netpollcheckerr before sendfile
0cb3415154 doc: remove all docs not tied to distribution
626ef08127 doc: remove install.html and install-source.html
30641e36aa internal/poll: if copy_file_range returns 0, assume it failed
33d72fd412 doc/faq: update generics entry to reflect accepted proposal
852ce7c212 cmd/go: provide a more helpful suggestion for "go vet -?"
66c27093d0 cmd/link: fix typo in link_test.go
ff0e93ea31 doc/go1.16: note that package path elements beginning with '.' are disallowed
249da7ec02 CONTRIBUTORS: update for the Go 1.16 release
864d4f1c6b cmd/go: multiple small 'go help' fixes
26ceae85a8 spec: More precise wording in section on function calls.
930c2c9a68 cmd/go: reject embedded files that can't be packed into modules
e5b08e6d5c io/fs: allow backslash in ValidPath, reject in os.DirFS.Open
ed8079096f cmd/compile: mark concrete call of reflect.(*rtype).Method as REFLECTMETHOD
e9c9683597 cmd/go: suppress errors from 'go get -d' for packages that only conditionally exist
e0ac989cf3 archive/tar: detect out of bounds accesses in PAX records resulting from padded lengths
c9d6f45fec runtime/metrics: fix a couple of documentation typpos
cea4e21b52 io/fs: backslash is always a glob meta character
dc725bfb3c doc/go1.16: mention new vet check for asn1.Unmarshal
1901853098 runtime/metrics: fix panic in readingAllMetric example
ed3e4afa12 syscall/plan9: remove spooky fd action at a distance
724d0720b3 doc/go1.16: add missed heading tag in vet section
b54cd94d47 embed, io/fs: clarify that leading and trailing slashes are disallowed
4516afebed testing/fstest: avoid symlink-induced failures in tester
8869086d8f runtime: fix typo in histogram.go
e491c6eea9 math/big: fix comment in divRecursiveStep
fca94ab3ab spec: improve the example in Type assertions section
98f8454a73 cmd/link: don't decode type symbol in shared library in deadcode
1426a571b7 cmd/link: fix off-by-1 error in findShlibSection
32e789f4fb test: fix incorrectly laid out instructions in issue11656.go
0b6cfea634 doc/go1.16: document that on OpenBSD syscalls are now made through libc
26e29aa15a cmd/link: disable TestPIESize if CGO isn't enabled
6ac91e460c doc/go1.16: minor markup fixes
44361140c0 embed: update docs for proposal tweaks
68058edc39 runtime: document pointer write atomicity for memclrNoHeapPointers
c8bd8010ff syscall: generate readlen/writelen for openbsd libc
41bb49b878 cmd/go: revert TestScript/build_trimpath to use ioutil.ReadFile
725a642c2d runtime: correct syscall10/syscall10X on openbsd/amd64
4b068cafb5 doc/go1.16: document go/build/constraint package
376518d77f runtime,syscall: convert syscall on openbsd/arm64 to libc

Change-Id: Icfe3d849f459eda48d7d786d0cd7b082c9c2c325
2021-02-16 12:10:50 -05:00
Alexander Rakoczy
3e06467282 [release-branch.go1.16] go1.16rc1
Change-Id: I978f6df491a19a9c45ab906dbc5194b8665bf4a5
Reviewed-on: https://go-review.googlesource.com/c/go/+/287352
Run-TryBot: Alexander Rakoczy <alex@golang.org>
Trust: Alexander Rakoczy <alex@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Dmitri Shuralyov <dmitshur@golang.org>
2021-01-27 22:19:13 +00:00
2361 changed files with 53894 additions and 128654 deletions


@@ -195,7 +195,7 @@ Ayanamist Yang <ayanamist@gmail.com>
Aymerick Jéhanne <aymerick@jehanne.org>
Azat Kaumov <kaumov.a.r@gmail.com>
Baiju Muthukadan <baiju.m.mail@gmail.com>
Baokun Lee <nototon@gmail.com> <bk@golangcn.org>
Baokun Lee <nototon@gmail.com>
Bartosz Grzybowski <melkorm@gmail.com>
Bastian Ike <bastian.ike@gmail.com>
Ben Burkert <ben@benburkert.com>


@@ -321,7 +321,7 @@ Azat Kaumov <kaumov.a.r@gmail.com>
Baiju Muthukadan <baiju.m.mail@gmail.com>
Balaram Makam <bmakam.qdt@qualcommdatacenter.com>
Balazs Lecz <leczb@google.com>
Baokun Lee <nototon@gmail.com> <bk@golangcn.org>
Baokun Lee <nototon@gmail.com>
Barnaby Keene <accounts@southcla.ws>
Bartosz Grzybowski <melkorm@gmail.com>
Bartosz Oler <brtsz@google.com>


@@ -1,46 +0,0 @@
# dev.go2go branch
This branch provides an experimental go2go tool for testing the use of
the generics design draft.
This branch was published in 2020 and is no longer being maintained.
The [generics
proposal](https://go.googlesource.com/proposal/+/refs/heads/master/design/43651-type-parameters.md)
has [been accepted](https://golang.org/issue/43651) and development is
now focused on implementing the proposal in the ordinary Go tools.
## Original README
This branch contains a type checker and a translation tool for
experimentation with generics in Go.
This implements the [generics design draft](https://go.googlesource.com/proposal/+/refs/heads/master/design/go2draft-type-parameters.md).
You can build this branch [as
usual](https://golang.org/doc/install/source).
You can then use the new `go2go` tool.
Write your generic code in a file with the extension `.go2` instead of
`.go`.
Run it using `go tool go2go run x.go2`.
There are some sample packages in `cmd/go2go/testdata/go2path/src`.
You can see the full documentation for the tool by running `go doc
cmd/go2go`.
The `go2go` tool will look for `.go2` files using the environment variable `GO2PATH`.
You can find some useful packages that you might want to experiment with by
setting `GO2PATH=$GOROOT/src/cmd/go2go/testdata/go2path`.
If you find bugs in the updated type checker or in the translation
tool, they should be filed in the [standard Go issue
tracker](https://golang.org/issue).
Please start the issue title with `cmd/go2go`.
Note that the issue tracker should only be used for reporting bugs in
the tools, not for discussion of changes to the language.
Unlike typical dev branches, we do not intend any eventual merge of
this code into the master branch.
We will update it for bug fixes and change to the generic design
draft.
If a generics language proposal is accepted, it will be implemented in
the standard compiler, not via a translation tool.
The necessary changes to the go/* packages (go/ast, go/parser, and so
forth) may re-use code from this prototype but will follow the usual
code review process.

VERSION (new file, 1 line)

@@ -0,0 +1 @@
go1.16.2


@@ -1,2 +0,0 @@
branch: dev.go2go
parent-branch: master

doc/go1.16.html (new file, 1220 lines)

File diff suppressed because it is too large.


@@ -1,92 +0,0 @@
<!--{
"Title": "Go 1.17 Release Notes",
"Path": "/doc/go1.17"
}-->
<!--
NOTE: In this document and others in this directory, the convention is to
set fixed-width phrases with non-fixed-width spaces, as in
<code>hello</code> <code>world</code>.
Do not send CLs removing the interior tags from such phrases.
-->
<style>
main ul li { margin: 0.5em 0; }
</style>
<h2 id="introduction">DRAFT RELEASE NOTES — Introduction to Go 1.17</h2>
<p>
<strong>
Go 1.17 is not yet released. These are work-in-progress
release notes. Go 1.17 is expected to be released in August 2021.
</strong>
</p>
<h2 id="language">Changes to the language</h2>
<p>
TODO: complete this section
</p>
<h2 id="ports">Ports</h2>
<p>
TODO: complete this section, or delete if not needed
</p>
<h2 id="tools">Tools</h2>
<p>
TODO: complete this section, or delete if not needed
</p>
<h3 id="go-command">Go command</h3>
<h4 id="go-get"><code>go</code> <code>get</code></h4>
<p><!-- golang.org/issue/37519 -->
The <code>go</code> <code>get</code> <code>-insecure</code> flag is
deprecated and has been removed. To permit the use of insecure schemes
when fetching dependencies, please use the <code>GOINSECURE</code>
environment variable. The <code>-insecure</code> flag also bypassed module
sum validation, use <code>GOPRIVATE</code> or <code>GONOSUMDB</code> if
you need that functionality. See <code>go</code> <code>help</code>
<code>environment</code> for details.
</p>
<h2 id="runtime">Runtime</h2>
<p>
TODO: complete this section, or delete if not needed
</p>
<h2 id="compiler">Compiler</h2>
<p>
TODO: complete this section, or delete if not needed
</p>
<h2 id="linker">Linker</h2>
<p>
TODO: complete this section, or delete if not needed
</p>
<h2 id="library">Core library</h2>
<p>
TODO: complete this section
</p>
<h3 id="minor_library_changes">Minor changes to the library</h3>
<p>
As always, there are various minor changes and updates to the library,
made with the Go 1 <a href="/doc/go1compat">promise of compatibility</a>
in mind.
</p>
<p>
TODO: complete this section
</p>


@@ -1,6 +1,6 @@
<!--{
"Title": "The Go Programming Language Specification",
"Subtitle": "Version of Feb 24, 2021",
"Subtitle": "Version of Feb 10, 2021",
"Path": "/ref/spec"
}-->
@@ -830,7 +830,7 @@ The underlying type of <code>[]B1</code>, <code>B3</code>, and <code>B4</code> i
<h3 id="Method_sets">Method sets</h3>
<p>
A type has a (possibly empty) <i>method set</i> associated with it.
A type may have a <i>method set</i> associated with it.
The method set of an <a href="#Interface_types">interface type</a> is its interface.
The method set of any other type <code>T</code> consists of all
<a href="#Method_declarations">methods</a> declared with receiver type <code>T</code>.
@@ -3532,9 +3532,9 @@ within <code>Greeting</code>, <code>who</code> will have the value
</p>
<p>
If the final argument is assignable to a slice type <code>[]T</code> and
is followed by <code>...</code>, it is passed unchanged as the value
for a <code>...T</code> parameter. In this case no new slice is created.
If the final argument is assignable to a slice type <code>[]T</code>, it is
passed unchanged as the value for a <code>...T</code> parameter if the argument
is followed by <code>...</code>. In this case no new slice is created.
</p>
<p>


@@ -182,7 +182,7 @@ func testCallbackCallers(t *testing.T) {
"runtime.cgocallbackg1",
"runtime.cgocallbackg",
"runtime.cgocallback",
"runtime.systemstack_switch",
"runtime.asmcgocall",
"runtime.cgocall",
"test._Cfunc_callback",
"test.nestedCall.func1",


@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build gc
// +build !gccgo
#include "textflag.h"


@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// +build amd64 amd64p32
// +build gc
// +build !gccgo
#include "textflag.h"


@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build gc
// +build !gccgo
#include "textflag.h"


@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build gc
// +build !gccgo
#include "textflag.h"


@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// +build mips64 mips64le
// +build gc
// +build !gccgo
#include "textflag.h"


@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// +build mips mipsle
// +build gc
// +build !gccgo
#include "textflag.h"


@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// +build ppc64 ppc64le
// +build gc
// +build !gccgo
#include "textflag.h"


@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// +build riscv64
// +build gc
// +build !gccgo
#include "textflag.h"


@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build gc
// +build !gccgo
#include "textflag.h"


@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build gc
// +build !gccgo
#include "textflag.h"


@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build gc
// +build !gccgo
package depBase


@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build aix || linux || dragonfly || openbsd || solaris
// +build aix linux dragonfly openbsd solaris
package tar


@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build darwin || freebsd || netbsd
// +build darwin freebsd netbsd
package tar


@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build aix || linux || darwin || dragonfly || freebsd || openbsd || netbsd || solaris
// +build aix linux darwin dragonfly freebsd openbsd netbsd solaris
package tar


@@ -664,7 +664,7 @@ func toValidName(name string) string {
if strings.HasPrefix(p, "/") {
p = p[len("/"):]
}
for strings.HasPrefix(name, "../") {
for strings.HasPrefix(p, "../") {
p = p[len("../"):]
}
return p


@@ -1081,3 +1081,38 @@ func TestFS(t *testing.T) {
t.Fatal(err)
}
}
func TestCVE202127919(t *testing.T) {
// Archive containing only the file "../test.txt"
data := []byte{
0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x08, 0x00,
0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x2e, 0x2e,
0x2f, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x74, 0x78,
0x74, 0x0a, 0xc9, 0xc8, 0x2c, 0x56, 0xc8, 0x2c,
0x56, 0x48, 0x54, 0x28, 0x49, 0x2d, 0x2e, 0x51,
0x28, 0x49, 0xad, 0x28, 0x51, 0x48, 0xcb, 0xcc,
0x49, 0xd5, 0xe3, 0x02, 0x04, 0x00, 0x00, 0xff,
0xff, 0x50, 0x4b, 0x07, 0x08, 0xc0, 0xd7, 0xed,
0xc3, 0x20, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x00,
0x00, 0x50, 0x4b, 0x01, 0x02, 0x14, 0x00, 0x14,
0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
0x00, 0xc0, 0xd7, 0xed, 0xc3, 0x20, 0x00, 0x00,
0x00, 0x1a, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e,
0x2e, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x74,
0x78, 0x74, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00,
0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x39, 0x00,
0x00, 0x00, 0x59, 0x00, 0x00, 0x00, 0x00, 0x00,
}
r, err := NewReader(bytes.NewReader([]byte(data)), int64(len(data)))
if err != nil {
t.Fatalf("Error reading the archive: %v", err)
}
_, err = r.Open("test.txt")
if err != nil {
t.Errorf("Error reading file: %v", err)
}
}


@@ -670,8 +670,7 @@ func (b *Writer) WriteByte(c byte) error {
// WriteRune writes a single Unicode code point, returning
// the number of bytes written and any error.
func (b *Writer) WriteRune(r rune) (size int, err error) {
// Compare as uint32 to correctly handle negative runes.
if uint32(r) < utf8.RuneSelf {
if r < utf8.RuneSelf {
err = b.WriteByte(byte(r))
if err != nil {
return 0, err
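
Why the uint32 comparison in this hunk matters (illustrative): converting a negative rune to uint32 makes it enormous, so it fails the one-byte fast-path test and falls through to the slow path, which encodes it as the replacement character.

package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	r := rune(-1)
	fmt.Println(r < utf8.RuneSelf)         // true: a signed compare would take the 1-byte fast path
	fmt.Println(uint32(r) < utf8.RuneSelf) // false: negative runes fall through
	fmt.Println(string(r))                 // "\uFFFD", the replacement character
}
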


@@ -534,20 +534,6 @@ func TestReadWriteRune(t *testing.T) {
}
}
func TestWriteInvalidRune(t *testing.T) {
// Invalid runes, including negative ones, should be written as the
// replacement character.
for _, r := range []rune{-1, utf8.MaxRune + 1} {
var buf bytes.Buffer
w := NewWriter(&buf)
w.WriteRune(r)
w.Flush()
if s := buf.String(); s != "\uFFFD" {
t.Errorf("WriteRune(%d) wrote %q, not replacement character", r, s)
}
}
}
func TestReadStringAllocs(t *testing.T) {
r := strings.NewReader(" foo foo 42 42 42 42 42 42 42 42 4.2 4.2 4.2 4.2\n")
buf := NewReader(r)


@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
//go:build linux
// +build linux
package bytes_test


@@ -275,8 +275,7 @@ func (b *Buffer) WriteByte(c byte) error {
// included to match bufio.Writer's WriteRune. The buffer is grown as needed;
// if it becomes too large, WriteRune will panic with ErrTooLarge.
func (b *Buffer) WriteRune(r rune) (n int, err error) {
// Compare as uint32 to correctly handle negative runes.
if uint32(r) < utf8.RuneSelf {
if r < utf8.RuneSelf {
b.WriteByte(byte(r))
return 1, nil
}


@@ -6,7 +6,6 @@ package bytes_test
import (
. "bytes"
"fmt"
"io"
"math/rand"
"testing"
@@ -388,16 +387,6 @@ func TestRuneIO(t *testing.T) {
}
}
func TestWriteInvalidRune(t *testing.T) {
// Invalid runes, including negative ones, should be written as
// utf8.RuneError.
for _, r := range []rune{-1, utf8.MaxRune + 1} {
var buf Buffer
buf.WriteRune(r)
check(t, fmt.Sprintf("TestWriteInvalidRune (%d)", r), &buf, "\uFFFD")
}
}
func TestNext(t *testing.T) {
b := []byte{0, 1, 2, 3, 4}
tmp := make([]byte, 5)


@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build ignore
// +build ignore
// The run program is invoked via the dist tool.


@@ -109,10 +109,6 @@ func archX86(linkArch *obj.LinkArch) *Arch {
register["SB"] = RSB
register["FP"] = RFP
register["PC"] = RPC
if linkArch == &x86.Linkamd64 {
// Alias g to R14
register["g"] = x86.REGG
}
// Register prefix not used on this architecture.
instructions := make(map[string]obj.As)


@@ -811,7 +811,10 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) {
} else {
mask = (^uint32(0) >> uint(mask2+1)) & (^uint32(0) << uint(31-(mask1-1)))
}
prog.SetFrom3Const(int64(mask))
prog.SetFrom3(obj.Addr{
Type: obj.TYPE_CONST,
Offset: int64(mask),
})
prog.To = a[4]
break
}


@@ -36,7 +36,6 @@ func testEndToEnd(t *testing.T, goarch, file string) {
var ok bool
testOut = new(bytes.Buffer) // The assembler writes test output to this buffer.
ctxt.Bso = bufio.NewWriter(os.Stdout)
ctxt.IsAsm = true
defer ctxt.Bso.Flush()
failed := false
ctxt.DiagFunc = func(format string, args ...interface{}) {
@@ -279,7 +278,6 @@ func testErrors(t *testing.T, goarch, file string) {
var ok bool
testOut = new(bytes.Buffer) // The assembler writes test output to this buffer.
ctxt.Bso = bufio.NewWriter(os.Stdout)
ctxt.IsAsm = true
defer ctxt.Bso.Flush()
failed := false
var errBuf bytes.Buffer
@@ -439,12 +437,8 @@ func TestPPC64EndToEnd(t *testing.T) {
testEndToEnd(t, "ppc64", "ppc64")
}
func TestRISCVEndToEnd(t *testing.T) {
testEndToEnd(t, "riscv64", "riscv64")
}
func TestRISCVErrors(t *testing.T) {
testErrors(t, "riscv64", "riscv64error")
func TestRISCVEncoder(t *testing.T) {
testEndToEnd(t, "riscv64", "riscvenc")
}
func TestS390XEndToEnd(t *testing.T) {


@@ -259,7 +259,6 @@ var amd64OperandTests = []operandTest{
{"R15", "R15"},
{"R8", "R8"},
{"R9", "R9"},
{"g", "R14"},
{"SI", "SI"},
{"SP", "SP"},
{"X0", "X0"},


@@ -305,7 +305,7 @@ func (p *Parser) pseudo(word string, operands [][]lex.Token) bool {
// references and writes symabis information to w.
//
// The symabis format is documented at
// cmd/compile/internal/ssagen.ReadSymABIs.
// cmd/compile/internal/gc.readSymABIs.
func (p *Parser) symDefRef(w io.Writer, word string, operands [][]lex.Token) {
switch word {
case "TEXT":


@@ -1,14 +0,0 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
TEXT errors(SB),$0
MOV $0, 0(SP) // ERROR "constant load must target register"
MOV $0, 8(SP) // ERROR "constant load must target register"
MOV $1234, 0(SP) // ERROR "constant load must target register"
MOV $1234, 8(SP) // ERROR "constant load must target register"
MOVB $1, X5 // ERROR "unsupported constant load"
MOVH $1, X5 // ERROR "unsupported constant load"
MOVW $1, X5 // ERROR "unsupported constant load"
MOVF $1, X5 // ERROR "unsupported constant load"
RET


@@ -32,13 +32,11 @@ var (
D MultiFlag
I MultiFlag
PrintOut int
DebugV bool
)
func init() {
flag.Var(&D, "D", "predefined symbol with optional simple value -D=identifier=value; can be set multiple times")
flag.Var(&I, "I", "include directory; can be set multiple times")
flag.BoolVar(&DebugV, "v", false, "print debug output")
objabi.AddVersionFlag() // -V
objabi.Flagcount("S", "print assembly and machine code", &PrintOut)
}


@@ -36,7 +36,6 @@ func main() {
ctxt := obj.Linknew(architecture.LinkArch)
ctxt.Debugasm = flags.PrintOut
ctxt.Debugvlog = flags.DebugV
ctxt.Flag_dynlink = *flags.Dynlink
ctxt.Flag_linkshared = *flags.Linkshared
ctxt.Flag_shared = *flags.Shared || *flags.Dynlink


@@ -1566,17 +1566,9 @@ func (p *Package) gccMachine() []string {
case "s390x":
return []string{"-m64"}
case "mips64", "mips64le":
if gomips64 == "hardfloat" {
return []string{"-mabi=64", "-mhard-float"}
} else if gomips64 == "softfloat" {
return []string{"-mabi=64", "-msoft-float"}
}
return []string{"-mabi=64"}
case "mips", "mipsle":
if gomips == "hardfloat" {
return []string{"-mabi=32", "-mfp32", "-mhard-float", "-mno-odd-spreg"}
} else if gomips == "softfloat" {
return []string{"-mabi=32", "-msoft-float"}
}
return []string{"-mabi=32"}
}
return nil
}


@@ -245,7 +245,7 @@ var importRuntimeCgo = flag.Bool("import_runtime_cgo", true, "import runtime/cgo
var importSyscall = flag.Bool("import_syscall", true, "import syscall in generated code")
var trimpath = flag.String("trimpath", "", "applies supplied rewrites or trims prefixes to recorded source file paths")
var goarch, goos, gomips, gomips64 string
var goarch, goos string
func main() {
objabi.AddVersionFlag() // -V
@@ -405,8 +405,6 @@ func newPackage(args []string) *Package {
if s := os.Getenv("GOOS"); s != "" {
goos = s
}
gomips = objabi.GOMIPS
gomips64 = objabi.GOMIPS64
ptrSize := ptrSizeMap[goarch]
if ptrSize == 0 {
fatalf("unknown ptrSize for $GOARCH %q", goarch)

src/cmd/compile/fmt_test.go (new file, 599 lines)

@@ -0,0 +1,599 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements TestFormats; a test that verifies
// format strings in the compiler (this directory and all
// subdirectories, recursively).
//
// TestFormats finds potential (Printf, etc.) format strings.
// If they are used in a call, the format verbs are verified
// based on the matching argument type against a precomputed
// map of valid formats (knownFormats). This map can be used to
// automatically rewrite format strings across all compiler
// files with the -r flag.
//
// The format map needs to be updated whenever a new (type,
// format) combination is found and the format verb is not
// 'v' or 'T' (as in "%v" or "%T"). To update the map auto-
// matically from the compiler source's use of format strings,
// use the -u flag. (Whether formats are valid for the values
// to be formatted must be verified manually, of course.)
//
// The -v flag prints out the names of all functions called
// with a format string, the names of files that were not
// processed, and any format rewrites made (with -r).
//
// Run as: go test -run Formats [-r][-u][-v]
//
// Known shortcomings:
// - indexed format strings ("%[2]s", etc.) are not supported
// (the test will fail)
// - format strings that are not simple string literals cannot
// be updated automatically
// (the test will fail with respective warnings)
// - format strings in _test packages outside the current
// package are not processed
// (the test will report those files)
//
package main_test
import (
"bytes"
"flag"
"fmt"
"go/ast"
"go/build"
"go/constant"
"go/format"
"go/importer"
"go/parser"
"go/token"
"go/types"
"internal/testenv"
"io"
"io/fs"
"io/ioutil"
"log"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"testing"
"unicode/utf8"
)
var (
rewrite = flag.Bool("r", false, "rewrite format strings")
update = flag.Bool("u", false, "update known formats")
)
// The following variables collect information across all processed files.
var (
fset = token.NewFileSet()
formatStrings = make(map[*ast.BasicLit]bool) // set of all potential format strings found
foundFormats = make(map[string]bool) // set of all formats found
callSites = make(map[*ast.CallExpr]*callSite) // map of all calls
)
// A File is a corresponding (filename, ast) pair.
type File struct {
name string
ast *ast.File
}
func TestFormats(t *testing.T) {
if testing.Short() && testenv.Builder() == "" {
t.Skip("Skipping in short mode")
}
testenv.MustHaveGoBuild(t) // more restrictive than necessary, but that's ok
// process all directories
filepath.WalkDir(".", func(path string, info fs.DirEntry, err error) error {
if info.IsDir() {
if info.Name() == "testdata" {
return filepath.SkipDir
}
importPath := filepath.Join("cmd/compile", path)
if ignoredPackages[filepath.ToSlash(importPath)] {
return filepath.SkipDir
}
pkg, err := build.Import(importPath, path, 0)
if err != nil {
if _, ok := err.(*build.NoGoError); ok {
return nil // nothing to do here
}
t.Fatal(err)
}
collectPkgFormats(t, pkg)
}
return nil
})
// test and rewrite formats
updatedFiles := make(map[string]File) // files that were rewritten
for _, p := range callSites {
// test current format literal and determine updated one
out := formatReplace(p.str, func(index int, in string) string {
if in == "*" {
return in // cannot rewrite '*' (as in "%*d")
}
// in != '*'
typ := p.types[index]
format := typ + " " + in // e.g., "*Node %n"
// check if format is known
out, known := knownFormats[format]
// record format if not yet found
_, found := foundFormats[format]
if !found {
foundFormats[format] = true
}
// report an error if the format is unknown and this is the first
// time we see it; ignore "%v" and "%T" which are always valid
if !known && !found && in != "%v" && in != "%T" {
t.Errorf("%s: unknown format %q for %s argument", posString(p.arg), in, typ)
}
if out == "" {
out = in
}
return out
})
// replace existing format literal if it changed
if out != p.str {
// we cannot replace the argument if it's not a string literal for now
// (e.g., it may be "foo" + "bar")
lit, ok := p.arg.(*ast.BasicLit)
if !ok {
delete(callSites, p.call) // treat as if we hadn't found this site
continue
}
if testing.Verbose() {
fmt.Printf("%s:\n\t- %q\n\t+ %q\n", posString(p.arg), p.str, out)
}
// find argument index of format argument
index := -1
for i, arg := range p.call.Args {
if p.arg == arg {
index = i
break
}
}
if index < 0 {
// we may have processed the same call site twice,
// but that shouldn't happen
panic("internal error: matching argument not found")
}
// replace literal
new := *lit // make a copy
new.Value = strconv.Quote(out) // this may introduce "-quotes where there were `-quotes
p.call.Args[index] = &new
updatedFiles[p.file.name] = p.file
}
}
// write dirty files back
var filesUpdated bool
if len(updatedFiles) > 0 && *rewrite {
for _, file := range updatedFiles {
var buf bytes.Buffer
if err := format.Node(&buf, fset, file.ast); err != nil {
t.Errorf("WARNING: gofmt %s failed: %v", file.name, err)
continue
}
if err := ioutil.WriteFile(file.name, buf.Bytes(), 0x666); err != nil {
t.Errorf("WARNING: writing %s failed: %v", file.name, err)
continue
}
fmt.Printf("updated %s\n", file.name)
filesUpdated = true
}
}
// report the names of all functions called with a format string
if len(callSites) > 0 && testing.Verbose() {
set := make(map[string]bool)
for _, p := range callSites {
set[nodeString(p.call.Fun)] = true
}
var list []string
for s := range set {
list = append(list, s)
}
fmt.Println("\nFunctions called with a format string")
writeList(os.Stdout, list)
}
// update formats
if len(foundFormats) > 0 && *update {
var list []string
for s := range foundFormats {
list = append(list, fmt.Sprintf("%q: \"\",", s))
}
var buf bytes.Buffer
buf.WriteString(knownFormatsHeader)
writeList(&buf, list)
buf.WriteString("}\n")
out, err := format.Source(buf.Bytes())
const outfile = "fmtmap_test.go"
if err != nil {
t.Errorf("WARNING: gofmt %s failed: %v", outfile, err)
out = buf.Bytes() // continue with unformatted source
}
if err = ioutil.WriteFile(outfile, out, 0644); err != nil {
t.Errorf("WARNING: updating format map failed: %v", err)
}
}
// check that knownFormats is up to date
if !*rewrite && !*update {
var mismatch bool
for s := range foundFormats {
if _, ok := knownFormats[s]; !ok {
mismatch = true
break
}
}
if !mismatch {
for s := range knownFormats {
if _, ok := foundFormats[s]; !ok {
mismatch = true
break
}
}
}
if mismatch {
t.Errorf("format map is out of date; run 'go test -u' to update and manually verify correctness of change'")
}
}
// all format strings of calls must be in the formatStrings set (self-verification)
for _, p := range callSites {
if lit, ok := p.arg.(*ast.BasicLit); ok && lit.Kind == token.STRING {
if formatStrings[lit] {
// ok
delete(formatStrings, lit)
} else {
// this should never happen
panic(fmt.Sprintf("internal error: format string not found (%s)", posString(lit)))
}
}
}
// if we have any strings left, we may need to update them manually
if len(formatStrings) > 0 && filesUpdated {
var list []string
for lit := range formatStrings {
list = append(list, fmt.Sprintf("%s: %s", posString(lit), nodeString(lit)))
}
fmt.Println("\nWARNING: Potentially missed format strings")
writeList(os.Stdout, list)
t.Fail()
}
fmt.Println()
}
// A callSite describes a function call that appears to contain
// a format string.
type callSite struct {
file File
call *ast.CallExpr // call containing the format string
arg ast.Expr // format argument (string literal or constant)
str string // unquoted format string
types []string // argument types
}
func collectPkgFormats(t *testing.T, pkg *build.Package) {
// collect all files
var filenames []string
filenames = append(filenames, pkg.GoFiles...)
filenames = append(filenames, pkg.CgoFiles...)
filenames = append(filenames, pkg.TestGoFiles...)
// TODO(gri) verify _test files outside package
for _, name := range pkg.XTestGoFiles {
// don't process this test itself
if name != "fmt_test.go" && testing.Verbose() {
fmt.Printf("WARNING: %s not processed\n", filepath.Join(pkg.Dir, name))
}
}
// make filenames relative to .
for i, name := range filenames {
filenames[i] = filepath.Join(pkg.Dir, name)
}
// parse all files
files := make([]*ast.File, len(filenames))
for i, filename := range filenames {
f, err := parser.ParseFile(fset, filename, nil, parser.ParseComments)
if err != nil {
t.Fatal(err)
}
files[i] = f
}
// typecheck package
conf := types.Config{Importer: importer.Default()}
etypes := make(map[ast.Expr]types.TypeAndValue)
if _, err := conf.Check(pkg.ImportPath, fset, files, &types.Info{Types: etypes}); err != nil {
t.Fatal(err)
}
// collect all potential format strings (for extra verification later)
for _, file := range files {
ast.Inspect(file, func(n ast.Node) bool {
if s, ok := stringLit(n); ok && isFormat(s) {
formatStrings[n.(*ast.BasicLit)] = true
}
return true
})
}
// collect all formats/arguments of calls with format strings
for index, file := range files {
ast.Inspect(file, func(n ast.Node) bool {
if call, ok := n.(*ast.CallExpr); ok {
if ignoredFunctions[nodeString(call.Fun)] {
return true
}
// look for an arguments that might be a format string
for i, arg := range call.Args {
if s, ok := stringVal(etypes[arg]); ok && isFormat(s) {
// make sure we have enough arguments
n := numFormatArgs(s)
if i+1+n > len(call.Args) {
t.Errorf("%s: not enough format args (ignore %s?)", posString(call), nodeString(call.Fun))
break // ignore this call
}
// assume last n arguments are to be formatted;
// determine their types
argTypes := make([]string, n)
for i, arg := range call.Args[len(call.Args)-n:] {
if tv, ok := etypes[arg]; ok {
argTypes[i] = typeString(tv.Type)
}
}
// collect call site
if callSites[call] != nil {
panic("internal error: file processed twice?")
}
callSites[call] = &callSite{
file: File{filenames[index], file},
call: call,
arg: arg,
str: s,
types: argTypes,
}
break // at most one format per argument list
}
}
}
return true
})
}
}
// writeList writes list in sorted order to w.
func writeList(w io.Writer, list []string) {
sort.Strings(list)
for _, s := range list {
fmt.Fprintln(w, "\t", s)
}
}
// posString returns a string representation of n's position
// in the form filename:line:col: .
func posString(n ast.Node) string {
if n == nil {
return ""
}
return fset.Position(n.Pos()).String()
}
// nodeString returns a string representation of n.
func nodeString(n ast.Node) string {
var buf bytes.Buffer
if err := format.Node(&buf, fset, n); err != nil {
log.Fatal(err) // should always succeed
}
return buf.String()
}
// typeString returns a string representation of n.
func typeString(typ types.Type) string {
return filepath.ToSlash(typ.String())
}
// stringLit returns the unquoted string value and true if
// n represents a string literal; otherwise it returns ""
// and false.
func stringLit(n ast.Node) (string, bool) {
if lit, ok := n.(*ast.BasicLit); ok && lit.Kind == token.STRING {
s, err := strconv.Unquote(lit.Value)
if err != nil {
log.Fatal(err) // should not happen with correct ASTs
}
return s, true
}
return "", false
}
// stringVal returns the (unquoted) string value and true if
// tv is a string constant; otherwise it returns "" and false.
func stringVal(tv types.TypeAndValue) (string, bool) {
if tv.IsValue() && tv.Value != nil && tv.Value.Kind() == constant.String {
return constant.StringVal(tv.Value), true
}
return "", false
}
// formatIter iterates through the string s in increasing
// index order and calls f for each format specifier '%..v'.
// The arguments for f describe the specifier's index range.
// If a format specifier contains a "*", f is called with
// the index range for "*" alone, before being called for
// the entire specifier. The result of f is the index of
// the rune at which iteration continues.
func formatIter(s string, f func(i, j int) int) {
i := 0 // index after current rune
var r rune // current rune
next := func() {
r1, w := utf8.DecodeRuneInString(s[i:])
if w == 0 {
r1 = -1 // signal end-of-string
}
r = r1
i += w
}
flags := func() {
for r == ' ' || r == '#' || r == '+' || r == '-' || r == '0' {
next()
}
}
index := func() {
if r == '[' {
log.Fatalf("cannot handle indexed arguments: %s", s)
}
}
digits := func() {
index()
if r == '*' {
i = f(i-1, i)
next()
return
}
for '0' <= r && r <= '9' {
next()
}
}
for next(); r >= 0; next() {
if r == '%' {
i0 := i
next()
flags()
digits()
if r == '.' {
next()
digits()
}
index()
// accept any letter (a-z, A-Z) as format verb;
// ignore anything else
if 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' {
i = f(i0-1, i)
}
}
}
}
// isFormat reports whether s contains format specifiers.
func isFormat(s string) (yes bool) {
formatIter(s, func(i, j int) int {
yes = true
return len(s) // stop iteration
})
return
}
// oneFormat reports whether s is exactly one format specifier.
func oneFormat(s string) (yes bool) {
formatIter(s, func(i, j int) int {
yes = i == 0 && j == len(s)
return j
})
return
}
// numFormatArgs returns the number of format specifiers in s.
func numFormatArgs(s string) int {
count := 0
formatIter(s, func(i, j int) int {
count++
return j
})
return count
}
// formatReplace replaces the i'th format specifier s in the incoming
// string in with the result of f(i, s) and returns the new string.
func formatReplace(in string, f func(i int, s string) string) string {
var buf []byte
i0 := 0
index := 0
formatIter(in, func(i, j int) int {
if sub := in[i:j]; sub != "*" { // ignore calls for "*" width/length specifiers
buf = append(buf, in[i0:i]...)
buf = append(buf, f(index, sub)...)
i0 = j
}
index++
return j
})
return string(append(buf, in[i0:]...))
}
// ignoredPackages is the set of packages which can
// be ignored.
var ignoredPackages = map[string]bool{}
// ignoredFunctions is the set of functions which may have
// format-like arguments but which don't do any formatting and
// thus may be ignored.
var ignoredFunctions = map[string]bool{}
func init() {
// verify that knownFormats entries are correctly formatted
for key, val := range knownFormats {
// key must be "typename format", and format starts with a '%'
// (formats containing '*' alone are not collected in this map)
i := strings.Index(key, "%")
if i < 0 || !oneFormat(key[i:]) {
log.Fatalf("incorrect knownFormats key: %q", key)
}
// val must be "format" or ""
if val != "" && !oneFormat(val) {
log.Fatalf("incorrect knownFormats value: %q (key = %q)", val, key)
}
}
}
const knownFormatsHeader = `// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements the knownFormats map which records the valid
// formats for a given type. The valid formats must correspond to
// supported compiler formats implemented in fmt.go, or whatever
// other format verbs are implemented for the given type. The map may
// also be used to change the use of a format verb across all compiler
// sources automatically (for instance, if the implementation of fmt.go
// changes), by using the -r option together with the new formats in the
// map. To generate this file automatically from the existing source,
// run: go test -run Formats -u.
//
// See the package comment in fmt_test.go for additional information.
package main_test
// knownFormats entries are of the form "typename format" -> "newformat".
// An absent entry means that the format is not recognized as valid.
// An empty new format means that the format should remain unchanged.
var knownFormats = map[string]string{
`


@@ -0,0 +1,211 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements the knownFormats map which records the valid
// formats for a given type. The valid formats must correspond to
// supported compiler formats implemented in fmt.go, or whatever
// other format verbs are implemented for the given type. The map may
// also be used to change the use of a format verb across all compiler
// sources automatically (for instance, if the implementation of fmt.go
// changes), by using the -r option together with the new formats in the
// map. To generate this file automatically from the existing source,
// run: go test -run Formats -u.
//
// See the package comment in fmt_test.go for additional information.
package main_test
// knownFormats entries are of the form "typename format" -> "newformat".
// An absent entry means that the format is not recognized as valid.
// An empty new format means that the format should remain unchanged.
var knownFormats = map[string]string{
"*bytes.Buffer %s": "",
"*cmd/compile/internal/gc.EscLocation %v": "",
"*cmd/compile/internal/gc.Mpflt %v": "",
"*cmd/compile/internal/gc.Mpint %v": "",
"*cmd/compile/internal/gc.Node %#v": "",
"*cmd/compile/internal/gc.Node %+S": "",
"*cmd/compile/internal/gc.Node %+v": "",
"*cmd/compile/internal/gc.Node %L": "",
"*cmd/compile/internal/gc.Node %S": "",
"*cmd/compile/internal/gc.Node %j": "",
"*cmd/compile/internal/gc.Node %p": "",
"*cmd/compile/internal/gc.Node %v": "",
"*cmd/compile/internal/ssa.Block %s": "",
"*cmd/compile/internal/ssa.Block %v": "",
"*cmd/compile/internal/ssa.Func %s": "",
"*cmd/compile/internal/ssa.Func %v": "",
"*cmd/compile/internal/ssa.Register %s": "",
"*cmd/compile/internal/ssa.Register %v": "",
"*cmd/compile/internal/ssa.SparseTreeNode %v": "",
"*cmd/compile/internal/ssa.Value %s": "",
"*cmd/compile/internal/ssa.Value %v": "",
"*cmd/compile/internal/ssa.sparseTreeMapEntry %v": "",
"*cmd/compile/internal/types.Field %p": "",
"*cmd/compile/internal/types.Field %v": "",
"*cmd/compile/internal/types.Sym %0S": "",
"*cmd/compile/internal/types.Sym %S": "",
"*cmd/compile/internal/types.Sym %p": "",
"*cmd/compile/internal/types.Sym %v": "",
"*cmd/compile/internal/types.Type %#L": "",
"*cmd/compile/internal/types.Type %#v": "",
"*cmd/compile/internal/types.Type %+v": "",
"*cmd/compile/internal/types.Type %-S": "",
"*cmd/compile/internal/types.Type %0S": "",
"*cmd/compile/internal/types.Type %L": "",
"*cmd/compile/internal/types.Type %S": "",
"*cmd/compile/internal/types.Type %p": "",
"*cmd/compile/internal/types.Type %s": "",
"*cmd/compile/internal/types.Type %v": "",
"*cmd/internal/obj.Addr %v": "",
"*cmd/internal/obj.LSym %v": "",
"*math/big.Float %f": "",
"*math/big.Int %#x": "",
"*math/big.Int %s": "",
"*math/big.Int %v": "",
"[16]byte %x": "",
"[]*cmd/compile/internal/ssa.Block %v": "",
"[]*cmd/compile/internal/ssa.Value %v": "",
"[][]string %q": "",
"[]byte %s": "",
"[]byte %x": "",
"[]cmd/compile/internal/ssa.Edge %v": "",
"[]cmd/compile/internal/ssa.ID %v": "",
"[]cmd/compile/internal/ssa.posetNode %v": "",
"[]cmd/compile/internal/ssa.posetUndo %v": "",
"[]cmd/compile/internal/syntax.token %s": "",
"[]string %v": "",
"[]uint32 %v": "",
"bool %v": "",
"byte %08b": "",
"byte %c": "",
"byte %q": "",
"byte %v": "",
"cmd/compile/internal/arm.shift %d": "",
"cmd/compile/internal/gc.Class %d": "",
"cmd/compile/internal/gc.Class %s": "",
"cmd/compile/internal/gc.Class %v": "",
"cmd/compile/internal/gc.Ctype %d": "",
"cmd/compile/internal/gc.Ctype %v": "",
"cmd/compile/internal/gc.Nodes %#v": "",
"cmd/compile/internal/gc.Nodes %+v": "",
"cmd/compile/internal/gc.Nodes %.v": "",
"cmd/compile/internal/gc.Nodes %v": "",
"cmd/compile/internal/gc.Op %#v": "",
"cmd/compile/internal/gc.Op %v": "",
"cmd/compile/internal/gc.Val %#v": "",
"cmd/compile/internal/gc.Val %T": "",
"cmd/compile/internal/gc.Val %v": "",
"cmd/compile/internal/gc.fmtMode %d": "",
"cmd/compile/internal/gc.initKind %d": "",
"cmd/compile/internal/gc.itag %v": "",
"cmd/compile/internal/ssa.BranchPrediction %d": "",
"cmd/compile/internal/ssa.Edge %v": "",
"cmd/compile/internal/ssa.GCNode %v": "",
"cmd/compile/internal/ssa.ID %d": "",
"cmd/compile/internal/ssa.ID %v": "",
"cmd/compile/internal/ssa.LocalSlot %s": "",
"cmd/compile/internal/ssa.LocalSlot %v": "",
"cmd/compile/internal/ssa.Location %s": "",
"cmd/compile/internal/ssa.Op %s": "",
"cmd/compile/internal/ssa.Op %v": "",
"cmd/compile/internal/ssa.Sym %v": "",
"cmd/compile/internal/ssa.ValAndOff %s": "",
"cmd/compile/internal/ssa.domain %v": "",
"cmd/compile/internal/ssa.flagConstant %s": "",
"cmd/compile/internal/ssa.posetNode %v": "",
"cmd/compile/internal/ssa.posetTestOp %v": "",
"cmd/compile/internal/ssa.rbrank %d": "",
"cmd/compile/internal/ssa.regMask %d": "",
"cmd/compile/internal/ssa.register %d": "",
"cmd/compile/internal/ssa.relation %s": "",
"cmd/compile/internal/syntax.Error %q": "",
"cmd/compile/internal/syntax.Expr %#v": "",
"cmd/compile/internal/syntax.LitKind %d": "",
"cmd/compile/internal/syntax.Node %T": "",
"cmd/compile/internal/syntax.Operator %s": "",
"cmd/compile/internal/syntax.Pos %s": "",
"cmd/compile/internal/syntax.Pos %v": "",
"cmd/compile/internal/syntax.position %s": "",
"cmd/compile/internal/syntax.token %q": "",
"cmd/compile/internal/syntax.token %s": "",
"cmd/compile/internal/types.EType %d": "",
"cmd/compile/internal/types.EType %s": "",
"cmd/compile/internal/types.EType %v": "",
"cmd/internal/obj.ABI %v": "",
"error %v": "",
"float64 %.2f": "",
"float64 %.3f": "",
"float64 %.6g": "",
"float64 %g": "",
"int %#x": "",
"int %-12d": "",
"int %-6d": "",
"int %-8o": "",
"int %02d": "",
"int %6d": "",
"int %c": "",
"int %d": "",
"int %v": "",
"int %x": "",
"int16 %d": "",
"int16 %x": "",
"int32 %#x": "",
"int32 %d": "",
"int32 %v": "",
"int32 %x": "",
"int64 %#x": "",
"int64 %+d": "",
"int64 %-10d": "",
"int64 %.5d": "",
"int64 %d": "",
"int64 %v": "",
"int64 %x": "",
"int8 %d": "",
"int8 %v": "",
"int8 %x": "",
"interface{} %#v": "",
"interface{} %T": "",
"interface{} %p": "",
"interface{} %q": "",
"interface{} %s": "",
"interface{} %v": "",
"map[*cmd/compile/internal/gc.Node]*cmd/compile/internal/ssa.Value %v": "",
"map[*cmd/compile/internal/gc.Node][]*cmd/compile/internal/gc.Node %v": "",
"map[cmd/compile/internal/ssa.ID]uint32 %v": "",
"map[int64]uint32 %v": "",
"math/big.Accuracy %s": "",
"reflect.Type %s": "",
"rune %#U": "",
"rune %c": "",
"rune %q": "",
"string %-*s": "",
"string %-16s": "",
"string %-6s": "",
"string %q": "",
"string %s": "",
"string %v": "",
"time.Duration %d": "",
"time.Duration %v": "",
"uint %04x": "",
"uint %5d": "",
"uint %d": "",
"uint %x": "",
"uint16 %d": "",
"uint16 %x": "",
"uint32 %#U": "",
"uint32 %#x": "",
"uint32 %d": "",
"uint32 %v": "",
"uint32 %x": "",
"uint64 %08x": "",
"uint64 %b": "",
"uint64 %d": "",
"uint64 %x": "",
"uint8 %#x": "",
"uint8 %d": "",
"uint8 %v": "",
"uint8 %x": "",
"uintptr %d": "",
}


@@ -1,634 +0,0 @@
# Go internal ABI specification
This document describes Go's internal application binary interface
(ABI), known as ABIInternal.
Go's ABI defines the layout of data in memory and the conventions for
calling between Go functions.
This ABI is *unstable* and will change between Go versions.
If you're writing assembly code, please instead refer to Go's
[assembly documentation](/doc/asm.html), which describes Go's stable
ABI, known as ABI0.
All functions defined in Go source follow ABIInternal.
However, ABIInternal and ABI0 functions are able to call each other
through transparent *ABI wrappers*, described in the [internal calling
convention proposal](https://golang.org/design/27539-internal-abi).
Go uses a common ABI design across all architectures.
We first describe the common ABI, and then cover per-architecture
specifics.
*Rationale*: For the reasoning behind using a common ABI across
architectures instead of the platform ABI, see the [register-based Go
calling convention proposal](https://golang.org/design/40724-register-calling).
## Memory layout
Go's built-in types have the following sizes and alignments.
Many, though not all, of these sizes are guaranteed by the [language
specification](/doc/go_spec.html#Size_and_alignment_guarantees).
Those that aren't guaranteed may change in future versions of Go (for
example, we've considered changing the alignment of int64 on 32-bit).
| Type | 64-bit | | 32-bit | |
| --- | --- | --- | --- | --- |
| | Size | Align | Size | Align |
| bool, uint8, int8 | 1 | 1 | 1 | 1 |
| uint16, int16 | 2 | 2 | 2 | 2 |
| uint32, int32 | 4 | 4 | 4 | 4 |
| uint64, int64 | 8 | 8 | 8 | 4 |
| int, uint | 8 | 8 | 4 | 4 |
| float32 | 4 | 4 | 4 | 4 |
| float64 | 8 | 8 | 8 | 4 |
| complex64 | 8 | 4 | 8 | 4 |
| complex128 | 16 | 8 | 16 | 4 |
| uintptr, *T, unsafe.Pointer | 8 | 8 | 4 | 4 |
The types `byte` and `rune` are aliases for `uint8` and `int32`,
respectively, and hence have the same size and alignment as these
types.
The layout of `map`, `chan`, and `func` types is equivalent to *T.
To describe the layout of the remaining composite types, we first
define the layout of a *sequence* S of N fields with types
t<sub>1</sub>, t<sub>2</sub>, ..., t<sub>N</sub>.
We define the byte offset at which each field begins relative to a
base address of 0, as well as the size and alignment of the sequence
as follows:
```
offset(S, i) = 0 if i = 1
= align(offset(S, i-1) + sizeof(t_(i-1)), alignof(t_i))
alignof(S) = 1 if N = 0
= max(alignof(t_i) | 1 <= i <= N)
sizeof(S) = 0 if N = 0
= align(offset(S, N) + sizeof(t_N), alignof(S))
```
Where sizeof(T) and alignof(T) are the size and alignment of type T,
respectively, and align(x, y) rounds x up to a multiple of y.
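As a concrete illustration, the following standalone Go sketch applies this recurrence; `field`, `alignUp`, and `layout` are invented helper names, not anything from the toolchain:
```
package main

import "fmt"

// field describes one element of a sequence by its size and alignment.
type field struct{ size, align int64 }

// alignUp rounds x up to a multiple of a (a must be a power of two).
func alignUp(x, a int64) int64 { return (x + a - 1) &^ (a - 1) }

// layout applies the recurrence above: it returns each field's offset
// along with the size and alignment of the whole sequence.
func layout(seq []field) (offsets []int64, size, alignment int64) {
	alignment = 1
	var off int64
	for i, f := range seq {
		if i > 0 {
			off = alignUp(off+seq[i-1].size, f.align)
		}
		offsets = append(offsets, off)
		if f.align > alignment {
			alignment = f.align
		}
	}
	if n := len(seq); n > 0 {
		size = alignUp(offsets[n-1]+seq[n-1].size, alignment)
	}
	return
}

func main() {
	// The fields of struct { a uint8; b uint64; c uint16 } on 64-bit:
	offs, size, align := layout([]field{{1, 1}, {8, 8}, {2, 2}})
	fmt.Println(offs, size, align) // [0 8 16] 24 8
}
```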
The `interface{}` type is a sequence of 1. a pointer to the runtime type
description for the interface's dynamic type and 2. an `unsafe.Pointer`
data field.
Any other interface type (besides the empty interface) is a sequence
of 1. a pointer to the runtime "itab" that gives the method pointers and
the type of the data field and 2. an `unsafe.Pointer` data field.
An interface can be "direct" or "indirect" depending on the dynamic
type: a direct interface stores the value directly in the data field,
and an indirect interface stores a pointer to the value in the data
field.
An interface can only be direct if the value consists of a single
pointer word.
An array type `[N]T` is a sequence of N fields of type T.
The slice type `[]T` is a sequence of a `*[cap]T` pointer to the slice
backing store, an `int` giving the `len` of the slice, and an `int`
giving the `cap` of the slice.
The `string` type is a sequence of a `*[len]byte` pointer to the
string backing store, and an `int` giving the `len` of the string.
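These headers can be checked directly with `unsafe.Sizeof`; for example, on a 64-bit platform the following prints 24, 16, and 16 (the slice, string, and empty-interface headers, respectively):
```
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	var sl []int       // pointer + len + cap
	var st string      // pointer + len
	var ei interface{} // type pointer + data pointer
	fmt.Println(unsafe.Sizeof(sl), unsafe.Sizeof(st), unsafe.Sizeof(ei))
}
```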
A struct type `struct { f1 t1; ...; fM tM }` is laid out as the
sequence t1, ..., tM, tP, where tP is either:
- Type `byte` if sizeof(t<sub>M</sub>) = 0 and any of sizeof(t<sub>i</sub>) ≠ 0.
- Empty (size 0 and align 1) otherwise.
The padding byte prevents creating a past-the-end pointer by taking
the address of the final, empty fM field.
Note that user-written assembly code should generally not depend on Go
type layout and should instead use the constants defined in
[`go_asm.h`](/doc/asm.html#data-offsets).
## Function call argument and result passing
Function calls pass arguments and results using a combination of the
stack and machine registers.
Each argument or result is passed either entirely in registers or
entirely on the stack.
Because access to registers is generally faster than access to the
stack, arguments and results are preferentially passed in registers.
However, any argument or result that contains a non-trivial array or
does not fit entirely in the remaining available registers is passed
on the stack.
Each architecture defines a sequence of integer registers and a
sequence of floating-point registers.
At a high level, arguments and results are recursively broken down
into values of base types and these base values are assigned to
registers from these sequences.
Arguments and results can share the same registers, but do not share
the same stack space.
Beyond the arguments and results passed on the stack, the caller also
reserves spill space on the stack for all register-based arguments
(but does not populate this space).
The receiver, arguments, and results of function or method F are
assigned to registers or the stack using the following algorithm:
1. Let NI and NFP be the length of integer and floating-point register
sequences defined by the architecture.
Let I and FP be 0; these are the indexes of the next integer and
floating-point register.
Let S, the type sequence defining the stack frame, be empty.
1. If F is a method, assign F's receiver.
1. For each argument A of F, assign A.
1. Add a pointer-alignment field to S. This has size 0 and the same
alignment as `uintptr`.
1. Reset I and FP to 0.
1. For each result R of F, assign R.
1. Add a pointer-alignment field to S.
1. For each register-assigned receiver and argument of F, let T be its
type and add T to the stack sequence S.
This is the argument's (or receiver's) spill space and will be
uninitialized at the call.
1. Add a pointer-alignment field to S.
Assigning a receiver, argument, or result V of underlying type T works
as follows:
1. Remember I and FP.
1. If T has zero size, add T to the stack sequence S and return.
1. Try to register-assign V.
1. If step 2 failed, reset I and FP to the values from step 1, add T
to the stack sequence S, and assign V to this field in S.
Register-assignment of a value V of underlying type T works as follows (a simplified sketch in Go appears after the list):
1. If T is a boolean or integral type that fits in an integer
register, assign V to register I and increment I.
1. If T is an integral type that fits in two integer registers, assign
the least significant and most significant halves of V to registers
I and I+1, respectively, and increment I by 2.
1. If T is a floating-point type and can be represented without loss
of precision in a floating-point register, assign V to register FP
and increment FP.
1. If T is a complex type, recursively register-assign its real and
imaginary parts.
1. If T is a pointer type, map type, channel type, or function type,
assign V to register I and increment I.
1. If T is a string type, interface type, or slice type, recursively
register-assign V's components (2 for strings and interfaces, 3 for
slices).
1. If T is a struct type, recursively register-assign each field of V.
1. If T is an array type of length 0, do nothing.
1. If T is an array type of length 1, recursively register-assign its
one element.
1. If T is an array type of length > 1, fail.
1. If I > NI or FP > NFP, fail.
1. If any recursive assignment above fails, fail.
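The following standalone Go sketch is a simplified, hypothetical rendering of these rules (it covers the scalar, pointer, string, slice, struct, and array cases; complex numbers and the two-register integral case are omitted, and `typ`, `state`, and `regassign` are invented names, not the compiler's):
```
package main

import "fmt"

type kind int

const (
	kInt kind = iota
	kFloat
	kPtr
	kString
	kSlice
	kStruct
	kArray
)

// typ is a toy stand-in for the compiler's type representation.
type typ struct {
	kind   kind
	fields []typ // struct fields, or the single array element type
	n      int   // array length (kArray only)
}

// state tracks the next integer (i) and floating-point (fp) register
// against the architecture's limits (ni, nfp).
type state struct{ i, fp, ni, nfp int }

// regassign reports whether t can be register-assigned, consuming
// registers as it recurses. Resetting i and fp on failure (so the
// value falls back to the stack) is left to the caller, as in the
// assignment steps above.
func (s *state) regassign(t typ) bool {
	switch t.kind {
	case kInt, kPtr:
		s.i++
		return s.i <= s.ni
	case kFloat:
		s.fp++
		return s.fp <= s.nfp
	case kString: // pointer + length
		return s.regassign(typ{kind: kPtr}) && s.regassign(typ{kind: kInt})
	case kSlice: // pointer + length + capacity
		return s.regassign(typ{kind: kPtr}) &&
			s.regassign(typ{kind: kInt}) &&
			s.regassign(typ{kind: kInt})
	case kStruct:
		for _, f := range t.fields {
			if !s.regassign(f) {
				return false
			}
		}
		return true
	case kArray:
		switch t.n {
		case 0:
			return true // zero-length arrays consume nothing
		case 1:
			return s.regassign(t.fields[0])
		default:
			return false // arrays of length > 1 always fail
		}
	}
	return false
}

func main() {
	s := &state{ni: 9, nfp: 15}
	fmt.Println(s.regassign(typ{kind: kSlice}), s.i) // true 3
}
```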
The above algorithm produces an assignment of each receiver, argument,
and result to registers or to a field in the stack sequence.
The final stack sequence looks like: stack-assigned receiver,
stack-assigned arguments, pointer-alignment, stack-assigned results,
pointer-alignment, spill space for each register-assigned argument,
pointer-alignment.
The following diagram shows what this stack frame looks like on the
stack, using the typical convention where address 0 is at the bottom:
```
+------------------------------+
| . . .                        |
| 2nd reg argument spill space |
| 1st reg argument spill space |
| <pointer-sized alignment>    |
| . . .                        |
| 2nd stack-assigned result    |
| 1st stack-assigned result    |
| <pointer-sized alignment>    |
| . . .                        |
| 2nd stack-assigned argument  |
| 1st stack-assigned argument  |
| stack-assigned receiver      |
+------------------------------+ ↓ lower addresses
```
To perform a call, the caller reserves space starting at the lowest
address in its stack frame for the call stack frame, stores arguments
in the registers and argument stack fields determined by the above
algorithm, and performs the call.
At the time of a call, spill space, result stack fields, and result
registers are left uninitialized.
Upon return, the callee must have stored results to all result
registers and result stack fields determined by the above algorithm.
There are no callee-save registers, so a call may overwrite any
register that doesn't have a fixed meaning, including argument
registers.
### Example
Consider the function `func f(a1 uint8, a2 [2]uintptr, a3 uint8) (r1
struct { x uintptr; y [2]uintptr }, r2 string)` on a 64-bit
architecture with hypothetical integer registers R0 through R9.
On entry, `a1` is assigned to `R0`, `a3` is assigned to `R1` and the
stack frame is laid out in the following sequence:
```
a2      [2]uintptr
r1.x    uintptr
r1.y    [2]uintptr
a1Spill uint8
a3Spill uint8
_       [6]uint8  // alignment padding
```
In the stack frame, only the `a2` field is initialized on entry; the
rest of the frame is left uninitialized.
On exit, `r2.base` is assigned to `R0`, `r2.len` is assigned to `R1`,
and `r1.x` and `r1.y` are initialized in the stack frame.
There are several things to note in this example.
First, `a2` and `r1` are stack-assigned because they contain arrays.
The other arguments and results are register-assigned.
Result `r2` is decomposed into its components, which are individually
register-assigned.
On the stack, the stack-assigned arguments appear at lower addresses
than the stack-assigned results, which appear at lower addresses than
the argument spill area.
Only arguments, not results, are assigned a spill area on the stack.
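Written out as ordinary Go, the example's signature is simply (the body here is a placeholder):
```
package main

// f matches the example above: a1 and a3 are register-assigned, a2
// and r1 are stack-assigned because they contain arrays, and r2 is
// returned as its pointer and length components in registers.
func f(a1 uint8, a2 [2]uintptr, a3 uint8) (r1 struct {
	x uintptr
	y [2]uintptr
}, r2 string) {
	return
}

func main() {
	_, _ = f(1, [2]uintptr{2, 3}, 4)
}
```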
### Rationale
Each base value is assigned to its own register to optimize
construction and access.
An alternative would be to pack multiple sub-word values into
registers, or to simply map an argument's in-memory layout to
registers (this is common in C ABIs), but this typically adds cost to
pack and unpack these values.
Modern architectures have more than enough registers to pass all
arguments and results this way for nearly all functions (see the
appendix), so there's little downside to spreading base values across
registers.
Arguments that can't be fully assigned to registers are passed
entirely on the stack in case the callee takes the address of that
argument.
If an argument could be split across the stack and registers and the
callee took its address, it would need to be reconstructed in memory,
a process that would be proportional to the size of the argument.
Non-trivial arrays are always passed on the stack because indexing
into an array typically requires a computed offset, which generally
isn't possible with registers.
Arrays in general are rare in function signatures (only 0.7% of
functions in the Go 1.15 standard library and 0.2% in kubelet).
We considered allowing array fields to be passed on the stack while
the rest of an argument's fields are passed in registers, but this
creates the same problems as other large structs if the callee takes
the address of an argument, and would benefit <0.1% of functions in
kubelet (and even these very little).
We make exceptions for 0 and 1-element arrays because these don't
require computed offsets, and 1-element arrays are already decomposed
in the compiler's SSA representation.
The ABI assignment algorithm above is equivalent to Go's stack-based
ABI0 calling convention if there are zero architecture registers.
This is intended to ease the transition to the register-based internal
ABI and make it easy for the compiler to generate either calling
convention.
An architecture may still define register meanings that aren't
compatible with ABI0, but these differences should be easy to account
for in the compiler.
The assignment algorithm assigns zero-sized values to the stack
(assignment step 2) in order to support ABI0-equivalence.
While these values take no space themselves, they do result in
alignment padding on the stack in ABI0.
Without this step, the internal ABI would register-assign zero-sized
values even on architectures that provide no argument registers
because they don't consume any registers, and hence not add alignment
padding to the stack.
The algorithm reserves spill space for arguments in the caller's frame
so that the compiler can generate a stack growth path that spills into
this reserved space.
If the callee has to grow the stack, it may not be able to reserve
enough additional stack space in its own frame to spill these, which
is why it's important that the caller do so.
These slots also act as the home location if these arguments need to
be spilled for any other reason, which simplifies traceback printing.
There are several options for how to lay out the argument spill space.
We chose to lay out each argument according to its type's usual memory
layout but to separate the spill space from the regular argument
space.
Using the usual memory layout simplifies the compiler because it
already understands this layout.
Also, if a function takes the address of a register-assigned argument,
the compiler must spill that argument to memory in its usual memory
layout and it's more convenient to use the argument spill space for
this purpose.
Alternatively, the spill space could be structured around argument
registers.
In this approach, the stack growth spill path would spill each
argument register to a register-sized stack word.
However, if the function takes the address of a register-assigned
argument, the compiler would have to reconstruct it in memory layout
elsewhere on the stack.
The spill space could also be interleaved with the stack-assigned
arguments so the arguments appear in order whether they are register-
or stack-assigned.
This would be close to ABI0, except that register-assigned arguments
would be uninitialized on the stack and there's no need to reserve
stack space for register-assigned results.
We expect separating the spill space to perform better because of
memory locality.
Separating the space is also potentially simpler for `reflect` calls
because this allows `reflect` to summarize the spill space as a single
number.
Finally, the long-term intent is to remove reserved spill slots
entirely (allowing most functions to be called without any stack
setup and easing the introduction of callee-save registers), and
separating the spill space makes that transition easier.
## Closures
A func value (e.g., `var x func()`) is a pointer to a closure object.
A closure object begins with a pointer-sized program counter
representing the entry point of the function, followed by zero or more
bytes containing the closed-over environment.
Closure calls follow the same conventions as static function and
method calls, with one addition. Each architecture specifies a
*closure context pointer* register and calls to closures store the
address of the closure object in the closure context pointer register
prior to the call.
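For example, in the ordinary Go below, `inc` is a func value: under this convention it points to a closure object whose environment holds `n`, and each call places that pointer in the closure context register before entering the function:
```
package main

import "fmt"

// counter returns a func value; its closure object captures n.
func counter() func() int {
	n := 0
	return func() int {
		n++
		return n
	}
}

func main() {
	inc := counter()
	fmt.Println(inc(), inc()) // 1 2 (both calls share one closure object)
}
```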
## Software floating-point mode
In "softfloat" mode, the ABI simply treats the hardware as having zero
floating-point registers.
As a result, any arguments containing floating-point values will be
passed on the stack.
*Rationale*: Softfloat mode is about compatibility over performance
and is not commonly used.
Hence, we keep the ABI as simple as possible in this case, rather than
adding additional rules for passing floating-point values in integer
registers.
## Architecture specifics
This section describes per-architecture register mappings, as well as
other per-architecture special cases.
### amd64 architecture
The amd64 architecture uses the following sequence of 9 registers for
integer arguments and results:
```
RAX, RBX, RCX, RDI, RSI, R8, R9, R10, R11
```
It uses X0 through X14 for floating-point arguments and results.
*Rationale*: These sequences are chosen from the available registers
to be relatively easy to remember.
Registers R12 and R13 are permanent scratch registers.
R15 is a scratch register except in dynamically linked binaries.
*Rationale*: Some operations such as stack growth and reflection calls
need dedicated scratch registers in order to manipulate call frames
without corrupting arguments or results.
Special-purpose registers are as follows:
| Register | Call meaning | Body meaning |
| --- | --- | --- |
| RSP | Stack pointer | Fixed |
| RBP | Frame pointer | Fixed |
| RDX | Closure context pointer | Scratch |
| R12 | None | Scratch |
| R13 | None | Scratch |
| R14 | Current goroutine | Scratch |
| R15 | GOT reference temporary | Fixed if dynlink |
| X15 | Zero value | Fixed |
*Rationale*: These register meanings are compatible with Go's
stack-based calling convention except for R14 and X15, which will have
to be restored on transitions from ABI0 code to ABIInternal code.
In ABI0, these are undefined, so transitions from ABIInternal to ABI0
can ignore these registers.
*Rationale*: For the current goroutine pointer, we chose a register
that requires an additional REX byte.
While this adds one byte to every function prologue, it is hardly ever
accessed outside the function prologue and we expect making more
single-byte registers available to be a net win.
*Rationale*: We designate X15 as a fixed zero register because
functions often have to bulk zero their stack frames, and this is more
efficient with a designated zero register.
#### Stack layout
The stack pointer, RSP, grows down and is always aligned to 8 bytes.
The amd64 architecture does not use a link register.
A function's stack frame is laid out as follows:
```
+------------------------------+
| return PC                    |
| RBP on entry                 |
| ... locals ...               |
| ... outgoing arguments ...   |
+------------------------------+ ↓ lower addresses
```
The "return PC" is pushed as part of the standard amd64 `CALL`
operation.
On entry, a function subtracts from RSP to open its stack frame and
saves the value of RBP directly below the return PC.
A leaf function that does not require any stack space may omit the
saved RBP.
The Go ABI's use of RBP as a frame pointer register is compatible with
amd64 platform conventions so that Go can inter-operate with platform
debuggers and profilers.
#### Flags
The direction flag (D) is always cleared (set to the “forward”
direction) at a call.
The arithmetic status flags are treated like scratch registers and not
preserved across calls.
All other bits in RFLAGS are system flags.
At function calls and returns, the CPU is in x87 mode (not MMX
technology mode).
*Rationale*: Go on amd64 does not use either the x87 registers or MMX
registers. Hence, we follow the SysV platform conventions in order to
simplify transitions to and from the C ABI.
At calls, the MXCSR control bits are always set as follows:
| Flag | Bit | Value | Meaning |
| --- | --- | --- | --- |
| FZ | 15 | 0 | Do not flush to zero |
| RC | 14/13 | 0 (RN) | Round to nearest |
| PM | 12 | 1 | Precision masked |
| UM | 11 | 1 | Underflow masked |
| OM | 10 | 1 | Overflow masked |
| ZM | 9 | 1 | Divide-by-zero masked |
| DM | 8 | 1 | Denormal operations masked |
| IM | 7 | 1 | Invalid operations masked |
| DAZ | 6 | 0 | Do not zero de-normals |
The MXCSR status bits are callee-save.
*Rationale*: Having a fixed MXCSR control configuration allows Go
functions to use SSE operations without modifying or saving the MXCSR.
Functions are allowed to modify it between calls (as long as they
restore it), but as of this writing Go code never does.
The above fixed configuration matches the process initialization
control bits specified by the ELF AMD64 ABI.
The x87 floating-point control word is not used by Go on amd64.
## Future directions
### Spill path improvements
The ABI currently reserves spill space for argument registers so the
compiler can statically generate an argument spill path before calling
into `runtime.morestack` to grow the stack.
This ensures there will be sufficient spill space even when the stack
is nearly exhausted and keeps stack growth and stack scanning
essentially unchanged from ABI0.
However, this wastes stack space (the median wastage is 16 bytes per
call), resulting in larger stacks and increased cache footprint.
A better approach would be to reserve stack space only when spilling.
One way to ensure enough space is available to spill would be for
every function to ensure there is enough space for the function's own
frame *as well as* the spill space of all functions it calls.
For most functions, this would change the threshold for the prologue
stack growth check.
For `nosplit` functions, this would change the threshold used in the
linker's static stack size check.
Allocating spill space in the callee rather than the caller may also
allow for faster reflection calls in the common case where a function
takes only register arguments, since it would allow reflection to make
these calls directly without allocating any frame.
The statically-generated spill path also increases code size.
It is possible to instead have a generic spill path in the runtime, as
part of `morestack`.
However, this complicates reserving the spill space, since spilling
all possible register arguments would, in most cases, take
significantly more space than spilling only those used by a particular
function.
Some options are to spill to a temporary space and copy back only the
registers used by the function, or to grow the stack if necessary
before spilling to it (using a temporary space if necessary), or to
use a heap-allocated space if insufficient stack space is available.
These options all add enough complexity that we will have to make this
decision based on the actual code size growth caused by the static
spill paths.
### Clobber sets
As defined, the ABI does not use callee-save registers.
This significantly simplifies the garbage collector and the compiler's
register allocator, but at some performance cost.
A potentially better balance for Go code would be to use *clobber
sets*: for each function, the compiler records the set of registers it
clobbers (including those clobbered by functions it calls) and any
register not clobbered by function F can remain live across calls to
F.
This is generally a good fit for Go because Go's package DAG allows
function metadata like the clobber set to flow up the call graph, even
across package boundaries.
Clobber sets would require relatively little change to the garbage
collector, unlike general callee-save registers.
One disadvantage of clobber sets over callee-save registers is that
they don't help with indirect function calls or interface method
calls, since static information isn't available in these cases.
### Large aggregates
Go encourages passing composite values by value, and this simplifies
reasoning about mutation and races.
However, this comes at a performance cost for large composite values.
It may be possible to instead transparently pass large composite
values by reference and delay copying until it is actually necessary.
## Appendix: Register usage analysis
In order to understand the impacts of the above design on register
usage, we
[analyzed](https://github.com/aclements/go-misc/tree/master/abi) the
impact of the above ABI on a large code base: cmd/kubelet from
[Kubernetes](https://github.com/kubernetes/kubernetes) at tag v1.18.8.
The following table shows the impact of different numbers of available
integer and floating-point registers on argument assignment:
```
| | | | stack args | spills | stack total |
| ints | floats | % fit | p50 | p95 | p99 | p50 | p95 | p99 | p50 | p95 | p99 |
| 0 | 0 | 6.3% | 32 | 152 | 256 | 0 | 0 | 0 | 32 | 152 | 256 |
| 0 | 8 | 6.4% | 32 | 152 | 256 | 0 | 0 | 0 | 32 | 152 | 256 |
| 1 | 8 | 21.3% | 24 | 144 | 248 | 8 | 8 | 8 | 32 | 152 | 256 |
| 2 | 8 | 38.9% | 16 | 128 | 224 | 8 | 16 | 16 | 24 | 136 | 240 |
| 3 | 8 | 57.0% | 0 | 120 | 224 | 16 | 24 | 24 | 24 | 136 | 240 |
| 4 | 8 | 73.0% | 0 | 120 | 216 | 16 | 32 | 32 | 24 | 136 | 232 |
| 5 | 8 | 83.3% | 0 | 112 | 216 | 16 | 40 | 40 | 24 | 136 | 232 |
| 6 | 8 | 87.5% | 0 | 112 | 208 | 16 | 48 | 48 | 24 | 136 | 232 |
| 7 | 8 | 89.8% | 0 | 112 | 208 | 16 | 48 | 56 | 24 | 136 | 232 |
| 8 | 8 | 91.3% | 0 | 112 | 200 | 16 | 56 | 64 | 24 | 136 | 232 |
| 9 | 8 | 92.1% | 0 | 112 | 192 | 16 | 56 | 72 | 24 | 136 | 232 |
| 10 | 8 | 92.6% | 0 | 104 | 192 | 16 | 56 | 72 | 24 | 136 | 232 |
| 11 | 8 | 93.1% | 0 | 104 | 184 | 16 | 56 | 80 | 24 | 128 | 232 |
| 12 | 8 | 93.4% | 0 | 104 | 176 | 16 | 56 | 88 | 24 | 128 | 232 |
| 13 | 8 | 94.0% | 0 | 88 | 176 | 16 | 56 | 96 | 24 | 128 | 232 |
| 14 | 8 | 94.4% | 0 | 80 | 152 | 16 | 64 | 104 | 24 | 128 | 232 |
| 15 | 8 | 94.6% | 0 | 80 | 152 | 16 | 64 | 112 | 24 | 128 | 232 |
| 16 | 8 | 94.9% | 0 | 16 | 152 | 16 | 64 | 112 | 24 | 128 | 232 |
| ∞ | 8 | 99.8% | 0 | 0 | 0 | 24 | 112 | 216 | 24 | 120 | 216 |
```
The first two columns show the number of available integer and
floating-point registers.
The first row shows the results for 0 integer and 0 floating-point
registers, which is equivalent to ABI0.
We found that any reasonable number of floating-point registers has
the same effect, so we fixed it at 8 for all other rows.
The “% fit” column gives the fraction of functions where all arguments
and results are register-assigned and no arguments are passed on the
stack.
The three “stack args” columns give the median, 95th and 99th
percentile number of bytes of stack arguments.
The “spills” columns likewise summarize the number of bytes in
on-stack spill space.
And “stack total” summarizes the sum of stack arguments and on-stack
spill slots.
Note that these are three different distributions; for example,
there's no single function that takes 0 stack argument bytes, 16 spill
bytes, and 24 total stack bytes.
From this, we can see that the fraction of functions that fit entirely
in registers grows very slowly once it reaches about 90%, though
curiously there is a small minority of functions that could benefit
from a huge number of registers.
Making 9 integer registers available on amd64 puts it in this realm.
We also see that the stack space required for most functions is fairly
small.
While the increasing space required for spills largely balances out
the decreasing space required for stack arguments as the number of
available registers increases, there is a general reduction in the
total stack space required with more available registers.
This does, however, suggest that eliminating spill slots in the future
would noticeably reduce stack requirements.


@@ -1,612 +0,0 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package abi
import (
"cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
"sync"
)
//......................................................................
//
// Public/exported bits of the ABI utilities.
//
// ABIParamResultInfo stores the results of processing a given
// function type to compute stack layout and register assignments. For
// each input and output parameter we capture whether the param was
// register-assigned (and to which register(s)) or the stack offset
// for the param if it is not going to be passed in registers according
// to the rules in the Go internal ABI specification (1.17).
type ABIParamResultInfo struct {
inparams []ABIParamAssignment // Includes receiver for method calls. Does NOT include hidden closure pointer.
outparams []ABIParamAssignment
offsetToSpillArea int64
spillAreaSize int64
inRegistersUsed int
outRegistersUsed int
config *ABIConfig // to enable String() method
}
func (a *ABIParamResultInfo) Config() *ABIConfig {
return a.config
}
func (a *ABIParamResultInfo) InParams() []ABIParamAssignment {
return a.inparams
}
func (a *ABIParamResultInfo) OutParams() []ABIParamAssignment {
return a.outparams
}
func (a *ABIParamResultInfo) InRegistersUsed() int {
return a.inRegistersUsed
}
func (a *ABIParamResultInfo) OutRegistersUsed() int {
return a.outRegistersUsed
}
func (a *ABIParamResultInfo) InParam(i int) *ABIParamAssignment {
return &a.inparams[i]
}
func (a *ABIParamResultInfo) OutParam(i int) *ABIParamAssignment {
return &a.outparams[i]
}
func (a *ABIParamResultInfo) SpillAreaOffset() int64 {
return a.offsetToSpillArea
}
func (a *ABIParamResultInfo) SpillAreaSize() int64 {
return a.spillAreaSize
}
// RegIndex stores the index into the set of machine registers used by
// the ABI on a specific architecture for parameter passing. RegIndex
// values 0 through N-1 (where N is the number of integer registers
// used for param passing according to the ABI rules) describe integer
// registers; values N through N+M-1 (where M is the number of floating
// point registers used) describe floating point registers. Thus if the
// ABI says there are 5 integer registers and 7 floating point
// registers, then a RegIndex value of 4
// indicates the 5th integer register, and a RegIndex value of 11
// indicates the 7th floating point register.
type RegIndex uint8
// ABIParamAssignment holds information about how a specific param or
// result will be passed: in registers (in which case 'Registers' is
// populated) or on the stack (in which case 'Offset' is set to a
// non-negative stack offset). The values in 'Registers' are indices
// (as described above), not architected registers.
type ABIParamAssignment struct {
Type *types.Type
Name types.Object // should always be *ir.Name, used to match with a particular ssa.OpArg.
Registers []RegIndex
offset int32
}
// Offset returns the stack offset for addressing the parameter that "a" describes.
// This will panic if "a" describes a register-allocated parameter.
func (a *ABIParamAssignment) Offset() int32 {
if len(a.Registers) > 0 {
panic("Register allocated parameters have no offset")
}
return a.offset
}
// SpillOffset returns the offset *within the spill area* for the parameter that "a" describes.
// Registers will be spilled here; if a memory home is needed (e.g., for a pointer method)
// then that will be the address.
// This will panic if "a" describes a stack-allocated parameter.
func (a *ABIParamAssignment) SpillOffset() int32 {
if len(a.Registers) == 0 {
panic("Stack-allocated parameters have no spill offset")
}
return a.offset
}
// FrameOffset returns the location that a value would spill to, if any exists.
// For register-allocated inputs, that is their spill offset reserved for morestack
// (might as well use it, it is there); for stack-allocated inputs and outputs,
// that is their location on the stack. For register-allocated outputs, there is
// no defined spill area, so return -1.
func (a *ABIParamAssignment) FrameOffset(i *ABIParamResultInfo) int64 {
if len(a.Registers) == 0 || a.offset == -1 {
return int64(a.offset)
}
return int64(a.offset) + i.SpillAreaOffset()
}
// RegAmounts holds a specified number of integer/float registers.
type RegAmounts struct {
intRegs int
floatRegs int
}
// ABIConfig captures the number of registers made available
// by the ABI rules for parameter passing and result returning.
type ABIConfig struct {
// Do we need anything more than this?
offsetForLocals int64 // e.g., obj.(*Link).FixedFrameSize() -- extra linkage information on some architectures.
regAmounts RegAmounts
regsForTypeCache map[*types.Type]int
}
// NewABIConfig returns a new ABI configuration for an architecture with
// iRegsCount integer/pointer registers and fRegsCount floating point registers.
func NewABIConfig(iRegsCount, fRegsCount int, offsetForLocals int64) *ABIConfig {
return &ABIConfig{offsetForLocals: offsetForLocals, regAmounts: RegAmounts{iRegsCount, fRegsCount}, regsForTypeCache: make(map[*types.Type]int)}
}
// Copy returns a copy of an ABIConfig for use in a function's compilation so that access to the cache does not need to be protected with a mutex.
func (a *ABIConfig) Copy() *ABIConfig {
b := *a
b.regsForTypeCache = make(map[*types.Type]int)
return &b
}
// LocalsOffset returns the architecture-dependent offset from SP for args and results.
// In theory this is only used for debugging; it ought to already be incorporated into
// results from the ABI-related methods
func (a *ABIConfig) LocalsOffset() int64 {
return a.offsetForLocals
}
// FloatIndexFor translates r into an index in the floating point parameter
// registers. If the result is negative, the input index was actually for the
// integer parameter registers.
func (a *ABIConfig) FloatIndexFor(r RegIndex) int64 {
return int64(r) - int64(a.regAmounts.intRegs)
}
// NumParamRegs returns the number of parameter registers used for a given type,
// without regard for the number available.
func (a *ABIConfig) NumParamRegs(t *types.Type) int {
var n int
if n, ok := a.regsForTypeCache[t]; ok {
return n
}
if t.IsScalar() || t.IsPtrShaped() {
if t.IsComplex() {
n = 2
} else {
n = (int(t.Size()) + types.RegSize - 1) / types.RegSize
}
} else {
typ := t.Kind()
switch typ {
case types.TARRAY:
n = a.NumParamRegs(t.Elem()) * int(t.NumElem())
case types.TSTRUCT:
for _, f := range t.FieldSlice() {
n += a.NumParamRegs(f.Type)
}
case types.TSLICE:
n = a.NumParamRegs(synthSlice)
case types.TSTRING:
n = a.NumParamRegs(synthString)
case types.TINTER:
n = a.NumParamRegs(synthIface)
}
}
a.regsForTypeCache[t] = n
return n
}
// preAllocateParams gets the slice sizes right for inputs and outputs.
func (a *ABIParamResultInfo) preAllocateParams(hasRcvr bool, nIns, nOuts int) {
if hasRcvr {
nIns++
}
a.inparams = make([]ABIParamAssignment, 0, nIns)
a.outparams = make([]ABIParamAssignment, 0, nOuts)
}
// ABIAnalyzeTypes takes an optional receiver type, arrays of ins and outs, and returns an ABIParamResultInfo,
// based on the given configuration. This is the same result computed by config.ABIAnalyze applied to the
// corresponding method/function type, except that all the embedded parameter names are nil.
// This is intended for use by ssagen/ssa.go:(*state).rtcall, for runtime functions that lack a parsed function type.
func (config *ABIConfig) ABIAnalyzeTypes(rcvr *types.Type, ins, outs []*types.Type) *ABIParamResultInfo {
setup()
s := assignState{
stackOffset: config.offsetForLocals,
rTotal: config.regAmounts,
}
result := &ABIParamResultInfo{config: config}
result.preAllocateParams(rcvr != nil, len(ins), len(outs))
// Receiver
if rcvr != nil {
result.inparams = append(result.inparams,
s.assignParamOrReturn(rcvr, nil, false))
}
// Inputs
for _, t := range ins {
result.inparams = append(result.inparams,
s.assignParamOrReturn(t, nil, false))
}
s.stackOffset = types.Rnd(s.stackOffset, int64(types.RegSize))
result.inRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs
// Outputs
s.rUsed = RegAmounts{}
for _, t := range outs {
result.outparams = append(result.outparams, s.assignParamOrReturn(t, nil, true))
}
// The spill area is at a register-aligned offset and its size is rounded up to a register alignment.
// TODO in theory could align offset only to minimum required by spilled data types.
result.offsetToSpillArea = alignTo(s.stackOffset, types.RegSize)
result.spillAreaSize = alignTo(s.spillOffset, types.RegSize)
result.outRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs
return result
}
// ABIAnalyzeFuncType takes a function type 'ft' and an ABI rules description
// 'config' and analyzes the function to determine how its parameters
// and results will be passed (in registers or on the stack), returning
// an ABIParamResultInfo object that holds the results of the analysis.
func (config *ABIConfig) ABIAnalyzeFuncType(ft *types.Func) *ABIParamResultInfo {
setup()
s := assignState{
stackOffset: config.offsetForLocals,
rTotal: config.regAmounts,
}
result := &ABIParamResultInfo{config: config}
result.preAllocateParams(ft.Receiver != nil, ft.Params.NumFields(), ft.Results.NumFields())
// Receiver
// TODO(register args) ? seems like "struct" and "fields" is not right anymore for describing function parameters
if ft.Receiver != nil && ft.Receiver.NumFields() != 0 {
r := ft.Receiver.FieldSlice()[0]
result.inparams = append(result.inparams,
s.assignParamOrReturn(r.Type, r.Nname, false))
}
// Inputs
ifsl := ft.Params.FieldSlice()
for _, f := range ifsl {
result.inparams = append(result.inparams,
s.assignParamOrReturn(f.Type, f.Nname, false))
}
s.stackOffset = types.Rnd(s.stackOffset, int64(types.RegSize))
result.inRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs
// Outputs
s.rUsed = RegAmounts{}
ofsl := ft.Results.FieldSlice()
for _, f := range ofsl {
result.outparams = append(result.outparams, s.assignParamOrReturn(f.Type, f.Nname, true))
}
// The spill area is at a register-aligned offset and its size is rounded up to a register alignment.
// TODO in theory could align offset only to minimum required by spilled data types.
result.offsetToSpillArea = alignTo(s.stackOffset, types.RegSize)
result.spillAreaSize = alignTo(s.spillOffset, types.RegSize)
result.outRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs
return result
}
// ABIAnalyze returns the same result as ABIAnalyzeFuncType, but also
// updates the offsets of all the receiver, input, and output fields.
func (config *ABIConfig) ABIAnalyze(t *types.Type) *ABIParamResultInfo {
ft := t.FuncType()
result := config.ABIAnalyzeFuncType(ft)
// Fill in the frame offsets for receiver, inputs, results
k := 0
if t.NumRecvs() != 0 {
config.updateOffset(result, ft.Receiver.FieldSlice()[0], result.inparams[0], false)
k++
}
for i, f := range ft.Params.FieldSlice() {
config.updateOffset(result, f, result.inparams[k+i], false)
}
for i, f := range ft.Results.FieldSlice() {
config.updateOffset(result, f, result.outparams[i], true)
}
return result
}
// parameterUpdateMu protects the Offset field of function/method parameters (a subset of structure Fields)
var parameterUpdateMu sync.Mutex
// FieldOffsetOf returns a concurrency-safe version of f.Offset
func FieldOffsetOf(f *types.Field) int64 {
parameterUpdateMu.Lock()
defer parameterUpdateMu.Unlock()
return f.Offset
}
func (config *ABIConfig) updateOffset(result *ABIParamResultInfo, f *types.Field, a ABIParamAssignment, isReturn bool) {
// Everything except return values in registers has either a frame home (if not in a register) or a frame spill location.
if !isReturn || len(a.Registers) == 0 {
// The type frame offset DOES NOT show effects of minimum frame size.
// Getting this wrong breaks stackmaps, see liveness/plive.go:WriteFuncMap and typebits/typebits.go:Set
parameterUpdateMu.Lock()
defer parameterUpdateMu.Unlock()
off := a.FrameOffset(result) - config.LocalsOffset()
fOffset := f.Offset
if fOffset == types.BOGUS_FUNARG_OFFSET {
// Set the Offset the first time. After that, we may recompute it, but it should never change.
f.Offset = off
} else if fOffset != off {
panic(fmt.Errorf("Offset changed from %d to %d", fOffset, off))
}
}
}
//......................................................................
//
// Non-public portions.
// regString produces a human-readable version of a RegIndex.
func (c *RegAmounts) regString(r RegIndex) string {
if int(r) < c.intRegs {
return fmt.Sprintf("I%d", int(r))
} else if int(r) < c.intRegs+c.floatRegs {
return fmt.Sprintf("F%d", int(r)-c.intRegs)
}
return fmt.Sprintf("<?>%d", r)
}
// toString method renders an ABIParamAssignment in human-readable
// form, suitable for debugging or unit testing.
func (ri *ABIParamAssignment) toString(config *ABIConfig) string {
regs := "R{"
offname := "spilloffset" // offset is for spill for register(s)
if len(ri.Registers) == 0 {
offname = "offset" // offset is for memory arg
}
for _, r := range ri.Registers {
regs += " " + config.regAmounts.regString(r)
}
return fmt.Sprintf("%s } %s: %d typ: %v", regs, offname, ri.offset, ri.Type)
}
// String renders an ABIParamResultInfo in human-readable
// form, suitable for debugging or unit testing.
func (ri *ABIParamResultInfo) String() string {
res := ""
for k, p := range ri.inparams {
res += fmt.Sprintf("IN %d: %s\n", k, p.toString(ri.config))
}
for k, r := range ri.outparams {
res += fmt.Sprintf("OUT %d: %s\n", k, r.toString(ri.config))
}
res += fmt.Sprintf("offsetToSpillArea: %d spillAreaSize: %d",
ri.offsetToSpillArea, ri.spillAreaSize)
return res
}
// assignState holds intermediate state during the register assigning process
// for a given function signature.
type assignState struct {
rTotal RegAmounts // total reg amounts from ABI rules
rUsed RegAmounts // regs used by params completely assigned so far
pUsed RegAmounts // regs used by the current param (or pieces therein)
stackOffset int64 // current stack offset
spillOffset int64 // current spill offset
}
// align returns 'a' rounded up to t's alignment.
func align(a int64, t *types.Type) int64 {
return alignTo(a, int(t.Align))
}
// alignTo returns 'a' rounded up to a multiple of t, where t must be 0 or a power of 2.
func alignTo(a int64, t int) int64 {
if t == 0 {
return a
}
return types.Rnd(a, int64(t))
}
// stackSlot returns a stack offset for a param or result of the
// specified type.
func (state *assignState) stackSlot(t *types.Type) int64 {
rv := align(state.stackOffset, t)
state.stackOffset = rv + t.Width
return rv
}
// allocateRegs returns a set of register indices for a parameter or result
// that we've just determined to be register-assignable. The number of registers
// needed is assumed to be stored in state.pUsed.
func (state *assignState) allocateRegs() []RegIndex {
regs := []RegIndex{}
// integer
for r := state.rUsed.intRegs; r < state.rUsed.intRegs+state.pUsed.intRegs; r++ {
regs = append(regs, RegIndex(r))
}
state.rUsed.intRegs += state.pUsed.intRegs
// floating
for r := state.rUsed.floatRegs; r < state.rUsed.floatRegs+state.pUsed.floatRegs; r++ {
regs = append(regs, RegIndex(r+state.rTotal.intRegs))
}
state.rUsed.floatRegs += state.pUsed.floatRegs
return regs
}
// regAllocate creates a register ABIParamAssignment object for a param
// or result with the specified type, as a final step (this assumes
// that all of the safety/suitability analysis is complete).
func (state *assignState) regAllocate(t *types.Type, name types.Object, isReturn bool) ABIParamAssignment {
spillLoc := int64(-1)
if !isReturn {
// Spill for register-resident t must be aligned for storage of a t.
spillLoc = align(state.spillOffset, t)
state.spillOffset = spillLoc + t.Size()
}
return ABIParamAssignment{
Type: t,
Name: name,
Registers: state.allocateRegs(),
offset: int32(spillLoc),
}
}
// stackAllocate creates a stack memory ABIParamAssignment object for
// a param or result with the specified type, as a final step (this
// assumes that all of the safety/suitability analysis is complete).
func (state *assignState) stackAllocate(t *types.Type, name types.Object) ABIParamAssignment {
return ABIParamAssignment{
Type: t,
Name: name,
offset: int32(state.stackSlot(t)),
}
}
// intUsed returns the number of integer registers consumed
// at a given point within an assignment stage.
func (state *assignState) intUsed() int {
return state.rUsed.intRegs + state.pUsed.intRegs
}
// floatUsed returns the number of floating point registers consumed at
// a given point within an assignment stage.
func (state *assignState) floatUsed() int {
return state.rUsed.floatRegs + state.pUsed.floatRegs
}
// regassignIntegral examines a param/result of integral type 't' to
// determine whether it can be register-assigned. Returns TRUE if we
// can register allocate, FALSE otherwise (and updates state
// accordingly).
func (state *assignState) regassignIntegral(t *types.Type) bool {
regsNeeded := int(types.Rnd(t.Width, int64(types.PtrSize)) / int64(types.PtrSize))
if t.IsComplex() {
regsNeeded = 2
}
// Floating point and complex.
if t.IsFloat() || t.IsComplex() {
if regsNeeded+state.floatUsed() > state.rTotal.floatRegs {
// not enough regs
return false
}
state.pUsed.floatRegs += regsNeeded
return true
}
// Non-floating point
if regsNeeded+state.intUsed() > state.rTotal.intRegs {
// not enough regs
return false
}
state.pUsed.intRegs += regsNeeded
return true
}
// regassignArray processes an array type (or array component within some
// other enclosing type) to determine if it can be register assigned.
// Returns TRUE if we can register allocate, FALSE otherwise.
func (state *assignState) regassignArray(t *types.Type) bool {
nel := t.NumElem()
if nel == 0 {
return true
}
if nel > 1 {
// Not an array of length 1: stack assign
return false
}
// Visit element
return state.regassign(t.Elem())
}
// regassignStruct processes a struct type (or struct component within
// some other enclosing type) to determine if it can be register
// assigned. Returns TRUE if we can register allocate, FALSE otherwise.
func (state *assignState) regassignStruct(t *types.Type) bool {
for _, field := range t.FieldSlice() {
if !state.regassign(field.Type) {
return false
}
}
return true
}
// synthOnce ensures that we only create the synth* fake types once.
var synthOnce sync.Once
// synthSlice, synthString, and synthIface are synthesized struct types
// meant to capture the underlying implementations of string/slice/interface.
var synthSlice *types.Type
var synthString *types.Type
var synthIface *types.Type
// setup performs setup for the register assignment utilities, manufacturing
// a small set of synthesized types that we'll need along the way.
func setup() {
synthOnce.Do(func() {
fname := types.BuiltinPkg.Lookup
nxp := src.NoXPos
unsp := types.Types[types.TUNSAFEPTR]
ui := types.Types[types.TUINTPTR]
synthSlice = types.NewStruct(types.NoPkg, []*types.Field{
types.NewField(nxp, fname("ptr"), unsp),
types.NewField(nxp, fname("len"), ui),
types.NewField(nxp, fname("cap"), ui),
})
synthString = types.NewStruct(types.NoPkg, []*types.Field{
types.NewField(nxp, fname("data"), unsp),
types.NewField(nxp, fname("len"), ui),
})
synthIface = types.NewStruct(types.NoPkg, []*types.Field{
types.NewField(nxp, fname("f1"), unsp),
types.NewField(nxp, fname("f2"), unsp),
})
})
}
// regassign examines a given param type (or component within some
// composite) to determine if it can be register assigned. Returns
// TRUE if we can register allocate, FALSE otherwise.
func (state *assignState) regassign(pt *types.Type) bool {
typ := pt.Kind()
if pt.IsScalar() || pt.IsPtrShaped() {
return state.regassignIntegral(pt)
}
switch typ {
case types.TARRAY:
return state.regassignArray(pt)
case types.TSTRUCT:
return state.regassignStruct(pt)
case types.TSLICE:
return state.regassignStruct(synthSlice)
case types.TSTRING:
return state.regassignStruct(synthString)
case types.TINTER:
return state.regassignStruct(synthIface)
default:
panic("not expected")
}
}
// assignParamOrReturn processes a given receiver, param, or result
// of type 'pt' (with name 'n') to determine whether it can be
// register assigned, returning an ABIParamAssignment that records
// either the chosen registers or the stack offset.
func (state *assignState) assignParamOrReturn(pt *types.Type, n types.Object, isReturn bool) ABIParamAssignment {
state.pUsed = RegAmounts{}
if pt.Width == types.BADWIDTH {
panic("should never happen")
} else if pt.Width == 0 {
return state.stackAllocate(pt, n)
} else if state.regassign(pt) {
return state.regAllocate(pt, n, isReturn)
} else {
return state.stackAllocate(pt, n)
}
}


@@ -5,13 +5,13 @@
package amd64
import (
"cmd/compile/internal/ssagen"
"cmd/compile/internal/gc"
"cmd/internal/obj/x86"
)
var leaptr = x86.ALEAQ
func Init(arch *ssagen.ArchInfo) {
func Init(arch *gc.Arch) {
arch.LinkArch = &x86.Linkamd64
arch.REGSP = x86.REGSP
arch.MAXWIDTH = 1 << 50


@@ -5,10 +5,7 @@
package amd64
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/types"
"cmd/compile/internal/gc"
"cmd/internal/obj"
"cmd/internal/obj/x86"
"cmd/internal/objabi"
@@ -22,8 +19,8 @@ var isPlan9 = objabi.GOOS == "plan9"
const (
dzBlocks = 16 // number of MOV/ADD blocks
dzBlockLen = 4 // number of clears per block
dzBlockSize = 23 // size of instructions in a single block
dzMovSize = 5 // size of single MOV instruction w/ offset
dzBlockSize = 19 // size of instructions in a single block
dzMovSize = 4 // size of single MOV instruction w/ offset
dzLeaqSize = 4 // size of single LEAQ instruction
dzClearStep = 16 // number of bytes cleared by each MOV instruction
@@ -54,77 +51,77 @@ func dzDI(b int64) int64 {
return -dzClearStep * (dzBlockLen - tailSteps)
}
func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
const (
ax = 1 << iota // if AX is already zeroed.
x15 // if X15 is already zeroed. Note: in new ABI, X15 is always zero.
ax = 1 << iota
x0
)
if cnt == 0 {
return p
}
if cnt%int64(types.RegSize) != 0 {
if cnt%int64(gc.Widthreg) != 0 {
// should only happen with nacl
if cnt%int64(types.PtrSize) != 0 {
base.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
if cnt%int64(gc.Widthptr) != 0 {
gc.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
}
if *state&ax == 0 {
p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
*state |= ax
}
p = pp.Append(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
off += int64(types.PtrSize)
cnt -= int64(types.PtrSize)
p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
off += int64(gc.Widthptr)
cnt -= int64(gc.Widthptr)
}
if cnt == 8 {
if *state&ax == 0 {
p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
*state |= ax
}
p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
} else if !isPlan9 && cnt <= int64(8*types.RegSize) {
if objabi.Regabi_enabled == 0 && *state&x15 == 0 {
p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_REG, x86.REG_X15, 0)
*state |= x15
p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
} else if !isPlan9 && cnt <= int64(8*gc.Widthreg) {
if *state&x0 == 0 {
p = pp.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
*state |= x0
}
for i := int64(0); i < cnt/16; i++ {
p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16)
p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16)
}
if cnt%16 != 0 {
p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
}
} else if !isPlan9 && (cnt <= int64(128*types.RegSize)) {
if objabi.Regabi_enabled == 0 && *state&x15 == 0 {
p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_REG, x86.REG_X15, 0)
*state |= x15
} else if !isPlan9 && (cnt <= int64(128*gc.Widthreg)) {
if *state&x0 == 0 {
p = pp.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
*state |= x0
}
p = pp.Append(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0)
p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt))
p.To.Sym = ir.Syms.Duffzero
p = pp.Appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0)
p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt))
p.To.Sym = gc.Duffzero
if cnt%16 != 0 {
p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8))
p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8))
}
} else {
if *state&ax == 0 {
p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
*state |= ax
}
p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(types.RegSize), obj.TYPE_REG, x86.REG_CX, 0)
p = pp.Append(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
p = pp.Append(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
p = pp.Append(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0)
p = pp.Appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
p = pp.Appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
p = pp.Appendpp(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
}
return p
}
func ginsnop(pp *objw.Progs) *obj.Prog {
func ginsnop(pp *gc.Progs) *obj.Prog {
// This is a hardware nop (1-byte 0x90) instruction,
// even though we describe it as an explicit XCHGL here.
// Particularly, this does not zero the high 32 bits

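The dz* constants above parameterize jumps into runtime.duffzero: clearing cnt bytes means entering the routine at an offset where exactly the needed MOV/LEAQ tail remains. A minimal sketch of that offset arithmetic, assuming the restored gc-era constant values shown above; dzClearLen and dzSize are the derived products defined alongside these constants in the same file, and the dzOff body here is an illustrative reconstruction, not a verbatim copy of the diff:

package main

import "fmt"

// Restored (gc-era) values from the hunk above.
const (
	dzBlocks    = 16 // MOV/ADD blocks in runtime.duffzero
	dzBlockLen  = 4  // clears per block
	dzBlockSize = 19 // instruction bytes per block
	dzMovSize   = 4  // bytes of one MOV with offset
	dzLeaqSize  = 4  // bytes of one LEAQ
	dzClearStep = 16 // bytes cleared by each MOV

	dzClearLen = dzClearStep * dzBlockLen // bytes cleared per block
	dzSize     = dzBlocks * dzBlockSize   // total body size in bytes
)

// dzOff returns how far into duffzero to jump so that exactly b bytes
// are cleared (b a multiple of dzClearStep, at most dzBlocks*dzClearLen).
// Whole blocks are skipped from the front; a partial tail enters the
// last block partway through, so only the needed MOVs and the trailing
// LEAQ execute.
func dzOff(b int64) int64 {
	off := int64(dzSize)
	off -= b / dzClearLen * dzBlockSize
	tailLen := b % dzClearLen
	if tailLen >= dzClearStep {
		off -= dzLeaqSize + dzMovSize*(tailLen/dzClearStep)
	}
	return off
}

func main() {
	for _, b := range []int64{16, 64, 256, 1024} {
		fmt.Printf("cnt=%4d -> enter duffzero at byte offset %d\n", b, dzOff(b))
	}
}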

@@ -8,19 +8,16 @@ import (
"fmt"
"math"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/gc"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/x86"
"cmd/internal/objabi"
)
// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
flive := b.FlagsLiveAtEnd
for _, c := range b.ControlValues() {
flive = c.Type.IsFlags() || flive
@@ -113,7 +110,7 @@ func moveByType(t *types.Type) obj.As {
// dest := dest(To) op src(From)
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
p := s.Prog(op)
p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG
@@ -167,41 +164,16 @@ func duff(size int64) (int64, int64) {
return off, adj
}
func getgFromTLS(s *ssagen.State, r int16) {
// See the comments in cmd/internal/obj/x86/obj6.go
// near CanUse1InsnTLS for a detailed explanation of these instructions.
if x86.CanUse1InsnTLS(base.Ctxt) {
// MOVQ (TLS), r
p := s.Prog(x86.AMOVQ)
p.From.Type = obj.TYPE_MEM
p.From.Reg = x86.REG_TLS
p.To.Type = obj.TYPE_REG
p.To.Reg = r
} else {
// MOVQ TLS, r
// MOVQ (r)(TLS*1), r
p := s.Prog(x86.AMOVQ)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_TLS
p.To.Type = obj.TYPE_REG
p.To.Reg = r
q := s.Prog(x86.AMOVQ)
q.From.Type = obj.TYPE_MEM
q.From.Reg = r
q.From.Index = x86.REG_TLS
q.From.Scale = 1
q.To.Type = obj.TYPE_REG
q.To.Reg = r
}
}
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
switch v.Op {
case ssa.OpAMD64VFMADD231SD:
p := s.Prog(v.Op.Asm())
p.From = obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[2].Reg()}
p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()}
p.SetFrom3Reg(v.Args[1].Reg())
p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[1].Reg()})
if v.Reg() != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
case ssa.OpAMD64ADDQ, ssa.OpAMD64ADDL:
r := v.Reg()
r1 := v.Args[0].Reg()
@@ -251,7 +223,11 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
ssa.OpAMD64BTSL, ssa.OpAMD64BTSQ,
ssa.OpAMD64BTCL, ssa.OpAMD64BTCQ,
ssa.OpAMD64BTRL, ssa.OpAMD64BTRQ:
opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg())
r := v.Reg()
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
opregreg(s, v.Op.Asm(), r, v.Args[1].Reg())
case ssa.OpAMD64DIVQU, ssa.OpAMD64DIVLU, ssa.OpAMD64DIVWU:
// Arg[0] (the dividend) is in AX.
@@ -394,16 +370,20 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
// compute (x+y)/2 unsigned.
// Do a 64-bit add, the overflow goes into the carry.
// Shift right once and pull the carry back into the 63rd bit.
r := v.Reg()
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
p := s.Prog(x86.AADDQ)
p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
p.To.Reg = r
p.From.Reg = v.Args[1].Reg()
p = s.Prog(x86.ARCRQ)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 1
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
p.To.Reg = r
case ssa.OpAMD64ADDQcarry, ssa.OpAMD64ADCQ:
r := v.Reg0()
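The (x+y)/2 sequence in the hunk above pairs ADDQ with RCRQ so the add's carry-out is rotated back in as bit 63. The same computation in plain Go via math/bits (avg is our name for it, not a compiler identifier):

package main

import (
	"fmt"
	"math/bits"
)

// avg computes (x+y)/2 without overflow: the 65th bit of the sum lands
// in carry, and shifting it back in as the top bit is exactly what the
// RCRQ (rotate right through carry) instruction does.
func avg(x, y uint64) uint64 {
	sum, carry := bits.Add64(x, y, 0)
	return sum>>1 | carry<<63
}

func main() {
	const max = ^uint64(0)
	fmt.Println(avg(max, max)) // max, not an overflowed 0
	fmt.Println(avg(2, 4))     // 3
}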
@@ -519,13 +499,21 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
ssa.OpAMD64CMOVQCS, ssa.OpAMD64CMOVLCS, ssa.OpAMD64CMOVWCS,
ssa.OpAMD64CMOVQGTF, ssa.OpAMD64CMOVLGTF, ssa.OpAMD64CMOVWGTF,
ssa.OpAMD64CMOVQGEF, ssa.OpAMD64CMOVLGEF, ssa.OpAMD64CMOVWGEF:
r := v.Reg()
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
p.To.Reg = r
case ssa.OpAMD64CMOVQNEF, ssa.OpAMD64CMOVLNEF, ssa.OpAMD64CMOVWNEF:
r := v.Reg()
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
// Flag condition: ^ZERO || PARITY
// Generate:
// CMOV*NE SRC,DST
@@ -534,7 +522,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
p.To.Reg = r
var q *obj.Prog
if v.Op == ssa.OpAMD64CMOVQNEF {
q = s.Prog(x86.ACMOVQPS)
@@ -546,9 +534,14 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
q.From.Type = obj.TYPE_REG
q.From.Reg = v.Args[1].Reg()
q.To.Type = obj.TYPE_REG
q.To.Reg = v.Reg()
q.To.Reg = r
case ssa.OpAMD64CMOVQEQF, ssa.OpAMD64CMOVLEQF, ssa.OpAMD64CMOVWEQF:
r := v.Reg()
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
// Flag condition: ZERO && !PARITY
// Generate:
// MOV SRC,AX
@@ -565,7 +558,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
}
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Reg()
p.From.Reg = r
p.To.Type = obj.TYPE_REG
p.To.Reg = x86.REG_AX
var q *obj.Prog
@@ -579,7 +572,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
q.From.Type = obj.TYPE_REG
q.From.Reg = x86.REG_AX
q.To.Type = obj.TYPE_REG
q.To.Reg = v.Reg()
q.To.Reg = r
case ssa.OpAMD64MULQconst, ssa.OpAMD64MULLconst:
r := v.Reg()
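A single CMOV tests one condition, but floating-point equality on amd64 is ZERO && !PARITY (PARITY marks an unordered, i.e. NaN, compare), so the CMOV*EQF and CMOV*NEF cases above emit two conditional moves. A boolean sketch of the selection logic those pairs implement (function names are ours):

package main

import "fmt"

// selectNEF mirrors "CMOV*NE SRC,DST; CMOV*PS SRC,DST": take src when
// ^ZERO || PARITY, i.e. the operands were unequal or the compare was
// unordered (NaN).
func selectNEF(zf, pf bool, src, dst uint64) uint64 {
	if !zf || pf {
		return src
	}
	return dst
}

// selectEQF composes the inverse condition, ZERO && !PARITY, routing
// through a scratch register the way the generated code uses AX:
// "MOV SRC,AX; CMOV*NE DST,AX; CMOV*PC AX,DST".
func selectEQF(zf, pf bool, src, dst uint64) uint64 {
	ax := src // MOV SRC, AX
	if !zf {
		ax = dst // CMOV*NE DST, AX
	}
	if !pf {
		dst = ax // CMOV*PC AX, DST
	}
	return dst
}

func main() {
	// zf=1, pf=0: ordered and equal -> EQF selects src, NEF selects dst.
	fmt.Println(selectEQF(true, false, 1, 2), selectNEF(true, false, 1, 2))
}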
@@ -588,7 +581,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = r
p.SetFrom3Reg(v.Args[0].Reg())
p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[0].Reg()})
case ssa.OpAMD64SUBQconst, ssa.OpAMD64SUBLconst,
ssa.OpAMD64ANDQconst, ssa.OpAMD64ANDLconst,
@@ -598,11 +591,15 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
ssa.OpAMD64SHRQconst, ssa.OpAMD64SHRLconst, ssa.OpAMD64SHRWconst, ssa.OpAMD64SHRBconst,
ssa.OpAMD64SARQconst, ssa.OpAMD64SARLconst, ssa.OpAMD64SARWconst, ssa.OpAMD64SARBconst,
ssa.OpAMD64ROLQconst, ssa.OpAMD64ROLLconst, ssa.OpAMD64ROLWconst, ssa.OpAMD64ROLBconst:
r := v.Reg()
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
p.To.Reg = r
case ssa.OpAMD64SBBQcarrymask, ssa.OpAMD64SBBLcarrymask:
r := v.Reg()
p := s.Prog(v.Op.Asm())
@@ -633,12 +630,12 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = o
}
ssagen.AddAux(&p.From, v)
gc.AddAux(&p.From, v)
case ssa.OpAMD64LEAQ, ssa.OpAMD64LEAL, ssa.OpAMD64LEAW:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
ssagen.AddAux(&p.From, v)
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64CMPQ, ssa.OpAMD64CMPL, ssa.OpAMD64CMPW, ssa.OpAMD64CMPB,
@@ -674,7 +671,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
ssagen.AddAux(&p.From, v)
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[1].Reg()
case ssa.OpAMD64CMPQconstload, ssa.OpAMD64CMPLconstload, ssa.OpAMD64CMPWconstload, ssa.OpAMD64CMPBconstload:
@@ -682,20 +679,20 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
ssagen.AddAux2(&p.From, v, sc.Off())
gc.AddAux2(&p.From, v, sc.Off())
p.To.Type = obj.TYPE_CONST
p.To.Offset = sc.Val()
case ssa.OpAMD64CMPQloadidx8, ssa.OpAMD64CMPQloadidx1, ssa.OpAMD64CMPLloadidx4, ssa.OpAMD64CMPLloadidx1, ssa.OpAMD64CMPWloadidx2, ssa.OpAMD64CMPWloadidx1, ssa.OpAMD64CMPBloadidx1:
p := s.Prog(v.Op.Asm())
memIdx(&p.From, v)
ssagen.AddAux(&p.From, v)
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[2].Reg()
case ssa.OpAMD64CMPQconstloadidx8, ssa.OpAMD64CMPQconstloadidx1, ssa.OpAMD64CMPLconstloadidx4, ssa.OpAMD64CMPLconstloadidx1, ssa.OpAMD64CMPWconstloadidx2, ssa.OpAMD64CMPWconstloadidx1, ssa.OpAMD64CMPBconstloadidx1:
sc := v.AuxValAndOff()
p := s.Prog(v.Op.Asm())
memIdx(&p.From, v)
ssagen.AddAux2(&p.From, v, sc.Off())
gc.AddAux2(&p.From, v, sc.Off())
p.To.Type = obj.TYPE_CONST
p.To.Offset = sc.Val()
case ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst:
@@ -735,14 +732,14 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
ssagen.AddAux(&p.From, v)
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64MOVBloadidx1, ssa.OpAMD64MOVWloadidx1, ssa.OpAMD64MOVLloadidx1, ssa.OpAMD64MOVQloadidx1, ssa.OpAMD64MOVSSloadidx1, ssa.OpAMD64MOVSDloadidx1,
ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8, ssa.OpAMD64MOVLloadidx8, ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4, ssa.OpAMD64MOVWloadidx2:
p := s.Prog(v.Op.Asm())
memIdx(&p.From, v)
ssagen.AddAux(&p.From, v)
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVSSstore, ssa.OpAMD64MOVSDstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore, ssa.OpAMD64MOVOstore,
@@ -754,7 +751,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
ssagen.AddAux(&p.To, v)
gc.AddAux(&p.To, v)
case ssa.OpAMD64MOVBstoreidx1, ssa.OpAMD64MOVWstoreidx1, ssa.OpAMD64MOVLstoreidx1, ssa.OpAMD64MOVQstoreidx1, ssa.OpAMD64MOVSSstoreidx1, ssa.OpAMD64MOVSDstoreidx1,
ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8, ssa.OpAMD64MOVLstoreidx8, ssa.OpAMD64MOVSSstoreidx4, ssa.OpAMD64MOVLstoreidx4, ssa.OpAMD64MOVWstoreidx2,
ssa.OpAMD64ADDLmodifyidx1, ssa.OpAMD64ADDLmodifyidx4, ssa.OpAMD64ADDLmodifyidx8, ssa.OpAMD64ADDQmodifyidx1, ssa.OpAMD64ADDQmodifyidx8,
@@ -766,7 +763,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
memIdx(&p.To, v)
ssagen.AddAux(&p.To, v)
gc.AddAux(&p.To, v)
case ssa.OpAMD64ADDQconstmodify, ssa.OpAMD64ADDLconstmodify:
sc := v.AuxValAndOff()
off := sc.Off()
@@ -789,7 +786,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p := s.Prog(asm)
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
ssagen.AddAux2(&p.To, v, off)
gc.AddAux2(&p.To, v, off)
break
}
fallthrough
@@ -804,7 +801,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.From.Offset = val
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
ssagen.AddAux2(&p.To, v, off)
gc.AddAux2(&p.To, v, off)
case ssa.OpAMD64MOVQstoreconst, ssa.OpAMD64MOVLstoreconst, ssa.OpAMD64MOVWstoreconst, ssa.OpAMD64MOVBstoreconst:
p := s.Prog(v.Op.Asm())
@@ -813,21 +810,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.From.Offset = sc.Val()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
ssagen.AddAux2(&p.To, v, sc.Off())
case ssa.OpAMD64MOVOstorezero:
if s.ABI != obj.ABIInternal {
v.Fatalf("MOVOstorezero can be only used in ABIInternal functions")
}
if !(objabi.Regabi_enabled == 1 && base.Flag.ABIWrap) {
// zeroing X15 manually if wrappers are not used
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
}
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_X15
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
ssagen.AddAux(&p.To, v)
gc.AddAux2(&p.To, v, sc.Off())
case ssa.OpAMD64MOVQstoreconstidx1, ssa.OpAMD64MOVQstoreconstidx8, ssa.OpAMD64MOVLstoreconstidx1, ssa.OpAMD64MOVLstoreconstidx4, ssa.OpAMD64MOVWstoreconstidx1, ssa.OpAMD64MOVWstoreconstidx2, ssa.OpAMD64MOVBstoreconstidx1,
ssa.OpAMD64ADDLconstmodifyidx1, ssa.OpAMD64ADDLconstmodifyidx4, ssa.OpAMD64ADDLconstmodifyidx8, ssa.OpAMD64ADDQconstmodifyidx1, ssa.OpAMD64ADDQconstmodifyidx8,
ssa.OpAMD64ANDLconstmodifyidx1, ssa.OpAMD64ANDLconstmodifyidx4, ssa.OpAMD64ANDLconstmodifyidx8, ssa.OpAMD64ANDQconstmodifyidx1, ssa.OpAMD64ANDQconstmodifyidx8,
@@ -852,7 +835,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.From.Type = obj.TYPE_NONE
}
memIdx(&p.To, v)
ssagen.AddAux2(&p.To, v, sc.Off())
gc.AddAux2(&p.To, v, sc.Off())
case ssa.OpAMD64MOVLQSX, ssa.OpAMD64MOVWQSX, ssa.OpAMD64MOVBQSX, ssa.OpAMD64MOVLQZX, ssa.OpAMD64MOVWQZX, ssa.OpAMD64MOVBQZX,
ssa.OpAMD64CVTTSS2SL, ssa.OpAMD64CVTTSD2SL, ssa.OpAMD64CVTTSS2SQ, ssa.OpAMD64CVTTSD2SQ,
ssa.OpAMD64CVTSS2SD, ssa.OpAMD64CVTSD2SS:
@@ -882,9 +865,12 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[1].Reg()
ssagen.AddAux(&p.From, v)
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
if v.Reg() != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
case ssa.OpAMD64ADDLloadidx1, ssa.OpAMD64ADDLloadidx4, ssa.OpAMD64ADDLloadidx8, ssa.OpAMD64ADDQloadidx1, ssa.OpAMD64ADDQloadidx8,
ssa.OpAMD64SUBLloadidx1, ssa.OpAMD64SUBLloadidx4, ssa.OpAMD64SUBLloadidx8, ssa.OpAMD64SUBQloadidx1, ssa.OpAMD64SUBQloadidx8,
ssa.OpAMD64ANDLloadidx1, ssa.OpAMD64ANDLloadidx4, ssa.OpAMD64ANDLloadidx8, ssa.OpAMD64ANDQloadidx1, ssa.OpAMD64ANDQloadidx8,
@@ -905,17 +891,13 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.From.Reg = r
p.From.Index = i
ssagen.AddAux(&p.From, v)
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
if v.Reg() != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
case ssa.OpAMD64DUFFZERO:
if s.ABI != obj.ABIInternal {
v.Fatalf("MOVOconst can be only used in ABIInternal functions")
}
if !(objabi.Regabi_enabled == 1 && base.Flag.ABIWrap) {
// zeroing X15 manually if wrappers are not used
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
}
off := duffStart(v.AuxInt)
adj := duffAdj(v.AuxInt)
var p *obj.Prog
@@ -929,12 +911,18 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
}
p = s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_ADDR
p.To.Sym = ir.Syms.Duffzero
p.To.Sym = gc.Duffzero
p.To.Offset = off
case ssa.OpAMD64MOVOconst:
if v.AuxInt != 0 {
v.Fatalf("MOVOconst can only do constant=0")
}
r := v.Reg()
opregreg(s, x86.AXORPS, r, r)
case ssa.OpAMD64DUFFCOPY:
p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_ADDR
p.To.Sym = ir.Syms.Duffcopy
p.To.Sym = gc.Duffcopy
if v.AuxInt%16 != 0 {
v.Fatalf("bad DUFFCOPY AuxInt %v", v.AuxInt)
}
@@ -961,7 +949,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
return
}
p := s.Prog(loadByType(v.Type))
ssagen.AddrAuto(&p.From, v.Args[0])
gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@@ -973,37 +961,44 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
ssagen.AddrAuto(&p.To, v)
gc.AddrAuto(&p.To, v)
case ssa.OpAMD64LoweredHasCPUFeature:
p := s.Prog(x86.AMOVBQZX)
p.From.Type = obj.TYPE_MEM
ssagen.AddAux(&p.From, v)
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64LoweredGetClosurePtr:
// Closure pointer is DX.
ssagen.CheckLoweredGetClosurePtr(v)
gc.CheckLoweredGetClosurePtr(v)
case ssa.OpAMD64LoweredGetG:
if objabi.Regabi_enabled == 1 && base.Flag.ABIWrap {
v.Fatalf("LoweredGetG should not appear in new ABI")
}
r := v.Reg()
getgFromTLS(s, r)
case ssa.OpAMD64CALLstatic:
if objabi.Regabi_enabled == 1 && s.ABI == obj.ABI0 && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABIInternal {
// zeroing X15 when entering ABIInternal from ABI0
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
// set G register from TLS
getgFromTLS(s, x86.REG_R14)
// See the comments in cmd/internal/obj/x86/obj6.go
// near CanUse1InsnTLS for a detailed explanation of these instructions.
if x86.CanUse1InsnTLS(gc.Ctxt) {
// MOVQ (TLS), r
p := s.Prog(x86.AMOVQ)
p.From.Type = obj.TYPE_MEM
p.From.Reg = x86.REG_TLS
p.To.Type = obj.TYPE_REG
p.To.Reg = r
} else {
// MOVQ TLS, r
// MOVQ (r)(TLS*1), r
p := s.Prog(x86.AMOVQ)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_TLS
p.To.Type = obj.TYPE_REG
p.To.Reg = r
q := s.Prog(x86.AMOVQ)
q.From.Type = obj.TYPE_MEM
q.From.Reg = r
q.From.Index = x86.REG_TLS
q.From.Scale = 1
q.To.Type = obj.TYPE_REG
q.To.Reg = r
}
s.Call(v)
if objabi.Regabi_enabled == 1 && s.ABI == obj.ABIInternal && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABI0 {
// zeroing X15 when entering ABIInternal from ABI0
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
// set G register from TLS
getgFromTLS(s, x86.REG_R14)
}
case ssa.OpAMD64CALLclosure, ssa.OpAMD64CALLinter:
case ssa.OpAMD64CALLstatic, ssa.OpAMD64CALLclosure, ssa.OpAMD64CALLinter:
s.Call(v)
case ssa.OpAMD64LoweredGetCallerPC:
@@ -1017,12 +1012,12 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
case ssa.OpAMD64LoweredGetCallerSP:
// caller's SP is the address of the first arg
mov := x86.AMOVQ
if types.PtrSize == 4 {
if gc.Widthptr == 4 {
mov = x86.AMOVL
}
p := s.Prog(mov)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = -base.Ctxt.FixedFrameSize() // 0 on amd64, just to be consistent with other architectures
p.From.Offset = -gc.Ctxt.FixedFrameSize() // 0 on amd64, just to be consistent with other architectures
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@@ -1032,28 +1027,36 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
// arg0 is in DI. Set sym to match where regalloc put arg1.
p.To.Sym = ssagen.GCWriteBarrierReg[v.Args[1].Reg()]
p.To.Sym = gc.GCWriteBarrierReg[v.Args[1].Reg()]
case ssa.OpAMD64LoweredPanicBoundsA, ssa.OpAMD64LoweredPanicBoundsB, ssa.OpAMD64LoweredPanicBoundsC:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
s.UseArgs(int64(2 * types.PtrSize)) // space used in callee args area by assembly stubs
p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
s.UseArgs(int64(2 * gc.Widthptr)) // space used in callee args area by assembly stubs
case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL,
ssa.OpAMD64BSWAPQ, ssa.OpAMD64BSWAPL,
ssa.OpAMD64NOTQ, ssa.OpAMD64NOTL:
r := v.Reg()
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
p.To.Reg = r
case ssa.OpAMD64NEGLflags:
r := v.Reg0()
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
p.To.Reg = r
case ssa.OpAMD64BSFQ, ssa.OpAMD64BSRQ, ssa.OpAMD64BSFL, ssa.OpAMD64BSRL, ssa.OpAMD64SQRTSD, ssa.OpAMD64SQRTSS:
case ssa.OpAMD64BSFQ, ssa.OpAMD64BSRQ, ssa.OpAMD64BSFL, ssa.OpAMD64BSRL, ssa.OpAMD64SQRTSD:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
@@ -1061,7 +1064,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
case ssa.OpAMD64BSFQ, ssa.OpAMD64BSRQ:
p.To.Reg = v.Reg0()
case ssa.OpAMD64BSFL, ssa.OpAMD64BSRL, ssa.OpAMD64SQRTSD, ssa.OpAMD64SQRTSS:
case ssa.OpAMD64BSFL, ssa.OpAMD64BSRL, ssa.OpAMD64SQRTSD:
p.To.Reg = v.Reg()
}
case ssa.OpAMD64ROUNDSD:
@@ -1073,7 +1076,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
}
p.From.Offset = val
p.From.Type = obj.TYPE_CONST
p.SetFrom3Reg(v.Args[0].Reg())
p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[0].Reg()})
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64POPCNTQ, ssa.OpAMD64POPCNTL:
@@ -1112,7 +1115,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
ssagen.AddAux(&p.To, v)
gc.AddAux(&p.To, v)
case ssa.OpAMD64SETNEF:
p := s.Prog(v.Op.Asm())
@@ -1161,31 +1164,39 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
base.WarnfAt(v.Pos, "generated nil check")
if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
gc.Warnl(v.Pos, "generated nil check")
}
case ssa.OpAMD64MOVBatomicload, ssa.OpAMD64MOVLatomicload, ssa.OpAMD64MOVQatomicload:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
ssagen.AddAux(&p.From, v)
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
case ssa.OpAMD64XCHGB, ssa.OpAMD64XCHGL, ssa.OpAMD64XCHGQ:
r := v.Reg0()
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output[0] not in same register %s", v.LongString())
}
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Reg0()
p.From.Reg = r
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[1].Reg()
ssagen.AddAux(&p.To, v)
gc.AddAux(&p.To, v)
case ssa.OpAMD64XADDLlock, ssa.OpAMD64XADDQlock:
r := v.Reg0()
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output[0] not in same register %s", v.LongString())
}
s.Prog(x86.ALOCK)
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Reg0()
p.From.Reg = r
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[1].Reg()
ssagen.AddAux(&p.To, v)
gc.AddAux(&p.To, v)
case ssa.OpAMD64CMPXCHGLlock, ssa.OpAMD64CMPXCHGQlock:
if v.Args[1].Reg() != x86.REG_AX {
v.Fatalf("input[1] not in AX %s", v.LongString())
@@ -1196,7 +1207,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
ssagen.AddAux(&p.To, v)
gc.AddAux(&p.To, v)
p = s.Prog(x86.ASETEQ)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
@@ -1207,20 +1218,20 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
ssagen.AddAux(&p.To, v)
gc.AddAux(&p.To, v)
case ssa.OpClobber:
p := s.Prog(x86.AMOVL)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0xdeaddead
p.To.Type = obj.TYPE_MEM
p.To.Reg = x86.REG_SP
ssagen.AddAux(&p.To, v)
gc.AddAux(&p.To, v)
p = s.Prog(x86.AMOVL)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0xdeaddead
p.To.Type = obj.TYPE_MEM
p.To.Reg = x86.REG_SP
ssagen.AddAux(&p.To, v)
gc.AddAux(&p.To, v)
p.To.Offset += 4
default:
v.Fatalf("genValue not implemented: %s", v.LongString())
@@ -1246,22 +1257,22 @@ var blockJump = [...]struct {
ssa.BlockAMD64NAN: {x86.AJPS, x86.AJPC},
}
var eqfJumps = [2][2]ssagen.IndexJump{
var eqfJumps = [2][2]gc.IndexJump{
{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPS, Index: 1}}, // next == b.Succs[0]
{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPC, Index: 0}}, // next == b.Succs[1]
}
var nefJumps = [2][2]ssagen.IndexJump{
var nefJumps = [2][2]gc.IndexJump{
{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPC, Index: 1}}, // next == b.Succs[0]
{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPS, Index: 0}}, // next == b.Succs[1]
}
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockDefer:
// defer returns in rax:
@@ -1274,22 +1285,16 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
p.To.Reg = x86.REG_AX
p = s.Prog(x86.AJNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
case ssa.BlockRet:
s.Prog(obj.ARET)
case ssa.BlockRetJmp:
if objabi.Regabi_enabled == 1 && s.ABI == obj.ABI0 && b.Aux.(*obj.LSym).ABI() == obj.ABIInternal {
// zeroing X15 when entering ABIInternal from ABI0
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
// set G register from TLS
getgFromTLS(s, x86.REG_R14)
}
p := s.Prog(obj.ARET)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN


@@ -5,13 +5,13 @@
package arm
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/internal/obj/arm"
"cmd/internal/objabi"
)
func Init(arch *ssagen.ArchInfo) {
func Init(arch *gc.Arch) {
arch.LinkArch = &arm.Linkarm
arch.REGSP = arm.REGSP
arch.MAXWIDTH = (1 << 32) - 1
@@ -20,7 +20,7 @@ func Init(arch *ssagen.ArchInfo) {
arch.Ginsnop = ginsnop
arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock
}


@@ -5,51 +5,49 @@
package arm
import (
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/types"
"cmd/compile/internal/gc"
"cmd/internal/obj"
"cmd/internal/obj/arm"
)
func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, r0 *uint32) *obj.Prog {
func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, r0 *uint32) *obj.Prog {
if cnt == 0 {
return p
}
if *r0 == 0 {
p = pp.Append(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
*r0 = 1
}
if cnt < int64(4*types.PtrSize) {
for i := int64(0); i < cnt; i += int64(types.PtrSize) {
p = pp.Append(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, 4+off+i)
if cnt < int64(4*gc.Widthptr) {
for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, 4+off+i)
}
} else if cnt <= int64(128*types.PtrSize) {
p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
} else if cnt <= int64(128*gc.Widthptr) {
p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
p.Reg = arm.REGSP
p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p.To.Name = obj.NAME_EXTERN
p.To.Sym = ir.Syms.Duffzero
p.To.Offset = 4 * (128 - cnt/int64(types.PtrSize))
p.To.Sym = gc.Duffzero
p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
} else {
p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
p.Reg = arm.REGSP
p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, arm.REG_R2, 0)
p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, arm.REG_R2, 0)
p.Reg = arm.REG_R1
p = pp.Append(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
p1 := p
p.Scond |= arm.C_PBIT
p = pp.Append(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
p = pp.Appendpp(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
p.Reg = arm.REG_R2
p = pp.Append(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
p.To.SetTarget(p1)
p = pp.Appendpp(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
gc.Patch(p, p1)
}
return p
}
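Every zerorange in this compare follows the same size-tiered dispatch; the arm version above shows it most plainly. A compact restatement with the arm thresholds (the function itself is illustrative only):

package main

import "fmt"

// zeroStrategy restates the tiers zerorange picks between on arm: tiny
// ranges get unrolled stores, mid-size ranges jump into runtime.duffzero
// at a computed offset, and large ranges get the explicit store loop
// (MOVW with the post-increment P bit, then CMP/BNE) shown above.
func zeroStrategy(cnt, widthptr int64) string {
	switch {
	case cnt == 0:
		return "nothing to do"
	case cnt < 4*widthptr:
		return "unrolled MOVW stores"
	case cnt <= 128*widthptr:
		return "jump into runtime.duffzero"
	default:
		return "post-incremented store loop"
	}
}

func main() {
	for _, cnt := range []int64{0, 8, 256, 4096} {
		fmt.Printf("cnt=%4d: %s\n", cnt, zeroStrategy(cnt, 4)) // widthptr is 4 on arm
	}
}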
func ginsnop(pp *objw.Progs) *obj.Prog {
func ginsnop(pp *gc.Progs) *obj.Prog {
p := pp.Prog(arm.AAND)
p.From.Type = obj.TYPE_REG
p.From.Reg = arm.REG_R0


@@ -9,11 +9,9 @@ import (
"math"
"math/bits"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/gc"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/arm"
@@ -93,7 +91,7 @@ func makeshift(reg int16, typ int64, s int64) shift {
}
// genshift generates a Prog for r = r0 op (r1 shifted by n)
func genshift(s *ssagen.State, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
p := s.Prog(as)
p.From.Type = obj.TYPE_SHIFT
p.From.Offset = int64(makeshift(r1, typ, n))
@@ -111,7 +109,7 @@ func makeregshift(r1 int16, typ int64, r2 int16) shift {
}
// genregshift generates a Prog for r = r0 op (r1 shifted by r2)
func genregshift(s *ssagen.State, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
func genregshift(s *gc.SSAGenState, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
p := s.Prog(as)
p.From.Type = obj.TYPE_SHIFT
p.From.Offset = int64(makeregshift(r1, typ, r2))
@@ -145,7 +143,7 @@ func getBFC(v uint32) (uint32, uint32) {
return 0xffffffff, 0
}
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
switch v.Op {
case ssa.OpCopy, ssa.OpARMMOVWreg:
if v.Type.IsMemory() {
@@ -173,6 +171,9 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = y
case ssa.OpARMMOVWnop:
if v.Reg() != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
// nothing to do
case ssa.OpLoadReg:
if v.Type.IsFlags() {
@@ -180,7 +181,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
return
}
p := s.Prog(loadByType(v.Type))
ssagen.AddrAuto(&p.From, v.Args[0])
gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpStoreReg:
@@ -191,7 +192,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
ssagen.AddrAuto(&p.To, v)
gc.AddrAuto(&p.To, v)
case ssa.OpARMADD,
ssa.OpARMADC,
ssa.OpARMSUB,
@@ -279,7 +280,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt >> 8
p.SetFrom3Const(v.AuxInt & 0xff)
p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt & 0xff})
p.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@@ -299,7 +300,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p := s.Prog(arm.ABFC)
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(width)
p.SetFrom3Const(int64(lsb))
p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: int64(lsb)})
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
break
@@ -542,10 +543,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
v.Fatalf("aux is of unknown type %T", v.Aux)
case *obj.LSym:
wantreg = "SB"
ssagen.AddAux(&p.From, v)
case *ir.Name:
gc.AddAux(&p.From, v)
case *gc.Node:
wantreg = "SP"
ssagen.AddAux(&p.From, v)
gc.AddAux(&p.From, v)
case nil:
// No sym, just MOVW $off(SP), R
wantreg = "SP"
@@ -565,7 +566,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
ssagen.AddAux(&p.From, v)
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpARMMOVBstore,
@@ -578,7 +579,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
ssagen.AddAux(&p.To, v)
gc.AddAux(&p.To, v)
case ssa.OpARMMOVWloadidx, ssa.OpARMMOVBUloadidx, ssa.OpARMMOVBloadidx, ssa.OpARMMOVHUloadidx, ssa.OpARMMOVHloadidx:
// this is just shift 0 bits
fallthrough
@@ -654,7 +655,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
ssa.OpARMREV,
ssa.OpARMREV16,
ssa.OpARMRBIT,
ssa.OpARMSQRTF,
ssa.OpARMSQRTD,
ssa.OpARMNEGF,
ssa.OpARMNEGD,
@@ -700,7 +700,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = ir.Syms.Udiv
p.To.Sym = gc.Udiv
case ssa.OpARMLoweredWB:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
@@ -710,39 +710,39 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
s.UseArgs(8) // space used in callee args area by assembly stubs
case ssa.OpARMLoweredPanicExtendA, ssa.OpARMLoweredPanicExtendB, ssa.OpARMLoweredPanicExtendC:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt]
p.To.Sym = gc.ExtendCheckFunc[v.AuxInt]
s.UseArgs(12) // space used in callee args area by assembly stubs
case ssa.OpARMDUFFZERO:
p := s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = ir.Syms.Duffzero
p.To.Sym = gc.Duffzero
p.To.Offset = v.AuxInt
case ssa.OpARMDUFFCOPY:
p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = ir.Syms.Duffcopy
p.To.Sym = gc.Duffcopy
p.To.Offset = v.AuxInt
case ssa.OpARMLoweredNilCheck:
// Issue a load which will fault if arg is nil.
p := s.Prog(arm.AMOVB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
ssagen.AddAux(&p.From, v)
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = arm.REGTMP
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
base.WarnfAt(v.Pos, "generated nil check")
if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
gc.Warnl(v.Pos, "generated nil check")
}
case ssa.OpARMLoweredZero:
// MOVW.P Rarg2, 4(R1)
@@ -777,7 +777,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p2.Reg = arm.REG_R1
p3 := s.Prog(arm.ABLE)
p3.To.Type = obj.TYPE_BRANCH
p3.To.SetTarget(p)
gc.Patch(p3, p)
case ssa.OpARMLoweredMove:
// MOVW.P 4(R1), Rtmp
// MOVW.P Rtmp, 4(R2)
@@ -818,7 +818,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p3.Reg = arm.REG_R1
p4 := s.Prog(arm.ABLE)
p4.To.Type = obj.TYPE_BRANCH
p4.To.SetTarget(p)
gc.Patch(p4, p)
case ssa.OpARMEqual,
ssa.OpARMNotEqual,
ssa.OpARMLessThan,
@@ -844,12 +844,12 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Reg = v.Reg()
case ssa.OpARMLoweredGetClosurePtr:
// Closure pointer is R7 (arm.REGCTXT).
ssagen.CheckLoweredGetClosurePtr(v)
gc.CheckLoweredGetClosurePtr(v)
case ssa.OpARMLoweredGetCallerSP:
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(arm.AMOVW)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = -base.Ctxt.FixedFrameSize()
p.From.Offset = -gc.Ctxt.FixedFrameSize()
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@@ -899,24 +899,24 @@ var blockJump = map[ssa.BlockKind]struct {
}
// To model a 'LEnoov' ('<=' without overflow checking) branching
var leJumps = [2][2]ssagen.IndexJump{
var leJumps = [2][2]gc.IndexJump{
{{Jump: arm.ABEQ, Index: 0}, {Jump: arm.ABPL, Index: 1}}, // next == b.Succs[0]
{{Jump: arm.ABMI, Index: 0}, {Jump: arm.ABEQ, Index: 0}}, // next == b.Succs[1]
}
// To model a 'GTnoov' ('>' without overflow checking) branching
var gtJumps = [2][2]ssagen.IndexJump{
var gtJumps = [2][2]gc.IndexJump{
{{Jump: arm.ABMI, Index: 1}, {Jump: arm.ABEQ, Index: 1}}, // next == b.Succs[0]
{{Jump: arm.ABEQ, Index: 1}, {Jump: arm.ABPL, Index: 0}}, // next == b.Succs[1]
}
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockDefer:
@@ -929,11 +929,11 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
p.Reg = arm.REG_R0
p = s.Prog(arm.ABNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:


@@ -5,12 +5,12 @@
package arm64
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/internal/obj/arm64"
)
func Init(arch *ssagen.ArchInfo) {
func Init(arch *gc.Arch) {
arch.LinkArch = &arm64.Linkarm64
arch.REGSP = arm64.REGSP
arch.MAXWIDTH = 1 << 50
@@ -20,7 +20,7 @@ func Init(arch *ssagen.ArchInfo) {
arch.Ginsnop = ginsnop
arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock
}


@@ -5,9 +5,7 @@
package arm64
import (
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/types"
"cmd/compile/internal/gc"
"cmd/internal/obj"
"cmd/internal/obj/arm64"
"cmd/internal/objabi"
@@ -24,52 +22,52 @@ func padframe(frame int64) int64 {
return frame
}
func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
if cnt == 0 {
return p
}
if cnt < int64(4*types.PtrSize) {
for i := int64(0); i < cnt; i += int64(types.PtrSize) {
p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off+i)
if cnt < int64(4*gc.Widthptr) {
for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off+i)
}
} else if cnt <= int64(128*types.PtrSize) && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend
if cnt%(2*int64(types.PtrSize)) != 0 {
p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off)
off += int64(types.PtrSize)
cnt -= int64(types.PtrSize)
} else if cnt <= int64(128*gc.Widthptr) && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend
if cnt%(2*int64(gc.Widthptr)) != 0 {
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off)
off += int64(gc.Widthptr)
cnt -= int64(gc.Widthptr)
}
p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REG_R20, 0)
p = pp.Append(p, arm64.AADD, obj.TYPE_CONST, 0, 8+off, obj.TYPE_REG, arm64.REG_R20, 0)
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REG_R20, 0)
p = pp.Appendpp(p, arm64.AADD, obj.TYPE_CONST, 0, 8+off, obj.TYPE_REG, arm64.REG_R20, 0)
p.Reg = arm64.REG_R20
p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p.To.Name = obj.NAME_EXTERN
p.To.Sym = ir.Syms.Duffzero
p.To.Offset = 4 * (64 - cnt/(2*int64(types.PtrSize)))
p.To.Sym = gc.Duffzero
p.To.Offset = 4 * (64 - cnt/(2*int64(gc.Widthptr)))
} else {
// Not using REGTMP, so this is async preemptible (async preemption clobbers REGTMP).
// We are at the function entry, where no register is live, so it is okay to clobber
// other registers
const rtmp = arm64.REG_R20
p = pp.Append(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, rtmp, 0)
p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
p = pp.Append(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT1, 0)
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, rtmp, 0)
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
p = pp.Appendpp(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT1, 0)
p.Reg = arm64.REGRT1
p = pp.Append(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, rtmp, 0)
p = pp.Append(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT2, 0)
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, rtmp, 0)
p = pp.Appendpp(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT2, 0)
p.Reg = arm64.REGRT1
p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(types.PtrSize))
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(gc.Widthptr))
p.Scond = arm64.C_XPRE
p1 := p
p = pp.Append(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0)
p = pp.Appendpp(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0)
p.Reg = arm64.REGRT2
p = pp.Append(p, arm64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
p.To.SetTarget(p1)
p = pp.Appendpp(p, arm64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
gc.Patch(p, p1)
}
return p
}
func ginsnop(pp *objw.Progs) *obj.Prog {
func ginsnop(pp *gc.Progs) *obj.Prog {
p := pp.Prog(arm64.AHINT)
p.From.Type = obj.TYPE_CONST
return p


@@ -7,11 +7,9 @@ package arm64
import (
"math"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/gc"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/arm64"
@@ -83,7 +81,7 @@ func makeshift(reg int16, typ int64, s int64) int64 {
}
// genshift generates a Prog for r = r0 op (r1 shifted by n)
func genshift(s *ssagen.State, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
p := s.Prog(as)
p.From.Type = obj.TYPE_SHIFT
p.From.Offset = makeshift(r1, typ, n)
@@ -100,11 +98,9 @@ func genIndexedOperand(v *ssa.Value) obj.Addr {
// Reg: base register, Index: (shifted) index register
mop := obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[0].Reg()}
switch v.Op {
case ssa.OpARM64MOVDloadidx8, ssa.OpARM64MOVDstoreidx8, ssa.OpARM64MOVDstorezeroidx8,
ssa.OpARM64FMOVDloadidx8, ssa.OpARM64FMOVDstoreidx8:
case ssa.OpARM64MOVDloadidx8, ssa.OpARM64MOVDstoreidx8, ssa.OpARM64MOVDstorezeroidx8:
mop.Index = arm64.REG_LSL | 3<<5 | v.Args[1].Reg()&31
case ssa.OpARM64MOVWloadidx4, ssa.OpARM64MOVWUloadidx4, ssa.OpARM64MOVWstoreidx4, ssa.OpARM64MOVWstorezeroidx4,
ssa.OpARM64FMOVSloadidx4, ssa.OpARM64FMOVSstoreidx4:
case ssa.OpARM64MOVWloadidx4, ssa.OpARM64MOVWUloadidx4, ssa.OpARM64MOVWstoreidx4, ssa.OpARM64MOVWstorezeroidx4:
mop.Index = arm64.REG_LSL | 2<<5 | v.Args[1].Reg()&31
case ssa.OpARM64MOVHloadidx2, ssa.OpARM64MOVHUloadidx2, ssa.OpARM64MOVHstoreidx2, ssa.OpARM64MOVHstorezeroidx2:
mop.Index = arm64.REG_LSL | 1<<5 | v.Args[1].Reg()&31
@@ -114,7 +110,7 @@ func genIndexedOperand(v *ssa.Value) obj.Addr {
return mop
}
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
switch v.Op {
case ssa.OpCopy, ssa.OpARM64MOVDreg:
if v.Type.IsMemory() {
@@ -142,6 +138,9 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = y
case ssa.OpARM64MOVDnop:
if v.Reg() != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
// nothing to do
case ssa.OpLoadReg:
if v.Type.IsFlags() {
@@ -149,7 +148,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
return
}
p := s.Prog(loadByType(v.Type))
ssagen.AddrAuto(&p.From, v.Args[0])
gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpStoreReg:
@@ -160,7 +159,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
ssagen.AddrAuto(&p.To, v)
gc.AddrAuto(&p.To, v)
case ssa.OpARM64ADD,
ssa.OpARM64SUB,
ssa.OpARM64AND,
@@ -229,7 +228,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.Reg = ra
p.From.Type = obj.TYPE_REG
p.From.Reg = rm
p.SetFrom3Reg(rn)
p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: rn})
p.To.Type = obj.TYPE_REG
p.To.Reg = rt
case ssa.OpARM64ADDconst,
@@ -292,7 +291,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.SetFrom3Reg(v.Args[0].Reg())
p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[0].Reg()})
p.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@@ -394,10 +393,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
v.Fatalf("aux is of unknown type %T", v.Aux)
case *obj.LSym:
wantreg = "SB"
ssagen.AddAux(&p.From, v)
case *ir.Name:
gc.AddAux(&p.From, v)
case *gc.Node:
wantreg = "SP"
ssagen.AddAux(&p.From, v)
gc.AddAux(&p.From, v)
case nil:
// No sym, just MOVD $off(SP), R
wantreg = "SP"
@@ -418,7 +417,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
ssagen.AddAux(&p.From, v)
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpARM64MOVBloadidx,
@@ -434,9 +433,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
ssa.OpARM64MOVHUloadidx2,
ssa.OpARM64MOVWloadidx4,
ssa.OpARM64MOVWUloadidx4,
ssa.OpARM64MOVDloadidx8,
ssa.OpARM64FMOVDloadidx8,
ssa.OpARM64FMOVSloadidx4:
ssa.OpARM64MOVDloadidx8:
p := s.Prog(v.Op.Asm())
p.From = genIndexedOperand(v)
p.To.Type = obj.TYPE_REG
@@ -447,7 +444,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
ssagen.AddAux(&p.From, v)
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
case ssa.OpARM64MOVBstore,
@@ -464,7 +461,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
ssagen.AddAux(&p.To, v)
gc.AddAux(&p.To, v)
case ssa.OpARM64MOVBstoreidx,
ssa.OpARM64MOVHstoreidx,
ssa.OpARM64MOVWstoreidx,
@@ -473,9 +470,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
ssa.OpARM64FMOVDstoreidx,
ssa.OpARM64MOVHstoreidx2,
ssa.OpARM64MOVWstoreidx4,
ssa.OpARM64FMOVSstoreidx4,
ssa.OpARM64MOVDstoreidx8,
ssa.OpARM64FMOVDstoreidx8:
ssa.OpARM64MOVDstoreidx8:
p := s.Prog(v.Op.Asm())
p.To = genIndexedOperand(v)
p.From.Type = obj.TYPE_REG
@@ -487,7 +482,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.From.Offset = int64(v.Args[2].Reg())
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
ssagen.AddAux(&p.To, v)
gc.AddAux(&p.To, v)
case ssa.OpARM64MOVBstorezero,
ssa.OpARM64MOVHstorezero,
ssa.OpARM64MOVWstorezero,
@@ -497,7 +492,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.From.Reg = arm64.REGZERO
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
ssagen.AddAux(&p.To, v)
gc.AddAux(&p.To, v)
case ssa.OpARM64MOVBstorezeroidx,
ssa.OpARM64MOVHstorezeroidx,
ssa.OpARM64MOVWstorezeroidx,
@@ -516,16 +511,20 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.From.Offset = int64(arm64.REGZERO)
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
ssagen.AddAux(&p.To, v)
gc.AddAux(&p.To, v)
case ssa.OpARM64BFI,
ssa.OpARM64BFXIL:
r := v.Reg()
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt >> 8
p.SetFrom3Const(v.AuxInt & 0xff)
p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt & 0xff})
p.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
p.To.Reg = r
case ssa.OpARM64SBFIZ,
ssa.OpARM64SBFX,
ssa.OpARM64UBFIZ,
@@ -533,7 +532,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt >> 8
p.SetFrom3Const(v.AuxInt & 0xff)
p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt & 0xff})
p.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@@ -581,7 +580,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p2.From.Type = obj.TYPE_REG
p2.From.Reg = arm64.REGTMP
p2.To.Type = obj.TYPE_BRANCH
p2.To.SetTarget(p)
gc.Patch(p2, p)
case ssa.OpARM64LoweredAtomicExchange64Variant,
ssa.OpARM64LoweredAtomicExchange32Variant:
swap := arm64.ASWPALD
@@ -635,7 +634,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p3.From.Type = obj.TYPE_REG
p3.From.Reg = arm64.REGTMP
p3.To.Type = obj.TYPE_BRANCH
p3.To.SetTarget(p)
gc.Patch(p3, p)
case ssa.OpARM64LoweredAtomicAdd64Variant,
ssa.OpARM64LoweredAtomicAdd32Variant:
// LDADDAL Rarg1, (Rarg0), Rout
@@ -699,13 +698,13 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p4.From.Type = obj.TYPE_REG
p4.From.Reg = arm64.REGTMP
p4.To.Type = obj.TYPE_BRANCH
p4.To.SetTarget(p)
gc.Patch(p4, p)
p5 := s.Prog(arm64.ACSET)
p5.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
p5.From.Reg = arm64.COND_EQ
p5.To.Type = obj.TYPE_REG
p5.To.Reg = out
p2.To.SetTarget(p5)
gc.Patch(p2, p5)
case ssa.OpARM64LoweredAtomicCas64Variant,
ssa.OpARM64LoweredAtomicCas32Variant:
// Rarg0: ptr
@@ -793,7 +792,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p3.From.Type = obj.TYPE_REG
p3.From.Reg = arm64.REGTMP
p3.To.Type = obj.TYPE_BRANCH
p3.To.SetTarget(p)
gc.Patch(p3, p)
case ssa.OpARM64LoweredAtomicAnd8Variant,
ssa.OpARM64LoweredAtomicAnd32Variant:
atomic_clear := arm64.ALDCLRALW
@@ -893,7 +892,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
ssa.OpARM64FMOVSgpfp,
ssa.OpARM64FNEGS,
ssa.OpARM64FNEGD,
ssa.OpARM64FSQRTS,
ssa.OpARM64FSQRTD,
ssa.OpARM64FCVTZSSW,
ssa.OpARM64FCVTZSDW,
@@ -953,7 +951,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
p.From.Reg = condBits[ssa.Op(v.AuxInt)]
p.Reg = v.Args[0].Reg()
p.SetFrom3Reg(r1)
p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: r1})
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpARM64DUFFZERO:
@@ -961,7 +959,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p := s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = ir.Syms.Duffzero
p.To.Sym = gc.Duffzero
p.To.Offset = v.AuxInt
case ssa.OpARM64LoweredZero:
// STP.P (ZR,ZR), 16(R16)
@@ -982,12 +980,12 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p2.Reg = arm64.REG_R16
p3 := s.Prog(arm64.ABLE)
p3.To.Type = obj.TYPE_BRANCH
p3.To.SetTarget(p)
gc.Patch(p3, p)
case ssa.OpARM64DUFFCOPY:
p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = ir.Syms.Duffcopy
p.To.Sym = gc.Duffcopy
p.To.Offset = v.AuxInt
case ssa.OpARM64LoweredMove:
// MOVD.P 8(R16), Rtmp
@@ -1015,7 +1013,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p3.Reg = arm64.REG_R16
p4 := s.Prog(arm64.ABLE)
p4.To.Type = obj.TYPE_BRANCH
p4.To.SetTarget(p)
gc.Patch(p4, p)
case ssa.OpARM64CALLstatic, ssa.OpARM64CALLclosure, ssa.OpARM64CALLinter:
s.Call(v)
case ssa.OpARM64LoweredWB:
@@ -1027,21 +1025,21 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
s.UseArgs(16) // space used in callee args area by assembly stubs
case ssa.OpARM64LoweredNilCheck:
// Issue a load which will fault if arg is nil.
p := s.Prog(arm64.AMOVB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
ssagen.AddAux(&p.From, v)
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = arm64.REGTMP
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Line==1 in generated wrappers
base.WarnfAt(v.Pos, "generated nil check")
if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Line==1 in generated wrappers
gc.Warnl(v.Pos, "generated nil check")
}
case ssa.OpARM64Equal,
ssa.OpARM64NotEqual,
@@ -1069,12 +1067,12 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Reg = v.Reg()
case ssa.OpARM64LoweredGetClosurePtr:
// Closure pointer is R26 (arm64.REGCTXT).
ssagen.CheckLoweredGetClosurePtr(v)
gc.CheckLoweredGetClosurePtr(v)
case ssa.OpARM64LoweredGetCallerSP:
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(arm64.AMOVD)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = -base.Ctxt.FixedFrameSize()
p.From.Offset = -gc.Ctxt.FixedFrameSize()
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@@ -1144,24 +1142,24 @@ var blockJump = map[ssa.BlockKind]struct {
}
// To model a 'LEnoov' ('<=' without overflow checking) branching
var leJumps = [2][2]ssagen.IndexJump{
var leJumps = [2][2]gc.IndexJump{
{{Jump: arm64.ABEQ, Index: 0}, {Jump: arm64.ABPL, Index: 1}}, // next == b.Succs[0]
{{Jump: arm64.ABMI, Index: 0}, {Jump: arm64.ABEQ, Index: 0}}, // next == b.Succs[1]
}
// To model a 'GTnoov' ('>' without overflow checking) branching
var gtJumps = [2][2]ssagen.IndexJump{
var gtJumps = [2][2]gc.IndexJump{
{{Jump: arm64.ABMI, Index: 1}, {Jump: arm64.ABEQ, Index: 1}}, // next == b.Succs[0]
{{Jump: arm64.ABEQ, Index: 1}, {Jump: arm64.ABPL, Index: 0}}, // next == b.Succs[1]
}
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockDefer:
@@ -1174,11 +1172,11 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
p.Reg = arm64.REG_R0
p = s.Prog(arm64.ABNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:


@@ -1,195 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Debug arguments, set by -d flag.
package base
import (
"fmt"
"log"
"os"
"reflect"
"strconv"
"strings"
"cmd/internal/objabi"
)
// Debug holds the parsed debugging configuration values.
var Debug = DebugFlags{
Fieldtrack: &objabi.Fieldtrack_enabled,
}
// DebugFlags defines the debugging configuration values (see var Debug).
// Each struct field is a different value, named for the lower-case of the field name.
// Each field must be an int or string and must have a `help` struct tag.
//
// The -d option takes a comma-separated list of settings.
// Each setting is name=value; for ints, name is short for name=1.
type DebugFlags struct {
Append int `help:"print information about append compilation"`
Checkptr int `help:"instrument unsafe pointer conversions"`
Closure int `help:"print information about closure compilation"`
DclStack int `help:"run internal dclstack check"`
Defer int `help:"print information about defer compilation"`
DisableNil int `help:"disable nil checks"`
DumpPtrs int `help:"show Node pointers values in dump output"`
DwarfInl int `help:"print information about DWARF inlined function creation"`
Export int `help:"print export data"`
Fieldtrack *int `help:"enable field tracking"`
GCProg int `help:"print dump of GC programs"`
InlFuncsWithClosures int `help:"allow functions with closures to be inlined"`
Libfuzzer int `help:"enable coverage instrumentation for libfuzzer"`
LocationLists int `help:"print information about DWARF location list creation"`
Nil int `help:"print information about nil checks"`
PCTab string `help:"print named pc-value table"`
Panic int `help:"show all compiler panics"`
Slice int `help:"print information about slice compilation"`
SoftFloat int `help:"force compiler to emit soft-float code"`
TypeAssert int `help:"print information about type assertion inlining"`
TypecheckInl int `help:"eager typechecking of inline function bodies"`
WB int `help:"print information about write barriers"`
ABIWrap int `help:"print information about ABI wrapper generation"`
any bool // set when any of the values have been set
}
// Any reports whether any of the debug flags have been set.
func (d *DebugFlags) Any() bool { return d.any }
type debugField struct {
name string
help string
val interface{} // *int or *string
}
var debugTab []debugField
func init() {
v := reflect.ValueOf(&Debug).Elem()
t := v.Type()
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
if f.Name == "any" {
continue
}
name := strings.ToLower(f.Name)
help := f.Tag.Get("help")
if help == "" {
panic(fmt.Sprintf("base.Debug.%s is missing help text", f.Name))
}
ptr := v.Field(i).Addr().Interface()
switch ptr.(type) {
default:
panic(fmt.Sprintf("base.Debug.%s has invalid type %v (must be int or string)", f.Name, f.Type))
case *int, *string:
// ok
case **int:
ptr = *ptr.(**int) // record the *int itself
}
debugTab = append(debugTab, debugField{name, help, ptr})
}
}
// DebugSSA is called to set a -d ssa/... option.
// If nil, those options are reported as invalid options.
// If DebugSSA returns a non-empty string, that text is reported as a compiler error.
var DebugSSA func(phase, flag string, val int, valString string) string
// parseDebug parses the -d debug string argument.
func parseDebug(debugstr string) {
// parse -d argument
if debugstr == "" {
return
}
Debug.any = true
Split:
for _, name := range strings.Split(debugstr, ",") {
if name == "" {
continue
}
// display help about the -d option itself and quit
if name == "help" {
fmt.Print(debugHelpHeader)
maxLen := len("ssa/help")
for _, t := range debugTab {
if len(t.name) > maxLen {
maxLen = len(t.name)
}
}
for _, t := range debugTab {
fmt.Printf("\t%-*s\t%s\n", maxLen, t.name, t.help)
}
// ssa options have their own help
fmt.Printf("\t%-*s\t%s\n", maxLen, "ssa/help", "print help about SSA debugging")
fmt.Print(debugHelpFooter)
os.Exit(0)
}
val, valstring, haveInt := 1, "", true
if i := strings.IndexAny(name, "=:"); i >= 0 {
var err error
name, valstring = name[:i], name[i+1:]
val, err = strconv.Atoi(valstring)
if err != nil {
val, haveInt = 1, false
}
}
for _, t := range debugTab {
if t.name != name {
continue
}
switch vp := t.val.(type) {
case nil:
// Ignore
case *string:
*vp = valstring
case *int:
if !haveInt {
log.Fatalf("invalid debug value %v", name)
}
*vp = val
default:
panic("bad debugtab type")
}
continue Split
}
// special case for ssa for now
if DebugSSA != nil && strings.HasPrefix(name, "ssa/") {
// expect form ssa/phase/flag
// e.g. -d=ssa/generic_cse/time
// _ in phase name also matches space
phase := name[4:]
flag := "debug" // default flag is debug
if i := strings.Index(phase, "/"); i >= 0 {
flag = phase[i+1:]
phase = phase[:i]
}
err := DebugSSA(phase, flag, val, valstring)
if err != "" {
log.Fatalf(err)
}
continue Split
}
log.Fatalf("unknown debug key -d %s\n", name)
}
}
const debugHelpHeader = `usage: -d arg[,arg]* and arg is <key>[=<value>]
<key> is one of:
`
const debugHelpFooter = `
<value> is key-specific.
Key "checkptr" supports values:
"0": instrumentation disabled
"1": conversions involving unsafe.Pointer are instrumented
"2": conversions to unsafe.Pointer force heap allocation
Key "pctab" supports values:
"pctospadj", "pctofile", "pctoline", "pctoinline", "pctopcdata"
`
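The grammar parseDebug accepts is terser than the help text suggests: each comma-separated setting is name, name=value, or name:value, where a bare name means name=1 and a non-integer value is kept as a string. Below is a minimal standalone sketch of just that per-setting rule, assuming the debugTab lookup and the ssa/phase/flag escape hatch are handled elsewhere; it is illustrative, not the compiler's code.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseOne mirrors the per-setting logic of parseDebug: a bare name
// counts as name=1; after '=' or ':' the value is parsed as an int
// when possible and otherwise kept as a string.
func parseOne(setting string) (name string, val int, valString string, haveInt bool) {
	name, val, haveInt = setting, 1, true
	if i := strings.IndexAny(setting, "=:"); i >= 0 {
		name, valString = setting[:i], setting[i+1:]
		if v, err := strconv.Atoi(valString); err == nil {
			val = v
		} else {
			haveInt = false
		}
	}
	return
}

func main() {
	for _, s := range strings.Split("checkptr=2,panic,pctab=pctoline", ",") {
		name, val, vs, ok := parseOne(s)
		fmt.Printf("%-8s int=%v val=%d str=%q\n", name, ok, val, vs)
	}
}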

View File

@@ -1,461 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package base
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"reflect"
"runtime"
"strings"
"cmd/internal/objabi"
"cmd/internal/sys"
)
func usage() {
fmt.Fprintf(os.Stderr, "usage: compile [options] file.go...\n")
objabi.Flagprint(os.Stderr)
Exit(2)
}
// Flag holds the parsed command-line flags.
// See ParseFlag for non-zero defaults.
var Flag CmdFlags
// A CountFlag is a counting integer flag.
// It accepts -name=value to set the value directly,
// but it also accepts -name with no =value to increment the count.
type CountFlag int
// CmdFlags defines the command-line flags (see var Flag).
// Each struct field is a different flag, by default named for the lower-case of the field name.
// If the flag name is a single letter, the default flag name is left upper-case.
// If the flag name is "Lower" followed by a single letter, the default flag name is the lower-case of the last letter.
//
// If this default flag name can't be made right, the `flag` struct tag can be used to replace it,
// but this should be done only in exceptional circumstances: it helps everyone if the flag name
// is obvious from the field name when the flag is used elsewhere in the compiler sources.
// The `flag:"-"` struct tag makes a field invisible to the flag logic and should also be used sparingly.
//
// Each field must have a `help` struct tag giving the flag help message.
//
// The allowed field types are bool, int, string, pointers to those (for values stored elsewhere),
// CountFlag (for a counting flag), and func(string) (for a flag that uses special code for parsing).
type CmdFlags struct {
// Single letters
B CountFlag "help:\"disable bounds checking\""
C CountFlag "help:\"disable printing of columns in error messages\""
D string "help:\"set relative `path` for local imports\""
E CountFlag "help:\"debug symbol export\""
G CountFlag "help:\"accept generic code\""
I func(string) "help:\"add `directory` to import search path\""
K CountFlag "help:\"debug missing line numbers\""
L CountFlag "help:\"show full file names in error messages\""
N CountFlag "help:\"disable optimizations\""
S CountFlag "help:\"print assembly listing\""
// V is added by objabi.AddVersionFlag
W CountFlag "help:\"debug parse tree after type checking\""
LowerC int "help:\"concurrency during compilation (1 means no concurrency)\""
LowerD func(string) "help:\"enable debugging settings; try -d help\""
LowerE CountFlag "help:\"no limit on number of errors reported\""
LowerH CountFlag "help:\"halt on error\""
LowerJ CountFlag "help:\"debug runtime-initialized variables\""
LowerL CountFlag "help:\"disable inlining\""
LowerM CountFlag "help:\"print optimization decisions\""
LowerO string "help:\"write output to `file`\""
LowerP *string "help:\"set expected package import `path`\"" // &Ctxt.Pkgpath, set below
LowerR CountFlag "help:\"debug generated wrappers\""
LowerT bool "help:\"enable tracing for debugging the compiler\""
LowerW CountFlag "help:\"debug type checking\""
LowerV *bool "help:\"increase debug verbosity\""
// Special characters
Percent int "flag:\"%\" help:\"debug non-static initializers\""
CompilingRuntime bool "flag:\"+\" help:\"compiling runtime\""
// Longer names
ABIWrap bool "help:\"enable generation of ABI wrappers\""
ABIWrapLimit int "help:\"emit at most N ABI wrappers (for debugging)\""
AsmHdr string "help:\"write assembly header to `file`\""
Bench string "help:\"append benchmark times to `file`\""
BlockProfile string "help:\"write block profile to `file`\""
BuildID string "help:\"record `id` as the build id in the export metadata\""
CPUProfile string "help:\"write cpu profile to `file`\""
Complete bool "help:\"compiling complete package (no C or assembly)\""
Dwarf bool "help:\"generate DWARF symbols\""
DwarfBASEntries *bool "help:\"use base address selection entries in DWARF\"" // &Ctxt.UseBASEntries, set below
DwarfLocationLists *bool "help:\"add location lists to DWARF in optimized mode\"" // &Ctxt.Flag_locationlists, set below
Dynlink *bool "help:\"support references to Go symbols defined in other shared libraries\"" // &Ctxt.Flag_dynlink, set below
EmbedCfg func(string) "help:\"read go:embed configuration from `file`\""
GenDwarfInl int "help:\"generate DWARF inline info records\"" // 0=disabled, 1=funcs, 2=funcs+formals/locals
GoVersion string "help:\"required version of the runtime\""
ImportCfg func(string) "help:\"read import configuration from `file`\""
ImportMap func(string) "help:\"add `definition` of the form source=actual to import map\""
InstallSuffix string "help:\"set pkg directory `suffix`\""
JSON string "help:\"version,file for JSON compiler/optimizer detail output\""
Lang string "help:\"Go language version source code expects\""
LinkObj string "help:\"write linker-specific object to `file`\""
LinkShared *bool "help:\"generate code that will be linked against Go shared libraries\"" // &Ctxt.Flag_linkshared, set below
Live CountFlag "help:\"debug liveness analysis\""
MSan bool "help:\"build code compatible with C/C++ memory sanitizer\""
MemProfile string "help:\"write memory profile to `file`\""
MemProfileRate int64 "help:\"set runtime.MemProfileRate to `rate`\""
MutexProfile string "help:\"write mutex profile to `file`\""
NoLocalImports bool "help:\"reject local (relative) imports\""
Pack bool "help:\"write to file.a instead of file.o\""
Race bool "help:\"enable race detector\""
Shared *bool "help:\"generate code that can be linked into a shared library\"" // &Ctxt.Flag_shared, set below
SmallFrames bool "help:\"reduce the size limit for stack allocated objects\"" // small stacks, to diagnose GC latency; see golang.org/issue/27732
Spectre string "help:\"enable spectre mitigations in `list` (all, index, ret)\""
Std bool "help:\"compiling standard library\""
SymABIs string "help:\"read symbol ABIs from `file`\""
TraceProfile string "help:\"write an execution trace to `file`\""
TrimPath string "help:\"remove `prefix` from recorded source file paths\""
WB bool "help:\"enable write barrier\"" // TODO: remove
// Configuration derived from flags; not a flag itself.
Cfg struct {
Embed struct { // set by -embedcfg
Patterns map[string][]string
Files map[string]string
}
ImportDirs []string // appended to by -I
ImportMap map[string]string // set by -importmap OR -importcfg
PackageFile map[string]string // set by -importcfg; nil means not in use
SpectreIndex bool // set by -spectre=index or -spectre=all
// Whether we are adding any sort of code instrumentation, such as
// when the race detector is enabled.
Instrumenting bool
}
}
// ParseFlags parses the command-line flags into Flag.
func ParseFlags() {
Flag.I = addImportDir
Flag.LowerC = 1
Flag.LowerD = parseDebug
Flag.LowerP = &Ctxt.Pkgpath
Flag.LowerV = &Ctxt.Debugvlog
Flag.ABIWrap = objabi.Regabi_enabled != 0
Flag.Dwarf = objabi.GOARCH != "wasm"
Flag.DwarfBASEntries = &Ctxt.UseBASEntries
Flag.DwarfLocationLists = &Ctxt.Flag_locationlists
*Flag.DwarfLocationLists = true
Flag.Dynlink = &Ctxt.Flag_dynlink
Flag.EmbedCfg = readEmbedCfg
Flag.GenDwarfInl = 2
Flag.ImportCfg = readImportCfg
Flag.ImportMap = addImportMap
Flag.LinkShared = &Ctxt.Flag_linkshared
Flag.Shared = &Ctxt.Flag_shared
Flag.WB = true
Debug.InlFuncsWithClosures = 1
Flag.Cfg.ImportMap = make(map[string]string)
objabi.AddVersionFlag() // -V
registerFlags()
objabi.Flagparse(usage)
if Flag.MSan && !sys.MSanSupported(objabi.GOOS, objabi.GOARCH) {
log.Fatalf("%s/%s does not support -msan", objabi.GOOS, objabi.GOARCH)
}
if Flag.Race && !sys.RaceDetectorSupported(objabi.GOOS, objabi.GOARCH) {
log.Fatalf("%s/%s does not support -race", objabi.GOOS, objabi.GOARCH)
}
if (*Flag.Shared || *Flag.Dynlink || *Flag.LinkShared) && !Ctxt.Arch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X) {
log.Fatalf("%s/%s does not support -shared", objabi.GOOS, objabi.GOARCH)
}
parseSpectre(Flag.Spectre) // left as string for RecordFlags
Ctxt.Flag_shared = Ctxt.Flag_dynlink || Ctxt.Flag_shared
Ctxt.Flag_optimize = Flag.N == 0
Ctxt.Debugasm = int(Flag.S)
if flag.NArg() < 1 {
usage()
}
if Flag.GoVersion != "" && Flag.GoVersion != runtime.Version() {
fmt.Printf("compile: version %q does not match go tool version %q\n", runtime.Version(), Flag.GoVersion)
Exit(2)
}
if Flag.LowerO == "" {
p := flag.Arg(0)
if i := strings.LastIndex(p, "/"); i >= 0 {
p = p[i+1:]
}
if runtime.GOOS == "windows" {
if i := strings.LastIndex(p, `\`); i >= 0 {
p = p[i+1:]
}
}
if i := strings.LastIndex(p, "."); i >= 0 {
p = p[:i]
}
suffix := ".o"
if Flag.Pack {
suffix = ".a"
}
Flag.LowerO = p + suffix
}
if Flag.Race && Flag.MSan {
log.Fatal("cannot use both -race and -msan")
}
if Flag.Race || Flag.MSan {
// -race and -msan imply -d=checkptr for now.
Debug.Checkptr = 1
}
if Flag.CompilingRuntime && Flag.N != 0 {
log.Fatal("cannot disable optimizations while compiling runtime")
}
if Flag.LowerC < 1 {
log.Fatalf("-c must be at least 1, got %d", Flag.LowerC)
}
if Flag.LowerC > 1 && !concurrentBackendAllowed() {
log.Fatalf("cannot use concurrent backend compilation with provided flags; invoked as %v", os.Args)
}
if Flag.CompilingRuntime {
// Runtime can't use -d=checkptr, at least not yet.
Debug.Checkptr = 0
// Fuzzing the runtime isn't interesting either.
Debug.Libfuzzer = 0
}
// set via a -d flag
Ctxt.Debugpcln = Debug.PCTab
}
// registerFlags adds flag registrations for all the fields in Flag.
// See the comment on type CmdFlags for the rules.
func registerFlags() {
var (
boolType = reflect.TypeOf(bool(false))
intType = reflect.TypeOf(int(0))
stringType = reflect.TypeOf(string(""))
ptrBoolType = reflect.TypeOf(new(bool))
ptrIntType = reflect.TypeOf(new(int))
ptrStringType = reflect.TypeOf(new(string))
countType = reflect.TypeOf(CountFlag(0))
funcType = reflect.TypeOf((func(string))(nil))
)
v := reflect.ValueOf(&Flag).Elem()
t := v.Type()
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
if f.Name == "Cfg" {
continue
}
var name string
if len(f.Name) == 1 {
name = f.Name
} else if len(f.Name) == 6 && f.Name[:5] == "Lower" && 'A' <= f.Name[5] && f.Name[5] <= 'Z' {
name = string(rune(f.Name[5] + 'a' - 'A'))
} else {
name = strings.ToLower(f.Name)
}
if tag := f.Tag.Get("flag"); tag != "" {
name = tag
}
help := f.Tag.Get("help")
if help == "" {
panic(fmt.Sprintf("base.Flag.%s is missing help text", f.Name))
}
if k := f.Type.Kind(); (k == reflect.Ptr || k == reflect.Func) && v.Field(i).IsNil() {
panic(fmt.Sprintf("base.Flag.%s is uninitialized %v", f.Name, f.Type))
}
switch f.Type {
case boolType:
p := v.Field(i).Addr().Interface().(*bool)
flag.BoolVar(p, name, *p, help)
case intType:
p := v.Field(i).Addr().Interface().(*int)
flag.IntVar(p, name, *p, help)
case stringType:
p := v.Field(i).Addr().Interface().(*string)
flag.StringVar(p, name, *p, help)
case ptrBoolType:
p := v.Field(i).Interface().(*bool)
flag.BoolVar(p, name, *p, help)
case ptrIntType:
p := v.Field(i).Interface().(*int)
flag.IntVar(p, name, *p, help)
case ptrStringType:
p := v.Field(i).Interface().(*string)
flag.StringVar(p, name, *p, help)
case countType:
p := (*int)(v.Field(i).Addr().Interface().(*CountFlag))
objabi.Flagcount(name, help, p)
case funcType:
f := v.Field(i).Interface().(func(string))
objabi.Flagfn1(name, help, f)
}
}
}
// concurrentFlagOk reports whether the current compiler flags
// are compatible with concurrent compilation.
func concurrentFlagOk() bool {
// TODO(rsc): Many of these are fine. Remove them.
return Flag.Percent == 0 &&
Flag.E == 0 &&
Flag.K == 0 &&
Flag.L == 0 &&
Flag.LowerH == 0 &&
Flag.LowerJ == 0 &&
Flag.LowerM == 0 &&
Flag.LowerR == 0
}
func concurrentBackendAllowed() bool {
if !concurrentFlagOk() {
return false
}
// Flag.S by itself is ok, because all printing occurs
// while writing the object file, and that is non-concurrent.
// Adding Ctxt.Debugvlog, however, causes Flag.S to also print
// while flushing the plist, which happens concurrently.
if Ctxt.Debugvlog || Debug.Any() || Flag.Live > 0 {
return false
}
// TODO: Test and delete this condition.
if objabi.Fieldtrack_enabled != 0 {
return false
}
// TODO: fix races and enable the following flags
if Ctxt.Flag_shared || Ctxt.Flag_dynlink || Flag.Race {
return false
}
return true
}
func addImportDir(dir string) {
if dir != "" {
Flag.Cfg.ImportDirs = append(Flag.Cfg.ImportDirs, dir)
}
}
func addImportMap(s string) {
if Flag.Cfg.ImportMap == nil {
Flag.Cfg.ImportMap = make(map[string]string)
}
if strings.Count(s, "=") != 1 {
log.Fatal("-importmap argument must be of the form source=actual")
}
i := strings.Index(s, "=")
source, actual := s[:i], s[i+1:]
if source == "" || actual == "" {
log.Fatal("-importmap argument must be of the form source=actual; source and actual must be non-empty")
}
Flag.Cfg.ImportMap[source] = actual
}
func readImportCfg(file string) {
if Flag.Cfg.ImportMap == nil {
Flag.Cfg.ImportMap = make(map[string]string)
}
Flag.Cfg.PackageFile = map[string]string{}
data, err := ioutil.ReadFile(file)
if err != nil {
log.Fatalf("-importcfg: %v", err)
}
for lineNum, line := range strings.Split(string(data), "\n") {
lineNum++ // 1-based
line = strings.TrimSpace(line)
if line == "" || strings.HasPrefix(line, "#") {
continue
}
var verb, args string
if i := strings.Index(line, " "); i < 0 {
verb = line
} else {
verb, args = line[:i], strings.TrimSpace(line[i+1:])
}
var before, after string
if i := strings.Index(args, "="); i >= 0 {
before, after = args[:i], args[i+1:]
}
switch verb {
default:
log.Fatalf("%s:%d: unknown directive %q", file, lineNum, verb)
case "importmap":
if before == "" || after == "" {
log.Fatalf(`%s:%d: invalid importmap: syntax is "importmap old=new"`, file, lineNum)
}
Flag.Cfg.ImportMap[before] = after
case "packagefile":
if before == "" || after == "" {
log.Fatalf(`%s:%d: invalid packagefile: syntax is "packagefile path=filename"`, file, lineNum)
}
Flag.Cfg.PackageFile[before] = after
}
}
}
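For reference, here is a sketch of the input readImportCfg expects, written as a Go comment; the import paths and file paths below are invented:

// A hypothetical -importcfg file. Blank lines and '#' comments are
// skipped; every other line is "verb args" with args of the form
// before=after.
//
//	# remap an import path seen in source code
//	importmap old.example/pkg=new.example/pkg
//	# resolve an import path to a compiled package file
//	packagefile fmt=/tmp/gocache/fmt.a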
func readEmbedCfg(file string) {
data, err := ioutil.ReadFile(file)
if err != nil {
log.Fatalf("-embedcfg: %v", err)
}
if err := json.Unmarshal(data, &Flag.Cfg.Embed); err != nil {
log.Fatalf("%s: %v", file, err)
}
if Flag.Cfg.Embed.Patterns == nil {
log.Fatalf("%s: invalid embedcfg: missing Patterns", file)
}
if Flag.Cfg.Embed.Files == nil {
log.Fatalf("%s: invalid embedcfg: missing Files", file)
}
}
// parseSpectre parses the spectre configuration from the string s.
func parseSpectre(s string) {
for _, f := range strings.Split(s, ",") {
f = strings.TrimSpace(f)
switch f {
default:
log.Fatalf("unknown setting -spectre=%s", f)
case "":
// nothing
case "all":
Flag.Cfg.SpectreIndex = true
Ctxt.Retpoline = true
case "index":
Flag.Cfg.SpectreIndex = true
case "ret":
Ctxt.Retpoline = true
}
}
if Flag.Cfg.SpectreIndex {
switch objabi.GOARCH {
case "amd64":
// ok
default:
log.Fatalf("GOARCH=%s does not support -spectre=index", objabi.GOARCH)
}
}
}
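objabi.Flagcount, which backs CountFlag above, is internal to the toolchain, but its dual behavior (-name increments, -name=value sets) can be approximated with the standard flag package. A rough sketch follows; the Count type and its IsBoolFlag trick are assumptions for illustration, not the real implementation.

package main

import (
	"flag"
	"fmt"
	"strconv"
)

// Count is a counting flag: -v increments it, -v=3 sets it.
type Count int

func (c *Count) String() string { return strconv.Itoa(int(*c)) }

// Set receives "true" for a bare -v (see IsBoolFlag) and a literal
// number for -v=N.
func (c *Count) Set(s string) error {
	if s == "true" {
		*c++
		return nil
	}
	n, err := strconv.Atoi(s)
	if err != nil {
		return err
	}
	*c = Count(n)
	return nil
}

// IsBoolFlag lets -v appear without =value, like a boolean flag.
func (c *Count) IsBoolFlag() bool { return true }

func main() {
	var v Count
	flag.Var(&v, "v", "verbosity (counting flag)")
	flag.Parse() // e.g. -v -v gives 2, -v=5 gives 5
	fmt.Println("v =", v)
}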

View File

@@ -1,36 +0,0 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package base
import (
"cmd/internal/obj"
)
var Ctxt *obj.Link
// TODO(mdempsky): These should probably be obj.Link methods.
// PkgLinksym returns the linker symbol for name within the given
// package prefix. For user packages, prefix should be the package
// path encoded with objabi.PathToPrefix.
func PkgLinksym(prefix, name string, abi obj.ABI) *obj.LSym {
if name == "_" {
// TODO(mdempsky): Cleanup callers and Fatalf instead.
return linksym(prefix, "_", abi)
}
return linksym(prefix, prefix+"."+name, abi)
}
// Linkname returns the linker symbol for the given name as it might
// appear within a //go:linkname directive.
func Linkname(name string, abi obj.ABI) *obj.LSym {
return linksym("_", name, abi)
}
// linksym is an internal helper function for implementing the above
// exported APIs.
func linksym(pkg, name string, abi obj.ABI) *obj.LSym {
return Ctxt.LookupABIInit(name, abi, func(r *obj.LSym) { r.Pkg = pkg })
}
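The symbol-name composition here is simply prefix + "." + name, with "_" left bare. A hedged sketch of only that composition; the objabi.PathToPrefix encoding a caller would apply to a package path first is deliberately omitted:

package main

import "fmt"

// linkName mirrors the naming in PkgLinksym/linksym above.
func linkName(prefix, name string) string {
	if name == "_" {
		return "_"
	}
	return prefix + "." + name
}

func main() {
	fmt.Println(linkName("example.com/hello", "main")) // example.com/hello.main
	fmt.Println(linkName("example.com/hello", "_"))    // _
}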

View File

@@ -1,264 +0,0 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package base
import (
"fmt"
"os"
"runtime/debug"
"sort"
"strings"
"cmd/internal/objabi"
"cmd/internal/src"
)
// An errorMsg is a queued error message, waiting to be printed.
type errorMsg struct {
pos src.XPos
msg string
}
// Pos is the current source position being processed,
// printed by Errorf, ErrorfLang, Fatalf, and Warnf.
var Pos src.XPos
var (
errorMsgs []errorMsg
numErrors int // number of entries in errorMsgs that are errors (as opposed to warnings)
numSyntaxErrors int
)
// Errors returns the number of errors reported.
func Errors() int {
return numErrors
}
// SyntaxErrors returns the number of syntax errors reported.
func SyntaxErrors() int {
return numSyntaxErrors
}
// addErrorMsg adds a new errorMsg (which may be a warning) to errorMsgs.
func addErrorMsg(pos src.XPos, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
// Only add the position if we know the position.
// See issue golang.org/issue/11361.
if pos.IsKnown() {
msg = fmt.Sprintf("%v: %s", FmtPos(pos), msg)
}
errorMsgs = append(errorMsgs, errorMsg{
pos: pos,
msg: msg + "\n",
})
}
// FmtPos formats pos as a file:line string.
func FmtPos(pos src.XPos) string {
if Ctxt == nil {
return "???"
}
return Ctxt.OutermostPos(pos).Format(Flag.C == 0, Flag.L == 1)
}
// byPos sorts errors by source position.
type byPos []errorMsg
func (x byPos) Len() int { return len(x) }
func (x byPos) Less(i, j int) bool { return x[i].pos.Before(x[j].pos) }
func (x byPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
// FlushErrors sorts errors seen so far by line number, prints them to stdout,
// and empties the errors array.
func FlushErrors() {
if Ctxt != nil && Ctxt.Bso != nil {
Ctxt.Bso.Flush()
}
if len(errorMsgs) == 0 {
return
}
sort.Stable(byPos(errorMsgs))
for i, err := range errorMsgs {
if i == 0 || err.msg != errorMsgs[i-1].msg {
fmt.Printf("%s", err.msg)
}
}
errorMsgs = errorMsgs[:0]
}
// lasterror keeps track of the most recently issued error,
// to avoid printing multiple error messages on the same line.
var lasterror struct {
syntax src.XPos // source position of last syntax error
other src.XPos // source position of last non-syntax error
msg string // error message of last non-syntax error
}
// sameline reports whether two positions a, b are on the same line.
func sameline(a, b src.XPos) bool {
p := Ctxt.PosTable.Pos(a)
q := Ctxt.PosTable.Pos(b)
return p.Base() == q.Base() && p.Line() == q.Line()
}
// Errorf reports a formatted error at the current line.
func Errorf(format string, args ...interface{}) {
ErrorfAt(Pos, format, args...)
}
// ErrorfAt reports a formatted error message at pos.
func ErrorfAt(pos src.XPos, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
if strings.HasPrefix(msg, "syntax error") {
numSyntaxErrors++
// only one syntax error per line, no matter what error
if sameline(lasterror.syntax, pos) {
return
}
lasterror.syntax = pos
} else {
// only one of multiple equal non-syntax errors per line
// (FlushErrors shows only one of them, so we filter them
// here as best as we can (they may not appear in order)
// so that we don't count them here and exit early, and
// then have nothing to show for it.)
if sameline(lasterror.other, pos) && lasterror.msg == msg {
return
}
lasterror.other = pos
lasterror.msg = msg
}
addErrorMsg(pos, "%s", msg)
numErrors++
hcrash()
if numErrors >= 10 && Flag.LowerE == 0 {
FlushErrors()
fmt.Printf("%v: too many errors\n", FmtPos(pos))
ErrorExit()
}
}
// ErrorfVers reports that a language feature (format, args) requires a later version of Go.
func ErrorfVers(lang string, format string, args ...interface{}) {
Errorf("%s requires %s or later (-lang was set to %s; check go.mod)", fmt.Sprintf(format, args...), lang, Flag.Lang)
}
// UpdateErrorDot is a clumsy hack that rewrites the last error,
// if it was "LINE: undefined: NAME", to be "LINE: undefined: NAME in EXPR".
// It is used to give better error messages for dot (selector) expressions.
func UpdateErrorDot(line string, name, expr string) {
if len(errorMsgs) == 0 {
return
}
e := &errorMsgs[len(errorMsgs)-1]
if strings.HasPrefix(e.msg, line) && e.msg == fmt.Sprintf("%v: undefined: %v\n", line, name) {
e.msg = fmt.Sprintf("%v: undefined: %v in %v\n", line, name, expr)
}
}
// Warn reports a formatted warning at the current line.
// In general the Go compiler does NOT generate warnings,
// so this should be used only when the user has opted in
// to additional output by setting a particular flag.
func Warn(format string, args ...interface{}) {
WarnfAt(Pos, format, args...)
}
// WarnfAt reports a formatted warning at pos.
// In general the Go compiler does NOT generate warnings,
// so this should be used only when the user has opted in
// to additional output by setting a particular flag.
func WarnfAt(pos src.XPos, format string, args ...interface{}) {
addErrorMsg(pos, format, args...)
if Flag.LowerM != 0 {
FlushErrors()
}
}
// Fatalf reports a fatal error - an internal problem - at the current line and exits.
// If other errors have already been printed, then Fatalf just quietly exits.
// (The internal problem may have been caused by incomplete information
// after the already-reported errors, so best to let users fix those and
// try again without being bothered about a spurious internal error.)
//
// But if no errors have been printed, or if -d panic has been specified,
// Fatalf prints the error as an "internal compiler error". In a released build,
// it prints an error asking to file a bug report. In development builds, it
// prints a stack trace.
//
// If -h has been specified, Fatalf panics to force the usual runtime info dump.
func Fatalf(format string, args ...interface{}) {
FatalfAt(Pos, format, args...)
}
// FatalfAt reports a fatal error - an internal problem - at pos and exits.
// If other errors have already been printed, then FatalfAt just quietly exits.
// (The internal problem may have been caused by incomplete information
// after the already-reported errors, so best to let users fix those and
// try again without being bothered about a spurious internal error.)
//
// But if no errors have been printed, or if -d panic has been specified,
// FatalfAt prints the error as an "internal compiler error". In a released build,
// it prints an error asking to file a bug report. In development builds, it
// prints a stack trace.
//
// If -h has been specified, FatalfAt panics to force the usual runtime info dump.
func FatalfAt(pos src.XPos, format string, args ...interface{}) {
FlushErrors()
if Debug.Panic != 0 || numErrors == 0 {
fmt.Printf("%v: internal compiler error: ", FmtPos(pos))
fmt.Printf(format, args...)
fmt.Printf("\n")
// If this is a released compiler version, ask for a bug report.
if strings.HasPrefix(objabi.Version, "go") {
fmt.Printf("\n")
fmt.Printf("Please file a bug report including a short program that triggers the error.\n")
fmt.Printf("https://golang.org/issue/new\n")
} else {
// Not a release; dump a stack trace, too.
fmt.Println()
os.Stdout.Write(debug.Stack())
fmt.Println()
}
}
hcrash()
ErrorExit()
}
// hcrash crashes the compiler when -h is set, to find out where a message is generated.
func hcrash() {
if Flag.LowerH != 0 {
FlushErrors()
if Flag.LowerO != "" {
os.Remove(Flag.LowerO)
}
panic("-h")
}
}
// ErrorExit handles an error-status exit.
// It flushes any pending errors, removes the output file, and exits.
func ErrorExit() {
FlushErrors()
if Flag.LowerO != "" {
os.Remove(Flag.LowerO)
}
os.Exit(2)
}
// ExitIfErrors calls ErrorExit if any errors have been reported.
func ExitIfErrors() {
if Errors() > 0 {
ErrorExit()
}
}
var AutogeneratedPos src.XPos
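
FlushErrors has two properties worth calling out: the sort is stable, so errors at the same position keep their report order, and consecutive identical messages are printed only once. A minimal sketch with int line numbers standing in for src.XPos:

package main

import (
	"fmt"
	"sort"
)

type errorMsg struct {
	line int // stand-in for src.XPos
	msg  string
}

// flush mirrors FlushErrors above: stable-sort by position, print each
// message, and skip consecutive duplicates.
func flush(msgs []errorMsg) {
	sort.SliceStable(msgs, func(i, j int) bool { return msgs[i].line < msgs[j].line })
	for i, e := range msgs {
		if i == 0 || e.msg != msgs[i-1].msg {
			fmt.Printf("%d: %s\n", e.line, e.msg)
		}
	}
}

func main() {
	flush([]errorMsg{
		{10, "undefined: x"},
		{3, "syntax error"},
		{10, "undefined: x"}, // duplicate of the first entry, suppressed
	})
	// Output:
	// 3: syntax error
	// 10: undefined: x
}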

View File

@@ -1,190 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bitvec
import (
"math/bits"
"cmd/compile/internal/base"
)
const (
wordBits = 32
wordMask = wordBits - 1
wordShift = 5
)
// A BitVec is a bit vector.
type BitVec struct {
N int32 // number of bits in vector
B []uint32 // words holding bits
}
func New(n int32) BitVec {
nword := (n + wordBits - 1) / wordBits
return BitVec{n, make([]uint32, nword)}
}
type Bulk struct {
words []uint32
nbit int32
nword int32
}
func NewBulk(nbit int32, count int32) Bulk {
nword := (nbit + wordBits - 1) / wordBits
size := int64(nword) * int64(count)
if int64(int32(size*4)) != size*4 {
base.Fatalf("NewBulk too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
}
return Bulk{
words: make([]uint32, size),
nbit: nbit,
nword: nword,
}
}
func (b *Bulk) Next() BitVec {
out := BitVec{b.nbit, b.words[:b.nword]}
b.words = b.words[b.nword:]
return out
}
func (bv1 BitVec) Eq(bv2 BitVec) bool {
if bv1.N != bv2.N {
base.Fatalf("bvequal: lengths %d and %d are not equal", bv1.N, bv2.N)
}
for i, x := range bv1.B {
if x != bv2.B[i] {
return false
}
}
return true
}
func (dst BitVec) Copy(src BitVec) {
copy(dst.B, src.B)
}
func (bv BitVec) Get(i int32) bool {
if i < 0 || i >= bv.N {
base.Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.N)
}
mask := uint32(1 << uint(i%wordBits))
return bv.B[i>>wordShift]&mask != 0
}
func (bv BitVec) Set(i int32) {
if i < 0 || i >= bv.N {
base.Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.N)
}
mask := uint32(1 << uint(i%wordBits))
bv.B[i/wordBits] |= mask
}
func (bv BitVec) Unset(i int32) {
if i < 0 || i >= bv.N {
base.Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.N)
}
mask := uint32(1 << uint(i%wordBits))
bv.B[i/wordBits] &^= mask
}
// Next returns the smallest index >= i for which bv.Get(i) reports true.
// If there is no such index, Next returns -1.
func (bv BitVec) Next(i int32) int32 {
if i >= bv.N {
return -1
}
// Jump i ahead to next word with bits.
if bv.B[i>>wordShift]>>uint(i&wordMask) == 0 {
i &^= wordMask
i += wordBits
for i < bv.N && bv.B[i>>wordShift] == 0 {
i += wordBits
}
}
if i >= bv.N {
return -1
}
// Find 1 bit.
w := bv.B[i>>wordShift] >> uint(i&wordMask)
i += int32(bits.TrailingZeros32(w))
return i
}
func (bv BitVec) IsEmpty() bool {
for _, x := range bv.B {
if x != 0 {
return false
}
}
return true
}
func (bv BitVec) Not() {
for i, x := range bv.B {
bv.B[i] = ^x
}
}
// union
func (dst BitVec) Or(src1, src2 BitVec) {
if len(src1.B) == 0 {
return
}
_, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop
for i, x := range src1.B {
dst.B[i] = x | src2.B[i]
}
}
// intersection
func (dst BitVec) And(src1, src2 BitVec) {
if len(src1.B) == 0 {
return
}
_, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop
for i, x := range src1.B {
dst.B[i] = x & src2.B[i]
}
}
// difference
func (dst BitVec) AndNot(src1, src2 BitVec) {
if len(src1.B) == 0 {
return
}
_, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop
for i, x := range src1.B {
dst.B[i] = x &^ src2.B[i]
}
}
func (bv BitVec) String() string {
s := make([]byte, 2+bv.N)
copy(s, "#*")
for i := int32(0); i < bv.N; i++ {
ch := byte('0')
if bv.Get(i) {
ch = '1'
}
s[2+i] = ch
}
return string(s)
}
func (bv BitVec) Clear() {
for i := range bv.B {
bv.B[i] = 0
}
}
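The indexing arithmetic is the heart of this package: with 32-bit words, bit i lives in word i>>wordShift (that is, i/32) at position i&wordMask (that is, i%32). A self-contained sketch of just Set/Get:

package main

import "fmt"

const (
	wordBits  = 32
	wordMask  = wordBits - 1
	wordShift = 5
)

// set and get mirror BitVec.Set/Get above (bounds checks omitted).
func set(b []uint32, i int32)      { b[i>>wordShift] |= 1 << uint(i&wordMask) }
func get(b []uint32, i int32) bool { return b[i>>wordShift]&(1<<uint(i&wordMask)) != 0 }

func main() {
	n := int32(100)
	b := make([]uint32, (n+wordBits-1)/wordBits) // 4 words hold 100 bits
	set(b, 33)                                   // word 1, bit 1
	fmt.Println(get(b, 33), get(b, 34))          // true false
}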

View File

@@ -1,152 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package deadcode
import (
"go/constant"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
)
func Func(fn *ir.Func) {
stmts(&fn.Body)
if len(fn.Body) == 0 {
return
}
for _, n := range fn.Body {
if len(n.Init()) > 0 {
return
}
switch n.Op() {
case ir.OIF:
n := n.(*ir.IfStmt)
if !ir.IsConst(n.Cond, constant.Bool) || len(n.Body) > 0 || len(n.Else) > 0 {
return
}
case ir.OFOR:
n := n.(*ir.ForStmt)
if !ir.IsConst(n.Cond, constant.Bool) || ir.BoolVal(n.Cond) {
return
}
default:
return
}
}
fn.Body = []ir.Node{ir.NewBlockStmt(base.Pos, nil)}
}
func stmts(nn *ir.Nodes) {
var lastLabel = -1
for i, n := range *nn {
if n != nil && n.Op() == ir.OLABEL {
lastLabel = i
}
}
for i, n := range *nn {
// Cut is set to true when all nodes after i'th position
// should be removed.
// In other words, it marks the whole slice "tail" as dead.
cut := false
if n == nil {
continue
}
if n.Op() == ir.OIF {
n := n.(*ir.IfStmt)
n.Cond = expr(n.Cond)
if ir.IsConst(n.Cond, constant.Bool) {
var body ir.Nodes
if ir.BoolVal(n.Cond) {
n.Else = ir.Nodes{}
body = n.Body
} else {
n.Body = ir.Nodes{}
body = n.Else
}
// If "then" or "else" branch ends with panic or return statement,
// it is safe to remove all statements after this node.
// isterminating is not used to avoid goto-related complications.
// We must be careful not to deadcode-remove labels, as they
// might be the target of a goto. See issue 28616.
if body := body; len(body) != 0 {
switch body[len(body)-1].Op() {
case ir.ORETURN, ir.OTAILCALL, ir.OPANIC:
if i > lastLabel {
cut = true
}
}
}
}
}
if len(n.Init()) != 0 {
stmts(n.(ir.InitNode).PtrInit())
}
switch n.Op() {
case ir.OBLOCK:
n := n.(*ir.BlockStmt)
stmts(&n.List)
case ir.OFOR:
n := n.(*ir.ForStmt)
stmts(&n.Body)
case ir.OIF:
n := n.(*ir.IfStmt)
stmts(&n.Body)
stmts(&n.Else)
case ir.ORANGE:
n := n.(*ir.RangeStmt)
stmts(&n.Body)
case ir.OSELECT:
n := n.(*ir.SelectStmt)
for _, cas := range n.Cases {
stmts(&cas.Body)
}
case ir.OSWITCH:
n := n.(*ir.SwitchStmt)
for _, cas := range n.Cases {
stmts(&cas.Body)
}
}
if cut {
*nn = (*nn)[:i+1]
break
}
}
}
func expr(n ir.Node) ir.Node {
// Perform dead-code elimination on short-circuited boolean
// expressions involving constants with the intent of
// producing a constant 'if' condition.
switch n.Op() {
case ir.OANDAND:
n := n.(*ir.LogicalExpr)
n.X = expr(n.X)
n.Y = expr(n.Y)
if ir.IsConst(n.X, constant.Bool) {
if ir.BoolVal(n.X) {
return n.Y // true && x => x
} else {
return n.X // false && x => false
}
}
case ir.OOROR:
n := n.(*ir.LogicalExpr)
n.X = expr(n.X)
n.Y = expr(n.Y)
if ir.IsConst(n.X, constant.Bool) {
if ir.BoolVal(n.X) {
return n.X // true || x => true
} else {
return n.Y // false || x => x
}
}
}
return n
}
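At the source level, expr implements the familiar boolean identities: true && x => x, false && x => false, true || x => true, false || x => x. A toy folder over a throwaway expression type, shown only to make the rewrite table concrete:

package main

import "fmt"

type expr struct {
	op   string // "const", "var", "&&", "||"
	val  bool   // for "const"
	name string // for "var"
	x, y *expr
}

// fold applies the same identities as expr above to a toy tree.
func fold(e *expr) *expr {
	if e.op == "&&" || e.op == "||" {
		e.x, e.y = fold(e.x), fold(e.y)
		if e.x.op == "const" {
			if (e.op == "&&") == e.x.val {
				return e.y // true && y => y, false || y => y
			}
			return e.x // false && y => false, true || y => true
		}
	}
	return e
}

func main() {
	e := &expr{op: "&&", x: &expr{op: "const", val: true}, y: &expr{op: "var", name: "cond"}}
	fmt.Println(fold(e).name) // cond
}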

View File

@@ -1,85 +0,0 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package devirtualize implements a simple "devirtualization"
// optimization pass, which replaces interface method calls with
// direct concrete-type method calls where possible.
package devirtualize
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
)
// Func devirtualizes calls within fn where possible.
func Func(fn *ir.Func) {
ir.CurFunc = fn
ir.VisitList(fn.Body, func(n ir.Node) {
if call, ok := n.(*ir.CallExpr); ok {
Call(call)
}
})
}
// Call devirtualizes the given call if possible.
func Call(call *ir.CallExpr) {
if call.Op() != ir.OCALLINTER {
return
}
sel := call.X.(*ir.SelectorExpr)
r := ir.StaticValue(sel.X)
if r.Op() != ir.OCONVIFACE {
return
}
recv := r.(*ir.ConvExpr)
typ := recv.X.Type()
if typ.IsInterface() {
return
}
dt := ir.NewTypeAssertExpr(sel.Pos(), sel.X, nil)
dt.SetType(typ)
x := typecheck.Callee(ir.NewSelectorExpr(sel.Pos(), ir.OXDOT, dt, sel.Sel))
switch x.Op() {
case ir.ODOTMETH:
x := x.(*ir.SelectorExpr)
if base.Flag.LowerM != 0 {
base.WarnfAt(call.Pos(), "devirtualizing %v to %v", sel, typ)
}
call.SetOp(ir.OCALLMETH)
call.X = x
case ir.ODOTINTER:
// Promoted method from embedded interface-typed field (#42279).
x := x.(*ir.SelectorExpr)
if base.Flag.LowerM != 0 {
base.WarnfAt(call.Pos(), "partially devirtualizing %v to %v", sel, typ)
}
call.SetOp(ir.OCALLINTER)
call.X = x
default:
// TODO(mdempsky): Turn back into Fatalf after more testing.
if base.Flag.LowerM != 0 {
base.WarnfAt(call.Pos(), "failed to devirtualize %v (%v)", x, x.Op())
}
return
}
// Duplicated logic from typecheck for function call return
// value types.
//
// Receiver parameter size may have changed; need to update
// call.Type to get correct stack offsets for result
// parameters.
types.CheckSize(x.Type())
switch ft := x.Type(); ft.NumResults() {
case 0:
case 1:
call.SetType(ft.Results().Field(0).Type)
default:
call.SetType(ft.Results())
}
}
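A source-level picture of what the pass achieves (the real transformation happens on the compiler's IR, not on source text): when the concrete type behind an interface value is statically known, the interface call becomes a direct method call, as if a type assertion had been inserted.

package main

import (
	"bytes"
	"io"
)

func main() {
	var w io.Writer = &bytes.Buffer{} // concrete type statically known
	// Devirtualization effectively rewrites the interface call
	//	w.Write([]byte("hi"))          // OCALLINTER
	// into a direct call through a type assertion:
	w.(*bytes.Buffer).Write([]byte("hi")) // OCALLMETH
}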

View File

@@ -1,457 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package dwarfgen
import (
"bytes"
"flag"
"fmt"
"sort"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/dwarf"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
)
func Info(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) {
fn := curfn.(*ir.Func)
if fn.Nname != nil {
expect := fn.Linksym()
if fnsym.ABI() == obj.ABI0 {
expect = fn.LinksymABI(obj.ABI0)
}
if fnsym != expect {
base.Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
}
}
// Back when there were two different *Funcs for a function, this code
// was not consistent about whether a particular *Node being processed
// was an ODCLFUNC or ONAME node. Partly this is because inlined function
// bodies have no ODCLFUNC node, which was its own inconsistency.
// In any event, the handling of the two different nodes for DWARF purposes
// was subtly different, likely in unintended ways. CL 272253 merged the
// two nodes' Func fields, so that code sees the same *Func whether it is
// holding the ODCLFUNC or the ONAME. This resulted in changes in the
// DWARF output. To preserve the existing DWARF output and leave an
// intentional change for a future CL, this code does the following when
// fn.Op == ONAME:
//
// 1. Disallow use of createComplexVars in createDwarfVars.
// It was not possible to reach that code for an ONAME before,
// because the DebugInfo was set only on the ODCLFUNC Func.
// Calling into it in the ONAME case causes an index out of bounds panic.
//
// 2. Do not populate apdecls. fn.Func.Dcl was in the ODCLFUNC Func,
// not the ONAME Func. Populating apdecls for the ONAME case results
// in selected being populated after createSimpleVars is called in
// createDwarfVars, and then that causes the loop to skip all the entries
// in dcl, meaning that the RecordAutoType calls don't happen.
//
// These two adjustments keep toolstash -cmp working for now.
// Deciding the right answer is, as they say, future work.
//
// We can tell the difference between the old ODCLFUNC and ONAME
// cases by looking at the infosym.Name. If it's empty, DebugInfo is
// being called from (*obj.Link).populateDWARF, which used to use
// the ODCLFUNC. If it's non-empty (the name will end in $abstract),
// DebugInfo is being called from (*obj.Link).DwarfAbstractFunc,
// which used to use the ONAME form.
isODCLFUNC := infosym.Name == ""
var apdecls []*ir.Name
// Populate decls for fn.
if isODCLFUNC {
for _, n := range fn.Dcl {
if n.Op() != ir.ONAME { // might be OTYPE or OLITERAL
continue
}
switch n.Class {
case ir.PAUTO:
if !n.Used() {
// Text == nil -> generating abstract function
if fnsym.Func().Text != nil {
base.Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)")
}
continue
}
case ir.PPARAM, ir.PPARAMOUT:
default:
continue
}
apdecls = append(apdecls, n)
fnsym.Func().RecordAutoType(reflectdata.TypeLinksym(n.Type()))
}
}
decls, dwarfVars := createDwarfVars(fnsym, isODCLFUNC, fn, apdecls)
// For each type referenced by the function's auto vars but not
// already referenced by a dwarf var, attach an R_USETYPE relocation to
// the function symbol to ensure that the type is included in DWARF
// processing during linking.
typesyms := []*obj.LSym{}
for t := range fnsym.Func().Autot {
typesyms = append(typesyms, t)
}
sort.Sort(obj.BySymName(typesyms))
for _, sym := range typesyms {
r := obj.Addrel(infosym)
r.Sym = sym
r.Type = objabi.R_USETYPE
}
fnsym.Func().Autot = nil
var varScopes []ir.ScopeID
for _, decl := range decls {
pos := declPos(decl)
varScopes = append(varScopes, findScope(fn.Marks, pos))
}
scopes := assembleScopes(fnsym, fn, dwarfVars, varScopes)
var inlcalls dwarf.InlCalls
if base.Flag.GenDwarfInl > 0 {
inlcalls = assembleInlines(fnsym, dwarfVars)
}
return scopes, inlcalls
}
func declPos(decl *ir.Name) src.XPos {
return decl.Canonical().Pos()
}
// createDwarfVars processes fn, returning a list of DWARF variables and the
// Nodes they represent.
func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var) {
// Collect a raw list of DWARF vars.
var vars []*dwarf.Var
var decls []*ir.Name
var selected ir.NameSet
if base.Ctxt.Flag_locationlists && base.Ctxt.Flag_optimize && fn.DebugInfo != nil && complexOK {
decls, vars, selected = createComplexVars(fnsym, fn)
} else {
decls, vars, selected = createSimpleVars(fnsym, apDecls)
}
dcl := apDecls
if fnsym.WasInlined() {
dcl = preInliningDcls(fnsym)
}
// If optimization is enabled, the list above will typically be
// missing some of the original pre-optimization variables in the
// function (they may have been promoted to registers, folded into
// constants, dead-coded away, etc). Input arguments not eligible
// for SSA optimization are also missing. Here we add back in entries
// for selected missing vars. Note that the recipe below creates a
// conservative location. The idea here is that we want to
// communicate to the user that "yes, there is a variable named X
// in this function, but no, I don't have enough information to
// reliably report its contents."
// For non-SSA-able arguments, however, the correct information
// is known -- they have a single home on the stack.
for _, n := range dcl {
if selected.Has(n) {
continue
}
c := n.Sym().Name[0]
if c == '.' || n.Type().IsUntyped() {
continue
}
if n.Class == ir.PPARAM && !ssagen.TypeOK(n.Type()) {
// SSA-able args get location lists, and may move in and
// out of registers, so those are handled elsewhere.
// Autos and named output params seem to get handled
// with VARDEF, which creates location lists.
// Args not of SSA-able type are treated here; they
// are homed on the stack in a single place for the
// entire call.
vars = append(vars, createSimpleVar(fnsym, n))
decls = append(decls, n)
continue
}
typename := dwarf.InfoPrefix + types.TypeSymName(n.Type())
decls = append(decls, n)
abbrev := dwarf.DW_ABRV_AUTO_LOCLIST
isReturnValue := (n.Class == ir.PPARAMOUT)
if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
}
if n.Esc() == ir.EscHeap {
// The variable in question has been promoted to the heap.
// Its address is in n.Heapaddr.
// TODO(thanm): generate a better location expression
}
inlIndex := 0
if base.Flag.GenDwarfInl > 1 {
if n.InlFormal() || n.InlLocal() {
inlIndex = posInlIndex(n.Pos()) + 1
if n.InlFormal() {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
}
}
}
declpos := base.Ctxt.InnermostPos(n.Pos())
vars = append(vars, &dwarf.Var{
Name: n.Sym().Name,
IsReturnValue: isReturnValue,
Abbrev: abbrev,
StackOffset: int32(n.FrameOffset()),
Type: base.Ctxt.Lookup(typename),
DeclFile: declpos.RelFilename(),
DeclLine: declpos.RelLine(),
DeclCol: declpos.Col(),
InlIndex: int32(inlIndex),
ChildIndex: -1,
})
// Record the Go type to ensure that it gets emitted by the linker.
fnsym.Func().RecordAutoType(reflectdata.TypeLinksym(n.Type()))
}
return decls, vars
}
// Given a function that was inlined at some point during the
// compilation, return a sorted list of nodes corresponding to the
// autos/locals in that function prior to inlining. If this is a
// function that is not local to the package being compiled, then the
// names of the variables may have been "versioned" to avoid conflicts
// with local vars; disregard this versioning when sorting.
func preInliningDcls(fnsym *obj.LSym) []*ir.Name {
fn := base.Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*ir.Func)
var rdcl []*ir.Name
for _, n := range fn.Inl.Dcl {
c := n.Sym().Name[0]
// Avoid reporting "_" parameters, since if there are more than
// one, it can result in a collision later on, as in #23179.
if unversion(n.Sym().Name) == "_" || c == '.' || n.Type().IsUntyped() {
continue
}
rdcl = append(rdcl, n)
}
return rdcl
}
// createSimpleVars creates a DWARF entry for every variable declared in the
// function, claiming that they are permanently on the stack.
func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var, ir.NameSet) {
var vars []*dwarf.Var
var decls []*ir.Name
var selected ir.NameSet
for _, n := range apDecls {
if ir.IsAutoTmp(n) {
continue
}
decls = append(decls, n)
vars = append(vars, createSimpleVar(fnsym, n))
selected.Add(n)
}
return decls, vars, selected
}
func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var {
var abbrev int
var offs int64
switch n.Class {
case ir.PAUTO:
offs = n.FrameOffset()
abbrev = dwarf.DW_ABRV_AUTO
if base.Ctxt.FixedFrameSize() == 0 {
offs -= int64(types.PtrSize)
}
if objabi.Framepointer_enabled {
offs -= int64(types.PtrSize)
}
case ir.PPARAM, ir.PPARAMOUT:
abbrev = dwarf.DW_ABRV_PARAM
offs = n.FrameOffset() + base.Ctxt.FixedFrameSize()
default:
base.Fatalf("createSimpleVar unexpected class %v for node %v", n.Class, n)
}
typename := dwarf.InfoPrefix + types.TypeSymName(n.Type())
delete(fnsym.Func().Autot, reflectdata.TypeLinksym(n.Type()))
inlIndex := 0
if base.Flag.GenDwarfInl > 1 {
if n.InlFormal() || n.InlLocal() {
inlIndex = posInlIndex(n.Pos()) + 1
if n.InlFormal() {
abbrev = dwarf.DW_ABRV_PARAM
}
}
}
declpos := base.Ctxt.InnermostPos(declPos(n))
return &dwarf.Var{
Name: n.Sym().Name,
IsReturnValue: n.Class == ir.PPARAMOUT,
IsInlFormal: n.InlFormal(),
Abbrev: abbrev,
StackOffset: int32(offs),
Type: base.Ctxt.Lookup(typename),
DeclFile: declpos.RelFilename(),
DeclLine: declpos.RelLine(),
DeclCol: declpos.Col(),
InlIndex: int32(inlIndex),
ChildIndex: -1,
}
}
// createComplexVars creates recomposed DWARF vars with location lists,
// suitable for describing optimized code.
func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Name, []*dwarf.Var, ir.NameSet) {
debugInfo := fn.DebugInfo.(*ssa.FuncDebug)
// Produce a DWARF variable entry for each user variable.
var decls []*ir.Name
var vars []*dwarf.Var
var ssaVars ir.NameSet
for varID, dvar := range debugInfo.Vars {
n := dvar
ssaVars.Add(n)
for _, slot := range debugInfo.VarSlots[varID] {
ssaVars.Add(debugInfo.Slots[slot].N)
}
if dvar := createComplexVar(fnsym, fn, ssa.VarID(varID)); dvar != nil {
decls = append(decls, n)
vars = append(vars, dvar)
}
}
return decls, vars, ssaVars
}
// createComplexVar builds a single DWARF variable entry and location list.
func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var {
debug := fn.DebugInfo.(*ssa.FuncDebug)
n := debug.Vars[varID]
var abbrev int
switch n.Class {
case ir.PAUTO:
abbrev = dwarf.DW_ABRV_AUTO_LOCLIST
case ir.PPARAM, ir.PPARAMOUT:
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
default:
return nil
}
gotype := reflectdata.TypeLinksym(n.Type())
delete(fnsym.Func().Autot, gotype)
typename := dwarf.InfoPrefix + gotype.Name[len("type."):]
inlIndex := 0
if base.Flag.GenDwarfInl > 1 {
if n.InlFormal() || n.InlLocal() {
inlIndex = posInlIndex(n.Pos()) + 1
if n.InlFormal() {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
}
}
}
declpos := base.Ctxt.InnermostPos(n.Pos())
dvar := &dwarf.Var{
Name: n.Sym().Name,
IsReturnValue: n.Class == ir.PPARAMOUT,
IsInlFormal: n.InlFormal(),
Abbrev: abbrev,
Type: base.Ctxt.Lookup(typename),
// The stack offset is used as a sorting key, so for decomposed
// variables just give it the first one. It's not used otherwise.
// This won't work well if the first slot hasn't been assigned a stack
// location, but it's not obvious how to do better.
StackOffset: ssagen.StackOffset(debug.Slots[debug.VarSlots[varID][0]]),
DeclFile: declpos.RelFilename(),
DeclLine: declpos.RelLine(),
DeclCol: declpos.Col(),
InlIndex: int32(inlIndex),
ChildIndex: -1,
}
list := debug.LocationLists[varID]
if len(list) != 0 {
dvar.PutLocationList = func(listSym, startPC dwarf.Sym) {
debug.PutLocationList(list, base.Ctxt, listSym.(*obj.LSym), startPC.(*obj.LSym))
}
}
return dvar
}
// RecordFlags records the specified command-line flags to be placed
// in the DWARF info.
func RecordFlags(flags ...string) {
if base.Ctxt.Pkgpath == "" {
// We can't record the flags if we don't know what the
// package name is.
return
}
type BoolFlag interface {
IsBoolFlag() bool
}
type CountFlag interface {
IsCountFlag() bool
}
var cmd bytes.Buffer
for _, name := range flags {
f := flag.Lookup(name)
if f == nil {
continue
}
getter := f.Value.(flag.Getter)
if getter.String() == f.DefValue {
// Flag has default value, so omit it.
continue
}
if bf, ok := f.Value.(BoolFlag); ok && bf.IsBoolFlag() {
val, ok := getter.Get().(bool)
if ok && val {
fmt.Fprintf(&cmd, " -%s", f.Name)
continue
}
}
if cf, ok := f.Value.(CountFlag); ok && cf.IsCountFlag() {
val, ok := getter.Get().(int)
if ok && val == 1 {
fmt.Fprintf(&cmd, " -%s", f.Name)
continue
}
}
fmt.Fprintf(&cmd, " -%s=%v", f.Name, getter.Get())
}
if cmd.Len() == 0 {
return
}
s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "producer." + base.Ctxt.Pkgpath)
s.Type = objabi.SDWARFCUINFO
// Sometimes (for example when building tests) we can link
// together two package main archives. So allow dups.
s.Set(obj.AttrDuplicateOK, true)
base.Ctxt.Data = append(base.Ctxt.Data, s)
s.P = cmd.Bytes()[1:]
}
// RecordPackageName records the name of the package being
// compiled, so that the linker can save it in the compile unit's DIE.
func RecordPackageName() {
s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "packagename." + base.Ctxt.Pkgpath)
s.Type = objabi.SDWARFCUINFO
// Sometimes (for example when building tests) we can link
// together two package main archives. So allow dups.
s.Set(obj.AttrDuplicateOK, true)
base.Ctxt.Data = append(base.Ctxt.Data, s)
s.P = []byte(types.LocalPkg.Name)
}
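The interesting part of RecordFlags is the diff against defaults: a flag is recorded only when its current value differs from DefValue, and boolean or count flags equal to true/1 are rendered bare. A sketch of the default-diffing core using only the standard flag package (the bare-name special case is omitted):

package main

import (
	"flag"
	"fmt"
)

// nonDefaultFlags renders only the named flags whose current value
// differs from their default, mirroring the core of RecordFlags.
func nonDefaultFlags(names ...string) string {
	out := ""
	for _, name := range names {
		f := flag.Lookup(name)
		if f == nil || f.Value.String() == f.DefValue {
			continue
		}
		out += fmt.Sprintf(" -%s=%v", f.Name, f.Value)
	}
	return out
}

func main() {
	flag.Int("n", 1, "count")
	flag.Bool("race", false, "enable race detector")
	flag.CommandLine.Parse([]string{"-n=3"})
	fmt.Printf("%q\n", nonDefaultFlags("n", "race")) // " -n=3"
}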

View File

@@ -1,94 +0,0 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package dwarfgen
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/internal/src"
)
// A ScopeMarker tracks scope nesting and boundaries for later use
// during DWARF generation.
type ScopeMarker struct {
parents []ir.ScopeID
marks []ir.Mark
}
// checkPos validates the given position and returns the current scope.
func (m *ScopeMarker) checkPos(pos src.XPos) ir.ScopeID {
if !pos.IsKnown() {
base.Fatalf("unknown scope position")
}
if len(m.marks) == 0 {
return 0
}
last := &m.marks[len(m.marks)-1]
if xposBefore(pos, last.Pos) {
base.FatalfAt(pos, "non-monotonic scope positions\n\t%v: previous scope position", base.FmtPos(last.Pos))
}
return last.Scope
}
// Push records a transition to a new child scope of the current scope.
func (m *ScopeMarker) Push(pos src.XPos) {
current := m.checkPos(pos)
m.parents = append(m.parents, current)
child := ir.ScopeID(len(m.parents))
m.marks = append(m.marks, ir.Mark{Pos: pos, Scope: child})
}
// Pop records a transition back to the current scope's parent.
func (m *ScopeMarker) Pop(pos src.XPos) {
current := m.checkPos(pos)
parent := m.parents[current-1]
m.marks = append(m.marks, ir.Mark{Pos: pos, Scope: parent})
}
// Unpush removes the current scope, which must be empty.
func (m *ScopeMarker) Unpush() {
i := len(m.marks) - 1
current := m.marks[i].Scope
if current != ir.ScopeID(len(m.parents)) {
base.FatalfAt(m.marks[i].Pos, "current scope is not empty")
}
m.parents = m.parents[:current-1]
m.marks = m.marks[:i]
}
// WriteTo writes the recorded scope marks to the given function,
// and resets the marker for reuse.
func (m *ScopeMarker) WriteTo(fn *ir.Func) {
m.compactMarks()
fn.Parents = make([]ir.ScopeID, len(m.parents))
copy(fn.Parents, m.parents)
m.parents = m.parents[:0]
fn.Marks = make([]ir.Mark, len(m.marks))
copy(fn.Marks, m.marks)
m.marks = m.marks[:0]
}
func (m *ScopeMarker) compactMarks() {
n := 0
for _, next := range m.marks {
if n > 0 && next.Pos == m.marks[n-1].Pos {
m.marks[n-1].Scope = next.Scope
continue
}
m.marks[n] = next
n++
}
m.marks = m.marks[:n]
}
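ScopeMarker's invariant is that scope IDs are allocated densely: pushing the n-th scope appends its parent to parents, and the new scope's ID is len(parents). A toy analogue with int positions standing in for src.XPos (and none of the validation checkPos performs):

package main

import "fmt"

type mark struct{ pos, scope int }

type marker struct {
	parents []int // parents[i] is the parent of scope i+1
	marks   []mark
}

func (m *marker) current() int {
	if len(m.marks) == 0 {
		return 0 // the root scope
	}
	return m.marks[len(m.marks)-1].scope
}

// push opens a child of the current scope; pop returns to its parent.
func (m *marker) push(pos int) {
	m.parents = append(m.parents, m.current())
	m.marks = append(m.marks, mark{pos, len(m.parents)})
}

func (m *marker) pop(pos int) {
	m.marks = append(m.marks, mark{pos, m.parents[m.current()-1]})
}

func main() {
	var m marker
	m.push(1) // enter scope 1 (child of root)
	m.push(2) // enter scope 2 (child of 1)
	m.pop(3)  // back to scope 1
	m.pop(4)  // back to root
	fmt.Println(m.parents, m.marks) // [0 1] [{1 1} {2 2} {3 1} {4 0}]
}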

File diff suppressed because it is too large

View File

@@ -0,0 +1,959 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"fmt"
"sort"
)
// AlgKind describes the kind of algorithms used for comparing and
// hashing a Type.
type AlgKind int
//go:generate stringer -type AlgKind -trimprefix A
const (
// These values are known by runtime.
ANOEQ AlgKind = iota
AMEM0
AMEM8
AMEM16
AMEM32
AMEM64
AMEM128
ASTRING
AINTER
ANILINTER
AFLOAT32
AFLOAT64
ACPLX64
ACPLX128
// Type can be compared/hashed as regular memory.
AMEM AlgKind = 100
// Type needs special comparison/hashing functions.
ASPECIAL AlgKind = -1
)
// IsComparable reports whether t is a comparable type.
func IsComparable(t *types.Type) bool {
a, _ := algtype1(t)
return a != ANOEQ
}
// IsRegularMemory reports whether t can be compared/hashed as regular memory.
func IsRegularMemory(t *types.Type) bool {
a, _ := algtype1(t)
return a == AMEM
}
// IncomparableField returns an incomparable Field of struct Type t, if any.
func IncomparableField(t *types.Type) *types.Field {
for _, f := range t.FieldSlice() {
if !IsComparable(f.Type) {
return f
}
}
return nil
}
// EqCanPanic reports whether == on type t could panic (has an interface somewhere).
// t must be comparable.
func EqCanPanic(t *types.Type) bool {
switch t.Etype {
default:
return false
case TINTER:
return true
case TARRAY:
return EqCanPanic(t.Elem())
case TSTRUCT:
for _, f := range t.FieldSlice() {
if !f.Sym.IsBlank() && EqCanPanic(f.Type) {
return true
}
}
return false
}
}
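EqCanPanic matters because == on an interface compares dynamic types at run time and panics when the dynamic type is uncomparable, something the compiler cannot always rule out statically. A short demonstration:

package main

import "fmt"

func main() {
	defer func() { fmt.Println("recovered:", recover()) }()
	var a, b interface{} = []int{1}, []int{1}
	fmt.Println(a == b) // panics: comparing uncomparable type []int
}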
// algtype is like algtype1, except it returns the fixed-width AMEMxx variants
// instead of the general AMEM kind when possible.
func algtype(t *types.Type) AlgKind {
a, _ := algtype1(t)
if a == AMEM {
switch t.Width {
case 0:
return AMEM0
case 1:
return AMEM8
case 2:
return AMEM16
case 4:
return AMEM32
case 8:
return AMEM64
case 16:
return AMEM128
}
}
return a
}
// algtype1 returns the AlgKind used for comparing and hashing Type t.
// If it returns ANOEQ, it also returns the component type of t that
// makes it incomparable.
func algtype1(t *types.Type) (AlgKind, *types.Type) {
if t.Broke() {
return AMEM, nil
}
if t.Noalg() {
return ANOEQ, t
}
switch t.Etype {
case TANY, TFORW:
// will be defined later.
return ANOEQ, t
case TINT8, TUINT8, TINT16, TUINT16,
TINT32, TUINT32, TINT64, TUINT64,
TINT, TUINT, TUINTPTR,
TBOOL, TPTR,
TCHAN, TUNSAFEPTR:
return AMEM, nil
case TFUNC, TMAP:
return ANOEQ, t
case TFLOAT32:
return AFLOAT32, nil
case TFLOAT64:
return AFLOAT64, nil
case TCOMPLEX64:
return ACPLX64, nil
case TCOMPLEX128:
return ACPLX128, nil
case TSTRING:
return ASTRING, nil
case TINTER:
if t.IsEmptyInterface() {
return ANILINTER, nil
}
return AINTER, nil
case TSLICE:
return ANOEQ, t
case TARRAY:
a, bad := algtype1(t.Elem())
switch a {
case AMEM:
return AMEM, nil
case ANOEQ:
return ANOEQ, bad
}
switch t.NumElem() {
case 0:
// We checked above that the element type is comparable.
return AMEM, nil
case 1:
// Single-element array is same as its lone element.
return a, nil
}
return ASPECIAL, nil
case TSTRUCT:
fields := t.FieldSlice()
// One-field struct is same as that one field alone.
if len(fields) == 1 && !fields[0].Sym.IsBlank() {
return algtype1(fields[0].Type)
}
ret := AMEM
for i, f := range fields {
// All fields must be comparable.
a, bad := algtype1(f.Type)
if a == ANOEQ {
return ANOEQ, bad
}
// Blank fields, padded fields, fields with non-memory
// equality need special compare.
if a != AMEM || f.Sym.IsBlank() || ispaddedfield(t, i) {
ret = ASPECIAL
}
}
return ret, nil
}
Fatalf("algtype1: unexpected type %v", t)
return 0, nil
}
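One subtlety in the TSTRUCT case: a struct whose fields are all plain memory still gets ASPECIAL if any field is followed by padding, because == compares fields while a memequal would also compare the padding bytes. The sketch below (assuming a 64-bit layout where the int8 is followed by 7 padding bytes) shows two values that are == but not bytewise equal:

package main

import (
	"fmt"
	"unsafe"
)

type padded struct {
	a int8 // followed by 7 bytes of padding before b on 64-bit
	b int64
}

func main() {
	x := padded{a: 1, b: 2}
	y := padded{a: 1, b: 2}
	// Scribble on one of y's padding bytes; its fields are unchanged.
	yb := (*[unsafe.Sizeof(y)]byte)(unsafe.Pointer(&y))
	yb[1] = 0xFF
	xb := (*[unsafe.Sizeof(x)]byte)(unsafe.Pointer(&x))
	fmt.Println(x == y)     // true: == compares fields only
	fmt.Println(*xb == *yb) // false: the raw memory differs
}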
// genhash returns a symbol which is the closure used to compute
// the hash of a value of type t.
// Note: the generated function must match runtime.typehash exactly.
func genhash(t *types.Type) *obj.LSym {
switch algtype(t) {
default:
// genhash is only called for types that have equality
Fatalf("genhash %v", t)
case AMEM0:
return sysClosure("memhash0")
case AMEM8:
return sysClosure("memhash8")
case AMEM16:
return sysClosure("memhash16")
case AMEM32:
return sysClosure("memhash32")
case AMEM64:
return sysClosure("memhash64")
case AMEM128:
return sysClosure("memhash128")
case ASTRING:
return sysClosure("strhash")
case AINTER:
return sysClosure("interhash")
case ANILINTER:
return sysClosure("nilinterhash")
case AFLOAT32:
return sysClosure("f32hash")
case AFLOAT64:
return sysClosure("f64hash")
case ACPLX64:
return sysClosure("c64hash")
case ACPLX128:
return sysClosure("c128hash")
case AMEM:
// For other sizes of plain memory, we build a closure
// that calls memhash_varlen. The size of the memory is
// encoded in the first slot of the closure.
closure := typeLookup(fmt.Sprintf(".hashfunc%d", t.Width)).Linksym()
if len(closure.P) > 0 { // already generated
return closure
}
if memhashvarlen == nil {
memhashvarlen = sysfunc("memhash_varlen")
}
ot := 0
ot = dsymptr(closure, ot, memhashvarlen, 0)
ot = duintptr(closure, ot, uint64(t.Width)) // size encoded in closure
ggloblsym(closure, int32(ot), obj.DUPOK|obj.RODATA)
return closure
case ASPECIAL:
break
}
closure := typesymprefix(".hashfunc", t).Linksym()
if len(closure.P) > 0 { // already generated
return closure
}
// Generate hash functions for subtypes.
// There are cases where we might not use these hashes,
// but in that case they will get dead-code eliminated.
// (And the closure generated by genhash will also get
// dead-code eliminated, as we call the subtype hashers
// directly.)
switch t.Etype {
case types.TARRAY:
genhash(t.Elem())
case types.TSTRUCT:
for _, f := range t.FieldSlice() {
genhash(f.Type)
}
}
sym := typesymprefix(".hash", t)
if Debug.r != 0 {
fmt.Printf("genhash %v %v %v\n", closure, sym, t)
}
lineno = autogeneratedPos // less confusing than end of input
dclcontext = PEXTERN
// func sym(p *T, h uintptr) uintptr
tfn := nod(OTFUNC, nil, nil)
tfn.List.Set2(
namedfield("p", types.NewPtr(t)),
namedfield("h", types.Types[TUINTPTR]),
)
tfn.Rlist.Set1(anonfield(types.Types[TUINTPTR]))
fn := dclfunc(sym, tfn)
np := asNode(tfn.Type.Params().Field(0).Nname)
nh := asNode(tfn.Type.Params().Field(1).Nname)
switch t.Etype {
case types.TARRAY:
// An array of pure memory would be handled by the
// standard algorithm, so the element type must not be
// pure memory.
hashel := hashfor(t.Elem())
n := nod(ORANGE, nil, nod(ODEREF, np, nil))
ni := newname(lookup("i"))
ni.Type = types.Types[TINT]
n.List.Set1(ni)
n.SetColas(true)
colasdefn(n.List.Slice(), n)
ni = n.List.First()
// h = hashel(&p[i], h)
call := nod(OCALL, hashel, nil)
nx := nod(OINDEX, np, ni)
nx.SetBounded(true)
na := nod(OADDR, nx, nil)
call.List.Append(na)
call.List.Append(nh)
n.Nbody.Append(nod(OAS, nh, call))
fn.Nbody.Append(n)
case types.TSTRUCT:
// Walk the struct using memhash for runs of AMEM
// and calling specific hash functions for the others.
for i, fields := 0, t.FieldSlice(); i < len(fields); {
f := fields[i]
// Skip blank fields.
if f.Sym.IsBlank() {
i++
continue
}
// Hash non-memory fields with appropriate hash function.
if !IsRegularMemory(f.Type) {
hashel := hashfor(f.Type)
call := nod(OCALL, hashel, nil)
nx := nodSym(OXDOT, np, f.Sym) // TODO: fields from other packages?
na := nod(OADDR, nx, nil)
call.List.Append(na)
call.List.Append(nh)
fn.Nbody.Append(nod(OAS, nh, call))
i++
continue
}
// Otherwise, hash a maximal length run of raw memory.
size, next := memrun(t, i)
// h = hashel(&p.first, size, h)
hashel := hashmem(f.Type)
call := nod(OCALL, hashel, nil)
nx := nodSym(OXDOT, np, f.Sym) // TODO: fields from other packages?
na := nod(OADDR, nx, nil)
call.List.Append(na)
call.List.Append(nh)
call.List.Append(nodintconst(size))
fn.Nbody.Append(nod(OAS, nh, call))
i = next
}
}
r := nod(ORETURN, nil, nil)
r.List.Append(nh)
fn.Nbody.Append(r)
if Debug.r != 0 {
dumplist("genhash body", fn.Nbody)
}
funcbody()
fn.Func.SetDupok(true)
fn = typecheck(fn, ctxStmt)
Curfn = fn
typecheckslice(fn.Nbody.Slice(), ctxStmt)
Curfn = nil
if debug_dclstack != 0 {
testdclstack()
}
fn.Func.SetNilCheckDisabled(true)
xtop = append(xtop, fn)
// Build closure. It doesn't close over any variables, so
// it contains just the function pointer.
dsymptr(closure, 0, sym.Linksym(), 0)
ggloblsym(closure, int32(Widthptr), obj.DUPOK|obj.RODATA)
return closure
}
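// Editor's sketch (an assumption drawn from the code above, not original
// commentary): the closures genhash returns are plain read-only data. For
// the AMEM case the closure is two words,
//
//	word 0: pointer to runtime.memhash_varlen
//	word 1: t.Width, the byte count memhash_varlen reads from its closure
//
// while the ASPECIAL case emits a one-word closure holding only a pointer
// to the generated .hash function.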
func hashfor(t *types.Type) *Node {
var sym *types.Sym
switch a, _ := algtype1(t); a {
case AMEM:
Fatalf("hashfor with AMEM type")
case AINTER:
sym = Runtimepkg.Lookup("interhash")
case ANILINTER:
sym = Runtimepkg.Lookup("nilinterhash")
case ASTRING:
sym = Runtimepkg.Lookup("strhash")
case AFLOAT32:
sym = Runtimepkg.Lookup("f32hash")
case AFLOAT64:
sym = Runtimepkg.Lookup("f64hash")
case ACPLX64:
sym = Runtimepkg.Lookup("c64hash")
case ACPLX128:
sym = Runtimepkg.Lookup("c128hash")
default:
// Note: the caller of hashfor ensured that this symbol
// exists and has a body by calling genhash for t.
sym = typesymprefix(".hash", t)
}
n := newname(sym)
setNodeNameFunc(n)
n.Type = functype(nil, []*Node{
anonfield(types.NewPtr(t)),
anonfield(types.Types[TUINTPTR]),
}, []*Node{
anonfield(types.Types[TUINTPTR]),
})
return n
}
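// Editor's illustration (hypothetical user-level analogue, not original
// source): every helper hashfor returns shares the shape
//
//	func hash(p *T, h uintptr) uintptr
//
// threading the seed h through each component, e.g. for a two-string struct:
//
//	func hashT(p *T, h uintptr) uintptr {
//		h = strhash(&p.a, h)
//		h = strhash(&p.b, h)
//		return h
//	}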
// sysClosure returns a closure which will call the
// given runtime function (with no closed-over variables).
func sysClosure(name string) *obj.LSym {
s := sysvar(name + "·f")
if len(s.P) == 0 {
f := sysfunc(name)
dsymptr(s, 0, f, 0)
ggloblsym(s, int32(Widthptr), obj.DUPOK|obj.RODATA)
}
return s
}
// geneq returns a symbol which is the closure used to compute
// equality for two objects of type t.
func geneq(t *types.Type) *obj.LSym {
switch algtype(t) {
case ANOEQ:
// The runtime will panic if it tries to compare
// a type with a nil equality function.
return nil
case AMEM0:
return sysClosure("memequal0")
case AMEM8:
return sysClosure("memequal8")
case AMEM16:
return sysClosure("memequal16")
case AMEM32:
return sysClosure("memequal32")
case AMEM64:
return sysClosure("memequal64")
case AMEM128:
return sysClosure("memequal128")
case ASTRING:
return sysClosure("strequal")
case AINTER:
return sysClosure("interequal")
case ANILINTER:
return sysClosure("nilinterequal")
case AFLOAT32:
return sysClosure("f32equal")
case AFLOAT64:
return sysClosure("f64equal")
case ACPLX64:
return sysClosure("c64equal")
case ACPLX128:
return sysClosure("c128equal")
case AMEM:
// make equality closure. The size of the type
// is encoded in the closure.
closure := typeLookup(fmt.Sprintf(".eqfunc%d", t.Width)).Linksym()
if len(closure.P) != 0 {
return closure
}
if memequalvarlen == nil {
memequalvarlen = sysvar("memequal_varlen") // asm func
}
ot := 0
ot = dsymptr(closure, ot, memequalvarlen, 0)
ot = duintptr(closure, ot, uint64(t.Width))
ggloblsym(closure, int32(ot), obj.DUPOK|obj.RODATA)
return closure
case ASPECIAL:
break
}
closure := typesymprefix(".eqfunc", t).Linksym()
if len(closure.P) > 0 { // already generated
return closure
}
sym := typesymprefix(".eq", t)
if Debug.r != 0 {
fmt.Printf("geneq %v\n", t)
}
// Autogenerate code for equality of structs and arrays.
lineno = autogeneratedPos // less confusing than end of input
dclcontext = PEXTERN
// func sym(p, q *T) bool
tfn := nod(OTFUNC, nil, nil)
tfn.List.Set2(
namedfield("p", types.NewPtr(t)),
namedfield("q", types.NewPtr(t)),
)
tfn.Rlist.Set1(namedfield("r", types.Types[TBOOL]))
fn := dclfunc(sym, tfn)
np := asNode(tfn.Type.Params().Field(0).Nname)
nq := asNode(tfn.Type.Params().Field(1).Nname)
nr := asNode(tfn.Type.Results().Field(0).Nname)
// Label to jump to if an equality test fails.
neq := autolabel(".neq")
// We reach here only for types that have equality but
// cannot be handled by the standard algorithms,
// so t must be either an array or a struct.
switch t.Etype {
default:
Fatalf("geneq %v", t)
case TARRAY:
nelem := t.NumElem()
// checkAll generates code to check the equality of all array elements.
// If unroll is greater than nelem, checkAll generates:
//
// if eq(p[0], q[0]) && eq(p[1], q[1]) && ... {
// } else {
// return
// }
//
// And so on.
//
// Otherwise it generates:
//
// for i := 0; i < nelem; i++ {
// if eq(p[i], q[i]) {
// } else {
// goto neq
// }
// }
//
// TODO(josharian): consider doing some loop unrolling
// for larger nelem as well, processing a few elements at a time in a loop.
checkAll := func(unroll int64, last bool, eq func(pi, qi *Node) *Node) {
// checkIdx generates a node to check for equality at index i.
checkIdx := func(i *Node) *Node {
// pi := p[i]
pi := nod(OINDEX, np, i)
pi.SetBounded(true)
pi.Type = t.Elem()
// qi := q[i]
qi := nod(OINDEX, nq, i)
qi.SetBounded(true)
qi.Type = t.Elem()
return eq(pi, qi)
}
if nelem <= unroll {
if last {
// Do last comparison in a different manner.
nelem--
}
// Generate a series of checks.
for i := int64(0); i < nelem; i++ {
// if check {} else { goto neq }
nif := nod(OIF, checkIdx(nodintconst(i)), nil)
nif.Rlist.Append(nodSym(OGOTO, nil, neq))
fn.Nbody.Append(nif)
}
if last {
fn.Nbody.Append(nod(OAS, nr, checkIdx(nodintconst(nelem))))
}
} else {
// Generate a for loop.
// for i := 0; i < nelem; i++
i := temp(types.Types[TINT])
init := nod(OAS, i, nodintconst(0))
cond := nod(OLT, i, nodintconst(nelem))
post := nod(OAS, i, nod(OADD, i, nodintconst(1)))
loop := nod(OFOR, cond, post)
loop.Ninit.Append(init)
// if eq(pi, qi) {} else { goto neq }
nif := nod(OIF, checkIdx(i), nil)
nif.Rlist.Append(nodSym(OGOTO, nil, neq))
loop.Nbody.Append(nif)
fn.Nbody.Append(loop)
if last {
fn.Nbody.Append(nod(OAS, nr, nodbool(true)))
}
}
}
switch t.Elem().Etype {
case TSTRING:
// Do two loops. First, check that all the lengths match (cheap).
// Second, check that all the contents match (expensive).
// TODO: when the array size is small, unroll the length match checks.
checkAll(3, false, func(pi, qi *Node) *Node {
// Compare lengths.
eqlen, _ := eqstring(pi, qi)
return eqlen
})
checkAll(1, true, func(pi, qi *Node) *Node {
// Compare contents.
_, eqmem := eqstring(pi, qi)
return eqmem
})
case TFLOAT32, TFLOAT64:
checkAll(2, true, func(pi, qi *Node) *Node {
// p[i] == q[i]
return nod(OEQ, pi, qi)
})
// TODO: pick apart structs, do them piecemeal too
default:
checkAll(1, true, func(pi, qi *Node) *Node {
// p[i] == q[i]
return nod(OEQ, pi, qi)
})
}
case TSTRUCT:
// Build a list of conditions to satisfy.
// The conditions are a list-of-lists. Conditions are reorderable
// within each inner list. The outer lists must be evaluated in order.
var conds [][]*Node
conds = append(conds, []*Node{})
and := func(n *Node) {
i := len(conds) - 1
conds[i] = append(conds[i], n)
}
// Walk the struct using memequal for runs of AMEM
// and calling specific equality tests for the others.
for i, fields := 0, t.FieldSlice(); i < len(fields); {
f := fields[i]
// Skip blank-named fields.
if f.Sym.IsBlank() {
i++
continue
}
// Compare non-memory fields with field equality.
if !IsRegularMemory(f.Type) {
if EqCanPanic(f.Type) {
// Enforce ordering by starting a new set of reorderable conditions.
conds = append(conds, []*Node{})
}
p := nodSym(OXDOT, np, f.Sym)
q := nodSym(OXDOT, nq, f.Sym)
switch {
case f.Type.IsString():
eqlen, eqmem := eqstring(p, q)
and(eqlen)
and(eqmem)
default:
and(nod(OEQ, p, q))
}
if EqCanPanic(f.Type) {
// Also enforce ordering after something that can panic.
conds = append(conds, []*Node{})
}
i++
continue
}
// Find maximal length run of memory-only fields.
size, next := memrun(t, i)
// TODO(rsc): All the calls to newname are wrong for
// cross-package unexported fields.
if s := fields[i:next]; len(s) <= 2 {
// Two or fewer fields: use plain field equality.
for _, f := range s {
and(eqfield(np, nq, f.Sym))
}
} else {
// More than two fields: use memequal.
and(eqmem(np, nq, f.Sym, size))
}
i = next
}
// Sort conditions to put runtime calls last.
// Preserve the rest of the ordering.
var flatConds []*Node
for _, c := range conds {
isCall := func(n *Node) bool {
return n.Op == OCALL || n.Op == OCALLFUNC
}
sort.SliceStable(c, func(i, j int) bool {
return !isCall(c[i]) && isCall(c[j])
})
flatConds = append(flatConds, c...)
}
if len(flatConds) == 0 {
fn.Nbody.Append(nod(OAS, nr, nodbool(true)))
} else {
for _, c := range flatConds[:len(flatConds)-1] {
// if cond {} else { goto neq }
n := nod(OIF, c, nil)
n.Rlist.Append(nodSym(OGOTO, nil, neq))
fn.Nbody.Append(n)
}
fn.Nbody.Append(nod(OAS, nr, flatConds[len(flatConds)-1]))
}
}
// ret:
// return
ret := autolabel(".ret")
fn.Nbody.Append(nodSym(OLABEL, nil, ret))
fn.Nbody.Append(nod(ORETURN, nil, nil))
// neq:
// r = false
// return (or goto ret)
fn.Nbody.Append(nodSym(OLABEL, nil, neq))
fn.Nbody.Append(nod(OAS, nr, nodbool(false)))
if EqCanPanic(t) || hasCall(fn) {
// Epilogue is large, so share it with the equal case.
fn.Nbody.Append(nodSym(OGOTO, nil, ret))
} else {
// Epilogue is small, so don't bother sharing.
fn.Nbody.Append(nod(ORETURN, nil, nil))
}
// TODO(khr): the epilogue size detection condition above isn't perfect.
// We should really do a generic CL that shares epilogues across
// the board. See #24936.
if Debug.r != 0 {
dumplist("geneq body", fn.Nbody)
}
funcbody()
fn.Func.SetDupok(true)
fn = typecheck(fn, ctxStmt)
Curfn = fn
typecheckslice(fn.Nbody.Slice(), ctxStmt)
Curfn = nil
if debug_dclstack != 0 {
testdclstack()
}
// Disable checknils while compiling this code.
// We are comparing a struct or an array,
// neither of which can be nil, and our comparisons
// are shallow.
fn.Func.SetNilCheckDisabled(true)
xtop = append(xtop, fn)
// Generate a closure which points at the function we just generated.
dsymptr(closure, 0, sym.Linksym(), 0)
ggloblsym(closure, int32(Widthptr), obj.DUPOK|obj.RODATA)
return closure
}
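// Editor's sketch of the shape geneq emits (hypothetical type, pseudocode
// rather than verbatim output): for T struct{ a, b int; s string } the
// generated .eq is roughly
//
//	func eqT(p, q *T) (r bool) {
//		if p.a == q.a {
//		} else {
//			goto neq
//		}
//		if p.b == q.b {
//		} else {
//			goto neq
//		}
//		if len(p.s) == len(q.s) {
//		} else {
//			goto neq
//		}
//		r = memequal(p.s.ptr, q.s.ptr, len(p.s))
//	ret:
//		return
//	neq:
//		r = false
//		goto ret
//	}
//
// Note how the cheap comparisons come first and the memequal call last,
// matching the sorting of conds above.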
func hasCall(n *Node) bool {
if n.Op == OCALL || n.Op == OCALLFUNC {
return true
}
if n.Left != nil && hasCall(n.Left) {
return true
}
if n.Right != nil && hasCall(n.Right) {
return true
}
for _, x := range n.Ninit.Slice() {
if hasCall(x) {
return true
}
}
for _, x := range n.Nbody.Slice() {
if hasCall(x) {
return true
}
}
for _, x := range n.List.Slice() {
if hasCall(x) {
return true
}
}
for _, x := range n.Rlist.Slice() {
if hasCall(x) {
return true
}
}
return false
}
// eqfield returns the node
// p.field == q.field
func eqfield(p *Node, q *Node, field *types.Sym) *Node {
nx := nodSym(OXDOT, p, field)
ny := nodSym(OXDOT, q, field)
ne := nod(OEQ, nx, ny)
return ne
}
// eqstring returns the nodes
// len(s) == len(t)
// and
// memequal(s.ptr, t.ptr, len(s))
// which can be used to construct string equality comparison.
// eqlen must be evaluated before eqmem, and shortcircuiting is required.
func eqstring(s, t *Node) (eqlen, eqmem *Node) {
s = conv(s, types.Types[TSTRING])
t = conv(t, types.Types[TSTRING])
sptr := nod(OSPTR, s, nil)
tptr := nod(OSPTR, t, nil)
slen := conv(nod(OLEN, s, nil), types.Types[TUINTPTR])
tlen := conv(nod(OLEN, t, nil), types.Types[TUINTPTR])
fn := syslook("memequal")
fn = substArgTypes(fn, types.Types[TUINT8], types.Types[TUINT8])
call := nod(OCALL, fn, nil)
call.List.Append(sptr, tptr, slen.copy())
call = typecheck(call, ctxExpr|ctxMultiOK)
cmp := nod(OEQ, slen, tlen)
cmp = typecheck(cmp, ctxExpr)
cmp.Type = types.Types[TBOOL]
return cmp, call
}
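// Editor's note (sketch): callers combine the two results so that the cheap
// length test short-circuits the memequal call; the emitted comparison
// behaves like
//
//	len(s) == len(t) && memequal(s.ptr, t.ptr, len(s))
//
// where s.ptr and t.ptr stand for the string data pointers obtained via OSPTR.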
// eqinterface returns the nodes
// s.tab == t.tab (or s.typ == t.typ, as appropriate)
// and
// ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate)
// which can be used to construct interface equality comparison.
// eqtab must be evaluated before eqdata, and shortcircuiting is required.
func eqinterface(s, t *Node) (eqtab, eqdata *Node) {
if !types.Identical(s.Type, t.Type) {
Fatalf("eqinterface %v %v", s.Type, t.Type)
}
// func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool)
// func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool)
var fn *Node
if s.Type.IsEmptyInterface() {
fn = syslook("efaceeq")
} else {
fn = syslook("ifaceeq")
}
stab := nod(OITAB, s, nil)
ttab := nod(OITAB, t, nil)
sdata := nod(OIDATA, s, nil)
tdata := nod(OIDATA, t, nil)
sdata.Type = types.Types[TUNSAFEPTR]
tdata.Type = types.Types[TUNSAFEPTR]
sdata.SetTypecheck(1)
tdata.SetTypecheck(1)
call := nod(OCALL, fn, nil)
call.List.Append(stab, sdata, tdata)
call = typecheck(call, ctxExpr|ctxMultiOK)
cmp := nod(OEQ, stab, ttab)
cmp = typecheck(cmp, ctxExpr)
cmp.Type = types.Types[TBOOL]
return cmp, call
}
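// Editor's note (sketch): as with eqstring, callers combine the results so
// the emitted interface comparison behaves like
//
//	s.tab == t.tab && ifaceeq(s.tab, s.data, t.data)
//
// with typ/efaceeq substituted when the interfaces are empty.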
// eqmem returns the node
// memequal(&p.field, &q.field [, size])
func eqmem(p *Node, q *Node, field *types.Sym, size int64) *Node {
nx := nod(OADDR, nodSym(OXDOT, p, field), nil)
ny := nod(OADDR, nodSym(OXDOT, q, field), nil)
nx = typecheck(nx, ctxExpr)
ny = typecheck(ny, ctxExpr)
fn, needsize := eqmemfunc(size, nx.Type.Elem())
call := nod(OCALL, fn, nil)
call.List.Append(nx)
call.List.Append(ny)
if needsize {
call.List.Append(nodintconst(size))
}
return call
}
func eqmemfunc(size int64, t *types.Type) (fn *Node, needsize bool) {
switch size {
default:
fn = syslook("memequal")
needsize = true
case 1, 2, 4, 8, 16:
buf := fmt.Sprintf("memequal%d", int(size)*8)
fn = syslook(buf)
}
fn = substArgTypes(fn, t, t)
return fn, needsize
}
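// Editor's illustration (hypothetical sizes): eqmemfunc picks a fixed-size
// helper when one exists and otherwise the generic variant, which needs an
// explicit length argument:
//
//	eqmemfunc(8, t)  // memequal64(&p.f, &q.f)
//	eqmemfunc(24, t) // memequal(&p.f, &q.f, 24)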
// memrun finds runs of struct fields for which memory-only algs are appropriate.
// t is the parent struct type, and start is the field index at which to start the run.
// size is the length in bytes of the memory included in the run.
// next is the index just after the end of the memory run.
func memrun(t *types.Type, start int) (size int64, next int) {
next = start
for {
next++
if next == t.NumFields() {
break
}
// Stop run after a padded field.
if ispaddedfield(t, next-1) {
break
}
// Also, stop before a blank or non-memory field.
if f := t.Field(next); f.Sym.IsBlank() || !IsRegularMemory(f.Type) {
break
}
}
return t.Field(next-1).End() - t.Field(start).Offset, next
}
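// Editor's illustration (hypothetical struct): for
//
//	type T struct {
//		a, b int32 // one contiguous run of plain memory
//		_    int32 // blank field ends the run
//		c    int32
//	}
//
// memrun(T, 0) yields size 8 (covering a and b) with next pointing at the
// blank field; runs likewise stop after padding and before non-memory fields.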
// ispaddedfield reports whether the i'th field of struct type t is followed
// by padding.
func ispaddedfield(t *types.Type, i int) bool {
if !t.IsStruct() {
Fatalf("ispaddedfield called non-struct %v", t)
}
end := t.Width
if i+1 < t.NumFields() {
end = t.Field(i + 1).Offset
}
return t.Field(i).End() != end
}
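// Editor's illustration (hypothetical struct): in
//
//	type P struct {
//		a byte  // offset 0, size 1
//		b int32 // offset 4, after 3 bytes of padding
//	}
//
// ispaddedfield(P, 0) reports true because a ends at offset 1 while b starts
// at offset 4; such padding is what forces ASPECIAL equality and hashing above.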

View File

@@ -1,6 +1,6 @@
// Code generated by "stringer -type AlgKind -trimprefix A alg.go"; DO NOT EDIT.
// Code generated by "stringer -type AlgKind -trimprefix A"; DO NOT EDIT.
package types
package gc
import "strconv"

View File

@@ -2,64 +2,18 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package types
package gc
import (
"bytes"
"cmd/compile/internal/types"
"fmt"
"sort"
"cmd/compile/internal/base"
"cmd/internal/src"
)
var PtrSize int
var RegSize int
// Slices in the runtime are represented by three components:
//
// type slice struct {
// ptr unsafe.Pointer
// len int
// cap int
// }
//
// Strings in the runtime are represented by two components:
//
// type string struct {
// ptr unsafe.Pointer
// len int
// }
//
// These variables are the offsets of fields and sizes of these structs.
var (
SlicePtrOffset int64
SliceLenOffset int64
SliceCapOffset int64
SliceSize int64
StringSize int64
)
var SkipSizeForTracing bool
// typePos returns the position associated with t.
// This is where t was declared or where it appeared as a type expression.
func typePos(t *Type) src.XPos {
if pos := t.Pos(); pos.IsKnown() {
return pos
}
base.Fatalf("bad type: %v", t)
panic("unreachable")
}
// MaxWidth is the maximum size of a value on the target architecture.
var MaxWidth int64
// CalcSizeDisabled indicates whether it is safe
// to calculate Types' widths and alignments. See CalcSize.
var CalcSizeDisabled bool
// sizeCalculationDisabled indicates whether it is safe
// to calculate Types' widths and alignments. See dowidth.
var sizeCalculationDisabled bool
// machine size and rounding alignment is dictated around
// the size of a pointer, set in betypeinit (see ../amd64/galign.go).
@@ -67,25 +21,25 @@ var defercalc int
func Rnd(o int64, r int64) int64 {
if r < 1 || r > 8 || r&(r-1) != 0 {
base.Fatalf("rnd %d", r)
Fatalf("rnd %d", r)
}
return (o + r - 1) &^ (r - 1)
}
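// Editor's note: Rnd rounds o up to a multiple of the power-of-two r, e.g.
//
//	Rnd(10, 4) == 12
//	Rnd(16, 8) == 16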
// expandiface computes the method set for interface type t by
// expanding embedded interfaces.
func expandiface(t *Type) {
seen := make(map[*Sym]*Field)
var methods []*Field
func expandiface(t *types.Type) {
seen := make(map[*types.Sym]*types.Field)
var methods []*types.Field
addMethod := func(m *Field, explicit bool) {
addMethod := func(m *types.Field, explicit bool) {
switch prev := seen[m.Sym]; {
case prev == nil:
seen[m.Sym] = m
case AllowsGoVersion(t.Pkg(), 1, 14) && !explicit && Identical(m.Type, prev.Type):
case langSupported(1, 14, t.Pkg()) && !explicit && types.Identical(m.Type, prev.Type):
return
default:
base.ErrorfAt(m.Pos, "duplicate method %s", m.Sym.Name)
yyerrorl(m.Pos, "duplicate method %s", m.Sym.Name)
}
methods = append(methods, m)
}
@@ -95,7 +49,7 @@ func expandiface(t *Type) {
continue
}
CheckSize(m.Type)
checkwidth(m.Type)
addMethod(m, true)
}
@@ -105,7 +59,7 @@ func expandiface(t *Type) {
}
if !m.Type.IsInterface() {
base.ErrorfAt(m.Pos, "interface contains embedded non-interface %v", m.Type)
yyerrorl(m.Pos, "interface contains embedded non-interface %v", m.Type)
m.SetBroke(true)
t.SetBroke(true)
// Add to fields so that error messages
@@ -120,29 +74,30 @@ func expandiface(t *Type) {
// (including broken ones, if any) and add to t's
// method set.
for _, t1 := range m.Type.Fields().Slice() {
// Use m.Pos rather than t1.Pos to preserve embedding position.
f := NewField(m.Pos, t1.Sym, t1.Type)
f := types.NewField()
f.Pos = m.Pos // preserve embedding position
f.Sym = t1.Sym
f.Type = t1.Type
f.SetBroke(t1.Broke())
addMethod(f, false)
}
}
sort.Sort(MethodsByName(methods))
sort.Sort(methcmp(methods))
if int64(len(methods)) >= MaxWidth/int64(PtrSize) {
base.ErrorfAt(typePos(t), "interface too large")
if int64(len(methods)) >= thearch.MAXWIDTH/int64(Widthptr) {
yyerrorl(typePos(t), "interface too large")
}
for i, m := range methods {
m.Offset = int64(i) * int64(PtrSize)
m.Offset = int64(i) * int64(Widthptr)
}
// Access fields directly to avoid recursively calling CalcSize
// Access fields directly to avoid recursively calling dowidth
// within Type.Fields().
t.Extra.(*Interface).Fields.Set(methods)
t.Extra.(*types.Interface).Fields.Set(methods)
}
func calcStructOffset(errtype *Type, t *Type, o int64, flag int) int64 {
// flag is 0 (receiver), 1 (actual struct), or RegSize (in/out parameters)
isStruct := flag == 1
func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 {
starto := o
maxalign := int32(flag)
if maxalign < 1 {
@@ -156,43 +111,46 @@ func calcStructOffset(errtype *Type, t *Type, o int64, flag int) int64 {
continue
}
CalcSize(f.Type)
dowidth(f.Type)
if int32(f.Type.Align) > maxalign {
maxalign = int32(f.Type.Align)
}
if f.Type.Align > 0 {
o = Rnd(o, int64(f.Type.Align))
}
if isStruct { // For receiver/args/results, depends on ABI
f.Offset = o
}
if f.Nname != nil {
f.Offset = o
if n := asNode(f.Nname); n != nil {
// addrescapes has similar code to update these offsets.
// Usually addrescapes runs after calcStructOffset,
// Usually addrescapes runs after widstruct,
// in which case we could drop this,
// but function closure functions are the exception.
// NOTE(rsc): This comment may be stale.
// It's possible the ordering has changed and this is
// now the common case. I'm not sure.
f.Nname.(VarObject).RecordFrameOffset(o)
if n.Name.Param.Stackcopy != nil {
n.Name.Param.Stackcopy.Xoffset = o
n.Xoffset = 0
} else {
n.Xoffset = o
}
}
w := f.Type.Width
if w < 0 {
base.Fatalf("invalid width %d", f.Type.Width)
Fatalf("invalid width %d", f.Type.Width)
}
if w == 0 {
lastzero = o
}
o += w
maxwidth := MaxWidth
maxwidth := thearch.MAXWIDTH
// On 32-bit systems, reflect tables impose an additional constraint
// that each field start offset must fit in 31 bits.
if maxwidth < 1<<32 {
maxwidth = 1<<31 - 1
}
if o >= maxwidth {
base.ErrorfAt(typePos(errtype), "type %L too large", errtype)
yyerrorl(typePos(errtype), "type %L too large", errtype)
o = 8 // small but nonzero
}
}
@@ -224,22 +182,15 @@ func calcStructOffset(errtype *Type, t *Type, o int64, flag int) int64 {
// path points to a slice used for tracking the sequence of types
// visited. Using a pointer to a slice allows the slice capacity to
// grow and limit reallocations.
func findTypeLoop(t *Type, path *[]*Type) bool {
func findTypeLoop(t *types.Type, path *[]*types.Type) bool {
// We implement a simple DFS loop-finding algorithm. This
// could be faster, but type cycles are rare.
if t.Sym() != nil {
if t.Sym != nil {
// Declared type. Check for loops and otherwise
// recurse on the type expression used in the type
// declaration.
// Type imported from package, so it can't be part of
// a type loop (otherwise that package should have
// failed to compile).
if t.Sym().Pkg != LocalPkg {
return false
}
for i, x := range *path {
if x == t {
*path = (*path)[i:]
@@ -248,14 +199,14 @@ func findTypeLoop(t *Type, path *[]*Type) bool {
}
*path = append(*path, t)
if findTypeLoop(t.Obj().(TypeObject).TypeDefn(), path) {
if p := asNode(t.Nod).Name.Param; p != nil && findTypeLoop(p.Ntype.Type, path) {
return true
}
*path = (*path)[:len(*path)-1]
} else {
// Anonymous type. Recurse on contained types.
switch t.Kind() {
switch t.Etype {
case TARRAY:
if findTypeLoop(t.Elem(), path) {
return true
@@ -280,14 +231,14 @@ func findTypeLoop(t *Type, path *[]*Type) bool {
return false
}
func reportTypeLoop(t *Type) {
func reportTypeLoop(t *types.Type) {
if t.Broke() {
return
}
var l []*Type
var l []*types.Type
if !findTypeLoop(t, &l) {
base.Fatalf("failed to find type loop for: %v", t)
Fatalf("failed to find type loop for: %v", t)
}
// Rotate loop so that the earliest type declaration is first.
@@ -302,26 +253,25 @@ func reportTypeLoop(t *Type) {
var msg bytes.Buffer
fmt.Fprintf(&msg, "invalid recursive type %v\n", l[0])
for _, t := range l {
fmt.Fprintf(&msg, "\t%v: %v refers to\n", base.FmtPos(typePos(t)), t)
fmt.Fprintf(&msg, "\t%v: %v refers to\n", linestr(typePos(t)), t)
t.SetBroke(true)
}
fmt.Fprintf(&msg, "\t%v: %v", base.FmtPos(typePos(l[0])), l[0])
base.ErrorfAt(typePos(l[0]), msg.String())
fmt.Fprintf(&msg, "\t%v: %v", linestr(typePos(l[0])), l[0])
yyerrorl(typePos(l[0]), msg.String())
}
// CalcSize calculates and stores the size and alignment for t.
// If CalcSizeDisabled is set, and the size/alignment
// dowidth calculates and stores the size and alignment for t.
// If sizeCalculationDisabled is set, and the size/alignment
// have not already been calculated, it calls Fatal.
// This is used to prevent data races in the back end.
func CalcSize(t *Type) {
// Calling CalcSize when typecheck tracing enabled is not safe.
func dowidth(t *types.Type) {
// Calling dowidth when typecheck tracing enabled is not safe.
// See issue #33658.
if base.EnableTrace && SkipSizeForTracing {
if enableTrace && skipDowidthForTracing {
return
}
if PtrSize == 0 {
// Assume this is a test.
return
if Widthptr == 0 {
Fatalf("dowidth without betypeinit")
}
if t == nil {
@@ -339,13 +289,13 @@ func CalcSize(t *Type) {
return
}
if CalcSizeDisabled {
if sizeCalculationDisabled {
if t.Broke() {
// break infinite recursion from Fatal call below
return
}
t.SetBroke(true)
base.Fatalf("width not calculated: %v", t)
Fatalf("width not calculated: %v", t)
}
// break infinite recursion if the broken recursive type
@@ -354,33 +304,33 @@ func CalcSize(t *Type) {
return
}
// defer CheckSize calls until after we're done
DeferCheckSize()
// defer checkwidth calls until after we're done
defercheckwidth()
lno := base.Pos
if pos := t.Pos(); pos.IsKnown() {
base.Pos = pos
lno := lineno
if asNode(t.Nod) != nil {
lineno = asNode(t.Nod).Pos
}
t.Width = -2
t.Align = 0 // 0 means use t.Width, below
et := t.Kind()
et := t.Etype
switch et {
case TFUNC, TCHAN, TMAP, TSTRING:
break
// SimType == 0 during bootstrap
// simtype == 0 during bootstrap
default:
if SimType[t.Kind()] != 0 {
et = SimType[t.Kind()]
if simtype[t.Etype] != 0 {
et = simtype[t.Etype]
}
}
var w int64
switch et {
default:
base.Fatalf("CalcSize: unknown type: %v", t)
Fatalf("dowidth: unknown type: %v", t)
// compiler-specific stuff
case TINT8, TUINT8, TBOOL:
@@ -395,7 +345,7 @@ func CalcSize(t *Type) {
case TINT64, TUINT64, TFLOAT64:
w = 8
t.Align = uint8(RegSize)
t.Align = uint8(Widthreg)
case TCOMPLEX64:
w = 8
@@ -403,68 +353,68 @@ func CalcSize(t *Type) {
case TCOMPLEX128:
w = 16
t.Align = uint8(RegSize)
t.Align = uint8(Widthreg)
case TPTR:
w = int64(PtrSize)
CheckSize(t.Elem())
w = int64(Widthptr)
checkwidth(t.Elem())
case TUNSAFEPTR:
w = int64(PtrSize)
w = int64(Widthptr)
case TINTER: // implemented as 2 pointers
w = 2 * int64(PtrSize)
t.Align = uint8(PtrSize)
w = 2 * int64(Widthptr)
t.Align = uint8(Widthptr)
expandiface(t)
case TCHAN: // implemented as pointer
w = int64(PtrSize)
w = int64(Widthptr)
CheckSize(t.Elem())
checkwidth(t.Elem())
// make fake type to check later to
// trigger channel argument check.
t1 := NewChanArgs(t)
CheckSize(t1)
t1 := types.NewChanArgs(t)
checkwidth(t1)
case TCHANARGS:
t1 := t.ChanArgs()
CalcSize(t1) // just in case
dowidth(t1) // just in case
if t1.Elem().Width >= 1<<16 {
base.ErrorfAt(typePos(t1), "channel element type too large (>64kB)")
yyerrorl(typePos(t1), "channel element type too large (>64kB)")
}
w = 1 // anything will do
case TMAP: // implemented as pointer
w = int64(PtrSize)
CheckSize(t.Elem())
CheckSize(t.Key())
w = int64(Widthptr)
checkwidth(t.Elem())
checkwidth(t.Key())
case TFORW: // should have been filled in
reportTypeLoop(t)
w = 1 // anything will do
case TANY:
// not a real type; should be replaced before use.
base.Fatalf("CalcSize any")
// dummy type; should be replaced before use.
Fatalf("dowidth any")
case TSTRING:
if StringSize == 0 {
base.Fatalf("early CalcSize string")
if sizeofString == 0 {
Fatalf("early dowidth string")
}
w = StringSize
t.Align = uint8(PtrSize)
w = sizeofString
t.Align = uint8(Widthptr)
case TARRAY:
if t.Elem() == nil {
break
}
CalcSize(t.Elem())
dowidth(t.Elem())
if t.Elem().Width != 0 {
cap := (uint64(MaxWidth) - 1) / uint64(t.Elem().Width)
cap := (uint64(thearch.MAXWIDTH) - 1) / uint64(t.Elem().Width)
if uint64(t.NumElem()) > cap {
base.ErrorfAt(typePos(t), "type %L larger than address space", t)
yyerrorl(typePos(t), "type %L larger than address space", t)
}
}
w = t.NumElem() * t.Elem().Width
@@ -474,67 +424,55 @@ func CalcSize(t *Type) {
if t.Elem() == nil {
break
}
w = SliceSize
CheckSize(t.Elem())
t.Align = uint8(PtrSize)
w = sizeofSlice
checkwidth(t.Elem())
t.Align = uint8(Widthptr)
case TSTRUCT:
if t.IsFuncArgStruct() {
base.Fatalf("CalcSize fn struct %v", t)
Fatalf("dowidth fn struct %v", t)
}
w = calcStructOffset(t, t, 0, 1)
w = widstruct(t, t, 0, 1)
// make fake type to check later to
// trigger function argument computation.
case TFUNC:
t1 := NewFuncArgs(t)
CheckSize(t1)
w = int64(PtrSize) // width of func type is pointer
t1 := types.NewFuncArgs(t)
checkwidth(t1)
w = int64(Widthptr) // width of func type is pointer
// function is 3 concatenated structures;
// compute their widths as side-effect.
case TFUNCARGS:
t1 := t.FuncArgs()
w = calcStructOffset(t1, t1.Recvs(), 0, 0)
w = calcStructOffset(t1, t1.Params(), w, RegSize)
w = calcStructOffset(t1, t1.Results(), w, RegSize)
t1.Extra.(*Func).Argwid = w
if w%int64(RegSize) != 0 {
base.Warn("bad type %v %d\n", t1, w)
w = widstruct(t1, t1.Recvs(), 0, 0)
w = widstruct(t1, t1.Params(), w, Widthreg)
w = widstruct(t1, t1.Results(), w, Widthreg)
t1.Extra.(*types.Func).Argwid = w
if w%int64(Widthreg) != 0 {
Warn("bad type %v %d\n", t1, w)
}
t.Align = 1
case TTYPEPARAM:
// TODO(danscales) - remove when we eliminate the need
// to do CalcSize in noder2 (which shouldn't be needed in the noder)
w = int64(PtrSize)
}
if PtrSize == 4 && w != int64(int32(w)) {
base.ErrorfAt(typePos(t), "type %v too large", t)
if Widthptr == 4 && w != int64(int32(w)) {
yyerrorl(typePos(t), "type %v too large", t)
}
t.Width = w
if t.Align == 0 {
if w == 0 || w > 8 || w&(w-1) != 0 {
base.Fatalf("invalid alignment for %v", t)
Fatalf("invalid alignment for %v", t)
}
t.Align = uint8(w)
}
base.Pos = lno
lineno = lno
ResumeCheckSize()
resumecheckwidth()
}
// CalcStructSize calculates the size of s,
// filling in s.Width and s.Align,
// even if size calculation is otherwise disabled.
func CalcStructSize(s *Type) {
s.Width = calcStructOffset(s, s, 0, 1) // sets align
}
// when a type's width should be known, we call CheckSize
// when a type's width should be known, we call checkwidth
// to compute it. during a declaration like
//
// type T *struct { next T }
@@ -543,16 +481,16 @@ func CalcStructSize(s *Type) {
// until after T has been initialized to be a pointer to that struct.
// similarly, during import processing structs may be used
// before their definition. in those situations, calling
// DeferCheckSize() stops width calculations until
// ResumeCheckSize() is called, at which point all the
// CalcSizes that were deferred are executed.
// CalcSize should only be called when the type's size
// is needed immediately. CheckSize makes sure the
// defercheckwidth() stops width calculations until
// resumecheckwidth() is called, at which point all the
// checkwidths that were deferred are executed.
// dowidth should only be called when the type's size
// is needed immediately. checkwidth makes sure the
// size is evaluated eventually.
var deferredTypeStack []*Type
var deferredTypeStack []*types.Type
func CheckSize(t *Type) {
func checkwidth(t *types.Type) {
if t == nil {
return
}
@@ -560,11 +498,11 @@ func CheckSize(t *Type) {
// function arg structs should not be checked
// outside of the enclosing function.
if t.IsFuncArgStruct() {
base.Fatalf("CheckSize %v", t)
Fatalf("checkwidth %v", t)
}
if defercalc == 0 {
CalcSize(t)
dowidth(t)
return
}
@@ -575,70 +513,19 @@ func CheckSize(t *Type) {
}
}
func DeferCheckSize() {
func defercheckwidth() {
defercalc++
}
func ResumeCheckSize() {
func resumecheckwidth() {
if defercalc == 1 {
for len(deferredTypeStack) > 0 {
t := deferredTypeStack[len(deferredTypeStack)-1]
deferredTypeStack = deferredTypeStack[:len(deferredTypeStack)-1]
t.SetDeferwidth(false)
CalcSize(t)
dowidth(t)
}
}
defercalc--
}
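// Editor's sketch of the deferral protocol described above (go1.16 names
// from this diff):
//
//	defercheckwidth()
//	// ... declare mutually recursive types ...
//	resumecheckwidth() // runs every deferred dowidth call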
// PtrDataSize returns the length in bytes of the prefix of t
// containing pointer data. Anything after this offset is scalar data.
func PtrDataSize(t *Type) int64 {
if !t.HasPointers() {
return 0
}
switch t.Kind() {
case TPTR,
TUNSAFEPTR,
TFUNC,
TCHAN,
TMAP:
return int64(PtrSize)
case TSTRING:
// struct { byte *str; intgo len; }
return int64(PtrSize)
case TINTER:
// struct { Itab *tab; void *data; } or
// struct { Type *type; void *data; }
// Note: see comment in typebits.Set
return 2 * int64(PtrSize)
case TSLICE:
// struct { byte *array; uintgo len; uintgo cap; }
return int64(PtrSize)
case TARRAY:
// haspointers already eliminated t.NumElem() == 0.
return (t.NumElem()-1)*t.Elem().Width + PtrDataSize(t.Elem())
case TSTRUCT:
// Find the last field that has pointers.
var lastPtrField *Field
fs := t.Fields().Slice()
for i := len(fs) - 1; i >= 0; i-- {
if fs[i].Type.HasPointers() {
lastPtrField = fs[i]
break
}
}
return lastPtrField.Offset + PtrDataSize(lastPtrField.Type)
default:
base.Fatalf("PtrDataSize: unexpected type, %v", t)
return 0
}
}
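// Editor's illustration (hypothetical type): for
//
//	type T struct {
//		p *int // pointer data
//		n int  // scalar
//		q *int // last field containing pointers
//		m int  // trailing scalar
//	}
//
// PtrDataSize(T) extends just past q, so the trailing scalar m is excluded
// from the garbage collector's pointer bitmap for T.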

View File

@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package test
+package gc
import "testing"

View File

@@ -0,0 +1,177 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/types"
)
type exporter struct {
marked map[*types.Type]bool // types already seen by markType
}
// markType recursively visits types reachable from t to identify
// functions whose inline bodies may be needed.
func (p *exporter) markType(t *types.Type) {
if p.marked[t] {
return
}
p.marked[t] = true
// If this is a named type, mark all of its associated
// methods. Skip interface types because t.Methods contains
// only their unexpanded method set (i.e., exclusive of
// interface embeddings), and the switch statement below
// handles their full method set.
if t.Sym != nil && t.Etype != TINTER {
for _, m := range t.Methods().Slice() {
if types.IsExported(m.Sym.Name) {
p.markType(m.Type)
}
}
}
// Recursively mark any types that can be produced given a
// value of type t: dereferencing a pointer; indexing or
// iterating over an array, slice, or map; receiving from a
// channel; accessing a struct field or interface method; or
// calling a function.
//
// Notably, we don't mark function parameter types, because
// the user already needs some way to construct values of
// those types.
switch t.Etype {
case TPTR, TARRAY, TSLICE:
p.markType(t.Elem())
case TCHAN:
if t.ChanDir().CanRecv() {
p.markType(t.Elem())
}
case TMAP:
p.markType(t.Key())
p.markType(t.Elem())
case TSTRUCT:
for _, f := range t.FieldSlice() {
if types.IsExported(f.Sym.Name) || f.Embedded != 0 {
p.markType(f.Type)
}
}
case TFUNC:
// If t is the type of a function or method, then
// t.Nname() is its ONAME. Mark its inline body and
// any recursively called functions for export.
inlFlood(asNode(t.Nname()))
for _, f := range t.Results().FieldSlice() {
p.markType(f.Type)
}
case TINTER:
for _, f := range t.FieldSlice() {
if types.IsExported(f.Sym.Name) {
p.markType(f.Type)
}
}
}
}
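// Editor's illustration (hypothetical package): given
//
//	type List struct {
//		Next *List  // exported field: marked, recursing into *List
//		data []byte // unexported and not embedded: skipped
//	}
//
//	func (l *List) Head() *List { ... }
//
// markType(List) marks the exported method Head (flooding its inline body)
// and follows Next, but never reaches types visible only through data.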
// ----------------------------------------------------------------------------
// Export format
// Tags. Must be < 0.
const (
// Objects
packageTag = -(iota + 1)
constTag
typeTag
varTag
funcTag
endTag
// Types
namedTag
arrayTag
sliceTag
dddTag
structTag
pointerTag
signatureTag
interfaceTag
mapTag
chanTag
// Values
falseTag
trueTag
int64Tag
floatTag
fractionTag // not used by gc
complexTag
stringTag
nilTag
unknownTag // not used by gc (only appears in packages with errors)
// Type aliases
aliasTag
)
var predecl []*types.Type // initialized lazily
func predeclared() []*types.Type {
if predecl == nil {
// initialize lazily to be sure that all
// elements have been initialized before
predecl = []*types.Type{
// basic types
types.Types[TBOOL],
types.Types[TINT],
types.Types[TINT8],
types.Types[TINT16],
types.Types[TINT32],
types.Types[TINT64],
types.Types[TUINT],
types.Types[TUINT8],
types.Types[TUINT16],
types.Types[TUINT32],
types.Types[TUINT64],
types.Types[TUINTPTR],
types.Types[TFLOAT32],
types.Types[TFLOAT64],
types.Types[TCOMPLEX64],
types.Types[TCOMPLEX128],
types.Types[TSTRING],
// basic type aliases
types.Bytetype,
types.Runetype,
// error
types.Errortype,
// untyped types
types.UntypedBool,
types.UntypedInt,
types.UntypedRune,
types.UntypedFloat,
types.UntypedComplex,
types.UntypedString,
types.Types[TNIL],
// package unsafe
types.Types[TUNSAFEPTR],
// invalid type (package contains errors)
types.Types[Txxx],
// any type, for builtin export data
types.Types[TANY],
}
}
return predecl
}

View File

@@ -0,0 +1,24 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/internal/src"
)
// numImport tracks how often a package with a given name is imported.
// It is used to provide a better error message (by using the package
// path to disambiguate) if a package that appears multiple times with
// the same name appears in an error message.
var numImport = make(map[string]int)
func npos(pos src.XPos, n *Node) *Node {
n.Pos = pos
return n
}
func builtinCall(op Op) *Node {
return nod(OCALL, mkname(builtinpkg.Lookup(goopnames[op])), nil)
}

View File

@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package ir
+package gc
type bitset8 uint8
@@ -14,18 +14,6 @@ func (f *bitset8) set(mask uint8, b bool) {
}
}
-func (f bitset8) get2(shift uint8) uint8 {
-	return uint8(f>>shift) & 3
-}
-// set2 sets two bits in f using the bottom two bits of b.
-func (f *bitset8) set2(shift uint8, b uint8) {
-	// Clear old bits.
-	*(*uint8)(f) &^= 3 << shift
-	// Set new bits.
-	*(*uint8)(f) |= uint8(b&3) << shift
-}
type bitset16 uint16
func (f *bitset16) set(mask uint16, b bool) {

View File

@@ -2,16 +2,12 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !go1.8
// +build !go1.8
package gc
-import (
-	"cmd/compile/internal/base"
-	"runtime"
-)
+import "runtime"
func startMutexProfiling() {
base.Fatalf("mutex profiling unavailable in version %v", runtime.Version())
Fatalf("mutex profiling unavailable in version %v", runtime.Version())
}

View File

@@ -0,0 +1,340 @@
// Code generated by mkbuiltin.go. DO NOT EDIT.
package gc
import "cmd/compile/internal/types"
var runtimeDecls = [...]struct {
name string
tag int
typ int
}{
{"newobject", funcTag, 4},
{"mallocgc", funcTag, 8},
{"panicdivide", funcTag, 9},
{"panicshift", funcTag, 9},
{"panicmakeslicelen", funcTag, 9},
{"panicmakeslicecap", funcTag, 9},
{"throwinit", funcTag, 9},
{"panicwrap", funcTag, 9},
{"gopanic", funcTag, 11},
{"gorecover", funcTag, 14},
{"goschedguarded", funcTag, 9},
{"goPanicIndex", funcTag, 16},
{"goPanicIndexU", funcTag, 18},
{"goPanicSliceAlen", funcTag, 16},
{"goPanicSliceAlenU", funcTag, 18},
{"goPanicSliceAcap", funcTag, 16},
{"goPanicSliceAcapU", funcTag, 18},
{"goPanicSliceB", funcTag, 16},
{"goPanicSliceBU", funcTag, 18},
{"goPanicSlice3Alen", funcTag, 16},
{"goPanicSlice3AlenU", funcTag, 18},
{"goPanicSlice3Acap", funcTag, 16},
{"goPanicSlice3AcapU", funcTag, 18},
{"goPanicSlice3B", funcTag, 16},
{"goPanicSlice3BU", funcTag, 18},
{"goPanicSlice3C", funcTag, 16},
{"goPanicSlice3CU", funcTag, 18},
{"printbool", funcTag, 19},
{"printfloat", funcTag, 21},
{"printint", funcTag, 23},
{"printhex", funcTag, 25},
{"printuint", funcTag, 25},
{"printcomplex", funcTag, 27},
{"printstring", funcTag, 29},
{"printpointer", funcTag, 30},
{"printuintptr", funcTag, 31},
{"printiface", funcTag, 30},
{"printeface", funcTag, 30},
{"printslice", funcTag, 30},
{"printnl", funcTag, 9},
{"printsp", funcTag, 9},
{"printlock", funcTag, 9},
{"printunlock", funcTag, 9},
{"concatstring2", funcTag, 34},
{"concatstring3", funcTag, 35},
{"concatstring4", funcTag, 36},
{"concatstring5", funcTag, 37},
{"concatstrings", funcTag, 39},
{"cmpstring", funcTag, 40},
{"intstring", funcTag, 43},
{"slicebytetostring", funcTag, 44},
{"slicebytetostringtmp", funcTag, 45},
{"slicerunetostring", funcTag, 48},
{"stringtoslicebyte", funcTag, 50},
{"stringtoslicerune", funcTag, 53},
{"slicecopy", funcTag, 54},
{"decoderune", funcTag, 55},
{"countrunes", funcTag, 56},
{"convI2I", funcTag, 57},
{"convT16", funcTag, 58},
{"convT32", funcTag, 58},
{"convT64", funcTag, 58},
{"convTstring", funcTag, 58},
{"convTslice", funcTag, 58},
{"convT2E", funcTag, 59},
{"convT2Enoptr", funcTag, 59},
{"convT2I", funcTag, 59},
{"convT2Inoptr", funcTag, 59},
{"assertE2I", funcTag, 57},
{"assertE2I2", funcTag, 60},
{"assertI2I", funcTag, 57},
{"assertI2I2", funcTag, 60},
{"panicdottypeE", funcTag, 61},
{"panicdottypeI", funcTag, 61},
{"panicnildottype", funcTag, 62},
{"ifaceeq", funcTag, 64},
{"efaceeq", funcTag, 64},
{"fastrand", funcTag, 66},
{"makemap64", funcTag, 68},
{"makemap", funcTag, 69},
{"makemap_small", funcTag, 70},
{"mapaccess1", funcTag, 71},
{"mapaccess1_fast32", funcTag, 72},
{"mapaccess1_fast64", funcTag, 72},
{"mapaccess1_faststr", funcTag, 72},
{"mapaccess1_fat", funcTag, 73},
{"mapaccess2", funcTag, 74},
{"mapaccess2_fast32", funcTag, 75},
{"mapaccess2_fast64", funcTag, 75},
{"mapaccess2_faststr", funcTag, 75},
{"mapaccess2_fat", funcTag, 76},
{"mapassign", funcTag, 71},
{"mapassign_fast32", funcTag, 72},
{"mapassign_fast32ptr", funcTag, 72},
{"mapassign_fast64", funcTag, 72},
{"mapassign_fast64ptr", funcTag, 72},
{"mapassign_faststr", funcTag, 72},
{"mapiterinit", funcTag, 77},
{"mapdelete", funcTag, 77},
{"mapdelete_fast32", funcTag, 78},
{"mapdelete_fast64", funcTag, 78},
{"mapdelete_faststr", funcTag, 78},
{"mapiternext", funcTag, 79},
{"mapclear", funcTag, 80},
{"makechan64", funcTag, 82},
{"makechan", funcTag, 83},
{"chanrecv1", funcTag, 85},
{"chanrecv2", funcTag, 86},
{"chansend1", funcTag, 88},
{"closechan", funcTag, 30},
{"writeBarrier", varTag, 90},
{"typedmemmove", funcTag, 91},
{"typedmemclr", funcTag, 92},
{"typedslicecopy", funcTag, 93},
{"selectnbsend", funcTag, 94},
{"selectnbrecv", funcTag, 95},
{"selectnbrecv2", funcTag, 97},
{"selectsetpc", funcTag, 98},
{"selectgo", funcTag, 99},
{"block", funcTag, 9},
{"makeslice", funcTag, 100},
{"makeslice64", funcTag, 101},
{"makeslicecopy", funcTag, 102},
{"growslice", funcTag, 104},
{"memmove", funcTag, 105},
{"memclrNoHeapPointers", funcTag, 106},
{"memclrHasPointers", funcTag, 106},
{"memequal", funcTag, 107},
{"memequal0", funcTag, 108},
{"memequal8", funcTag, 108},
{"memequal16", funcTag, 108},
{"memequal32", funcTag, 108},
{"memequal64", funcTag, 108},
{"memequal128", funcTag, 108},
{"f32equal", funcTag, 109},
{"f64equal", funcTag, 109},
{"c64equal", funcTag, 109},
{"c128equal", funcTag, 109},
{"strequal", funcTag, 109},
{"interequal", funcTag, 109},
{"nilinterequal", funcTag, 109},
{"memhash", funcTag, 110},
{"memhash0", funcTag, 111},
{"memhash8", funcTag, 111},
{"memhash16", funcTag, 111},
{"memhash32", funcTag, 111},
{"memhash64", funcTag, 111},
{"memhash128", funcTag, 111},
{"f32hash", funcTag, 111},
{"f64hash", funcTag, 111},
{"c64hash", funcTag, 111},
{"c128hash", funcTag, 111},
{"strhash", funcTag, 111},
{"interhash", funcTag, 111},
{"nilinterhash", funcTag, 111},
{"int64div", funcTag, 112},
{"uint64div", funcTag, 113},
{"int64mod", funcTag, 112},
{"uint64mod", funcTag, 113},
{"float64toint64", funcTag, 114},
{"float64touint64", funcTag, 115},
{"float64touint32", funcTag, 116},
{"int64tofloat64", funcTag, 117},
{"uint64tofloat64", funcTag, 118},
{"uint32tofloat64", funcTag, 119},
{"complex128div", funcTag, 120},
{"racefuncenter", funcTag, 31},
{"racefuncenterfp", funcTag, 9},
{"racefuncexit", funcTag, 9},
{"raceread", funcTag, 31},
{"racewrite", funcTag, 31},
{"racereadrange", funcTag, 121},
{"racewriterange", funcTag, 121},
{"msanread", funcTag, 121},
{"msanwrite", funcTag, 121},
{"msanmove", funcTag, 122},
{"checkptrAlignment", funcTag, 123},
{"checkptrArithmetic", funcTag, 125},
{"libfuzzerTraceCmp1", funcTag, 127},
{"libfuzzerTraceCmp2", funcTag, 129},
{"libfuzzerTraceCmp4", funcTag, 130},
{"libfuzzerTraceCmp8", funcTag, 131},
{"libfuzzerTraceConstCmp1", funcTag, 127},
{"libfuzzerTraceConstCmp2", funcTag, 129},
{"libfuzzerTraceConstCmp4", funcTag, 130},
{"libfuzzerTraceConstCmp8", funcTag, 131},
{"x86HasPOPCNT", varTag, 6},
{"x86HasSSE41", varTag, 6},
{"x86HasFMA", varTag, 6},
{"armHasVFPv4", varTag, 6},
{"arm64HasATOMICS", varTag, 6},
}
func runtimeTypes() []*types.Type {
var typs [132]*types.Type
typs[0] = types.Bytetype
typs[1] = types.NewPtr(typs[0])
typs[2] = types.Types[TANY]
typs[3] = types.NewPtr(typs[2])
typs[4] = functype(nil, []*Node{anonfield(typs[1])}, []*Node{anonfield(typs[3])})
typs[5] = types.Types[TUINTPTR]
typs[6] = types.Types[TBOOL]
typs[7] = types.Types[TUNSAFEPTR]
typs[8] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[1]), anonfield(typs[6])}, []*Node{anonfield(typs[7])})
typs[9] = functype(nil, nil, nil)
typs[10] = types.Types[TINTER]
typs[11] = functype(nil, []*Node{anonfield(typs[10])}, nil)
typs[12] = types.Types[TINT32]
typs[13] = types.NewPtr(typs[12])
typs[14] = functype(nil, []*Node{anonfield(typs[13])}, []*Node{anonfield(typs[10])})
typs[15] = types.Types[TINT]
typs[16] = functype(nil, []*Node{anonfield(typs[15]), anonfield(typs[15])}, nil)
typs[17] = types.Types[TUINT]
typs[18] = functype(nil, []*Node{anonfield(typs[17]), anonfield(typs[15])}, nil)
typs[19] = functype(nil, []*Node{anonfield(typs[6])}, nil)
typs[20] = types.Types[TFLOAT64]
typs[21] = functype(nil, []*Node{anonfield(typs[20])}, nil)
typs[22] = types.Types[TINT64]
typs[23] = functype(nil, []*Node{anonfield(typs[22])}, nil)
typs[24] = types.Types[TUINT64]
typs[25] = functype(nil, []*Node{anonfield(typs[24])}, nil)
typs[26] = types.Types[TCOMPLEX128]
typs[27] = functype(nil, []*Node{anonfield(typs[26])}, nil)
typs[28] = types.Types[TSTRING]
typs[29] = functype(nil, []*Node{anonfield(typs[28])}, nil)
typs[30] = functype(nil, []*Node{anonfield(typs[2])}, nil)
typs[31] = functype(nil, []*Node{anonfield(typs[5])}, nil)
typs[32] = types.NewArray(typs[0], 32)
typs[33] = types.NewPtr(typs[32])
typs[34] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
typs[35] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
typs[36] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
typs[37] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
typs[38] = types.NewSlice(typs[28])
typs[39] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[38])}, []*Node{anonfield(typs[28])})
typs[40] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[15])})
typs[41] = types.NewArray(typs[0], 4)
typs[42] = types.NewPtr(typs[41])
typs[43] = functype(nil, []*Node{anonfield(typs[42]), anonfield(typs[22])}, []*Node{anonfield(typs[28])})
typs[44] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
typs[45] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
typs[46] = types.Runetype
typs[47] = types.NewSlice(typs[46])
typs[48] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[47])}, []*Node{anonfield(typs[28])})
typs[49] = types.NewSlice(typs[0])
typs[50] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28])}, []*Node{anonfield(typs[49])})
typs[51] = types.NewArray(typs[46], 32)
typs[52] = types.NewPtr(typs[51])
typs[53] = functype(nil, []*Node{anonfield(typs[52]), anonfield(typs[28])}, []*Node{anonfield(typs[47])})
typs[54] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []*Node{anonfield(typs[15])})
typs[55] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[15])}, []*Node{anonfield(typs[46]), anonfield(typs[15])})
typs[56] = functype(nil, []*Node{anonfield(typs[28])}, []*Node{anonfield(typs[15])})
typs[57] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2])})
typs[58] = functype(nil, []*Node{anonfield(typs[2])}, []*Node{anonfield(typs[7])})
typs[59] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, []*Node{anonfield(typs[2])})
typs[60] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2]), anonfield(typs[6])})
typs[61] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
typs[62] = functype(nil, []*Node{anonfield(typs[1])}, nil)
typs[63] = types.NewPtr(typs[5])
typs[64] = functype(nil, []*Node{anonfield(typs[63]), anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
typs[65] = types.Types[TUINT32]
typs[66] = functype(nil, nil, []*Node{anonfield(typs[65])})
typs[67] = types.NewMap(typs[2], typs[2])
typs[68] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []*Node{anonfield(typs[67])})
typs[69] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []*Node{anonfield(typs[67])})
typs[70] = functype(nil, nil, []*Node{anonfield(typs[67])})
typs[71] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*Node{anonfield(typs[3])})
typs[72] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*Node{anonfield(typs[3])})
typs[73] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3])})
typs[74] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
typs[75] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
typs[76] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
typs[77] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, nil)
typs[78] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, nil)
typs[79] = functype(nil, []*Node{anonfield(typs[3])}, nil)
typs[80] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67])}, nil)
typs[81] = types.NewChan(typs[2], types.Cboth)
typs[82] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22])}, []*Node{anonfield(typs[81])})
typs[83] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[81])})
typs[84] = types.NewChan(typs[2], types.Crecv)
typs[85] = functype(nil, []*Node{anonfield(typs[84]), anonfield(typs[3])}, nil)
typs[86] = functype(nil, []*Node{anonfield(typs[84]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
typs[87] = types.NewChan(typs[2], types.Csend)
typs[88] = functype(nil, []*Node{anonfield(typs[87]), anonfield(typs[3])}, nil)
typs[89] = types.NewArray(typs[0], 3)
typs[90] = tostruct([]*Node{namedfield("enabled", typs[6]), namedfield("pad", typs[89]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])})
typs[91] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
typs[92] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, nil)
typs[93] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []*Node{anonfield(typs[15])})
typs[94] = functype(nil, []*Node{anonfield(typs[87]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
typs[95] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[84])}, []*Node{anonfield(typs[6])})
typs[96] = types.NewPtr(typs[6])
typs[97] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[96]), anonfield(typs[84])}, []*Node{anonfield(typs[6])})
typs[98] = functype(nil, []*Node{anonfield(typs[63])}, nil)
typs[99] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[63]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []*Node{anonfield(typs[15]), anonfield(typs[6])})
typs[100] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[7])})
typs[101] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[7])})
typs[102] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []*Node{anonfield(typs[7])})
typs[103] = types.NewSlice(typs[2])
typs[104] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[103]), anonfield(typs[15])}, []*Node{anonfield(typs[103])})
typs[105] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil)
typs[106] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, nil)
typs[107] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []*Node{anonfield(typs[6])})
typs[108] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
typs[109] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
typs[110] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
typs[111] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
typs[112] = functype(nil, []*Node{anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[22])})
typs[113] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, []*Node{anonfield(typs[24])})
typs[114] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[22])})
typs[115] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[24])})
typs[116] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[65])})
typs[117] = functype(nil, []*Node{anonfield(typs[22])}, []*Node{anonfield(typs[20])})
typs[118] = functype(nil, []*Node{anonfield(typs[24])}, []*Node{anonfield(typs[20])})
typs[119] = functype(nil, []*Node{anonfield(typs[65])}, []*Node{anonfield(typs[20])})
typs[120] = functype(nil, []*Node{anonfield(typs[26]), anonfield(typs[26])}, []*Node{anonfield(typs[26])})
typs[121] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[5])}, nil)
typs[122] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[5]), anonfield(typs[5])}, nil)
typs[123] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[1]), anonfield(typs[5])}, nil)
typs[124] = types.NewSlice(typs[7])
typs[125] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[124])}, nil)
typs[126] = types.Types[TUINT8]
typs[127] = functype(nil, []*Node{anonfield(typs[126]), anonfield(typs[126])}, nil)
typs[128] = types.Types[TUINT16]
typs[129] = functype(nil, []*Node{anonfield(typs[128]), anonfield(typs[128])}, nil)
typs[130] = functype(nil, []*Node{anonfield(typs[65]), anonfield(typs[65])}, nil)
typs[131] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, nil)
return typs[:]
}
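// Editor's note (sketch): each runtimeDecls entry above pairs a runtime
// symbol with an index into this typs table. For example,
// {"memequal", funcTag, 107} declares
//
//	func memequal(x, y *any, size uintptr) bool
//
// since typs[107] is built from typs[3] (*any), typs[3] and typs[5]
// (uintptr), returning typs[6] (bool).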

View File

@@ -101,10 +101,10 @@ func convT2I(tab *byte, elem *any) (ret any)
func convT2Inoptr(tab *byte, elem *any) (ret any)
// interface type assertions x.(T)
-func assertE2I(inter *byte, typ *byte) *byte
-func assertE2I2(inter *byte, eface any) (ret any)
-func assertI2I(inter *byte, tab *byte) *byte
-func assertI2I2(inter *byte, iface any) (ret any)
+func assertE2I(typ *byte, iface any) (ret any)
+func assertE2I2(typ *byte, iface any) (ret any, b bool)
+func assertI2I(typ *byte, iface any) (ret any)
+func assertI2I2(typ *byte, iface any) (ret any, b bool)
func panicdottypeE(have, want, iface *byte)
func panicdottypeI(have, want, iface *byte)
func panicnildottype(want *byte)
@@ -166,7 +166,8 @@ func typedmemclr(typ *byte, dst *any)
func typedslicecopy(typ *byte, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int
func selectnbsend(hchan chan<- any, elem *any) bool
-func selectnbrecv(elem *any, hchan <-chan any) (bool, bool)
+func selectnbrecv(elem *any, hchan <-chan any) bool
+func selectnbrecv2(elem *any, received *bool, hchan <-chan any) bool
func selectsetpc(pc *uintptr)
func selectgo(cas0 *byte, order0 *byte, pc0 *uintptr, nsends int, nrecvs int, block bool) (int, bool)


@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package typecheck
+package gc_test
import (
"bytes"


@@ -0,0 +1,278 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"math/bits"
)
const (
wordBits = 32
wordMask = wordBits - 1
wordShift = 5
)
// A bvec is a bit vector.
type bvec struct {
n int32 // number of bits in vector
b []uint32 // words holding bits
}
func bvalloc(n int32) bvec {
nword := (n + wordBits - 1) / wordBits
return bvec{n, make([]uint32, nword)}
}
type bulkBvec struct {
words []uint32
nbit int32
nword int32
}
func bvbulkalloc(nbit int32, count int32) bulkBvec {
nword := (nbit + wordBits - 1) / wordBits
size := int64(nword) * int64(count)
if int64(int32(size*4)) != size*4 {
Fatalf("bvbulkalloc too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
}
return bulkBvec{
words: make([]uint32, size),
nbit: nbit,
nword: nword,
}
}
func (b *bulkBvec) next() bvec {
out := bvec{b.nbit, b.words[:b.nword]}
b.words = b.words[b.nword:]
return out
}
func (bv1 bvec) Eq(bv2 bvec) bool {
if bv1.n != bv2.n {
Fatalf("bvequal: lengths %d and %d are not equal", bv1.n, bv2.n)
}
for i, x := range bv1.b {
if x != bv2.b[i] {
return false
}
}
return true
}
func (dst bvec) Copy(src bvec) {
copy(dst.b, src.b)
}
func (bv bvec) Get(i int32) bool {
if i < 0 || i >= bv.n {
Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.n)
}
mask := uint32(1 << uint(i%wordBits))
return bv.b[i>>wordShift]&mask != 0
}
func (bv bvec) Set(i int32) {
if i < 0 || i >= bv.n {
Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.n)
}
mask := uint32(1 << uint(i%wordBits))
bv.b[i/wordBits] |= mask
}
func (bv bvec) Unset(i int32) {
if i < 0 || i >= bv.n {
Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.n)
}
mask := uint32(1 << uint(i%wordBits))
bv.b[i/wordBits] &^= mask
}
// Next returns the smallest index >= i for which Get reports true.
// If there is no such index, Next returns -1.
func (bv bvec) Next(i int32) int32 {
if i >= bv.n {
return -1
}
// Jump i ahead to next word with bits.
if bv.b[i>>wordShift]>>uint(i&wordMask) == 0 {
i &^= wordMask
i += wordBits
for i < bv.n && bv.b[i>>wordShift] == 0 {
i += wordBits
}
}
if i >= bv.n {
return -1
}
// Find 1 bit.
w := bv.b[i>>wordShift] >> uint(i&wordMask)
i += int32(bits.TrailingZeros32(w))
return i
}
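Get, Set, Unset, and Next above all rely on the same indexing: bit i lives in word i>>wordShift at bit position i&wordMask. A minimal standalone sketch of that arithmetic and of a Next-style scan (demo code, not part of this package):

package main

import (
	"fmt"
	"math/bits"
)

const (
	wordBits  = 32
	wordMask  = wordBits - 1
	wordShift = 5
)

func main() {
	b := make([]uint32, 2) // a 64-bit vector
	for _, i := range []int32{3, 33, 40} {
		b[i>>wordShift] |= 1 << uint(i&wordMask) // Set
	}
	// Next-style scan: skip empty words, then find the low set bit.
	for i := int32(0); i < 64; {
		w := b[i>>wordShift] >> uint(i&wordMask)
		if w == 0 {
			i = (i &^ wordMask) + wordBits // jump to the next word
			continue
		}
		i += int32(bits.TrailingZeros32(w))
		fmt.Println("bit set at", i) // prints 3, 33, 40
		i++
	}
}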
func (bv bvec) IsEmpty() bool {
for _, x := range bv.b {
if x != 0 {
return false
}
}
return true
}
func (bv bvec) Not() {
for i, x := range bv.b {
bv.b[i] = ^x
}
}
// union
func (dst bvec) Or(src1, src2 bvec) {
if len(src1.b) == 0 {
return
}
_, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop
for i, x := range src1.b {
dst.b[i] = x | src2.b[i]
}
}
// intersection
func (dst bvec) And(src1, src2 bvec) {
if len(src1.b) == 0 {
return
}
_, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop
for i, x := range src1.b {
dst.b[i] = x & src2.b[i]
}
}
// difference
func (dst bvec) AndNot(src1, src2 bvec) {
if len(src1.b) == 0 {
return
}
_, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop
for i, x := range src1.b {
dst.b[i] = x &^ src2.b[i]
}
}
func (bv bvec) String() string {
s := make([]byte, 2+bv.n)
copy(s, "#*")
for i := int32(0); i < bv.n; i++ {
ch := byte('0')
if bv.Get(i) {
ch = '1'
}
s[2+i] = ch
}
return string(s)
}
func (bv bvec) Clear() {
for i := range bv.b {
bv.b[i] = 0
}
}
// FNV-1 hash function constants.
const (
H0 = 2166136261
Hp = 16777619
)
func hashbitmap(h uint32, bv bvec) uint32 {
n := int((bv.n + 31) / 32)
for i := 0; i < n; i++ {
w := bv.b[i]
h = (h * Hp) ^ (w & 0xff)
h = (h * Hp) ^ ((w >> 8) & 0xff)
h = (h * Hp) ^ ((w >> 16) & 0xff)
h = (h * Hp) ^ ((w >> 24) & 0xff)
}
return h
}
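hashbitmap is FNV-1 (multiply, then XOR) applied to the vector's words a byte at a time, low byte first. Assuming that byte order, the same value should come out of the standard library's FNV-1 hash; a sketch to check the reading:

package main

import (
	"encoding/binary"
	"fmt"
	"hash/fnv"
)

func main() {
	words := []uint32{0xdeadbeef, 0x01020304}

	// Manual FNV-1 exactly as in hashbitmap above.
	const H0, Hp = 2166136261, 16777619
	h := uint32(H0)
	for _, w := range words {
		for s := 0; s < 32; s += 8 {
			h = (h * Hp) ^ ((w >> s) & 0xff)
		}
	}

	// The same bytes through the standard library's FNV-1.
	f := fnv.New32()
	buf := make([]byte, 4)
	for _, w := range words {
		binary.LittleEndian.PutUint32(buf, w)
		f.Write(buf)
	}
	fmt.Println(h == f.Sum32()) // expected: true
}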
// bvecSet is a set of bvecs, in initial insertion order.
type bvecSet struct {
index []int // hash -> uniq index. -1 indicates empty slot.
uniq []bvec // unique bvecs, in insertion order
}
func (m *bvecSet) grow() {
// Allocate new index.
n := len(m.index) * 2
if n == 0 {
n = 32
}
newIndex := make([]int, n)
for i := range newIndex {
newIndex[i] = -1
}
// Rehash into newIndex.
for i, bv := range m.uniq {
h := hashbitmap(H0, bv) % uint32(len(newIndex))
for {
j := newIndex[h]
if j < 0 {
newIndex[h] = i
break
}
h++
if h == uint32(len(newIndex)) {
h = 0
}
}
}
m.index = newIndex
}
// add adds bv to the set and returns its index in the slice returned by m.extractUniqe.
// The caller must not modify bv after this.
func (m *bvecSet) add(bv bvec) int {
if len(m.uniq)*4 >= len(m.index) {
m.grow()
}
index := m.index
h := hashbitmap(H0, bv) % uint32(len(index))
for {
j := index[h]
if j < 0 {
// New bvec.
index[h] = len(m.uniq)
m.uniq = append(m.uniq, bv)
return len(m.uniq) - 1
}
jlive := m.uniq[j]
if bv.Eq(jlive) {
// Existing bvec.
return j
}
h++
if h == uint32(len(index)) {
h = 0
}
}
}
// extractUniqe returns the slice of unique bit vectors in m, as
// indexed by the result of bvecSet.add.
func (m *bvecSet) extractUniqe() []bvec {
return m.uniq
}
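bvecSet is a deduplicating hash set with open addressing and linear probing: add probes from the hash until it hits an equal entry or an empty slot, and grow doubles and rehashes once the table is a quarter full. The same scheme in a self-contained form (hypothetical sketch, keyed by strings for brevity):

package main

import "fmt"

func fnv1(s string) uint32 {
	h := uint32(2166136261) // H0
	for i := 0; i < len(s); i++ {
		h = (h * 16777619) ^ uint32(s[i]) // Hp, as in hashbitmap
	}
	return h
}

// dedup mirrors bvecSet: index is the probe table, uniq holds the
// entries in first-insertion order.
type dedup struct {
	index []int // hash slot -> position in uniq; -1 means empty
	uniq  []string
}

func (m *dedup) grow() {
	n := len(m.index) * 2
	if n == 0 {
		n = 32
	}
	m.index = make([]int, n)
	for i := range m.index {
		m.index[i] = -1
	}
	for j, s := range m.uniq { // rehash existing entries
		h := fnv1(s) % uint32(n)
		for m.index[h] >= 0 {
			h = (h + 1) % uint32(n)
		}
		m.index[h] = j
	}
}

func (m *dedup) add(s string) int {
	if len(m.uniq)*4 >= len(m.index) { // keep load factor <= 1/4
		m.grow()
	}
	for h := fnv1(s) % uint32(len(m.index)); ; h = (h + 1) % uint32(len(m.index)) {
		switch j := m.index[h]; {
		case j < 0: // empty slot: record a new entry
			m.index[h] = len(m.uniq)
			m.uniq = append(m.uniq, s)
			return len(m.uniq) - 1
		case m.uniq[j] == s: // already present
			return j
		}
	}
}

func main() {
	var m dedup
	fmt.Println(m.add("a"), m.add("b"), m.add("a")) // 0 1 0
}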


@@ -1,6 +1,6 @@
-// Code generated by "stringer -type=Class name.go"; DO NOT EDIT.
+// Code generated by "stringer -type=Class"; DO NOT EDIT.
-package ir
+package gc
import "strconv"
@@ -14,13 +14,12 @@ func _() {
_ = x[PAUTOHEAP-3]
_ = x[PPARAM-4]
_ = x[PPARAMOUT-5]
-_ = x[PTYPEPARAM-6]
-_ = x[PFUNC-7]
+_ = x[PFUNC-6]
}
-const _Class_name = "PxxxPEXTERNPAUTOPAUTOHEAPPPARAMPPARAMOUTPTYPEPARAMPFUNC"
+const _Class_name = "PxxxPEXTERNPAUTOPAUTOHEAPPPARAMPPARAMOUTPFUNC"
-var _Class_index = [...]uint8{0, 4, 11, 16, 25, 31, 40, 50, 55}
+var _Class_index = [...]uint8{0, 4, 11, 16, 25, 31, 40, 45}
func (i Class) String() string {
if i >= Class(len(_Class_index)-1) {

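As in all stringer output, _Class_name concatenates every constant's name and _Class_index stores the cut points, so String() for value i slices _Class_name[_Class_index[i]:_Class_index[i+1]]. A toy version of the mechanism (hypothetical names, not the generated file):

package main

import "fmt"

const _name = "PxxxPEXTERNPAUTO" // names for values 0, 1, 2, concatenated

var _index = [...]uint8{0, 4, 11, 16} // cut points into _name

func className(i int) string {
	if i < 0 || i >= len(_index)-1 {
		return fmt.Sprintf("Class(%d)", i)
	}
	return _name[_index[i]:_index[i+1]]
}

func main() {
	fmt.Println(className(1)) // PEXTERN
}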

@@ -0,0 +1,594 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/syntax"
"cmd/compile/internal/types"
"fmt"
)
func (p *noder) funcLit(expr *syntax.FuncLit) *Node {
xtype := p.typeExpr(expr.Type)
ntype := p.typeExpr(expr.Type)
xfunc := p.nod(expr, ODCLFUNC, nil, nil)
xfunc.Func.SetIsHiddenClosure(Curfn != nil)
xfunc.Func.Nname = newfuncnamel(p.pos(expr), nblank.Sym) // filled in by typecheckclosure
xfunc.Func.Nname.Name.Param.Ntype = xtype
xfunc.Func.Nname.Name.Defn = xfunc
clo := p.nod(expr, OCLOSURE, nil, nil)
clo.Func.Ntype = ntype
xfunc.Func.Closure = clo
clo.Func.Closure = xfunc
p.funcBody(xfunc, expr.Body)
// closure-specific variables are hanging off the
// ordinary ones in the symbol table; see oldname.
// unhook them.
// make the list of pointers for the closure call.
for _, v := range xfunc.Func.Cvars.Slice() {
// Unlink from v1; see comment in syntax.go type Param for these fields.
v1 := v.Name.Defn
v1.Name.Param.Innermost = v.Name.Param.Outer
// If the closure usage of v is not dense,
// we need to make it dense; now that we're out
// of the function in which v appeared,
// look up v.Sym in the enclosing function
// and keep it around for use in the compiled code.
//
// That is, suppose we just finished parsing the innermost
// closure f4 in this code:
//
// func f() {
// v := 1
// func() { // f2
// use(v)
// func() { // f3
// func() { // f4
// use(v)
// }()
// }()
// }()
// }
//
// At this point v.Outer is f2's v; there is no f3's v.
// To construct the closure f4 from within f3,
// we need to use f3's v and in this case we need to create f3's v.
// We are now in the context of f3, so calling oldname(v.Sym)
// obtains f3's v, creating it if necessary (as it is in the example).
//
// capturevars will decide whether to use v directly or &v.
v.Name.Param.Outer = oldname(v.Sym)
}
return clo
}
// typecheckclosure typechecks an OCLOSURE node. It also creates the named
// function associated with the closure.
// TODO: This creation of the named function should probably really be done in a
// separate pass from type-checking.
func typecheckclosure(clo *Node, top int) {
xfunc := clo.Func.Closure
// Set current associated iota value, so iota can be used inside
// function in ConstSpec, see issue #22344
if x := getIotaValue(); x >= 0 {
xfunc.SetIota(x)
}
clo.Func.Ntype = typecheck(clo.Func.Ntype, ctxType)
clo.Type = clo.Func.Ntype.Type
clo.Func.Top = top
// Do not typecheck xfunc twice, otherwise, we will end up pushing
// xfunc to xtop multiple times, causing initLSym called twice.
// See #30709
if xfunc.Typecheck() == 1 {
return
}
for _, ln := range xfunc.Func.Cvars.Slice() {
n := ln.Name.Defn
if !n.Name.Captured() {
n.Name.SetCaptured(true)
if n.Name.Decldepth == 0 {
Fatalf("typecheckclosure: var %S does not have decldepth assigned", n)
}
// Ignore assignments to the variable in straightline code
// preceding the first capturing by a closure.
if n.Name.Decldepth == decldepth {
n.Name.SetAssigned(false)
}
}
}
xfunc.Func.Nname.Sym = closurename(Curfn)
setNodeNameFunc(xfunc.Func.Nname)
xfunc = typecheck(xfunc, ctxStmt)
// Type check the body now, but only if we're inside a function.
// At top level (in a variable initialization: curfn==nil) we're not
// ready to type check code yet; we'll check it later, because the
// underlying closure function we create is added to xtop.
if Curfn != nil && clo.Type != nil {
oldfn := Curfn
Curfn = xfunc
olddd := decldepth
decldepth = 1
typecheckslice(xfunc.Nbody.Slice(), ctxStmt)
decldepth = olddd
Curfn = oldfn
}
xtop = append(xtop, xfunc)
}
// globClosgen is like Func.Closgen, but for the global scope.
var globClosgen int
// closurename generates a new unique name for a closure within
// outerfunc.
func closurename(outerfunc *Node) *types.Sym {
outer := "glob."
prefix := "func"
gen := &globClosgen
if outerfunc != nil {
if outerfunc.Func.Closure != nil {
prefix = ""
}
outer = outerfunc.funcname()
// There may be multiple functions named "_". In those
// cases, we can't use their individual Closgens as it
// would lead to name clashes.
if !outerfunc.Func.Nname.isBlank() {
gen = &outerfunc.Func.Closgen
}
}
*gen++
return lookup(fmt.Sprintf("%s.%s%d", outer, prefix, *gen))
}
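The names this produces are the ones that show up in stack traces: outer.func1 for the first closure in outer, glob..func1 at package scope (the double dot comes from the "glob." prefix), and a bare numeric suffix for a closure nested inside another closure. A simplified sketch of the scheme (the real code threads the counter through Func.Closgen and handles blank-named functions specially):

package main

import "fmt"

// nameClosure mimics closurename: "outer.funcN", with the "func"
// prefix dropped when the enclosing function is itself a closure,
// and "glob." used at package scope.
func nameClosure(outer string, outerIsClosure bool, gen *int) string {
	prefix := "func"
	if outer == "" {
		outer = "glob."
	} else if outerIsClosure {
		prefix = ""
	}
	*gen++
	return fmt.Sprintf("%s.%s%d", outer, prefix, *gen)
}

func main() {
	var fGen, innerGen, globGen int
	f1 := nameClosure("f", false, &fGen)
	f2 := nameClosure("f", false, &fGen)
	fmt.Println(f1, f2)                           // f.func1 f.func2
	fmt.Println(nameClosure(f1, true, &innerGen)) // f.func1.1
	fmt.Println(nameClosure("", false, &globGen)) // glob..func1
}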
// capturevarscomplete is set to true when the capturevars phase is done.
var capturevarscomplete bool
// capturevars is called in a separate phase after all typechecking is done.
// It decides whether each variable captured by a closure should be captured
// by value or by reference.
// We use value capturing for values <= 128 bytes that are never reassigned
// after capturing (effectively constant).
func capturevars(xfunc *Node) {
lno := lineno
lineno = xfunc.Pos
clo := xfunc.Func.Closure
cvars := xfunc.Func.Cvars.Slice()
out := cvars[:0]
for _, v := range cvars {
if v.Type == nil {
// If v.Type is nil, it means v looked like it
// was going to be used in the closure, but
// isn't. This happens in struct literals like
// s{f: x} where we can't distinguish whether
// f is a field identifier or expression until
// resolving s.
continue
}
out = append(out, v)
// type check the & of closed variables outside the closure,
// so that the outer frame also grabs them and knows they escape.
dowidth(v.Type)
outer := v.Name.Param.Outer
outermost := v.Name.Defn
// out parameters will be assigned to implicitly upon return.
if outermost.Class() != PPARAMOUT && !outermost.Name.Addrtaken() && !outermost.Name.Assigned() && v.Type.Width <= 128 {
v.Name.SetByval(true)
} else {
outermost.Name.SetAddrtaken(true)
outer = nod(OADDR, outer, nil)
}
if Debug.m > 1 {
var name *types.Sym
if v.Name.Curfn != nil && v.Name.Curfn.Func.Nname != nil {
name = v.Name.Curfn.Func.Nname.Sym
}
how := "ref"
if v.Name.Byval() {
how = "value"
}
Warnl(v.Pos, "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym, outermost.Name.Addrtaken(), outermost.Name.Assigned(), int32(v.Type.Width))
}
outer = typecheck(outer, ctxExpr)
clo.Func.Enter.Append(outer)
}
xfunc.Func.Cvars.Set(out)
lineno = lno
}
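These decisions are observable from the command line: building with -gcflags=-m=2 makes the Warnl above fire for every captured variable. A small program to try it on (the exact diagnostic wording is version-dependent):

package main

func main() {
	small := 1        // <= 128 bytes, never reassigned: captured by value
	var big [32]int64 // 256 bytes, over the limit: captured by reference
	reassigned := 1
	f := func() int {
		return small + int(big[0]) + reassigned
	}
	reassigned = 2 // assignment after capture forces by-reference
	println(f())
}

go build -gcflags=-m=2 should report small "capturing by value" and both big and reassigned "capturing by ref".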
// transformclosure is called in a separate phase after escape analysis.
// It transforms closure bodies to properly reference captured variables.
func transformclosure(xfunc *Node) {
lno := lineno
lineno = xfunc.Pos
clo := xfunc.Func.Closure
if clo.Func.Top&ctxCallee != 0 {
// If the closure is directly called, we transform it to a plain function call
// with variables passed as args. This avoids allocation of a closure object.
// Here we do only a part of the transformation. Walk of OCALLFUNC(OCLOSURE)
// will complete the transformation later.
// For illustration, the following closure:
// func(a int) {
// println(byval)
// byref++
// }(42)
// becomes:
// func(byval int, &byref *int, a int) {
// println(byval)
// (*&byref)++
// }(byval, &byref, 42)
// f is ONAME of the actual function.
f := xfunc.Func.Nname
// We are going to insert captured variables before input args.
var params []*types.Field
var decls []*Node
for _, v := range xfunc.Func.Cvars.Slice() {
if !v.Name.Byval() {
// If v of type T is captured by reference,
// we introduce function param &v *T
// and v remains PAUTOHEAP with &v heapaddr
// (accesses will implicitly deref &v).
addr := newname(lookup("&" + v.Sym.Name))
addr.Type = types.NewPtr(v.Type)
v.Name.Param.Heapaddr = addr
v = addr
}
v.SetClass(PPARAM)
decls = append(decls, v)
fld := types.NewField()
fld.Nname = asTypesNode(v)
fld.Type = v.Type
fld.Sym = v.Sym
params = append(params, fld)
}
if len(params) > 0 {
// Prepend params and decls.
f.Type.Params().SetFields(append(params, f.Type.Params().FieldSlice()...))
xfunc.Func.Dcl = append(decls, xfunc.Func.Dcl...)
}
dowidth(f.Type)
xfunc.Type = f.Type // update type of ODCLFUNC
} else {
// The closure is not called, so it is going to stay as closure.
var body []*Node
offset := int64(Widthptr)
for _, v := range xfunc.Func.Cvars.Slice() {
// cv refers to the field inside of closure OSTRUCTLIT.
cv := nod(OCLOSUREVAR, nil, nil)
cv.Type = v.Type
if !v.Name.Byval() {
cv.Type = types.NewPtr(v.Type)
}
offset = Rnd(offset, int64(cv.Type.Align))
cv.Xoffset = offset
offset += cv.Type.Width
if v.Name.Byval() && v.Type.Width <= int64(2*Widthptr) {
// If it is a small variable captured by value, downgrade it to PAUTO.
v.SetClass(PAUTO)
xfunc.Func.Dcl = append(xfunc.Func.Dcl, v)
body = append(body, nod(OAS, v, cv))
} else {
// Declare variable holding addresses taken from closure
// and initialize in entry prologue.
addr := newname(lookup("&" + v.Sym.Name))
addr.Type = types.NewPtr(v.Type)
addr.SetClass(PAUTO)
addr.Name.SetUsed(true)
addr.Name.Curfn = xfunc
xfunc.Func.Dcl = append(xfunc.Func.Dcl, addr)
v.Name.Param.Heapaddr = addr
if v.Name.Byval() {
cv = nod(OADDR, cv, nil)
}
body = append(body, nod(OAS, addr, cv))
}
}
if len(body) > 0 {
typecheckslice(body, ctxStmt)
xfunc.Func.Enter.Set(body)
xfunc.Func.SetNeedctxt(true)
}
}
lineno = lno
}
// hasemptycvars reports whether closure clo has an
// empty list of captured vars.
func hasemptycvars(clo *Node) bool {
xfunc := clo.Func.Closure
return xfunc.Func.Cvars.Len() == 0
}
// closuredebugruntimecheck applies boilerplate checks for debug flags
// and compiling runtime
func closuredebugruntimecheck(clo *Node) {
if Debug_closure > 0 {
xfunc := clo.Func.Closure
if clo.Esc == EscHeap {
Warnl(clo.Pos, "heap closure, captured vars = %v", xfunc.Func.Cvars)
} else {
Warnl(clo.Pos, "stack closure, captured vars = %v", xfunc.Func.Cvars)
}
}
if compiling_runtime && clo.Esc == EscHeap {
yyerrorl(clo.Pos, "heap-allocated closure, not allowed in runtime")
}
}
// closureType returns the struct type used to hold all the information
// needed in the closure for clo (clo must be a OCLOSURE node).
// The address of a variable of the returned type can be cast to a func.
func closureType(clo *Node) *types.Type {
// Create closure in the form of a composite literal.
// supposing the closure captures an int i and a string s
// and has one float64 argument and no results,
// the generated code looks like:
//
// clos = &struct{.F uintptr; i *int; s *string}{func.1, &i, &s}
//
// The use of the struct provides type information to the garbage
// collector so that it can walk the closure. We could use (in this case)
// [3]unsafe.Pointer instead, but that would leave the gc in the dark.
// The information appears in the binary in the form of type descriptors;
// the struct is unnamed so that closures in multiple packages with the
// same struct type can share the descriptor.
fields := []*Node{
namedfield(".F", types.Types[TUINTPTR]),
}
for _, v := range clo.Func.Closure.Func.Cvars.Slice() {
typ := v.Type
if !v.Name.Byval() {
typ = types.NewPtr(typ)
}
fields = append(fields, symfield(v.Sym, typ))
}
typ := tostruct(fields)
typ.SetNoalg(true)
return typ
}
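Written as ordinary Go, the struct built here for a closure capturing i by reference and s by value looks roughly like the type below; only the .F code-pointer word has a fixed place, the rest follows the captures (illustrative sketch, not actual compiler output):

package main

import "unsafe"

// Conceptual layout for: i := 0; s := "x"; f := func() { i++; _ = s }
// (the compiler's field names are .F, i, s; the struct is unnamed).
type closure struct {
	F uintptr // code pointer for the compiled function body
	i *int    // captured by reference: address of i
	s string  // captured by value: copy of s
}

func main() {
	println(unsafe.Sizeof(closure{})) // one word + pointer + string header
}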
func walkclosure(clo *Node, init *Nodes) *Node {
xfunc := clo.Func.Closure
// If no closure vars, don't bother wrapping.
if hasemptycvars(clo) {
if Debug_closure > 0 {
Warnl(clo.Pos, "closure converted to global")
}
return xfunc.Func.Nname
}
closuredebugruntimecheck(clo)
typ := closureType(clo)
clos := nod(OCOMPLIT, nil, typenod(typ))
clos.Esc = clo.Esc
clos.List.Set(append([]*Node{nod(OCFUNC, xfunc.Func.Nname, nil)}, clo.Func.Enter.Slice()...))
clos = nod(OADDR, clos, nil)
clos.Esc = clo.Esc
// Force type conversion from *struct to the func type.
clos = convnop(clos, clo.Type)
// non-escaping temp to use, if any.
if x := prealloc[clo]; x != nil {
if !types.Identical(typ, x.Type) {
panic("closure type does not match order's assigned type")
}
clos.Left.Right = x
delete(prealloc, clo)
}
return walkexpr(clos, init)
}
func typecheckpartialcall(fn *Node, sym *types.Sym) {
switch fn.Op {
case ODOTINTER, ODOTMETH:
break
default:
Fatalf("invalid typecheckpartialcall")
}
// Create top-level function.
xfunc := makepartialcall(fn, fn.Type, sym)
fn.Func = xfunc.Func
fn.Func.SetWrapper(true)
fn.Right = newname(sym)
fn.Op = OCALLPART
fn.Type = xfunc.Type
}
// makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed
// for partial calls.
func makepartialcall(fn *Node, t0 *types.Type, meth *types.Sym) *Node {
rcvrtype := fn.Left.Type
sym := methodSymSuffix(rcvrtype, meth, "-fm")
if sym.Uniq() {
return asNode(sym.Def)
}
sym.SetUniq(true)
savecurfn := Curfn
saveLineNo := lineno
Curfn = nil
// Set line number equal to the line number where the method is declared.
var m *types.Field
if lookdot0(meth, rcvrtype, &m, false) == 1 && m.Pos.IsKnown() {
lineno = m.Pos
}
// Note: !m.Pos.IsKnown() happens for method expressions where
// the method is implicitly declared. The Error method of the
// built-in error type is one such method. We leave the line
// number at the use of the method expression in this
// case. See issue 29389.
tfn := nod(OTFUNC, nil, nil)
tfn.List.Set(structargs(t0.Params(), true))
tfn.Rlist.Set(structargs(t0.Results(), false))
xfunc := dclfunc(sym, tfn)
xfunc.Func.SetDupok(true)
xfunc.Func.SetNeedctxt(true)
tfn.Type.SetPkg(t0.Pkg())
// Declare and initialize variable holding receiver.
cv := nod(OCLOSUREVAR, nil, nil)
cv.Type = rcvrtype
cv.Xoffset = Rnd(int64(Widthptr), int64(cv.Type.Align))
ptr := newname(lookup(".this"))
declare(ptr, PAUTO)
ptr.Name.SetUsed(true)
var body []*Node
if rcvrtype.IsPtr() || rcvrtype.IsInterface() {
ptr.Type = rcvrtype
body = append(body, nod(OAS, ptr, cv))
} else {
ptr.Type = types.NewPtr(rcvrtype)
body = append(body, nod(OAS, ptr, nod(OADDR, cv, nil)))
}
call := nod(OCALL, nodSym(OXDOT, ptr, meth), nil)
call.List.Set(paramNnames(tfn.Type))
call.SetIsDDD(tfn.Type.IsVariadic())
if t0.NumResults() != 0 {
n := nod(ORETURN, nil, nil)
n.List.Set1(call)
call = n
}
body = append(body, call)
xfunc.Nbody.Set(body)
funcbody()
xfunc = typecheck(xfunc, ctxStmt)
// Need to typecheck the body of the just-generated wrapper.
// typecheckslice() requires that Curfn is set when processing an ORETURN.
Curfn = xfunc
typecheckslice(xfunc.Nbody.Slice(), ctxStmt)
sym.Def = asTypesNode(xfunc)
xtop = append(xtop, xfunc)
Curfn = savecurfn
lineno = saveLineNo
return xfunc
}
// partialCallType returns the struct type used to hold all the information
// needed in the closure for n (n must be a OCALLPART node).
// The address of a variable of the returned type can be cast to a func.
func partialCallType(n *Node) *types.Type {
t := tostruct([]*Node{
namedfield("F", types.Types[TUINTPTR]),
namedfield("R", n.Left.Type),
})
t.SetNoalg(true)
return t
}
func walkpartialcall(n *Node, init *Nodes) *Node {
// Create closure in the form of a composite literal.
// For x.M with receiver (x) type T, the generated code looks like:
//
// clos = &struct{F uintptr; R T}{T.M·f, x}
//
// Like walkclosure above.
if n.Left.Type.IsInterface() {
// Trigger panic for method on nil interface now.
// Otherwise it happens in the wrapper and is confusing.
n.Left = cheapexpr(n.Left, init)
n.Left = walkexpr(n.Left, nil)
tab := nod(OITAB, n.Left, nil)
tab = typecheck(tab, ctxExpr)
c := nod(OCHECKNIL, tab, nil)
c.SetTypecheck(1)
init.Append(c)
}
typ := partialCallType(n)
clos := nod(OCOMPLIT, nil, typenod(typ))
clos.Esc = n.Esc
clos.List.Set2(nod(OCFUNC, n.Func.Nname, nil), n.Left)
clos = nod(OADDR, clos, nil)
clos.Esc = n.Esc
// Force type conversion from *struct to the func type.
clos = convnop(clos, n.Type)
// non-escaping temp to use, if any.
if x := prealloc[n]; x != nil {
if !types.Identical(typ, x.Type) {
panic("partial call type does not match order's assigned type")
}
clos.Left.Right = x
delete(prealloc, n)
}
return walkexpr(clos, init)
}
// callpartMethod returns the *types.Field representing the method
// referenced by method value n.
func callpartMethod(n *Node) *types.Field {
if n.Op != OCALLPART {
Fatalf("expected OCALLPART, got %v", n)
}
// TODO(mdempsky): Optimize this. If necessary,
// makepartialcall could save m for us somewhere.
var m *types.Field
if lookdot0(n.Right.Sym, n.Left.Type, &m, false) != 1 {
Fatalf("failed to find field for OCALLPART")
}
return m
}
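This is the machinery behind ordinary method values: m := x.Add builds a T.Add-fm wrapper closed over a copy of the receiver. Semantically that matches the hand-written closure below (a sketch of the behavior, not of the generated code):

package main

import "fmt"

type T struct{ n int }

func (t T) Add(d int) int { return t.n + d }

func main() {
	x := T{n: 10}
	m := x.Add // compiler emits a T.Add-fm wrapper closed over a copy of x

	// Hand-written equivalent of the wrapper: capture the receiver,
	// forward the remaining arguments.
	x2 := x
	hand := func(d int) int { return x2.Add(d) }

	x.n = 99 // affects neither: the receiver was copied at bind time
	fmt.Println(m(5), hand(5)) // 15 15
}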


@@ -1,150 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"internal/race"
"math/rand"
"sort"
"sync"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/liveness"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/compile/internal/walk"
)
// "Portable" code generation.
var (
compilequeue []*ir.Func // functions waiting to be compiled
)
func enqueueFunc(fn *ir.Func) {
if ir.CurFunc != nil {
base.FatalfAt(fn.Pos(), "enqueueFunc %v inside %v", fn, ir.CurFunc)
}
if ir.FuncName(fn) == "_" {
// Skip compiling blank functions.
// Frontend already reported any spec-mandated errors (#29870).
return
}
if clo := fn.OClosure; clo != nil && !ir.IsTrivialClosure(clo) {
return // we'll get this as part of its enclosing function
}
if len(fn.Body) == 0 {
// Initialize ABI wrappers if necessary.
ssagen.InitLSym(fn, false)
types.CalcSize(fn.Type()) // TODO register args; remove this once all is done by abiutils
a := ssagen.AbiForFunc(fn)
a.ABIAnalyze(fn.Type()) // will set parameter spill/home locations correctly
liveness.WriteFuncMap(fn)
return
}
errorsBefore := base.Errors()
todo := []*ir.Func{fn}
for len(todo) > 0 {
next := todo[len(todo)-1]
todo = todo[:len(todo)-1]
prepareFunc(next)
todo = append(todo, next.Closures...)
}
if base.Errors() > errorsBefore {
return
}
// Enqueue just fn itself. compileFunctions will handle
// scheduling compilation of its closures after it's done.
compilequeue = append(compilequeue, fn)
}
// prepareFunc handles any remaining frontend compilation tasks that
// aren't yet safe to perform concurrently.
func prepareFunc(fn *ir.Func) {
// Set up the function's LSym early to avoid data races with the assemblers.
// Do this before walk, as walk needs the LSym to set attributes/relocations
// (e.g. in MarkTypeUsedInInterface).
ssagen.InitLSym(fn, true)
// Calculate parameter offsets.
types.CalcSize(fn.Type())
typecheck.DeclContext = ir.PAUTO
ir.CurFunc = fn
walk.Walk(fn)
ir.CurFunc = nil // enforce no further uses of CurFunc
typecheck.DeclContext = ir.PEXTERN
}
// compileFunctions compiles all functions in compilequeue.
// It fans out nBackendWorkers to do the work
// and waits for them to complete.
func compileFunctions() {
if len(compilequeue) == 0 {
return
}
if race.Enabled {
// Randomize compilation order to try to shake out races.
tmp := make([]*ir.Func, len(compilequeue))
perm := rand.Perm(len(compilequeue))
for i, v := range perm {
tmp[v] = compilequeue[i]
}
copy(compilequeue, tmp)
} else {
// Compile the longest functions first,
// since they're most likely to be the slowest.
// This helps avoid stragglers.
sort.Slice(compilequeue, func(i, j int) bool {
return len(compilequeue[i].Body) > len(compilequeue[j].Body)
})
}
// We queue up a goroutine per function that needs to be
// compiled, but require them to grab an available worker ID
// before doing any substantial work to limit parallelism.
workerIDs := make(chan int, base.Flag.LowerC)
for i := 0; i < base.Flag.LowerC; i++ {
workerIDs <- i
}
var wg sync.WaitGroup
var asyncCompile func(*ir.Func)
asyncCompile = func(fn *ir.Func) {
wg.Add(1)
go func() {
worker := <-workerIDs
ssagen.Compile(fn, worker)
workerIDs <- worker
// Done compiling fn. Schedule its closures for compilation.
for _, closure := range fn.Closures {
asyncCompile(closure)
}
wg.Done()
}()
}
types.CalcSizeDisabled = true // not safe to calculate sizes concurrently
base.Ctxt.InParallel = true
for _, fn := range compilequeue {
asyncCompile(fn)
}
compilequeue = nil
wg.Wait()
base.Ctxt.InParallel = false
types.CalcSizeDisabled = false
}
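The concurrency pattern in compileFunctions is worth noting: goroutines are created eagerly, but each must check out a worker ID from a buffered channel before doing real work, so at most c compilations (the -c flag, base.Flag.LowerC) run at once. The pattern in isolation (standalone sketch with made-up job names):

package main

import (
	"fmt"
	"sync"
)

func main() {
	const nWorkers = 4
	jobs := []string{"a", "b", "c", "d", "e", "f"}

	// Fill the pool with worker IDs up front.
	workerIDs := make(chan int, nWorkers)
	for i := 0; i < nWorkers; i++ {
		workerIDs <- i
	}

	var wg sync.WaitGroup
	for _, job := range jobs {
		wg.Add(1)
		go func(job string) {
			defer wg.Done()
			id := <-workerIDs // block until a worker slot is free
			fmt.Printf("worker %d: compiling %s\n", id, job)
			workerIDs <- id // return the slot
		}(job)
	}
	wg.Wait()
}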

File diff suppressed because it is too large


@@ -1,7 +1,7 @@
// run
// Code generated by gen/constFoldGen.go. DO NOT EDIT.
-package test
+package gc
import "testing"

File diff suppressed because it is too large


@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package test
+package gc
import (
"internal/testenv"
@@ -18,13 +18,8 @@ func TestDeps(t *testing.T) {
}
for _, dep := range strings.Fields(strings.Trim(string(out), "[]")) {
switch dep {
case "go/build", "go/scanner":
// cmd/compile/internal/importer introduces a dependency
// on go/build and go/token; cmd/compile/internal/ uses
// go/constant which uses go/token in its API. Once we
// got rid of those dependencies, enable this check again.
// TODO(gri) fix this
// t.Errorf("undesired dependency on %q", dep)
case "go/build", "go/token":
t.Errorf("undesired dependency on %q", dep)
}
}
}


@@ -6,23 +6,21 @@
// for debugging purposes. The code is customized for Node graphs
// and may be used for an alternative view of the node structure.
-package ir
+package gc
import (
+"cmd/compile/internal/types"
+"cmd/internal/src"
"fmt"
"io"
"os"
"reflect"
"regexp"
-"cmd/compile/internal/base"
-"cmd/compile/internal/types"
-"cmd/internal/src"
)
// dump is like fdump but prints to stderr.
-func DumpAny(root interface{}, filter string, depth int) {
-FDumpAny(os.Stderr, root, filter, depth)
+func dump(root interface{}, filter string, depth int) {
+fdump(os.Stderr, root, filter, depth)
}
// fdump prints the structure of a rooted data structure
@@ -42,7 +40,7 @@ func DumpAny(root interface{}, filter string, depth int) {
// rather than their type; struct fields with zero values or
// non-matching field names are omitted, and "…" means recursion
// depth has been reached or struct fields have been omitted.
-func FDumpAny(w io.Writer, root interface{}, filter string, depth int) {
+func fdump(w io.Writer, root interface{}, filter string, depth int) {
if root == nil {
fmt.Fprintln(w, "nil")
return
@@ -140,9 +138,19 @@ func (p *dumper) dump(x reflect.Value, depth int) {
return
}
-if pos, ok := x.Interface().(src.XPos); ok {
-p.printf("%s", base.FmtPos(pos))
+// special cases
+switch v := x.Interface().(type) {
+case Nodes:
+// unpack Nodes since reflect cannot look inside
+// due to the unexported field in its struct
+x = reflect.ValueOf(v.Slice())
+case src.XPos:
+p.printf("%s", linestr(v))
return
+case *types.Node:
+x = reflect.ValueOf(asNode(v))
}
switch x.Kind() {
@@ -195,7 +203,7 @@ func (p *dumper) dump(x reflect.Value, depth int) {
isNode := false
if n, ok := x.Interface().(Node); ok {
isNode = true
-p.printf("%s %s {", n.Op().String(), p.addr(x))
+p.printf("%s %s {", n.Op.String(), p.addr(x))
} else {
p.printf("%s {", typ)
}
@@ -222,7 +230,7 @@ func (p *dumper) dump(x reflect.Value, depth int) {
omitted = true
continue // exclude zero-valued fields
}
-if n, ok := x.Interface().(Nodes); ok && len(n) == 0 {
+if n, ok := x.Interface().(Nodes); ok && n.Len() == 0 {
omitted = true
continue // exclude empty Nodes slices
}


@@ -2,17 +2,14 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package dwarfgen
+package gc
import (
-"fmt"
-"strings"
-"cmd/compile/internal/base"
-"cmd/compile/internal/ir"
"cmd/internal/dwarf"
"cmd/internal/obj"
"cmd/internal/src"
+"fmt"
+"strings"
)
// To identify variables by original source position.
@@ -29,8 +26,8 @@ type varPos struct {
func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
var inlcalls dwarf.InlCalls
-if base.Debug.DwarfInl != 0 {
-base.Ctxt.Logf("assembling DWARF inlined routine info for %v\n", fnsym.Name)
+if Debug_gendwarfinl != 0 {
+Ctxt.Logf("assembling DWARF inlined routine info for %v\n", fnsym.Name)
}
// This maps inline index (from Ctxt.InlTree) to index in inlcalls.Calls
@@ -109,7 +106,7 @@ func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
}
m = makePreinlineDclMap(fnsym)
} else {
-ifnlsym := base.Ctxt.InlTree.InlinedFunction(int(ii - 1))
+ifnlsym := Ctxt.InlTree.InlinedFunction(int(ii - 1))
m = makePreinlineDclMap(ifnlsym)
}
@@ -184,7 +181,7 @@ func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
}
// Debugging
-if base.Debug.DwarfInl != 0 {
+if Debug_gendwarfinl != 0 {
dumpInlCalls(inlcalls)
dumpInlVars(dwVars)
}
@@ -207,17 +204,16 @@ func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
// late in the compilation when it is determined that we need an
// abstract function DIE for an inlined routine imported from a
// previously compiled package.
-func AbstractFunc(fn *obj.LSym) {
-ifn := base.Ctxt.DwFixups.GetPrecursorFunc(fn)
+func genAbstractFunc(fn *obj.LSym) {
+ifn := Ctxt.DwFixups.GetPrecursorFunc(fn)
if ifn == nil {
-base.Ctxt.Diag("failed to locate precursor fn for %v", fn)
+Ctxt.Diag("failed to locate precursor fn for %v", fn)
return
}
-_ = ifn.(*ir.Func)
-if base.Debug.DwarfInl != 0 {
-base.Ctxt.Logf("DwarfAbstractFunc(%v)\n", fn.Name)
+if Debug_gendwarfinl != 0 {
+Ctxt.Logf("DwarfAbstractFunc(%v)\n", fn.Name)
}
-base.Ctxt.DwarfAbstractFunc(ifn, fn, base.Ctxt.Pkgpath)
+Ctxt.DwarfAbstractFunc(ifn, fn, myimportpath)
}
// Undo any versioning performed when a name was written
@@ -239,9 +235,9 @@ func makePreinlineDclMap(fnsym *obj.LSym) map[varPos]int {
dcl := preInliningDcls(fnsym)
m := make(map[varPos]int)
for i, n := range dcl {
-pos := base.Ctxt.InnermostPos(n.Pos())
+pos := Ctxt.InnermostPos(n.Pos)
vp := varPos{
-DeclName: unversion(n.Sym().Name),
+DeclName: unversion(n.Sym.Name),
DeclFile: pos.RelFilename(),
DeclLine: pos.RelLine(),
DeclCol: pos.Col(),
@@ -265,17 +261,17 @@ func insertInlCall(dwcalls *dwarf.InlCalls, inlIdx int, imap map[int]int) int {
// is one. We do this first so that parents appear before their
// children in the resulting table.
parCallIdx := -1
-parInlIdx := base.Ctxt.InlTree.Parent(inlIdx)
+parInlIdx := Ctxt.InlTree.Parent(inlIdx)
if parInlIdx >= 0 {
parCallIdx = insertInlCall(dwcalls, parInlIdx, imap)
}
// Create new entry for this inline
-inlinedFn := base.Ctxt.InlTree.InlinedFunction(inlIdx)
-callXPos := base.Ctxt.InlTree.CallPos(inlIdx)
-absFnSym := base.Ctxt.DwFixups.AbsFuncDwarfSym(inlinedFn)
-pb := base.Ctxt.PosTable.Pos(callXPos).Base()
-callFileSym := base.Ctxt.Lookup(pb.SymFilename())
+inlinedFn := Ctxt.InlTree.InlinedFunction(inlIdx)
+callXPos := Ctxt.InlTree.CallPos(inlIdx)
+absFnSym := Ctxt.DwFixups.AbsFuncDwarfSym(inlinedFn)
+pb := Ctxt.PosTable.Pos(callXPos).Base()
+callFileSym := Ctxt.Lookup(pb.SymFilename())
ic := dwarf.InlCall{
InlIndex: inlIdx,
CallFile: callFileSym,
@@ -303,7 +299,7 @@ func insertInlCall(dwcalls *dwarf.InlCalls, inlIdx int, imap map[int]int) int {
// the index for a node from the inlined body of D will refer to the
// call to D from C. Whew.
func posInlIndex(xpos src.XPos) int {
pos := base.Ctxt.PosTable.Pos(xpos)
pos := Ctxt.PosTable.Pos(xpos)
if b := pos.Base(); b != nil {
ii := b.InliningIndex()
if ii >= 0 {
@@ -329,7 +325,7 @@ func addRange(calls []dwarf.InlCall, start, end int64, ii int, imap map[int]int)
// Append range to correct inlined call
callIdx, found := imap[ii]
if !found {
-base.Fatalf("can't find inlIndex %d in imap for prog at %d\n", ii, start)
+Fatalf("can't find inlIndex %d in imap for prog at %d\n", ii, start)
}
call := &calls[callIdx]
call.Ranges = append(call.Ranges, dwarf.Range{Start: start, End: end})
@@ -337,23 +333,23 @@ func addRange(calls []dwarf.InlCall, start, end int64, ii int, imap map[int]int)
func dumpInlCall(inlcalls dwarf.InlCalls, idx, ilevel int) {
for i := 0; i < ilevel; i++ {
-base.Ctxt.Logf(" ")
+Ctxt.Logf(" ")
}
ic := inlcalls.Calls[idx]
-callee := base.Ctxt.InlTree.InlinedFunction(ic.InlIndex)
-base.Ctxt.Logf(" %d: II:%d (%s) V: (", idx, ic.InlIndex, callee.Name)
+callee := Ctxt.InlTree.InlinedFunction(ic.InlIndex)
+Ctxt.Logf(" %d: II:%d (%s) V: (", idx, ic.InlIndex, callee.Name)
for _, f := range ic.InlVars {
-base.Ctxt.Logf(" %v", f.Name)
+Ctxt.Logf(" %v", f.Name)
}
-base.Ctxt.Logf(" ) C: (")
+Ctxt.Logf(" ) C: (")
for _, k := range ic.Children {
-base.Ctxt.Logf(" %v", k)
+Ctxt.Logf(" %v", k)
}
-base.Ctxt.Logf(" ) R:")
+Ctxt.Logf(" ) R:")
for _, r := range ic.Ranges {
-base.Ctxt.Logf(" [%d,%d)", r.Start, r.End)
+Ctxt.Logf(" [%d,%d)", r.Start, r.End)
}
-base.Ctxt.Logf("\n")
+Ctxt.Logf("\n")
for _, k := range ic.Children {
dumpInlCall(inlcalls, k, ilevel+1)
}
@@ -378,7 +374,7 @@ func dumpInlVars(dwvars []*dwarf.Var) {
if dwv.IsInAbstract {
ia = 1
}
-base.Ctxt.Logf("V%d: %s CI:%d II:%d IA:%d %s\n", i, dwv.Name, dwv.ChildIndex, dwv.InlIndex-1, ia, typ)
+Ctxt.Logf("V%d: %s CI:%d II:%d IA:%d %s\n", i, dwv.Name, dwv.ChildIndex, dwv.InlIndex-1, ia, typ)
}
}
@@ -415,7 +411,7 @@ func checkInlCall(funcName string, inlCalls dwarf.InlCalls, funcSize int64, idx,
// Callee
ic := inlCalls.Calls[idx]
-callee := base.Ctxt.InlTree.InlinedFunction(ic.InlIndex).Name
+callee := Ctxt.InlTree.InlinedFunction(ic.InlIndex).Name
calleeRanges := ic.Ranges
// Caller
@@ -423,14 +419,14 @@ func checkInlCall(funcName string, inlCalls dwarf.InlCalls, funcSize int64, idx,
parentRanges := []dwarf.Range{dwarf.Range{Start: int64(0), End: funcSize}}
if parentIdx != -1 {
pic := inlCalls.Calls[parentIdx]
-caller = base.Ctxt.InlTree.InlinedFunction(pic.InlIndex).Name
+caller = Ctxt.InlTree.InlinedFunction(pic.InlIndex).Name
parentRanges = pic.Ranges
}
// Callee ranges contained in caller ranges?
c, m := rangesContainsAll(parentRanges, calleeRanges)
if !c {
-base.Fatalf("** malformed inlined routine range in %s: caller %s callee %s II=%d %s\n", funcName, caller, callee, idx, m)
+Fatalf("** malformed inlined routine range in %s: caller %s callee %s II=%d %s\n", funcName, caller, callee, idx, m)
}
// Now visit kids


@@ -0,0 +1,256 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/syntax"
"cmd/compile/internal/types"
"cmd/internal/obj"
"encoding/json"
"io/ioutil"
"log"
"path"
"sort"
"strconv"
"strings"
)
var embedlist []*Node
var embedCfg struct {
Patterns map[string][]string
Files map[string]string
}
func readEmbedCfg(file string) {
data, err := ioutil.ReadFile(file)
if err != nil {
log.Fatalf("-embedcfg: %v", err)
}
if err := json.Unmarshal(data, &embedCfg); err != nil {
log.Fatalf("%s: %v", file, err)
}
if embedCfg.Patterns == nil {
log.Fatalf("%s: invalid embedcfg: missing Patterns", file)
}
if embedCfg.Files == nil {
log.Fatalf("%s: invalid embedcfg: missing Files", file)
}
}
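readEmbedCfg consumes the JSON the build system (the go command's -embedcfg flag) writes: Patterns maps each //go:embed pattern to the files it matched, and Files maps each matched name to its location on disk. A sketch that parses a hypothetical configuration by the same rules:

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

type embedCfg struct {
	Patterns map[string][]string
	Files    map[string]string
}

func main() {
	// The shape the go command would write for: //go:embed static/*
	// (paths here are made up for illustration).
	data := []byte(`{
		"Patterns": {"static/*": ["static/app.js", "static/index.html"]},
		"Files": {
			"static/app.js":     "/home/user/proj/static/app.js",
			"static/index.html": "/home/user/proj/static/index.html"
		}
	}`)

	var cfg embedCfg
	if err := json.Unmarshal(data, &cfg); err != nil {
		log.Fatalf("-embedcfg: %v", err)
	}
	if cfg.Patterns == nil || cfg.Files == nil {
		log.Fatal("invalid embedcfg: missing Patterns or Files")
	}
	fmt.Println(cfg.Patterns["static/*"])
}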
const (
embedUnknown = iota
embedBytes
embedString
embedFiles
)
func varEmbed(p *noder, names []*Node, typ *Node, exprs []*Node, embeds []PragmaEmbed) {
haveEmbed := false
for _, decl := range p.file.DeclList {
imp, ok := decl.(*syntax.ImportDecl)
if !ok {
// imports always come first
break
}
path, _ := strconv.Unquote(imp.Path.Value)
if path == "embed" {
haveEmbed = true
break
}
}
pos := embeds[0].Pos
if !haveEmbed {
p.yyerrorpos(pos, "invalid go:embed: missing import \"embed\"")
return
}
if len(names) > 1 {
p.yyerrorpos(pos, "go:embed cannot apply to multiple vars")
return
}
if len(exprs) > 0 {
p.yyerrorpos(pos, "go:embed cannot apply to var with initializer")
return
}
if typ == nil {
// Should not happen, since len(exprs) == 0 now.
p.yyerrorpos(pos, "go:embed cannot apply to var without type")
return
}
if dclcontext != PEXTERN {
p.yyerrorpos(pos, "go:embed cannot apply to var inside func")
return
}
var list []irEmbed
for _, e := range embeds {
list = append(list, irEmbed{Pos: p.makeXPos(e.Pos), Patterns: e.Patterns})
}
v := names[0]
v.Name.Param.SetEmbedList(list)
embedlist = append(embedlist, v)
}
func embedFileList(v *Node, kind int) []string {
// Build list of files to store.
have := make(map[string]bool)
var list []string
for _, e := range v.Name.Param.EmbedList() {
for _, pattern := range e.Patterns {
files, ok := embedCfg.Patterns[pattern]
if !ok {
yyerrorl(e.Pos, "invalid go:embed: build system did not map pattern: %s", pattern)
}
for _, file := range files {
if embedCfg.Files[file] == "" {
yyerrorl(e.Pos, "invalid go:embed: build system did not map file: %s", file)
continue
}
if !have[file] {
have[file] = true
list = append(list, file)
}
if kind == embedFiles {
for dir := path.Dir(file); dir != "." && !have[dir]; dir = path.Dir(dir) {
have[dir] = true
list = append(list, dir+"/")
}
}
}
}
}
sort.Slice(list, func(i, j int) bool {
return embedFileLess(list[i], list[j])
})
if kind == embedString || kind == embedBytes {
if len(list) > 1 {
yyerrorl(v.Pos, "invalid go:embed: multiple files for type %v", v.Type)
return nil
}
}
return list
}
// embedKind determines the kind of embedding variable.
func embedKind(typ *types.Type) int {
if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == localpkg && myimportpath == "embed")) {
return embedFiles
}
if typ.Etype == types.TSTRING {
return embedString
}
if typ.Etype == types.TSLICE && typ.Elem().Etype == types.TUINT8 {
return embedBytes
}
return embedUnknown
}
func embedFileNameSplit(name string) (dir, elem string, isDir bool) {
if name[len(name)-1] == '/' {
isDir = true
name = name[:len(name)-1]
}
i := len(name) - 1
for i >= 0 && name[i] != '/' {
i--
}
if i < 0 {
return ".", name, isDir
}
return name[:i], name[i+1:], isDir
}
// embedFileLess implements the sort order for a list of embedded files.
// See the comment inside ../../../../embed/embed.go's Files struct for rationale.
func embedFileLess(x, y string) bool {
xdir, xelem, _ := embedFileNameSplit(x)
ydir, yelem, _ := embedFileNameSplit(y)
return xdir < ydir || xdir == ydir && xelem < yelem
}
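embedFileLess sorts by (directory, base name) rather than by the raw string, so a directory's direct children stay grouped, and a directory entry like "p/q/" keys as dir "p", elem "q". A quick check of the resulting order (sketch reimplementing the split with strings helpers):

package main

import (
	"fmt"
	"sort"
	"strings"
)

func split(name string) (dir, elem string, isDir bool) {
	if strings.HasSuffix(name, "/") {
		isDir, name = true, name[:len(name)-1]
	}
	if i := strings.LastIndexByte(name, '/'); i >= 0 {
		return name[:i], name[i+1:], isDir
	}
	return ".", name, isDir
}

func main() {
	list := []string{"p/q/r.txt", "p/a.txt", "a.txt", "p/q/"}
	sort.Slice(list, func(i, j int) bool {
		xd, xe, _ := split(list[i])
		yd, ye, _ := split(list[j])
		return xd < yd || xd == yd && xe < ye
	})
	fmt.Println(list) // [a.txt p/a.txt p/q/ p/q/r.txt]
}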
func dumpembeds() {
for _, v := range embedlist {
initEmbed(v)
}
}
// initEmbed emits the init data for a //go:embed variable,
// which is either a string, a []byte, or an embed.FS.
func initEmbed(v *Node) {
commentPos := v.Name.Param.EmbedList()[0].Pos
if !langSupported(1, 16, localpkg) {
lno := lineno
lineno = commentPos
yyerrorv("go1.16", "go:embed")
lineno = lno
return
}
if embedCfg.Patterns == nil {
yyerrorl(commentPos, "invalid go:embed: build system did not supply embed configuration")
return
}
kind := embedKind(v.Type)
if kind == embedUnknown {
yyerrorl(v.Pos, "go:embed cannot apply to var of type %v", v.Type)
return
}
files := embedFileList(v, kind)
switch kind {
case embedString, embedBytes:
file := files[0]
fsym, size, err := fileStringSym(v.Pos, embedCfg.Files[file], kind == embedString, nil)
if err != nil {
yyerrorl(v.Pos, "embed %s: %v", file, err)
}
sym := v.Sym.Linksym()
off := 0
off = dsymptr(sym, off, fsym, 0) // data string
off = duintptr(sym, off, uint64(size)) // len
if kind == embedBytes {
duintptr(sym, off, uint64(size)) // cap for slice
}
case embedFiles:
slicedata := Ctxt.Lookup(`"".` + v.Sym.Name + `.files`)
off := 0
// []files pointed at by Files
off = dsymptr(slicedata, off, slicedata, 3*Widthptr) // []file, pointing just past slice
off = duintptr(slicedata, off, uint64(len(files)))
off = duintptr(slicedata, off, uint64(len(files)))
// embed/embed.go type file is:
// name string
// data string
// hash [16]byte
// Emit one of these per file in the set.
const hashSize = 16
hash := make([]byte, hashSize)
for _, file := range files {
off = dsymptr(slicedata, off, stringsym(v.Pos, file), 0) // file string
off = duintptr(slicedata, off, uint64(len(file)))
if strings.HasSuffix(file, "/") {
// entry for directory - no data
off = duintptr(slicedata, off, 0)
off = duintptr(slicedata, off, 0)
off += hashSize
} else {
fsym, size, err := fileStringSym(v.Pos, embedCfg.Files[file], true, hash)
if err != nil {
yyerrorl(v.Pos, "embed %s: %v", file, err)
}
off = dsymptr(slicedata, off, fsym, 0) // data string
off = duintptr(slicedata, off, uint64(size))
off = int(slicedata.WriteBytes(Ctxt, int64(off), hash))
}
}
ggloblsym(slicedata, int32(off), obj.RODATA|obj.LOCAL)
sym := v.Sym.Linksym()
dsymptr(sym, 0, slicedata, 0)
}
}
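The three kinds handled here line up with the three declaration forms //go:embed supports; anything else is embedUnknown and rejected. Standard Go 1.16 usage for reference (assumes hello.txt and a static/ directory exist next to the source file):

package main

import (
	"embed"
	"fmt"
)

//go:embed hello.txt
var asString string // embedString: exactly one file

//go:embed hello.txt
var asBytes []byte // embedBytes: exactly one file

//go:embed static
var asFS embed.FS // embedFiles: any number of files and directories

func main() {
	fmt.Println(len(asString), len(asBytes))
	entries, _ := asFS.ReadDir("static")
	fmt.Println(len(entries))
}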


@@ -0,0 +1,472 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/types"
"fmt"
)
func escapes(all []*Node) {
visitBottomUp(all, escapeFuncs)
}
const (
EscFuncUnknown = 0 + iota
EscFuncPlanned
EscFuncStarted
EscFuncTagged
)
func min8(a, b int8) int8 {
if a < b {
return a
}
return b
}
func max8(a, b int8) int8 {
if a > b {
return a
}
return b
}
const (
EscUnknown = iota
EscNone // Does not escape to heap, result, or parameters.
EscHeap // Reachable from the heap
EscNever // By construction will not escape.
)
// funcSym returns fn.Func.Nname.Sym if no nils are encountered along the way.
func funcSym(fn *Node) *types.Sym {
if fn == nil || fn.Func.Nname == nil {
return nil
}
return fn.Func.Nname.Sym
}
// Mark labels that have no backjumps to them as not increasing e.loopdepth.
// Walk hasn't generated (goto|label).Left.Sym.Label yet, so we'll cheat
// and set it to one of the following two. Then in esc we'll clear it again.
var (
looping Node
nonlooping Node
)
func isSliceSelfAssign(dst, src *Node) bool {
// Detect the following special case.
//
// func (b *Buffer) Foo() {
// n, m := ...
// b.buf = b.buf[n:m]
// }
//
// This assignment is a no-op for escape analysis,
// it does not store any new pointers into b that were not already there.
// However, without this special case b will escape, because we assign to OIND/ODOTPTR.
// Here we assume that the statement will not contain calls,
// that is, that order will move any calls to init.
// Otherwise base ONAME value could change between the moments
// when we evaluate it for dst and for src.
// dst is ONAME dereference.
if dst.Op != ODEREF && dst.Op != ODOTPTR || dst.Left.Op != ONAME {
return false
}
// src is a slice operation.
switch src.Op {
case OSLICE, OSLICE3, OSLICESTR:
// OK.
case OSLICEARR, OSLICE3ARR:
// Since arrays are embedded into containing object,
// slice of non-pointer array will introduce a new pointer into b that was not already there
// (pointer to b itself). After such assignment, if b contents escape,
// b escapes as well. If we ignore such OSLICEARR, we will conclude
// that b does not escape when b contents do.
//
// Pointer to an array is OK since it's not stored inside b directly.
// For slicing an array (not pointer to array), there is an implicit OADDR.
// We check that to determine non-pointer array slicing.
if src.Left.Op == OADDR {
return false
}
default:
return false
}
// slice is applied to ONAME dereference.
if src.Left.Op != ODEREF && src.Left.Op != ODOTPTR || src.Left.Left.Op != ONAME {
return false
}
// dst and src reference the same base ONAME.
return dst.Left == src.Left.Left
}
// isSelfAssign reports whether assignment from src to dst can
// be ignored by the escape analysis as it's effectively a self-assignment.
func isSelfAssign(dst, src *Node) bool {
if isSliceSelfAssign(dst, src) {
return true
}
// Detect trivial assignments that assign back to the same object.
//
// It covers these cases:
// val.x = val.y
// val.x[i] = val.y[j]
// val.x1.x2 = val.x1.y2
// ... etc
//
// These assignments do not change assigned object lifetime.
if dst == nil || src == nil || dst.Op != src.Op {
return false
}
switch dst.Op {
case ODOT, ODOTPTR:
// Safe trailing accessors that are permitted to differ.
case OINDEX:
if mayAffectMemory(dst.Right) || mayAffectMemory(src.Right) {
return false
}
default:
return false
}
// The expression prefix must be both "safe" and identical.
return samesafeexpr(dst.Left, src.Left)
}
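The whitelisted pattern is the common buffer-trimming idiom; without this special case, the store through b would make every such receiver escape. Concretely (illustrative; the decision can be observed with -gcflags=-m):

package main

type Buffer struct {
	buf []byte
}

// Trim is a no-op for escape analysis: b.buf = b.buf[n:m] stores no
// pointer into *b that was not already there, so b need not escape.
func (b *Buffer) Trim(n, m int) {
	b.buf = b.buf[n:m]
}

func main() {
	b := Buffer{buf: make([]byte, 8)}
	b.Trim(2, 6)
	println(len(b.buf)) // 4
}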
// mayAffectMemory reports whether evaluation of n may affect the program's
// memory state. If the expression can't affect memory state, then it can be
// safely ignored by the escape analysis.
func mayAffectMemory(n *Node) bool {
// We may want to use a list of "memory safe" ops instead of generally
// "side-effect free", which would include all calls and other ops that can
// allocate or change global state. For now, it's safer to start with the latter.
//
// We're ignoring things like division by zero, index out of range,
// and nil pointer dereference here.
switch n.Op {
case ONAME, OCLOSUREVAR, OLITERAL:
return false
// Left+Right group.
case OINDEX, OADD, OSUB, OOR, OXOR, OMUL, OLSH, ORSH, OAND, OANDNOT, ODIV, OMOD:
return mayAffectMemory(n.Left) || mayAffectMemory(n.Right)
// Left group.
case ODOT, ODOTPTR, ODEREF, OCONVNOP, OCONV, OLEN, OCAP,
ONOT, OBITNOT, OPLUS, ONEG, OALIGNOF, OOFFSETOF, OSIZEOF:
return mayAffectMemory(n.Left)
default:
return true
}
}
// heapAllocReason returns the reason the given Node must be heap
// allocated, or the empty string if it doesn't.
func heapAllocReason(n *Node) string {
if n.Type == nil {
return ""
}
// Parameters are always passed via the stack.
if n.Op == ONAME && (n.Class() == PPARAM || n.Class() == PPARAMOUT) {
return ""
}
if n.Type.Width > maxStackVarSize {
return "too large for stack"
}
if (n.Op == ONEW || n.Op == OPTRLIT) && n.Type.Elem().Width >= maxImplicitStackVarSize {
return "too large for stack"
}
if n.Op == OCLOSURE && closureType(n).Size() >= maxImplicitStackVarSize {
return "too large for stack"
}
if n.Op == OCALLPART && partialCallType(n).Size() >= maxImplicitStackVarSize {
return "too large for stack"
}
if n.Op == OMAKESLICE {
r := n.Right
if r == nil {
r = n.Left
}
if !smallintconst(r) {
return "non-constant size"
}
if t := n.Type; t.Elem().Width != 0 && r.Int64Val() >= maxImplicitStackVarSize/t.Elem().Width {
return "too large for stack"
}
}
return ""
}
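Each branch corresponds to a diagnostic visible with go build -gcflags=-m. The limits tested are maxStackVarSize and maxImplicitStackVarSize; in compilers of this vintage those default to 10 MB and 64 KB respectively (stated here as an assumption). The make([]T, n) cases, for instance:

package main

func main() {
	const k = 16
	n := 100000

	a := make([]byte, k)     // constant and small: may stay on the stack
	b := make([]byte, n)     // "non-constant size": heap allocated
	c := make([]byte, 1<<20) // constant but over the implicit limit: heap

	_, _, _ = a, b, c
}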
// addrescapes tags node n as having had its address taken
// by "increasing" the "value" of n.Esc to EscHeap.
// Storage is allocated as necessary to allow the address
// to be taken.
func addrescapes(n *Node) {
switch n.Op {
default:
// Unexpected Op, probably due to a previous type error. Ignore.
case ODEREF, ODOTPTR:
// Nothing to do.
case ONAME:
if n == nodfp {
break
}
// if this is a tmpname (PAUTO), it was tagged by tmpname as not escaping.
// on PPARAM it means something different.
if n.Class() == PAUTO && n.Esc == EscNever {
break
}
// If a closure reference escapes, mark the outer variable as escaping.
if n.Name.IsClosureVar() {
addrescapes(n.Name.Defn)
break
}
if n.Class() != PPARAM && n.Class() != PPARAMOUT && n.Class() != PAUTO {
break
}
// This is a plain parameter or local variable that needs to move to the heap,
// but possibly for the function outside the one we're compiling.
// That is, if we have:
//
// func f(x int) {
// func() {
// global = &x
// }
// }
//
// then we're analyzing the inner closure but we need to move x to the
// heap in f, not in the inner closure. Flip over to f before calling moveToHeap.
oldfn := Curfn
Curfn = n.Name.Curfn
if Curfn.Func.Closure != nil && Curfn.Op == OCLOSURE {
Curfn = Curfn.Func.Closure
}
ln := lineno
lineno = Curfn.Pos
moveToHeap(n)
Curfn = oldfn
lineno = ln
// ODOTPTR has already been introduced,
// so these are the non-pointer ODOT and OINDEX.
// In &x[0], if x is a slice, then x does not
// escape--the pointer inside x does, but that
// is always a heap pointer anyway.
case ODOT, OINDEX, OPAREN, OCONVNOP:
if !n.Left.Type.IsSlice() {
addrescapes(n.Left)
}
}
}
// moveToHeap records the parameter or local variable n as moved to the heap.
func moveToHeap(n *Node) {
if Debug.r != 0 {
Dump("MOVE", n)
}
if compiling_runtime {
yyerror("%v escapes to heap, not allowed in runtime", n)
}
if n.Class() == PAUTOHEAP {
Dump("n", n)
Fatalf("double move to heap")
}
// Allocate a local stack variable to hold the pointer to the heap copy.
// temp will add it to the function declaration list automatically.
heapaddr := temp(types.NewPtr(n.Type))
heapaddr.Sym = lookup("&" + n.Sym.Name)
heapaddr.Orig.Sym = heapaddr.Sym
heapaddr.Pos = n.Pos
// Unset AutoTemp to persist the &foo variable name through SSA to
// liveness analysis.
// TODO(mdempsky/drchase): Cleaner solution?
heapaddr.Name.SetAutoTemp(false)
// Parameters have a local stack copy used at function start/end
// in addition to the copy in the heap that may live longer than
// the function.
if n.Class() == PPARAM || n.Class() == PPARAMOUT {
if n.Xoffset == BADWIDTH {
Fatalf("addrescapes before param assignment")
}
// We rewrite n below to be a heap variable (indirection of heapaddr).
// Preserve a copy so we can still write code referring to the original,
// and substitute that copy into the function declaration list
// so that analyses of the local (on-stack) variables use it.
stackcopy := newname(n.Sym)
stackcopy.Type = n.Type
stackcopy.Xoffset = n.Xoffset
stackcopy.SetClass(n.Class())
stackcopy.Name.Param.Heapaddr = heapaddr
if n.Class() == PPARAMOUT {
// Make sure the pointer to the heap copy is kept live throughout the function.
// The function could panic at any point, and then a defer could recover.
// Thus, we need the pointer to the heap copy always available so the
// post-deferreturn code can copy the return value back to the stack.
// See issue 16095.
heapaddr.Name.SetIsOutputParamHeapAddr(true)
}
n.Name.Param.Stackcopy = stackcopy
// Substitute the stackcopy into the function variable list so that
// liveness and other analyses use the underlying stack slot
// and not the now-pseudo-variable n.
found := false
for i, d := range Curfn.Func.Dcl {
if d == n {
Curfn.Func.Dcl[i] = stackcopy
found = true
break
}
// Parameters are before locals, so can stop early.
// This limits the search even in functions with many local variables.
if d.Class() == PAUTO {
break
}
}
if !found {
Fatalf("cannot find %v in local variable list", n)
}
Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
}
// Modify n in place so that uses of n now mean indirection of the heapaddr.
n.SetClass(PAUTOHEAP)
n.Xoffset = 0
n.Name.Param.Heapaddr = heapaddr
n.Esc = EscHeap
if Debug.m != 0 {
Warnl(n.Pos, "moved to heap: %v", n)
}
}
// This special tag is applied to uintptr variables
// that we believe may hold unsafe.Pointers for
// calls into assembly functions.
const unsafeUintptrTag = "unsafe-uintptr"
// This special tag is applied to uintptr parameters of functions
// marked go:uintptrescapes.
const uintptrEscapesTag = "uintptr-escapes"
func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string {
name := func() string {
if f.Sym != nil {
return f.Sym.Name
}
return fmt.Sprintf("arg#%d", narg)
}
if fn.Nbody.Len() == 0 {
// Assume that uintptr arguments must be held live across the call.
// This is most important for syscall.Syscall.
// See golang.org/issue/13372.
// This really doesn't have much to do with escape analysis per se,
// but we are reusing the ability to annotate an individual function
// argument and pass those annotations along to importing code.
if f.Type.IsUintptr() {
if Debug.m != 0 {
Warnl(f.Pos, "assuming %v is unsafe uintptr", name())
}
return unsafeUintptrTag
}
if !f.Type.HasPointers() { // don't bother tagging for scalars
return ""
}
var esc EscLeaks
// External functions are assumed unsafe, unless
// //go:noescape is given before the declaration.
if fn.Func.Pragma&Noescape != 0 {
if Debug.m != 0 && f.Sym != nil {
Warnl(f.Pos, "%v does not escape", name())
}
} else {
if Debug.m != 0 && f.Sym != nil {
Warnl(f.Pos, "leaking param: %v", name())
}
esc.AddHeap(0)
}
return esc.Encode()
}
if fn.Func.Pragma&UintptrEscapes != 0 {
if f.Type.IsUintptr() {
if Debug.m != 0 {
Warnl(f.Pos, "marking %v as escaping uintptr", name())
}
return uintptrEscapesTag
}
if f.IsDDD() && f.Type.Elem().IsUintptr() {
// final argument is ...uintptr.
if Debug.m != 0 {
Warnl(f.Pos, "marking %v as escaping ...uintptr", name())
}
return uintptrEscapesTag
}
}
if !f.Type.HasPointers() { // don't bother tagging for scalars
return ""
}
// Unnamed parameters are unused and therefore do not escape.
if f.Sym == nil || f.Sym.IsBlank() {
var esc EscLeaks
return esc.Encode()
}
n := asNode(f.Nname)
loc := e.oldLoc(n)
esc := loc.paramEsc
esc.Optimize()
if Debug.m != 0 && !loc.escapes {
if esc.Empty() {
Warnl(f.Pos, "%v does not escape", name())
}
if x := esc.Heap(); x >= 0 {
if x == 0 {
Warnl(f.Pos, "leaking param: %v", name())
} else {
// TODO(mdempsky): Mention level=x like below?
Warnl(f.Pos, "leaking param content: %v", name())
}
}
for i := 0; i < numEscResults; i++ {
if x := esc.Result(i); x >= 0 {
res := fn.Type.Results().Field(i).Sym
Warnl(f.Pos, "leaking param: %v to result %v level=%d", name(), res, x)
}
}
}
return esc.Encode()
}
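paramTag is also where the familiar -m escape diagnostics come from. A program that should trigger each of the three messages mentioned above (exact wording is compiler-version-dependent):

package main

var sink *int

func leak(p *int) { sink = p } // leaking param: p

func leakContent(p **int) { sink = *p } // leaking param content: p

func passThrough(p *int) *int { return p } // leaking param: p to result ~r0 level=0

func main() {
	x := 1
	leak(&x)
	y := &x
	leakContent(&y)
	_ = passThrough(&x)
}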

File diff suppressed because it is too large


@@ -5,73 +5,225 @@
package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/inline"
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/bio"
"cmd/internal/src"
"fmt"
"go/constant"
)
var (
Debug_export int // if set, print debugging information about export data
)
func exportf(bout *bio.Writer, format string, args ...interface{}) {
fmt.Fprintf(bout, format, args...)
-if base.Debug.Export != 0 {
+if Debug_export != 0 {
fmt.Printf(format, args...)
}
}
func dumpexport(bout *bio.Writer) {
p := &exporter{marked: make(map[*types.Type]bool)}
for _, n := range typecheck.Target.Exports {
// Must catch it here rather than Export(), because the type can be
// not fully set (still TFORW) when Export() is called.
if n.Type() != nil && n.Type().HasTParam() {
base.Fatalf("Cannot (yet) export a generic type: %v", n)
}
p.markObject(n)
var asmlist []*Node
// exportsym marks n for export (or reexport).
func exportsym(n *Node) {
if n.Sym.OnExportList() {
return
}
n.Sym.SetOnExportList(true)
if Debug.E != 0 {
fmt.Printf("export symbol %v\n", n.Sym)
}
exportlist = append(exportlist, n)
}
func initname(s string) bool {
return s == "init"
}
func autoexport(n *Node, ctxt Class) {
if n.Sym.Pkg != localpkg {
return
}
if (ctxt != PEXTERN && ctxt != PFUNC) || dclcontext != PEXTERN {
return
}
if n.Type != nil && n.Type.IsKind(TFUNC) && n.IsMethod() {
return
}
if types.IsExported(n.Sym.Name) || initname(n.Sym.Name) {
exportsym(n)
}
if asmhdr != "" && !n.Sym.Asm() {
n.Sym.SetAsm(true)
asmlist = append(asmlist, n)
}
}
func dumpexport(bout *bio.Writer) {
// The linker also looks for the $$ marker - use char after $$ to distinguish format.
exportf(bout, "\n$$B\n") // indicate binary export format
off := bout.Offset()
-typecheck.WriteExports(bout.Writer)
+iexport(bout.Writer)
size := bout.Offset() - off
exportf(bout, "\n$$\n")
-if base.Debug.Export != 0 {
-fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", base.Ctxt.Pkgpath, size)
+if Debug_export != 0 {
+fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", myimportpath, size)
}
}
func importsym(ipkg *types.Pkg, s *types.Sym, op Op) *Node {
n := asNode(s.PkgDef())
if n == nil {
// iimport should have created a stub ONONAME
// declaration for all imported symbols. The exception
// is declarations for Runtimepkg, which are populated
// by loadsys instead.
if s.Pkg != Runtimepkg {
Fatalf("missing ONONAME for %v\n", s)
}
n = dclname(s)
s.SetPkgDef(asTypesNode(n))
s.Importdef = ipkg
}
if n.Op != ONONAME && n.Op != op {
redeclare(lineno, s, fmt.Sprintf("during import %q", ipkg.Path))
}
return n
}
// importtype returns the named type declared by symbol s.
// If no such type has been declared yet, a forward declaration is returned.
// ipkg is the package being imported
func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *types.Type {
n := importsym(ipkg, s, OTYPE)
if n.Op != OTYPE {
t := types.New(TFORW)
t.Sym = s
t.Nod = asTypesNode(n)
n.Op = OTYPE
n.Pos = pos
n.Type = t
n.SetClass(PEXTERN)
}
t := n.Type
if t == nil {
Fatalf("importtype %v", s)
}
return t
}
// importobj declares symbol s as an imported object representable by op.
// ipkg is the package being imported
func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op Op, ctxt Class, t *types.Type) *Node {
n := importsym(ipkg, s, op)
if n.Op != ONONAME {
if n.Op == op && (n.Class() != ctxt || !types.Identical(n.Type, t)) {
redeclare(lineno, s, fmt.Sprintf("during import %q", ipkg.Path))
}
return nil
}
n.Op = op
n.Pos = pos
n.SetClass(ctxt)
if ctxt == PFUNC {
n.Sym.SetFunc(true)
}
n.Type = t
return n
}
// importconst declares symbol s as an imported constant with type t and value val.
// ipkg is the package being imported
func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val Val) {
n := importobj(ipkg, pos, s, OLITERAL, PEXTERN, t)
if n == nil { // TODO: Check that value matches.
return
}
n.SetVal(val)
if Debug.E != 0 {
fmt.Printf("import const %v %L = %v\n", s, t, val)
}
}
// importfunc declares symbol s as an imported function with type t.
// ipkg is the package being imported
func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
n := importobj(ipkg, pos, s, ONAME, PFUNC, t)
if n == nil {
return
}
n.Func = new(Func)
t.SetNname(asTypesNode(n))
if Debug.E != 0 {
fmt.Printf("import func %v%S\n", s, t)
}
}
// importvar declares symbol s as an imported variable with type t.
// ipkg is the package being imported
func importvar(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
n := importobj(ipkg, pos, s, ONAME, PEXTERN, t)
if n == nil {
return
}
if Debug.E != 0 {
fmt.Printf("import var %v %L\n", s, t)
}
}
// importalias declares symbol s as an imported type alias with type t.
// ipkg is the package being imported
func importalias(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
n := importobj(ipkg, pos, s, OTYPE, PEXTERN, t)
if n == nil {
return
}
if Debug.E != 0 {
fmt.Printf("import type %v = %L\n", s, t)
}
}
func dumpasmhdr() {
-b, err := bio.Create(base.Flag.AsmHdr)
+b, err := bio.Create(asmhdr)
if err != nil {
-base.Fatalf("%v", err)
+Fatalf("%v", err)
}
fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", types.LocalPkg.Name)
for _, n := range typecheck.Target.Asms {
if n.Sym().IsBlank() {
fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", localpkg.Name)
for _, n := range asmlist {
if n.Sym.IsBlank() {
continue
}
-switch n.Op() {
-case ir.OLITERAL:
-t := n.Val().Kind()
-if t == constant.Float || t == constant.Complex {
+switch n.Op {
+case OLITERAL:
+t := n.Val().Ctype()
+if t == CTFLT || t == CTCPLX {
break
}
fmt.Fprintf(b, "#define const_%s %#v\n", n.Sym().Name, n.Val())
fmt.Fprintf(b, "#define const_%s %#v\n", n.Sym.Name, n.Val())
-case ir.OTYPE:
-t := n.Type()
+case OTYPE:
+t := n.Type
if !t.IsStruct() || t.StructType().Map != nil || t.IsFuncArgStruct() {
break
}
fmt.Fprintf(b, "#define %s__size %d\n", n.Sym().Name, int(t.Width))
fmt.Fprintf(b, "#define %s__size %d\n", n.Sym.Name, int(t.Width))
for _, f := range t.Fields().Slice() {
if !f.Sym.IsBlank() {
fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym().Name, f.Sym.Name, int(f.Offset))
fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym.Name, f.Sym.Name, int(f.Offset))
}
}
}
@@ -79,83 +231,3 @@ func dumpasmhdr() {
b.Close()
}
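For a sense of what dumpasmhdr emits: given a hypothetical package with const bufSize = 64 and type hdr struct { a, b int64 } on a 64-bit target, the Fprintf calls above would produce a header roughly like:

// generated by compile -asmhdr from package main

#define const_bufSize 64
#define hdr__size 16
#define hdr_a 0
#define hdr_b 8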
type exporter struct {
marked map[*types.Type]bool // types already seen by markType
}
// markObject visits a reachable object.
func (p *exporter) markObject(n ir.Node) {
if n.Op() == ir.ONAME {
n := n.(*ir.Name)
if n.Class == ir.PFUNC {
inline.Inline_Flood(n, typecheck.Export)
}
}
p.markType(n.Type())
}
// markType recursively visits types reachable from t to identify
// functions whose inline bodies may be needed.
func (p *exporter) markType(t *types.Type) {
if p.marked[t] {
return
}
p.marked[t] = true
// If this is a named type, mark all of its associated
// methods. Skip interface types because t.Methods contains
// only their unexpanded method set (i.e., exclusive of
// interface embeddings), and the switch statement below
// handles their full method set.
if t.Sym() != nil && t.Kind() != types.TINTER {
for _, m := range t.Methods().Slice() {
if types.IsExported(m.Sym.Name) {
p.markObject(ir.AsNode(m.Nname))
}
}
}
// Recursively mark any types that can be produced given a
// value of type t: dereferencing a pointer; indexing or
// iterating over an array, slice, or map; receiving from a
// channel; accessing a struct field or interface method; or
// calling a function.
//
// Notably, we don't mark function parameter types, because
// the user already needs some way to construct values of
// those types.
switch t.Kind() {
case types.TPTR, types.TARRAY, types.TSLICE:
p.markType(t.Elem())
case types.TCHAN:
if t.ChanDir().CanRecv() {
p.markType(t.Elem())
}
case types.TMAP:
p.markType(t.Key())
p.markType(t.Elem())
case types.TSTRUCT:
for _, f := range t.FieldSlice() {
if types.IsExported(f.Sym.Name) || f.Embedded != 0 {
p.markType(f.Type)
}
}
case types.TFUNC:
for _, f := range t.Results().FieldSlice() {
p.markType(f.Type)
}
case types.TINTER:
for _, f := range t.FieldSlice() {
if types.IsExported(f.Sym.Name) {
p.markType(f.Type)
}
}
}
}
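markType is a plain depth-first reachability walk: the marked map doubles as the visited set, which is what lets it terminate on recursive types. The same pattern in isolation, over a toy type graph rather than *types.Type:

package main

import "fmt"

// ty is a toy stand-in for a compiler type: a node with edges to the
// types reachable from a value of this type (elements, fields, results).
type ty struct {
	name  string
	elems []*ty
}

// markType follows the compiler's pattern: consult the visited set
// first, then recurse over the outgoing edges.
func markType(t *ty, marked map[*ty]bool) {
	if marked[t] {
		return
	}
	marked[t] = true
	for _, e := range t.elems {
		markType(e, marked)
	}
}

func main() {
	node := &ty{name: "Node"}
	node.elems = []*ty{node} // self-referential, like a linked list
	marked := map[*ty]bool{}
	markType(node, marked)
	fmt.Println(len(marked)) // 1; the cycle terminates
}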


@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package test
+package gc
import (
"internal/testenv"
@@ -75,7 +75,7 @@ func TestIssue16214(t *testing.T) {
cmd := exec.Command(testenv.GoToolPath(t), "tool", "compile", "-S", "-o", filepath.Join(dir, "out.o"), src)
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("go tool compile: %v\n%s", err, out)
t.Fatalf("fail to run go tool compile: %v", err)
}
if strings.Contains(string(out), "unknown line number") {


@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package test
+package gc
import (
"math"

File diff suppressed because it is too large


@@ -0,0 +1,86 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
"strconv"
)
// sysfunc looks up a Go function by name in package runtime. Such a function
// must follow the internal calling convention.
func sysfunc(name string) *obj.LSym {
s := Runtimepkg.Lookup(name)
s.SetFunc(true)
return s.Linksym()
}
// sysvar looks up a variable (or assembly function) name in package
// runtime. If this is a function, it may have a special calling
// convention.
func sysvar(name string) *obj.LSym {
return Runtimepkg.Lookup(name).Linksym()
}
// isParamStackCopy reports whether this is the on-stack copy of a
// function parameter that moved to the heap.
func (n *Node) isParamStackCopy() bool {
return n.Op == ONAME && (n.Class() == PPARAM || n.Class() == PPARAMOUT) && n.Name.Param.Heapaddr != nil
}
// isParamHeapCopy reports whether this is the on-heap copy of
// a function parameter that moved to the heap.
func (n *Node) isParamHeapCopy() bool {
return n.Op == ONAME && n.Class() == PAUTOHEAP && n.Name.Param.Stackcopy != nil
}
// autotmpname returns the name for an autotmp variable numbered n.
func autotmpname(n int) string {
// Give each tmp a different name so that they can be registerized.
// Add a preceding . to avoid clashing with legal names.
const prefix = ".autotmp_"
// Start with a buffer big enough to hold a large n.
b := []byte(prefix + "      ")[:len(prefix)]
b = strconv.AppendInt(b, int64(n), 10)
return types.InternString(b)
}
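autotmpname sidesteps fmt.Sprintf(".autotmp_%d", n): it reslices a padded buffer back to the prefix and lets strconv.AppendInt write the digits in place. The same trick as a standalone sketch:

package main

import (
	"fmt"
	"strconv"
)

// tmpName builds ".autotmp_<n>" without going through fmt: the byte
// slice keeps the capacity of prefix plus padding, so AppendInt can
// usually extend it without reallocating.
func tmpName(n int) string {
	const prefix = ".autotmp_"
	b := []byte(prefix + "      ")[:len(prefix)]
	b = strconv.AppendInt(b, int64(n), 10)
	return string(b)
}

func main() {
	fmt.Println(tmpName(42)) // .autotmp_42
}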
// tempAt makes a new temporary Node, off the books.
func tempAt(pos src.XPos, curfn *Node, t *types.Type) *Node {
if curfn == nil {
Fatalf("no curfn for tempAt")
}
if curfn.Func.Closure != nil && curfn.Op == OCLOSURE {
Dump("tempAt", curfn)
Fatalf("adding tempAt to wrong closure function")
}
if t == nil {
Fatalf("tempAt called with nil type")
}
s := &types.Sym{
Name: autotmpname(len(curfn.Func.Dcl)),
Pkg: localpkg,
}
n := newnamel(pos, s)
s.Def = asTypesNode(n)
n.Type = t
n.SetClass(PAUTO)
n.Esc = EscNever
n.Name.Curfn = curfn
n.Name.SetUsed(true)
n.Name.SetAutoTemp(true)
curfn.Func.Dcl = append(curfn.Func.Dcl, n)
dowidth(t)
return n.Orig
}
func temp(t *types.Type) *Node {
return tempAt(lineno, Curfn, t)
}


@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package test
+package gc
import (
"bytes"
@@ -50,7 +50,7 @@ func main() {
cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", dst, src)
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("could not build target: %v\n%s", err, out)
t.Fatalf("could not build target: %v", err)
}
// Check destination to see if scanf code was included.
@@ -95,7 +95,7 @@ func main() {
cmd := exec.Command(testenv.GoToolPath(t), "build", "-gcflags", "-S", "-o", filepath.Join(dir, "test"), src)
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("could not build target: %v\n%s", err, out)
t.Fatalf("could not build target: %v", err)
}
patterns := []string{


@@ -0,0 +1,349 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
"sync"
)
const (
BADWIDTH = types.BADWIDTH
)
var (
// maximum size of a variable which we will allocate on the stack.
// This limit is for explicit variable declarations like "var x T" or "x := ...".
// Note: the flag smallframes can update this value.
maxStackVarSize = int64(10 * 1024 * 1024)
// maximum size of implicit variables that we will allocate on the stack.
// p := new(T) allocating T on the stack
// p := &T{} allocating T on the stack
// s := make([]T, n) allocating [n]T on the stack
// s := []byte("...") allocating [n]byte on the stack
// Note: the flag smallframes can update this value.
maxImplicitStackVarSize = int64(64 * 1024)
// smallArrayBytes is the maximum size of an array which is considered small.
// Small arrays will be initialized directly with a sequence of constant stores.
// Large arrays will be initialized by copying from a static temp.
// 256 bytes was chosen to minimize generated code + statictmp size.
smallArrayBytes = int64(256)
)
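These limits have a visible effect: a make() whose backing array exceeds maxImplicitStackVarSize is heap-allocated even if it does not otherwise escape. A small illustration, assuming the 64 KiB default above:

package main

func alloc() (int, int) {
	small := make([]byte, 64<<10)  // at the limit: may stay on the stack
	big := make([]byte, 64<<10+1)  // over the limit: moved to the heap
	return len(small), len(big)
}

func main() {
	a, b := alloc()
	println(a, b)
}

Compiling with -gcflags=-m should show an "escapes to heap" diagnostic for the second make but not the first.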
// isRuntimePkg reports whether p is package runtime.
func isRuntimePkg(p *types.Pkg) bool {
if compiling_runtime && p == localpkg {
return true
}
return p.Path == "runtime"
}
// isReflectPkg reports whether p is package reflect.
func isReflectPkg(p *types.Pkg) bool {
if p == localpkg {
return myimportpath == "reflect"
}
return p.Path == "reflect"
}
// The Class of a variable/function describes the "storage class"
// of a variable or function. During parsing, storage classes are
// called declaration contexts.
type Class uint8
//go:generate stringer -type=Class
const (
Pxxx Class = iota // no class; used during ssa conversion to indicate pseudo-variables
PEXTERN // global variables
PAUTO // local variables
PAUTOHEAP // local variables or parameters moved to heap
PPARAM // input arguments
PPARAMOUT // output results
PFUNC // global functions
// Careful: Class is stored in three bits in Node.flags.
_ = uint((1 << 3) - iota) // static assert for iota <= (1 << 3)
)
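The blank assignment at the end of the const block is a compile-time size check: Class is packed into three bits of Node.flags, so (1 << 3) - iota goes negative as soon as too many classes are declared, and converting a negative constant to uint is a compile error. The idiom in isolation, with a hypothetical two-bit enum:

package main

type color uint8

const (
	red color = iota
	green
	blue
	// Static assert that the values above fit in two bits: declaring
	// a fifth color makes (1 << 2) - iota negative, and the constant
	// conversion to uint then fails to compile.
	_ = uint((1 << 2) - iota)
)

func main() {
	println(red, green, blue)
}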
// Slices in the runtime are represented by three components:
//
// type slice struct {
// ptr unsafe.Pointer
// len int
// cap int
// }
//
// Strings in the runtime are represented by two components:
//
// type string struct {
// ptr unsafe.Pointer
// len int
// }
//
// These variables are the offsets of fields and sizes of these structs.
var (
slicePtrOffset int64
sliceLenOffset int64
sliceCapOffset int64
sizeofSlice int64
sizeofString int64
)
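The offsets and sizes recorded here mirror the user-visible headers, which can be sanity-checked with unsafe. On a 64-bit target (halve the numbers on 32-bit):

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	var s []byte
	var str string
	// Slice header: ptr+len+cap = 24 bytes; string header: ptr+len = 16.
	fmt.Println(unsafe.Sizeof(s), unsafe.Sizeof(str)) // 24 16
}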
var pragcgobuf [][]string
var outfile string
var linkobj string
// nerrors is the number of compiler errors reported
// since the last call to saveerrors.
var nerrors int
// nsavederrors is the total number of compiler errors
// reported before the last call to saveerrors.
var nsavederrors int
var nsyntaxerrors int
var decldepth int32
var nolocalimports bool
// gc debug flags
type DebugFlags struct {
P, B, C, E,
K, L, N, S,
W, e, h, j,
l, m, r, w int
}
var Debug DebugFlags
var debugstr string
var Debug_checknil int
var Debug_typeassert int
var localpkg *types.Pkg // package being compiled
var inimport bool // set during import
var itabpkg *types.Pkg // fake pkg for itab entries
var itablinkpkg *types.Pkg // fake package for runtime itab entries
var Runtimepkg *types.Pkg // fake package runtime
var racepkg *types.Pkg // package runtime/race
var msanpkg *types.Pkg // package runtime/msan
var unsafepkg *types.Pkg // package unsafe
var trackpkg *types.Pkg // fake package for field tracking
var mappkg *types.Pkg // fake package for map zero value
var gopkg *types.Pkg // pseudo-package for method symbols on anonymous receiver types
var zerosize int64
var myimportpath string
var localimport string
var asmhdr string
var simtype [NTYPE]types.EType
var (
isInt [NTYPE]bool
isFloat [NTYPE]bool
isComplex [NTYPE]bool
issimple [NTYPE]bool
)
var (
okforeq [NTYPE]bool
okforadd [NTYPE]bool
okforand [NTYPE]bool
okfornone [NTYPE]bool
okforcmp [NTYPE]bool
okforbool [NTYPE]bool
okforcap [NTYPE]bool
okforlen [NTYPE]bool
okforarith [NTYPE]bool
okforconst [NTYPE]bool
)
var (
okfor [OEND][]bool
iscmp [OEND]bool
)
var minintval [NTYPE]*Mpint
var maxintval [NTYPE]*Mpint
var minfltval [NTYPE]*Mpflt
var maxfltval [NTYPE]*Mpflt
var xtop []*Node
var exportlist []*Node
var importlist []*Node // imported functions and methods with inlinable bodies
var (
funcsymsmu sync.Mutex // protects funcsyms and associated package lookups (see func funcsym)
funcsyms []*types.Sym
)
var dclcontext Class // PEXTERN/PAUTO
var Curfn *Node
var Widthptr int
var Widthreg int
var nblank *Node
var typecheckok bool
var compiling_runtime bool
// Compiling the standard library
var compiling_std bool
var use_writebarrier bool
var pure_go bool
var flag_installsuffix string
var flag_race bool
var flag_msan bool
var flagDWARF bool
// Whether we are adding any sort of code instrumentation, such as
// when the race detector is enabled.
var instrumenting bool
// Whether we are tracking lexical scopes for DWARF.
var trackScopes bool
// Controls generation of DWARF inlined instance records. Zero
// disables, 1 emits inlined routines but suppresses var info,
// and 2 emits inlined routines with tracking of formals/locals.
var genDwarfInline int
var debuglive int
var Ctxt *obj.Link
var writearchive bool
var nodfp *Node
var disable_checknil int
var autogeneratedPos src.XPos
// interface to back end
type Arch struct {
LinkArch *obj.LinkArch
REGSP int
MAXWIDTH int64
SoftFloat bool
PadFrame func(int64) int64
// ZeroRange zeroes a range of memory on stack. It is only inserted
// at function entry, and it is ok to clobber registers.
ZeroRange func(*Progs, *obj.Prog, int64, int64, *uint32) *obj.Prog
Ginsnop func(*Progs) *obj.Prog
Ginsnopdefer func(*Progs) *obj.Prog // special ginsnop for deferreturn
// SSAMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
SSAMarkMoves func(*SSAGenState, *ssa.Block)
// SSAGenValue emits Prog(s) for the Value.
SSAGenValue func(*SSAGenState, *ssa.Value)
// SSAGenBlock emits end-of-block Progs. SSAGenValue should be called
// for all values in the block before SSAGenBlock.
SSAGenBlock func(s *SSAGenState, b, next *ssa.Block)
}
var thearch Arch
var (
staticuint64s,
zerobase *Node
assertE2I,
assertE2I2,
assertI2I,
assertI2I2,
deferproc,
deferprocStack,
Deferreturn,
Duffcopy,
Duffzero,
gcWriteBarrier,
goschedguarded,
growslice,
msanread,
msanwrite,
msanmove,
newobject,
newproc,
panicdivide,
panicshift,
panicdottypeE,
panicdottypeI,
panicnildottype,
panicoverflow,
raceread,
racereadrange,
racewrite,
racewriterange,
x86HasPOPCNT,
x86HasSSE41,
x86HasFMA,
armHasVFPv4,
arm64HasATOMICS,
typedmemclr,
typedmemmove,
Udiv,
writeBarrier,
zerobaseSym *obj.LSym
BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym
ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym
// Wasm
WasmMove,
WasmZero,
WasmDiv,
WasmTruncS,
WasmTruncU,
SigPanic *obj.LSym
)
// GCWriteBarrierReg maps from registers to gcWriteBarrier implementation LSyms.
var GCWriteBarrierReg map[int16]*obj.LSym


@@ -0,0 +1,333 @@
// Derived from Inferno utils/6c/txt.c
// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6c/txt.c
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
// Portions Copyright © 1997-1999 Vita Nuova Limited
// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
// Portions Copyright © 2004,2006 Bruce Ellis
// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
// Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package gc
import (
"cmd/compile/internal/ssa"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
)
var sharedProgArray = new([10000]obj.Prog) // *T instead of T to work around issue 19839
// Progs accumulates Progs for a function and converts them into machine code.
type Progs struct {
Text *obj.Prog // ATEXT Prog for this function
next *obj.Prog // next Prog
pc int64 // virtual PC; count of Progs
pos src.XPos // position to use for new Progs
curfn *Node // fn these Progs are for
progcache []obj.Prog // local progcache
cacheidx int // first free element of progcache
nextLive LivenessIndex // liveness index for the next Prog
prevLive LivenessIndex // last emitted liveness index
}
// newProgs returns a new Progs for fn.
// worker indicates which of the backend workers will use the Progs.
func newProgs(fn *Node, worker int) *Progs {
pp := new(Progs)
if Ctxt.CanReuseProgs() {
sz := len(sharedProgArray) / nBackendWorkers
pp.progcache = sharedProgArray[sz*worker : sz*(worker+1)]
}
pp.curfn = fn
// prime the pump
pp.next = pp.NewProg()
pp.clearp(pp.next)
pp.pos = fn.Pos
pp.settext(fn)
// PCDATA tables implicitly start with index -1.
pp.prevLive = LivenessIndex{-1, false}
pp.nextLive = pp.prevLive
return pp
}
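newProgs gives each backend worker a disjoint window into sharedProgArray, so workers can hand out Progs concurrently without locks or per-Prog allocations. The partitioning idiom reduced to its essentials (item is a hypothetical placeholder type):

package main

import "fmt"

type item struct{ id int }

var shared = new([1000]item) // one backing array shared by all workers

// cacheFor returns worker w's private window; windows for distinct
// workers never overlap, so no synchronization is needed.
func cacheFor(w, workers int) []item {
	sz := len(shared) / workers
	return shared[sz*w : sz*(w+1)]
}

func main() {
	a, b := cacheFor(0, 4), cacheFor(1, 4)
	fmt.Println(len(a), len(b)) // 250 250
}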
func (pp *Progs) NewProg() *obj.Prog {
var p *obj.Prog
if pp.cacheidx < len(pp.progcache) {
p = &pp.progcache[pp.cacheidx]
pp.cacheidx++
} else {
p = new(obj.Prog)
}
p.Ctxt = Ctxt
return p
}
// Flush converts from pp to machine code.
func (pp *Progs) Flush() {
plist := &obj.Plist{Firstpc: pp.Text, Curfn: pp.curfn}
obj.Flushplist(Ctxt, plist, pp.NewProg, myimportpath)
}
// Free clears pp and any associated resources.
func (pp *Progs) Free() {
if Ctxt.CanReuseProgs() {
// Clear progs to enable GC and avoid abuse.
s := pp.progcache[:pp.cacheidx]
for i := range s {
s[i] = obj.Prog{}
}
}
// Clear pp to avoid abuse.
*pp = Progs{}
}
// Prog adds a Prog with instruction As to pp.
func (pp *Progs) Prog(as obj.As) *obj.Prog {
if pp.nextLive.StackMapValid() && pp.nextLive.stackMapIndex != pp.prevLive.stackMapIndex {
// Emit stack map index change.
idx := pp.nextLive.stackMapIndex
pp.prevLive.stackMapIndex = idx
p := pp.Prog(obj.APCDATA)
Addrconst(&p.From, objabi.PCDATA_StackMapIndex)
Addrconst(&p.To, int64(idx))
}
if pp.nextLive.isUnsafePoint != pp.prevLive.isUnsafePoint {
// Emit unsafe-point marker.
pp.prevLive.isUnsafePoint = pp.nextLive.isUnsafePoint
p := pp.Prog(obj.APCDATA)
Addrconst(&p.From, objabi.PCDATA_UnsafePoint)
if pp.nextLive.isUnsafePoint {
Addrconst(&p.To, objabi.PCDATA_UnsafePointUnsafe)
} else {
Addrconst(&p.To, objabi.PCDATA_UnsafePointSafe)
}
}
p := pp.next
pp.next = pp.NewProg()
pp.clearp(pp.next)
p.Link = pp.next
if !pp.pos.IsKnown() && Debug.K != 0 {
Warn("prog: unknown position (line 0)")
}
p.As = as
p.Pos = pp.pos
if pp.pos.IsStmt() == src.PosIsStmt {
// Clear IsStmt for later Progs at this pos provided that as can be marked as a stmt
if ssa.LosesStmtMark(as) {
return p
}
pp.pos = pp.pos.WithNotStmt()
}
return p
}
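Note how Prog emits a PCDATA instruction only when nextLive differs from prevLive: liveness is delta-encoded, so runs of instructions with the same stack map cost nothing extra. The same emit-on-transition pattern in miniature:

package main

import "fmt"

// emitOnChange compresses a per-instruction state sequence into
// (pc, state) transition records, as Prog does for PCDATA.
func emitOnChange(states []int) [][2]int {
	var out [][2]int
	prev := -1 // PCDATA tables implicitly start at index -1
	for pc, s := range states {
		if s != prev {
			out = append(out, [2]int{pc, s})
			prev = s
		}
	}
	return out
}

func main() {
	fmt.Println(emitOnChange([]int{0, 0, 1, 1, 1, 0})) // [[0 0] [2 1] [5 0]]
}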
func (pp *Progs) clearp(p *obj.Prog) {
obj.Nopout(p)
p.As = obj.AEND
p.Pc = pp.pc
pp.pc++
}
func (pp *Progs) Appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16, foffset int64, ttype obj.AddrType, treg int16, toffset int64) *obj.Prog {
q := pp.NewProg()
pp.clearp(q)
q.As = as
q.Pos = p.Pos
q.From.Type = ftype
q.From.Reg = freg
q.From.Offset = foffset
q.To.Type = ttype
q.To.Reg = treg
q.To.Offset = toffset
q.Link = p.Link
p.Link = q
return q
}
func (pp *Progs) settext(fn *Node) {
if pp.Text != nil {
Fatalf("Progs.settext called twice")
}
ptxt := pp.Prog(obj.ATEXT)
pp.Text = ptxt
fn.Func.lsym.Func().Text = ptxt
ptxt.From.Type = obj.TYPE_MEM
ptxt.From.Name = obj.NAME_EXTERN
ptxt.From.Sym = fn.Func.lsym
}
// initLSym defines f's obj.LSym and initializes it based on the
// properties of f. This includes setting the symbol flags and ABI and
// creating and initializing related DWARF symbols.
//
// initLSym must be called exactly once per function and must be
// called for both functions with bodies and functions without bodies.
func (f *Func) initLSym(hasBody bool) {
if f.lsym != nil {
Fatalf("Func.initLSym called twice")
}
if nam := f.Nname; !nam.isBlank() {
f.lsym = nam.Sym.Linksym()
if f.Pragma&Systemstack != 0 {
f.lsym.Set(obj.AttrCFunc, true)
}
var aliasABI obj.ABI
needABIAlias := false
defABI, hasDefABI := symabiDefs[f.lsym.Name]
if hasDefABI && defABI == obj.ABI0 {
// Symbol is defined as ABI0. Create an
// Internal -> ABI0 wrapper.
f.lsym.SetABI(obj.ABI0)
needABIAlias, aliasABI = true, obj.ABIInternal
} else {
// No ABI override. Check that the symbol is
// using the expected ABI.
want := obj.ABIInternal
if f.lsym.ABI() != want {
Fatalf("function symbol %s has the wrong ABI %v, expected %v", f.lsym.Name, f.lsym.ABI(), want)
}
}
isLinknameExported := nam.Sym.Linkname != "" && (hasBody || hasDefABI)
if abi, ok := symabiRefs[f.lsym.Name]; (ok && abi == obj.ABI0) || isLinknameExported {
// Either 1) this symbol is definitely
// referenced as ABI0 from this package; or 2)
// this symbol is defined in this package but
// given a linkname, indicating that it may be
// referenced from another package. Create an
// ABI0 -> Internal wrapper so it can be
// called as ABI0. In case 2, it's important
// that we know it's defined in this package
// since other packages may "pull" symbols
// using linkname and we don't want to create
// duplicate ABI wrappers.
if f.lsym.ABI() != obj.ABI0 {
needABIAlias, aliasABI = true, obj.ABI0
}
}
if needABIAlias {
// These LSyms have the same name as the
// native function, so we create them directly
// rather than looking them up. The uniqueness
// of f.lsym ensures uniqueness of asym.
asym := &obj.LSym{
Name: f.lsym.Name,
Type: objabi.SABIALIAS,
R: []obj.Reloc{{Sym: f.lsym}}, // 0 size, so "informational"
}
asym.SetABI(aliasABI)
asym.Set(obj.AttrDuplicateOK, true)
Ctxt.ABIAliases = append(Ctxt.ABIAliases, asym)
}
}
if !hasBody {
// For body-less functions, we only create the LSym.
return
}
var flag int
if f.Dupok() {
flag |= obj.DUPOK
}
if f.Wrapper() {
flag |= obj.WRAPPER
}
if f.Needctxt() {
flag |= obj.NEEDCTXT
}
if f.Pragma&Nosplit != 0 {
flag |= obj.NOSPLIT
}
if f.ReflectMethod() {
flag |= obj.REFLECTMETHOD
}
// Clumsy but important.
// See test/recover.go for test cases and src/reflect/value.go
// for the actual functions being considered.
if myimportpath == "reflect" {
switch f.Nname.Sym.Name {
case "callReflect", "callMethod":
flag |= obj.WRAPPER
}
}
Ctxt.InitTextSym(f.lsym, flag)
}
func ggloblnod(nam *Node) {
s := nam.Sym.Linksym()
s.Gotype = ngotype(nam).Linksym()
flags := 0
if nam.Name.Readonly() {
flags = obj.RODATA
}
if nam.Type != nil && !nam.Type.HasPointers() {
flags |= obj.NOPTR
}
Ctxt.Globl(s, nam.Type.Width, flags)
if nam.Name.LibfuzzerExtraCounter() {
s.Type = objabi.SLIBFUZZER_EXTRA_COUNTER
}
if nam.Sym.Linkname != "" {
// Make sure linkname'd symbol is non-package. When a symbol is
// both imported and linkname'd, s.Pkg may not be set to "_" in
// types.Sym.Linksym because LSym already exists. Set it here.
s.Pkg = "_"
}
}
func ggloblsym(s *obj.LSym, width int32, flags int16) {
if flags&obj.LOCAL != 0 {
s.Set(obj.AttrLocal, true)
flags &^= obj.LOCAL
}
Ctxt.Globl(s, int64(width), int(flags))
}
func Addrconst(a *obj.Addr, v int64) {
a.Sym = nil
a.Type = obj.TYPE_CONST
a.Offset = v
}
func Patch(p *obj.Prog, to *obj.Prog) {
if p.To.Type != obj.TYPE_BRANCH {
Fatalf("patch: not a branch")
}
p.To.SetTarget(to)
p.To.Offset = to.Pc
}


@@ -2,13 +2,15 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package test
-import "testing"
+package gc
// Test to make sure we make copies of the values we
// put in interfaces.
+import (
+"testing"
+)
var x int
func TestEfaceConv1(t *testing.T) {

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff