Mirror of https://github.com/golang/go.git (synced 2026-02-07 11:25:07 +03:00)

Compare commits: dev.regabi...go1.16.3 (37 commits)
9baddd3f21
96139f2599
887c0d890f
3a45c13094
2940614c63
1d967ab95c
9c7463ca90
ac59d7abb9
33fb47921f
902d16e97b
f39c4deee8
0da04a662a
3979fb9af9
5993fbbd48
6e04188440
b5c1b5aa07
e9e0473681
634d28d78c
d86e53e896
3068d55c2f
a9ba734e4d
047ca22916
2b7243a62f
a9547ad8ad
292abd96ae
88f91b709e
4fd2617cd8
e0bd146a13
ca9cd629fb
18e5d75ffb
ddeae6b248
b7e0eb49d8
0b8c416688
1a7e9af153
f21be2fdc6
e34168e634
3e06467282
@@ -1,2 +0,0 @@
-branch: dev.regabi
-parent-branch: master
@@ -201,12 +201,18 @@ func TestMethod(t *testing.T) {
	// Exported symbol's method must be live.
	goCmd(t, "build", "-buildmode=plugin", "-o", "plugin.so", "./method/plugin.go")
	goCmd(t, "build", "-o", "method.exe", "./method/main.go")

-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-	defer cancel()
-	cmd := exec.CommandContext(ctx, "./method.exe")
-	out, err := cmd.CombinedOutput()
-	if err != nil {
-		t.Fatalf("%s: %v\n%s", strings.Join(cmd.Args, " "), err, out)
-	}
+	run(t, "./method.exe")
}
+
+func TestMethod2(t *testing.T) {
+	goCmd(t, "build", "-buildmode=plugin", "-o", "method2.so", "./method2/plugin.go")
+	goCmd(t, "build", "-o", "method2.exe", "./method2/main.go")
+	run(t, "./method2.exe")
+}
+
+func TestIssue44956(t *testing.T) {
+	goCmd(t, "build", "-buildmode=plugin", "-o", "issue44956p1.so", "./issue44956/plugin1.go")
+	goCmd(t, "build", "-buildmode=plugin", "-o", "issue44956p2.so", "./issue44956/plugin2.go")
+	goCmd(t, "build", "-o", "issue44956.exe", "./issue44956/main.go")
+	run(t, "./issue44956.exe")
+}
@@ -1,7 +1,7 @@
-// rundir
-
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

-package ignored
+package base
+
+var X = &map[int]int{123: 456}
misc/cgo/testplugin/testdata/issue44956/main.go (new file, 47 lines)
@@ -0,0 +1,47 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Issue 44956: writable static temp is not exported correctly.
// In the test below, package base is
//
//     X = &map{...}
//
// which compiles to
//
//     X = &stmp           // static
//     stmp = makemap(...) // in init function
//
// plugin1 and plugin2 both import base. plugin1 doesn't use
// base.X, so that symbol is deadcoded in plugin1.
//
// plugin1 is loaded first. base.init runs at that point, which
// initializes base.stmp.
//
// plugin2 is then loaded. base.init already ran, so it doesn't run
// again. When base.stmp is not exported, plugin2's base.X points to
// its own private base.stmp, which is not initialized, and the test
// fails.

package main

import "plugin"

func main() {
	_, err := plugin.Open("issue44956p1.so")
	if err != nil {
		panic("FAIL")
	}

	p2, err := plugin.Open("issue44956p2.so")
	if err != nil {
		panic("FAIL")
	}
	f, err := p2.Lookup("F")
	if err != nil {
		panic("FAIL")
	}
	x := f.(func() *map[int]int)()
	if x == nil || (*x)[123] != 456 {
		panic("FAIL")
	}
}
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

-package b
-
-import "./a" // ERROR "cannot import package as init"
+package main
+
+import _ "testplugin/issue44956/base"
+
+func main() {}
@@ -4,15 +4,8 @@
package main

-import "./a"
-
-var g = a.G()
-
-func main() {
-	if !a.F() {
-		panic("FAIL")
-	}
-	if !g() {
-		panic("FAIL")
-	}
-}
+import "testplugin/issue44956/base"
+
+func F() *map[int]int { return base.X }
+
+func main() {}
misc/cgo/testplugin/testdata/method2/main.go (new file, 32 lines)
@@ -0,0 +1,32 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// A type can be passed to a plugin and converted to interface
// there. So its methods need to be live.

package main

import (
	"plugin"

	"testplugin/method2/p"
)

var t p.T

type I interface{ M() }

func main() {
	pl, err := plugin.Open("method2.so")
	if err != nil {
		panic(err)
	}

	f, err := pl.Lookup("F")
	if err != nil {
		panic(err)
	}

	f.(func(p.T) interface{})(t).(I).M()
}
@@ -2,4 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

-package init
+package p
+
+type T int
+
+func (T) M() { println("M") }
@@ -1,9 +1,11 @@
-// errorcheckdir
-
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

-// Issue 43962: Importing a package called "init" is an error.
-
-package ignored
+package main
+
+import "testplugin/method2/p"
+
+func main() {}
+
+func F(t p.T) interface{} { return t }
@@ -664,7 +664,7 @@ func toValidName(name string) string {
	if strings.HasPrefix(p, "/") {
		p = p[len("/"):]
	}
-	for strings.HasPrefix(name, "../") {
+	for strings.HasPrefix(p, "../") {
		p = p[len("../"):]
	}
	return p
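The bug in this hunk is the loop condition: it tested `name` (which never changes) while slicing `p`, so for an archive name beginning with "../" the loop kept slicing `p` past its end and panicked. Below is a standalone sketch of the corrected sanitization; `sanitize` is a hypothetical helper name for illustration, not the package's API:

```
package main

import (
	"fmt"
	"strings"
)

// sanitize mirrors the fixed loop above: strip a leading "/" and any
// leading "../" elements from an archive file name before exposing it
// through the fs.FS interface. Illustrative only.
func sanitize(p string) string {
	p = strings.TrimPrefix(p, "/")
	for strings.HasPrefix(p, "../") {
		p = p[len("../"):]
	}
	return p
}

func main() {
	fmt.Println(sanitize("../test.txt"))    // test.txt
	fmt.Println(sanitize("/../../a/b.txt")) // a/b.txt
}
```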
@@ -1081,3 +1081,38 @@ func TestFS(t *testing.T) {
		t.Fatal(err)
	}
}
+
+func TestCVE202127919(t *testing.T) {
+	// Archive containing only the file "../test.txt"
+	data := []byte{
+		0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x08, 0x00,
+		0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x2e, 0x2e,
+		0x2f, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x74, 0x78,
+		0x74, 0x0a, 0xc9, 0xc8, 0x2c, 0x56, 0xc8, 0x2c,
+		0x56, 0x48, 0x54, 0x28, 0x49, 0x2d, 0x2e, 0x51,
+		0x28, 0x49, 0xad, 0x28, 0x51, 0x48, 0xcb, 0xcc,
+		0x49, 0xd5, 0xe3, 0x02, 0x04, 0x00, 0x00, 0xff,
+		0xff, 0x50, 0x4b, 0x07, 0x08, 0xc0, 0xd7, 0xed,
+		0xc3, 0x20, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x00,
+		0x00, 0x50, 0x4b, 0x01, 0x02, 0x14, 0x00, 0x14,
+		0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0xc0, 0xd7, 0xed, 0xc3, 0x20, 0x00, 0x00,
+		0x00, 0x1a, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e,
+		0x2e, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x74,
+		0x78, 0x74, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00,
+		0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x39, 0x00,
+		0x00, 0x00, 0x59, 0x00, 0x00, 0x00, 0x00, 0x00,
+	}
+	r, err := NewReader(bytes.NewReader([]byte(data)), int64(len(data)))
+	if err != nil {
+		t.Fatalf("Error reading the archive: %v", err)
+	}
+	_, err = r.Open("test.txt")
+	if err != nil {
+		t.Errorf("Error reading file: %v", err)
+	}
+}
@@ -109,10 +109,6 @@ func archX86(linkArch *obj.LinkArch) *Arch {
	register["SB"] = RSB
	register["FP"] = RFP
	register["PC"] = RPC
-	if linkArch == &x86.Linkamd64 {
-		// Alias g to R14
-		register["g"] = x86.REGG
-	}
	// Register prefix not used on this architecture.

	instructions := make(map[string]obj.As)
@@ -259,7 +259,6 @@ var amd64OperandTests = []operandTest{
	{"R15", "R15"},
	{"R8", "R8"},
	{"R9", "R9"},
-	{"g", "R14"},
	{"SI", "SI"},
	{"SP", "SP"},
	{"X0", "X0"},
@@ -305,7 +305,7 @@ func (p *Parser) pseudo(word string, operands [][]lex.Token) bool {
// references and writes symabis information to w.
//
// The symabis format is documented at
-// cmd/compile/internal/ssagen.ReadSymABIs.
+// cmd/compile/internal/gc.readSymABIs.
func (p *Parser) symDefRef(w io.Writer, word string, operands [][]lex.Token) {
	switch word {
	case "TEXT":
src/cmd/compile/fmt_test.go (new file, 599 lines)
@@ -0,0 +1,599 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file implements TestFormats; a test that verifies
// format strings in the compiler (this directory and all
// subdirectories, recursively).
//
// TestFormats finds potential (Printf, etc.) format strings.
// If they are used in a call, the format verbs are verified
// based on the matching argument type against a precomputed
// map of valid formats (knownFormats). This map can be used to
// automatically rewrite format strings across all compiler
// files with the -r flag.
//
// The format map needs to be updated whenever a new (type,
// format) combination is found and the format verb is not
// 'v' or 'T' (as in "%v" or "%T"). To update the map auto-
// matically from the compiler source's use of format strings,
// use the -u flag. (Whether formats are valid for the values
// to be formatted must be verified manually, of course.)
//
// The -v flag prints out the names of all functions called
// with a format string, the names of files that were not
// processed, and any format rewrites made (with -r).
//
// Run as: go test -run Formats [-r][-u][-v]
//
// Known shortcomings:
// - indexed format strings ("%[2]s", etc.) are not supported
//   (the test will fail)
// - format strings that are not simple string literals cannot
//   be updated automatically
//   (the test will fail with respective warnings)
// - format strings in _test packages outside the current
//   package are not processed
//   (the test will report those files)
//
package main_test

import (
	"bytes"
	"flag"
	"fmt"
	"go/ast"
	"go/build"
	"go/constant"
	"go/format"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
	"internal/testenv"
	"io"
	"io/fs"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"testing"
	"unicode/utf8"
)
var (
	rewrite = flag.Bool("r", false, "rewrite format strings")
	update  = flag.Bool("u", false, "update known formats")
)

// The following variables collect information across all processed files.
var (
	fset          = token.NewFileSet()
	formatStrings = make(map[*ast.BasicLit]bool)      // set of all potential format strings found
	foundFormats  = make(map[string]bool)             // set of all formats found
	callSites     = make(map[*ast.CallExpr]*callSite) // map of all calls
)

// A File is a corresponding (filename, ast) pair.
type File struct {
	name string
	ast  *ast.File
}
func TestFormats(t *testing.T) {
	if testing.Short() && testenv.Builder() == "" {
		t.Skip("Skipping in short mode")
	}
	testenv.MustHaveGoBuild(t) // more restrictive than necessary, but that's ok

	// process all directories
	filepath.WalkDir(".", func(path string, info fs.DirEntry, err error) error {
		if info.IsDir() {
			if info.Name() == "testdata" {
				return filepath.SkipDir
			}

			importPath := filepath.Join("cmd/compile", path)
			if ignoredPackages[filepath.ToSlash(importPath)] {
				return filepath.SkipDir
			}

			pkg, err := build.Import(importPath, path, 0)
			if err != nil {
				if _, ok := err.(*build.NoGoError); ok {
					return nil // nothing to do here
				}
				t.Fatal(err)
			}
			collectPkgFormats(t, pkg)
		}
		return nil
	})

	// test and rewrite formats
	updatedFiles := make(map[string]File) // files that were rewritten
	for _, p := range callSites {
		// test current format literal and determine updated one
		out := formatReplace(p.str, func(index int, in string) string {
			if in == "*" {
				return in // cannot rewrite '*' (as in "%*d")
			}
			// in != '*'
			typ := p.types[index]
			format := typ + " " + in // e.g., "*Node %n"

			// check if format is known
			out, known := knownFormats[format]

			// record format if not yet found
			_, found := foundFormats[format]
			if !found {
				foundFormats[format] = true
			}

			// report an error if the format is unknown and this is the first
			// time we see it; ignore "%v" and "%T" which are always valid
			if !known && !found && in != "%v" && in != "%T" {
				t.Errorf("%s: unknown format %q for %s argument", posString(p.arg), in, typ)
			}

			if out == "" {
				out = in
			}
			return out
		})

		// replace existing format literal if it changed
		if out != p.str {
			// we cannot replace the argument if it's not a string literal for now
			// (e.g., it may be "foo" + "bar")
			lit, ok := p.arg.(*ast.BasicLit)
			if !ok {
				delete(callSites, p.call) // treat as if we hadn't found this site
				continue
			}

			if testing.Verbose() {
				fmt.Printf("%s:\n\t- %q\n\t+ %q\n", posString(p.arg), p.str, out)
			}

			// find argument index of format argument
			index := -1
			for i, arg := range p.call.Args {
				if p.arg == arg {
					index = i
					break
				}
			}
			if index < 0 {
				// we may have processed the same call site twice,
				// but that shouldn't happen
				panic("internal error: matching argument not found")
			}

			// replace literal
			new := *lit                    // make a copy
			new.Value = strconv.Quote(out) // this may introduce "-quotes where there were `-quotes
			p.call.Args[index] = &new
			updatedFiles[p.file.name] = p.file
		}
	}

	// write dirty files back
	var filesUpdated bool
	if len(updatedFiles) > 0 && *rewrite {
		for _, file := range updatedFiles {
			var buf bytes.Buffer
			if err := format.Node(&buf, fset, file.ast); err != nil {
				t.Errorf("WARNING: gofmt %s failed: %v", file.name, err)
				continue
			}
			if err := ioutil.WriteFile(file.name, buf.Bytes(), 0644); err != nil {
				t.Errorf("WARNING: writing %s failed: %v", file.name, err)
				continue
			}
			fmt.Printf("updated %s\n", file.name)
			filesUpdated = true
		}
	}

	// report the names of all functions called with a format string
	if len(callSites) > 0 && testing.Verbose() {
		set := make(map[string]bool)
		for _, p := range callSites {
			set[nodeString(p.call.Fun)] = true
		}
		var list []string
		for s := range set {
			list = append(list, s)
		}
		fmt.Println("\nFunctions called with a format string")
		writeList(os.Stdout, list)
	}

	// update formats
	if len(foundFormats) > 0 && *update {
		var list []string
		for s := range foundFormats {
			list = append(list, fmt.Sprintf("%q: \"\",", s))
		}
		var buf bytes.Buffer
		buf.WriteString(knownFormatsHeader)
		writeList(&buf, list)
		buf.WriteString("}\n")
		out, err := format.Source(buf.Bytes())
		const outfile = "fmtmap_test.go"
		if err != nil {
			t.Errorf("WARNING: gofmt %s failed: %v", outfile, err)
			out = buf.Bytes() // continue with unformatted source
		}
		if err = ioutil.WriteFile(outfile, out, 0644); err != nil {
			t.Errorf("WARNING: updating format map failed: %v", err)
		}
	}

	// check that knownFormats is up to date
	if !*rewrite && !*update {
		var mismatch bool
		for s := range foundFormats {
			if _, ok := knownFormats[s]; !ok {
				mismatch = true
				break
			}
		}
		if !mismatch {
			for s := range knownFormats {
				if _, ok := foundFormats[s]; !ok {
					mismatch = true
					break
				}
			}
		}
		if mismatch {
			t.Errorf("format map is out of date; run 'go test -u' to update and manually verify correctness of change")
		}
	}

	// all format strings of calls must be in the formatStrings set (self-verification)
	for _, p := range callSites {
		if lit, ok := p.arg.(*ast.BasicLit); ok && lit.Kind == token.STRING {
			if formatStrings[lit] {
				// ok
				delete(formatStrings, lit)
			} else {
				// this should never happen
				panic(fmt.Sprintf("internal error: format string not found (%s)", posString(lit)))
			}
		}
	}

	// if we have any strings left, we may need to update them manually
	if len(formatStrings) > 0 && filesUpdated {
		var list []string
		for lit := range formatStrings {
			list = append(list, fmt.Sprintf("%s: %s", posString(lit), nodeString(lit)))
		}
		fmt.Println("\nWARNING: Potentially missed format strings")
		writeList(os.Stdout, list)
		t.Fail()
	}

	fmt.Println()
}
// A callSite describes a function call that appears to contain
// a format string.
type callSite struct {
	file  File
	call  *ast.CallExpr // call containing the format string
	arg   ast.Expr      // format argument (string literal or constant)
	str   string        // unquoted format string
	types []string      // argument types
}

func collectPkgFormats(t *testing.T, pkg *build.Package) {
	// collect all files
	var filenames []string
	filenames = append(filenames, pkg.GoFiles...)
	filenames = append(filenames, pkg.CgoFiles...)
	filenames = append(filenames, pkg.TestGoFiles...)

	// TODO(gri) verify _test files outside package
	for _, name := range pkg.XTestGoFiles {
		// don't process this test itself
		if name != "fmt_test.go" && testing.Verbose() {
			fmt.Printf("WARNING: %s not processed\n", filepath.Join(pkg.Dir, name))
		}
	}

	// make filenames relative to .
	for i, name := range filenames {
		filenames[i] = filepath.Join(pkg.Dir, name)
	}

	// parse all files
	files := make([]*ast.File, len(filenames))
	for i, filename := range filenames {
		f, err := parser.ParseFile(fset, filename, nil, parser.ParseComments)
		if err != nil {
			t.Fatal(err)
		}
		files[i] = f
	}

	// typecheck package
	conf := types.Config{Importer: importer.Default()}
	etypes := make(map[ast.Expr]types.TypeAndValue)
	if _, err := conf.Check(pkg.ImportPath, fset, files, &types.Info{Types: etypes}); err != nil {
		t.Fatal(err)
	}

	// collect all potential format strings (for extra verification later)
	for _, file := range files {
		ast.Inspect(file, func(n ast.Node) bool {
			if s, ok := stringLit(n); ok && isFormat(s) {
				formatStrings[n.(*ast.BasicLit)] = true
			}
			return true
		})
	}

	// collect all formats/arguments of calls with format strings
	for index, file := range files {
		ast.Inspect(file, func(n ast.Node) bool {
			if call, ok := n.(*ast.CallExpr); ok {
				if ignoredFunctions[nodeString(call.Fun)] {
					return true
				}
				// look for an argument that might be a format string
				for i, arg := range call.Args {
					if s, ok := stringVal(etypes[arg]); ok && isFormat(s) {
						// make sure we have enough arguments
						n := numFormatArgs(s)
						if i+1+n > len(call.Args) {
							t.Errorf("%s: not enough format args (ignore %s?)", posString(call), nodeString(call.Fun))
							break // ignore this call
						}
						// assume last n arguments are to be formatted;
						// determine their types
						argTypes := make([]string, n)
						for i, arg := range call.Args[len(call.Args)-n:] {
							if tv, ok := etypes[arg]; ok {
								argTypes[i] = typeString(tv.Type)
							}
						}
						// collect call site
						if callSites[call] != nil {
							panic("internal error: file processed twice?")
						}
						callSites[call] = &callSite{
							file:  File{filenames[index], file},
							call:  call,
							arg:   arg,
							str:   s,
							types: argTypes,
						}
						break // at most one format per argument list
					}
				}
			}
			return true
		})
	}
}
// writeList writes list in sorted order to w.
func writeList(w io.Writer, list []string) {
	sort.Strings(list)
	for _, s := range list {
		fmt.Fprintln(w, "\t", s)
	}
}

// posString returns a string representation of n's position
// in the form filename:line:col: .
func posString(n ast.Node) string {
	if n == nil {
		return ""
	}
	return fset.Position(n.Pos()).String()
}

// nodeString returns a string representation of n.
func nodeString(n ast.Node) string {
	var buf bytes.Buffer
	if err := format.Node(&buf, fset, n); err != nil {
		log.Fatal(err) // should always succeed
	}
	return buf.String()
}

// typeString returns a string representation of typ.
func typeString(typ types.Type) string {
	return filepath.ToSlash(typ.String())
}

// stringLit returns the unquoted string value and true if
// n represents a string literal; otherwise it returns ""
// and false.
func stringLit(n ast.Node) (string, bool) {
	if lit, ok := n.(*ast.BasicLit); ok && lit.Kind == token.STRING {
		s, err := strconv.Unquote(lit.Value)
		if err != nil {
			log.Fatal(err) // should not happen with correct ASTs
		}
		return s, true
	}
	return "", false
}

// stringVal returns the (unquoted) string value and true if
// tv is a string constant; otherwise it returns "" and false.
func stringVal(tv types.TypeAndValue) (string, bool) {
	if tv.IsValue() && tv.Value != nil && tv.Value.Kind() == constant.String {
		return constant.StringVal(tv.Value), true
	}
	return "", false
}

// formatIter iterates through the string s in increasing
// index order and calls f for each format specifier '%..v'.
// The arguments for f describe the specifier's index range.
// If a format specifier contains a "*", f is called with
// the index range for "*" alone, before being called for
// the entire specifier. The result of f is the index of
// the rune at which iteration continues.
func formatIter(s string, f func(i, j int) int) {
	i := 0     // index after current rune
	var r rune // current rune

	next := func() {
		r1, w := utf8.DecodeRuneInString(s[i:])
		if w == 0 {
			r1 = -1 // signal end-of-string
		}
		r = r1
		i += w
	}

	flags := func() {
		for r == ' ' || r == '#' || r == '+' || r == '-' || r == '0' {
			next()
		}
	}

	index := func() {
		if r == '[' {
			log.Fatalf("cannot handle indexed arguments: %s", s)
		}
	}

	digits := func() {
		index()
		if r == '*' {
			i = f(i-1, i)
			next()
			return
		}
		for '0' <= r && r <= '9' {
			next()
		}
	}

	for next(); r >= 0; next() {
		if r == '%' {
			i0 := i
			next()
			flags()
			digits()
			if r == '.' {
				next()
				digits()
			}
			index()
			// accept any letter (a-z, A-Z) as format verb;
			// ignore anything else
			if 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' {
				i = f(i0-1, i)
			}
		}
	}
}

// isFormat reports whether s contains format specifiers.
func isFormat(s string) (yes bool) {
	formatIter(s, func(i, j int) int {
		yes = true
		return len(s) // stop iteration
	})
	return
}

// oneFormat reports whether s is exactly one format specifier.
func oneFormat(s string) (yes bool) {
	formatIter(s, func(i, j int) int {
		yes = i == 0 && j == len(s)
		return j
	})
	return
}

// numFormatArgs returns the number of format specifiers in s.
func numFormatArgs(s string) int {
	count := 0
	formatIter(s, func(i, j int) int {
		count++
		return j
	})
	return count
}

// formatReplace replaces the i'th format specifier s in the incoming
// string in with the result of f(i, s) and returns the new string.
func formatReplace(in string, f func(i int, s string) string) string {
	var buf []byte
	i0 := 0
	index := 0
	formatIter(in, func(i, j int) int {
		if sub := in[i:j]; sub != "*" { // ignore calls for "*" width/length specifiers
			buf = append(buf, in[i0:i]...)
			buf = append(buf, f(index, sub)...)
			i0 = j
		}
		index++
		return j
	})
	return string(append(buf, in[i0:]...))
}
// ignoredPackages is the set of packages which can
// be ignored.
var ignoredPackages = map[string]bool{}

// ignoredFunctions is the set of functions which may have
// format-like arguments but which don't do any formatting and
// thus may be ignored.
var ignoredFunctions = map[string]bool{}

func init() {
	// verify that knownFormats entries are correctly formatted
	for key, val := range knownFormats {
		// key must be "typename format", and format starts with a '%'
		// (formats containing '*' alone are not collected in this map)
		i := strings.Index(key, "%")
		if i < 0 || !oneFormat(key[i:]) {
			log.Fatalf("incorrect knownFormats key: %q", key)
		}
		// val must be "format" or ""
		if val != "" && !oneFormat(val) {
			log.Fatalf("incorrect knownFormats value: %q (key = %q)", val, key)
		}
	}
}

const knownFormatsHeader = `// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file implements the knownFormats map which records the valid
// formats for a given type. The valid formats must correspond to
// supported compiler formats implemented in fmt.go, or whatever
// other format verbs are implemented for the given type. The map may
// also be used to change the use of a format verb across all compiler
// sources automatically (for instance, if the implementation of fmt.go
// changes), by using the -r option together with the new formats in the
// map. To generate this file automatically from the existing source,
// run: go test -run Formats -u.
//
// See the package comment in fmt_test.go for additional information.

package main_test

// knownFormats entries are of the form "typename format" -> "newformat".
// An absent entry means that the format is not recognized as valid.
// An empty new format means that the format should remain unchanged.
var knownFormats = map[string]string{
`
src/cmd/compile/fmtmap_test.go (new file, 211 lines)
@@ -0,0 +1,211 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file implements the knownFormats map which records the valid
// formats for a given type. The valid formats must correspond to
// supported compiler formats implemented in fmt.go, or whatever
// other format verbs are implemented for the given type. The map may
// also be used to change the use of a format verb across all compiler
// sources automatically (for instance, if the implementation of fmt.go
// changes), by using the -r option together with the new formats in the
// map. To generate this file automatically from the existing source,
// run: go test -run Formats -u.
//
// See the package comment in fmt_test.go for additional information.

package main_test

// knownFormats entries are of the form "typename format" -> "newformat".
// An absent entry means that the format is not recognized as valid.
// An empty new format means that the format should remain unchanged.
var knownFormats = map[string]string{
	"*bytes.Buffer %s":                                   "",
	"*cmd/compile/internal/gc.EscLocation %v":            "",
	"*cmd/compile/internal/gc.Mpflt %v":                  "",
	"*cmd/compile/internal/gc.Mpint %v":                  "",
	"*cmd/compile/internal/gc.Node %#v":                  "",
	"*cmd/compile/internal/gc.Node %+S":                  "",
	"*cmd/compile/internal/gc.Node %+v":                  "",
	"*cmd/compile/internal/gc.Node %L":                   "",
	"*cmd/compile/internal/gc.Node %S":                   "",
	"*cmd/compile/internal/gc.Node %j":                   "",
	"*cmd/compile/internal/gc.Node %p":                   "",
	"*cmd/compile/internal/gc.Node %v":                   "",
	"*cmd/compile/internal/ssa.Block %s":                 "",
	"*cmd/compile/internal/ssa.Block %v":                 "",
	"*cmd/compile/internal/ssa.Func %s":                  "",
	"*cmd/compile/internal/ssa.Func %v":                  "",
	"*cmd/compile/internal/ssa.Register %s":              "",
	"*cmd/compile/internal/ssa.Register %v":              "",
	"*cmd/compile/internal/ssa.SparseTreeNode %v":        "",
	"*cmd/compile/internal/ssa.Value %s":                 "",
	"*cmd/compile/internal/ssa.Value %v":                 "",
	"*cmd/compile/internal/ssa.sparseTreeMapEntry %v":    "",
	"*cmd/compile/internal/types.Field %p":               "",
	"*cmd/compile/internal/types.Field %v":               "",
	"*cmd/compile/internal/types.Sym %0S":                "",
	"*cmd/compile/internal/types.Sym %S":                 "",
	"*cmd/compile/internal/types.Sym %p":                 "",
	"*cmd/compile/internal/types.Sym %v":                 "",
	"*cmd/compile/internal/types.Type %#L":               "",
	"*cmd/compile/internal/types.Type %#v":               "",
	"*cmd/compile/internal/types.Type %+v":               "",
	"*cmd/compile/internal/types.Type %-S":               "",
	"*cmd/compile/internal/types.Type %0S":               "",
	"*cmd/compile/internal/types.Type %L":                "",
	"*cmd/compile/internal/types.Type %S":                "",
	"*cmd/compile/internal/types.Type %p":                "",
	"*cmd/compile/internal/types.Type %s":                "",
	"*cmd/compile/internal/types.Type %v":                "",
	"*cmd/internal/obj.Addr %v":                          "",
	"*cmd/internal/obj.LSym %v":                          "",
	"*math/big.Float %f":                                 "",
	"*math/big.Int %#x":                                  "",
	"*math/big.Int %s":                                   "",
	"*math/big.Int %v":                                   "",
	"[16]byte %x":                                        "",
	"[]*cmd/compile/internal/ssa.Block %v":               "",
	"[]*cmd/compile/internal/ssa.Value %v":               "",
	"[][]string %q":                                      "",
	"[]byte %s":                                          "",
	"[]byte %x":                                          "",
	"[]cmd/compile/internal/ssa.Edge %v":                 "",
	"[]cmd/compile/internal/ssa.ID %v":                   "",
	"[]cmd/compile/internal/ssa.posetNode %v":            "",
	"[]cmd/compile/internal/ssa.posetUndo %v":            "",
	"[]cmd/compile/internal/syntax.token %s":             "",
	"[]string %v":                                        "",
	"[]uint32 %v":                                        "",
	"bool %v":                                            "",
	"byte %08b":                                          "",
	"byte %c":                                            "",
	"byte %q":                                            "",
	"byte %v":                                            "",
	"cmd/compile/internal/arm.shift %d":                  "",
	"cmd/compile/internal/gc.Class %d":                   "",
	"cmd/compile/internal/gc.Class %s":                   "",
	"cmd/compile/internal/gc.Class %v":                   "",
	"cmd/compile/internal/gc.Ctype %d":                   "",
	"cmd/compile/internal/gc.Ctype %v":                   "",
	"cmd/compile/internal/gc.Nodes %#v":                  "",
	"cmd/compile/internal/gc.Nodes %+v":                  "",
	"cmd/compile/internal/gc.Nodes %.v":                  "",
	"cmd/compile/internal/gc.Nodes %v":                   "",
	"cmd/compile/internal/gc.Op %#v":                     "",
	"cmd/compile/internal/gc.Op %v":                      "",
	"cmd/compile/internal/gc.Val %#v":                    "",
	"cmd/compile/internal/gc.Val %T":                     "",
	"cmd/compile/internal/gc.Val %v":                     "",
	"cmd/compile/internal/gc.fmtMode %d":                 "",
	"cmd/compile/internal/gc.initKind %d":                "",
	"cmd/compile/internal/gc.itag %v":                    "",
	"cmd/compile/internal/ssa.BranchPrediction %d":       "",
	"cmd/compile/internal/ssa.Edge %v":                   "",
	"cmd/compile/internal/ssa.GCNode %v":                 "",
	"cmd/compile/internal/ssa.ID %d":                     "",
	"cmd/compile/internal/ssa.ID %v":                     "",
	"cmd/compile/internal/ssa.LocalSlot %s":              "",
	"cmd/compile/internal/ssa.LocalSlot %v":              "",
	"cmd/compile/internal/ssa.Location %s":               "",
	"cmd/compile/internal/ssa.Op %s":                     "",
	"cmd/compile/internal/ssa.Op %v":                     "",
	"cmd/compile/internal/ssa.Sym %v":                    "",
	"cmd/compile/internal/ssa.ValAndOff %s":              "",
	"cmd/compile/internal/ssa.domain %v":                 "",
	"cmd/compile/internal/ssa.flagConstant %s":           "",
	"cmd/compile/internal/ssa.posetNode %v":              "",
	"cmd/compile/internal/ssa.posetTestOp %v":            "",
	"cmd/compile/internal/ssa.rbrank %d":                 "",
	"cmd/compile/internal/ssa.regMask %d":                "",
	"cmd/compile/internal/ssa.register %d":               "",
	"cmd/compile/internal/ssa.relation %s":               "",
	"cmd/compile/internal/syntax.Error %q":               "",
	"cmd/compile/internal/syntax.Expr %#v":               "",
	"cmd/compile/internal/syntax.LitKind %d":             "",
	"cmd/compile/internal/syntax.Node %T":                "",
	"cmd/compile/internal/syntax.Operator %s":            "",
	"cmd/compile/internal/syntax.Pos %s":                 "",
	"cmd/compile/internal/syntax.Pos %v":                 "",
	"cmd/compile/internal/syntax.position %s":            "",
	"cmd/compile/internal/syntax.token %q":               "",
	"cmd/compile/internal/syntax.token %s":               "",
	"cmd/compile/internal/types.EType %d":                "",
	"cmd/compile/internal/types.EType %s":                "",
	"cmd/compile/internal/types.EType %v":                "",
	"cmd/internal/obj.ABI %v":                            "",
	"error %v":                                           "",
	"float64 %.2f":                                       "",
	"float64 %.3f":                                       "",
	"float64 %.6g":                                       "",
	"float64 %g":                                         "",
	"int %#x":                                            "",
	"int %-12d":                                          "",
	"int %-6d":                                           "",
	"int %-8o":                                           "",
	"int %02d":                                           "",
	"int %6d":                                            "",
	"int %c":                                             "",
	"int %d":                                             "",
	"int %v":                                             "",
	"int %x":                                             "",
	"int16 %d":                                           "",
	"int16 %x":                                           "",
	"int32 %#x":                                          "",
	"int32 %d":                                           "",
	"int32 %v":                                           "",
	"int32 %x":                                           "",
	"int64 %#x":                                          "",
	"int64 %+d":                                          "",
	"int64 %-10d":                                        "",
	"int64 %.5d":                                         "",
	"int64 %d":                                           "",
	"int64 %v":                                           "",
	"int64 %x":                                           "",
	"int8 %d":                                            "",
	"int8 %v":                                            "",
	"int8 %x":                                            "",
	"interface{} %#v":                                    "",
	"interface{} %T":                                     "",
	"interface{} %p":                                     "",
	"interface{} %q":                                     "",
	"interface{} %s":                                     "",
	"interface{} %v":                                     "",
	"map[*cmd/compile/internal/gc.Node]*cmd/compile/internal/ssa.Value %v":  "",
	"map[*cmd/compile/internal/gc.Node][]*cmd/compile/internal/gc.Node %v": "",
	"map[cmd/compile/internal/ssa.ID]uint32 %v":          "",
	"map[int64]uint32 %v":                                "",
	"math/big.Accuracy %s":                               "",
	"reflect.Type %s":                                    "",
	"rune %#U":                                           "",
	"rune %c":                                            "",
	"rune %q":                                            "",
	"string %-*s":                                        "",
	"string %-16s":                                       "",
	"string %-6s":                                        "",
	"string %q":                                          "",
	"string %s":                                          "",
	"string %v":                                          "",
	"time.Duration %d":                                   "",
	"time.Duration %v":                                   "",
	"uint %04x":                                          "",
	"uint %5d":                                           "",
	"uint %d":                                            "",
	"uint %x":                                            "",
	"uint16 %d":                                          "",
	"uint16 %x":                                          "",
	"uint32 %#U":                                         "",
	"uint32 %#x":                                         "",
	"uint32 %d":                                          "",
	"uint32 %v":                                          "",
	"uint32 %x":                                          "",
	"uint64 %08x":                                        "",
	"uint64 %b":                                          "",
	"uint64 %d":                                          "",
	"uint64 %x":                                          "",
	"uint8 %#x":                                          "",
	"uint8 %d":                                           "",
	"uint8 %v":                                           "",
	"uint8 %x":                                           "",
	"uintptr %d":                                         "",
}
@@ -1,628 +0,0 @@
# Go internal ABI specification

This document describes Go’s internal application binary interface
(ABI), known as ABIInternal.
Go's ABI defines the layout of data in memory and the conventions for
calling between Go functions.
This ABI is *unstable* and will change between Go versions.
If you’re writing assembly code, please instead refer to Go’s
[assembly documentation](/doc/asm.html), which describes Go’s stable
ABI, known as ABI0.

All functions defined in Go source follow ABIInternal.
However, ABIInternal and ABI0 functions are able to call each other
through transparent *ABI wrappers*, described in the [internal calling
convention proposal](https://golang.org/design/27539-internal-abi).

Go uses a common ABI design across all architectures.
We first describe the common ABI, and then cover per-architecture
specifics.

*Rationale*: For the reasoning behind using a common ABI across
architectures instead of the platform ABI, see the [register-based Go
calling convention proposal](https://golang.org/design/40724-register-calling).

## Memory layout

Go's built-in types have the following sizes and alignments.
Many, though not all, of these sizes are guaranteed by the [language
specification](/doc/go_spec.html#Size_and_alignment_guarantees).
Those that aren't guaranteed may change in future versions of Go (for
example, we've considered changing the alignment of int64 on 32-bit).
| Type | 64-bit | | 32-bit | |
| --- | --- | --- | --- | --- |
| | Size | Align | Size | Align |
| bool, uint8, int8 | 1 | 1 | 1 | 1 |
| uint16, int16 | 2 | 2 | 2 | 2 |
| uint32, int32 | 4 | 4 | 4 | 4 |
| uint64, int64 | 8 | 8 | 8 | 4 |
| int, uint | 8 | 8 | 4 | 4 |
| float32 | 4 | 4 | 4 | 4 |
| float64 | 8 | 8 | 8 | 4 |
| complex64 | 8 | 4 | 8 | 4 |
| complex128 | 16 | 8 | 16 | 4 |
| uintptr, *T, unsafe.Pointer | 8 | 8 | 4 | 4 |

The types `byte` and `rune` are aliases for `uint8` and `int32`,
respectively, and hence have the same size and alignment as these
types.

The layout of `map`, `chan`, and `func` types is equivalent to *T.

To describe the layout of the remaining composite types, we first
define the layout of a *sequence* S of N fields with types
t<sub>1</sub>, t<sub>2</sub>, ..., t<sub>N</sub>.
We define the byte offset at which each field begins relative to a
base address of 0, as well as the size and alignment of the sequence
as follows:

```
offset(S, i) = 0  if i = 1
             = align(offset(S, i-1) + sizeof(t_(i-1)), alignof(t_i))
alignof(S)   = 1  if N = 0
             = max(alignof(t_i) | 1 <= i <= N)
sizeof(S)    = 0  if N = 0
             = align(offset(S, N) + sizeof(t_N), alignof(S))
```

Where sizeof(T) and alignof(T) are the size and alignment of type T,
respectively, and align(x, y) rounds x up to a multiple of y.
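As a quick illustration (not part of the specification), these formulas can be checked against the layout the Go compiler actually produces, using the `unsafe` package. The struct below is an arbitrary example chosen for this sketch:

```
package main

import (
	"fmt"
	"unsafe"
)

// align rounds x up to a multiple of a, assuming a is a power of two
// (true for all Go alignments).
func align(x, a uintptr) uintptr { return (x + a - 1) &^ (a - 1) }

type S struct {
	A uint8
	B uint64
	C uint16
}

func main() {
	// offset(S, i): the previous field's end, rounded up to this
	// field's alignment.
	offB := align(0+unsafe.Sizeof(S{}.A), unsafe.Alignof(S{}.B))
	offC := align(offB+unsafe.Sizeof(S{}.B), unsafe.Alignof(S{}.C))
	// sizeof(S): the end of the last field, rounded up to the
	// sequence's alignment.
	size := align(offC+unsafe.Sizeof(S{}.C), unsafe.Alignof(S{}))

	fmt.Println(offB == unsafe.Offsetof(S{}.B)) // true
	fmt.Println(offC == unsafe.Offsetof(S{}.C)) // true
	fmt.Println(size == unsafe.Sizeof(S{}))     // true
}
```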
The `interface{}` type is a sequence of 1. a pointer to the runtime type
description for the interface's dynamic type and 2. an `unsafe.Pointer`
data field.
Any other interface type (besides the empty interface) is a sequence
of 1. a pointer to the runtime "itab" that gives the method pointers and
the type of the data field and 2. an `unsafe.Pointer` data field.
An interface can be "direct" or "indirect" depending on the dynamic
type: a direct interface stores the value directly in the data field,
and an indirect interface stores a pointer to the value in the data
field.
An interface can only be direct if the value consists of a single
pointer word.
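For illustration, the two-word shape is visible from ordinary Go code. The `eface` struct below is a hand-written mirror of the (unexported) runtime representation of `interface{}`, so it depends on this unstable layout and is a sketch only:

```
package main

import (
	"fmt"
	"unsafe"
)

// eface mirrors the sequence described above for interface{}: a type
// descriptor pointer and a data word. The runtime's real type is not
// exported; this copy is illustrative only.
type eface struct {
	typ, data unsafe.Pointer
}

func main() {
	var i interface{} = 42 // int is a word, but not a pointer word → indirect
	fmt.Println(unsafe.Sizeof(i)) // 16 on 64-bit: two pointer-sized words

	e := (*eface)(unsafe.Pointer(&i))
	// For this indirect interface, the data word points at the stored int.
	fmt.Println(*(*int)(e.data)) // 42
}
```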
An array type `[N]T` is a sequence of N fields of type T.

The slice type `[]T` is a sequence of a `*[cap]T` pointer to the slice
backing store, an `int` giving the `len` of the slice, and an `int`
giving the `cap` of the slice.

The `string` type is a sequence of a `*[len]byte` pointer to the
string backing store, and an `int` giving the `len` of the string.
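These sequences match `reflect.SliceHeader` and `reflect.StringHeader`, which can be used (unsafely) to observe them; a small illustration:

```
package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

func main() {
	s := make([]int, 3, 8)
	// Reinterpret the slice as its pointer/len/cap sequence.
	h := (*reflect.SliceHeader)(unsafe.Pointer(&s))
	fmt.Println(h.Len, h.Cap) // 3 8

	str := "hello"
	// Strings are the same shape minus the cap field.
	sh := (*reflect.StringHeader)(unsafe.Pointer(&str))
	fmt.Println(sh.Len) // 5
}
```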
A struct type `struct { f1 t1; ...; fM tM }` is laid out as the
sequence t1, ..., tM, tP, where tP is either:

- Type `byte` if sizeof(tM) = 0 and any of sizeof(t*i*) ≠ 0.
- Empty (size 0 and align 1) otherwise.

The padding byte prevents creating a past-the-end pointer by taking
the address of the final, empty fM field.
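The padding rule can be observed with `unsafe.Sizeof`: a zero-size final field after non-zero-size fields inflates the struct so that taking that field's address stays inside the object. A minimal sketch:

```
package main

import (
	"fmt"
	"unsafe"
)

type NoPad struct {
	X int32
}

type Pad struct {
	X int32
	Z struct{} // zero-size final field after a non-zero-size field
}

func main() {
	fmt.Println(unsafe.Sizeof(NoPad{})) // 4
	// A padding byte is added so &Pad{}.Z is not a past-the-end
	// pointer; the size then rounds up to the struct's alignment:
	// 4 + 1 → 8.
	fmt.Println(unsafe.Sizeof(Pad{})) // 8
}
```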
Note that user-written assembly code should generally not depend on Go
type layout and should instead use the constants defined in
[`go_asm.h`](/doc/asm.html#data-offsets).

## Function call argument and result passing

Function calls pass arguments and results using a combination of the
stack and machine registers.
Each argument or result is passed either entirely in registers or
entirely on the stack.
Because access to registers is generally faster than access to the
stack, arguments and results are preferentially passed in registers.
However, any argument or result that contains a non-trivial array or
does not fit entirely in the remaining available registers is passed
on the stack.

Each architecture defines a sequence of integer registers and a
sequence of floating-point registers.
At a high level, arguments and results are recursively broken down
into values of base types and these base values are assigned to
registers from these sequences.

Arguments and results can share the same registers, but do not share
the same stack space.
Beyond the arguments and results passed on the stack, the caller also
reserves spill space on the stack for all register-based arguments
(but does not populate this space).

The receiver, arguments, and results of function or method F are
assigned to registers or the stack using the following algorithm:

1. Let NI and NFP be the length of integer and floating-point register
   sequences defined by the architecture.
   Let I and FP be 0; these are the indexes of the next integer and
   floating-point register.
   Let S, the type sequence defining the stack frame, be empty.
1. If F is a method, assign F’s receiver.
1. For each argument A of F, assign A.
1. Add a pointer-alignment field to S. This has size 0 and the same
   alignment as `uintptr`.
1. Reset I and FP to 0.
1. For each result R of F, assign R.
1. Add a pointer-alignment field to S.
1. For each register-assigned receiver and argument of F, let T be its
   type and add T to the stack sequence S.
   This is the argument's (or receiver's) spill space and will be
   uninitialized at the call.
1. Add a pointer-alignment field to S.

Assigning a receiver, argument, or result V of underlying type T works
as follows:

1. Remember I and FP.
1. Try to register-assign V.
1. If step 2 failed, reset I and FP to the values from step 1, add T
   to the stack sequence S, and assign V to this field in S.

Register-assignment of a value V of underlying type T works as follows
(a sketch of these rules in Go appears after this list):

1. If T is a boolean or integral type that fits in an integer
   register, assign V to register I and increment I.
1. If T is an integral type that fits in two integer registers, assign
   the least significant and most significant halves of V to registers
   I and I+1, respectively, and increment I by 2.
1. If T is a floating-point type and can be represented without loss
   of precision in a floating-point register, assign V to register FP
   and increment FP.
1. If T is a complex type, recursively register-assign its real and
   imaginary parts.
1. If T is a pointer type, map type, channel type, or function type,
   assign V to register I and increment I.
1. If T is a string type, interface type, or slice type, recursively
   register-assign V’s components (2 for strings and interfaces, 3 for
   slices).
1. If T is a struct type, recursively register-assign each field of V.
1. If T is an array type of length 0, do nothing.
1. If T is an array type of length 1, recursively register-assign its
   one element.
1. If T is an array type of length > 1, fail.
1. If I > NI or FP > NFP, fail.
1. If any recursive assignment above fails, fail.
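Below is a toy model of the I/FP counters in these rules — not the compiler's implementation — showing the register-or-stack decision for a couple of simple cases:

```
package main

import "fmt"

// assigner models the I/FP counters from the algorithm above for a
// hypothetical architecture with ni integer and nfp floating-point
// registers. Illustrative only.
type assigner struct {
	i, fp   int // next integer / floating-point register index
	ni, nfp int
}

// regAssignScalar models the scalar cases: take one register, or fail.
func (a *assigner) regAssignScalar(isFloat bool) bool {
	if isFloat {
		if a.fp >= a.nfp {
			return false
		}
		a.fp++
		return true
	}
	if a.i >= a.ni {
		return false
	}
	a.i++
	return true
}

// assignString models a string (pointer + len): register-assign both
// components, or reset I and FP and fall back to the stack, as in
// step 3 of the value-assignment rules.
func (a *assigner) assignString() (inRegs bool) {
	saved := *a
	if a.regAssignScalar(false) && a.regAssignScalar(false) {
		return true
	}
	*a = saved // reset I and FP; the whole value goes on the stack
	return false
}

func main() {
	a := &assigner{ni: 3, nfp: 0}
	fmt.Println(a.assignString()) // true: uses registers 0 and 1
	fmt.Println(a.assignString()) // false: only one integer register left
	fmt.Println(a.i)              // 2: counters restored after the failure
}
```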
The above algorithm produces an assignment of each receiver, argument,
and result to registers or to a field in the stack sequence.
The final stack sequence looks like: stack-assigned receiver,
stack-assigned arguments, pointer-alignment, stack-assigned results,
pointer-alignment, spill space for each register-assigned argument,
pointer-alignment.
The following diagram shows what this stack frame looks like on the
stack, using the typical convention where address 0 is at the bottom:

    +------------------------------+
    | . . .                        |
    | 2nd reg argument spill space |
    | 1st reg argument spill space |
    | <pointer-sized alignment>    |
    | . . .                        |
    | 2nd stack-assigned result    |
    | 1st stack-assigned result    |
    | <pointer-sized alignment>    |
    | . . .                        |
    | 2nd stack-assigned argument  |
    | 1st stack-assigned argument  |
    | stack-assigned receiver      |
    +------------------------------+ ↓ lower addresses

To perform a call, the caller reserves space starting at the lowest
address in its stack frame for the call stack frame, stores arguments
in the registers and argument stack fields determined by the above
algorithm, and performs the call.
At the time of a call, spill space, result stack fields, and result
registers are left uninitialized.
Upon return, the callee must have stored results to all result
registers and result stack fields determined by the above algorithm.

There are no callee-save registers, so a call may overwrite any
register that doesn’t have a fixed meaning, including argument
registers.

### Example

Consider the function `func f(a1 uint8, a2 [2]uintptr, a3 uint8) (r1
struct { x uintptr; y [2]uintptr }, r2 string)` on a 64-bit
architecture with hypothetical integer registers R0–R9.

On entry, `a1` is assigned to `R0`, `a3` is assigned to `R1` and the
stack frame is laid out in the following sequence:

    a2      [2]uintptr
    r1.x    uintptr
    r1.y    [2]uintptr
    a1Spill uint8
    a3Spill uint8
    _       [6]uint8 // alignment padding

In the stack frame, only the `a2` field is initialized on entry; the
rest of the frame is left uninitialized.

On exit, `r2.base` is assigned to `R0`, `r2.len` is assigned to `R1`,
and `r1.x` and `r1.y` are initialized in the stack frame.

There are several things to note in this example.
First, `a2` and `r1` are stack-assigned because they contain arrays.
The other arguments and results are register-assigned.
Result `r2` is decomposed into its components, which are individually
register-assigned.
On the stack, the stack-assigned arguments appear at lower addresses
than the stack-assigned results, which appear at lower addresses than
the argument spill area.
Only arguments, not results, are assigned a spill area on the stack.
### Rationale

Each base value is assigned to its own register to optimize
construction and access.
An alternative would be to pack multiple sub-word values into
registers, or to simply map an argument's in-memory layout to
registers (this is common in C ABIs), but this typically adds cost to
pack and unpack these values.
Modern architectures have more than enough registers to pass all
arguments and results this way for nearly all functions (see the
appendix), so there’s little downside to spreading base values across
registers.

Arguments that can’t be fully assigned to registers are passed
entirely on the stack in case the callee takes the address of that
argument.
If an argument could be split across the stack and registers and the
callee took its address, it would need to be reconstructed in memory,
a process that would be proportional to the size of the argument.

Non-trivial arrays are always passed on the stack because indexing
into an array typically requires a computed offset, which generally
isn’t possible with registers.
Arrays in general are rare in function signatures (only 0.7% of
functions in the Go 1.15 standard library and 0.2% in kubelet).
We considered allowing array fields to be passed on the stack while
the rest of an argument’s fields are passed in registers, but this
creates the same problems as other large structs if the callee takes
the address of an argument, and would benefit <0.1% of functions in
kubelet (and even these very little).

We make exceptions for 0 and 1-element arrays because these don’t
require computed offsets, and 1-element arrays are already decomposed
in the compiler’s SSA representation.

The ABI assignment algorithm above is equivalent to Go’s stack-based
ABI0 calling convention if there are zero architecture registers.
This is intended to ease the transition to the register-based internal
ABI and make it easy for the compiler to generate either calling
convention.
An architecture may still define register meanings that aren’t
compatible with ABI0, but these differences should be easy to account
for in the compiler.

The algorithm reserves spill space for arguments in the caller’s frame
so that the compiler can generate a stack growth path that spills into
this reserved space.
If the callee has to grow the stack, it may not be able to reserve
enough additional stack space in its own frame to spill these, which
is why it’s important that the caller do so.
These slots also act as the home location if these arguments need to
be spilled for any other reason, which simplifies traceback printing.

There are several options for how to lay out the argument spill space.
We chose to lay out each argument according to its type's usual memory
layout but to separate the spill space from the regular argument
space.
Using the usual memory layout simplifies the compiler because it
already understands this layout.
Also, if a function takes the address of a register-assigned argument,
the compiler must spill that argument to memory in its usual memory
layout and it's more convenient to use the argument spill space for
this purpose.

Alternatively, the spill space could be structured around argument
registers.
In this approach, the stack growth spill path would spill each
argument register to a register-sized stack word.
However, if the function takes the address of a register-assigned
argument, the compiler would have to reconstruct it in memory layout
elsewhere on the stack.

The spill space could also be interleaved with the stack-assigned
arguments so the arguments appear in order whether they are register-
or stack-assigned.
This would be close to ABI0, except that register-assigned arguments
would be uninitialized on the stack and there's no need to reserve
stack space for register-assigned results.
We expect separating the spill space to perform better because of
memory locality.
Separating the space is also potentially simpler for `reflect` calls
because this allows `reflect` to summarize the spill space as a single
number.
Finally, the long-term intent is to remove reserved spill slots
entirely – allowing most functions to be called without any stack
setup and easing the introduction of callee-save registers – and
separating the spill space makes that transition easier.
## Closures

A func value (e.g., `var x func()`) is a pointer to a closure object.
A closure object begins with a pointer-sized program counter
representing the entry point of the function, followed by zero or more
bytes containing the closed-over environment.

Closure calls follow the same conventions as static function and
method calls, with one addition. Each architecture specifies a
*closure context pointer* register and calls to closures store the
address of the closure object in the closure context pointer register
prior to the call.
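For illustration, the leading program-counter word can be read from ordinary Go code; this relies on the unstable layout just described, so it is a sketch rather than a supported API:

```
package main

import (
	"fmt"
	"runtime"
	"unsafe"
)

func main() {
	n := 1
	f := func() int { return n } // closure capturing n

	// A func value is a pointer to a closure object whose first word
	// is the entry PC. The double dereference below depends on this
	// unstable ABIInternal layout.
	pc := **(**uintptr)(unsafe.Pointer(&f))
	fmt.Println(runtime.FuncForPC(pc).Name()) // e.g. main.main.func1

	_ = f()
}
```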
## Software floating-point mode

In "softfloat" mode, the ABI simply treats the hardware as having zero
floating-point registers.
As a result, any arguments containing floating-point values will be
passed on the stack.

*Rationale*: Softfloat mode is about compatibility over performance
and is not commonly used.
Hence, we keep the ABI as simple as possible in this case, rather than
adding additional rules for passing floating-point values in integer
registers.

## Architecture specifics

This section describes per-architecture register mappings, as well as
other per-architecture special cases.

### amd64 architecture

The amd64 architecture uses the following sequence of 9 registers for
integer arguments and results:

    RAX, RBX, RCX, RDI, RSI, R8, R9, R10, R11

It uses X0 – X14 for floating-point arguments and results.

*Rationale*: These sequences are chosen from the available registers
to be relatively easy to remember.

Registers R12 and R13 are permanent scratch registers.
R15 is a scratch register except in dynamically linked binaries.

*Rationale*: Some operations such as stack growth and reflection calls
need dedicated scratch registers in order to manipulate call frames
without corrupting arguments or results.

Special-purpose registers are as follows:

| Register | Call meaning | Body meaning |
| --- | --- | --- |
| RSP | Stack pointer | Fixed |
| RBP | Frame pointer | Fixed |
| RDX | Closure context pointer | Scratch |
| R12 | None | Scratch |
| R13 | None | Scratch |
| R14 | Current goroutine | Scratch |
| R15 | GOT reference temporary | Fixed if dynlink |
| X15 | Zero value | Fixed |

TODO: We may start with the existing TLS-based g and move to R14
later.

*Rationale*: These register meanings are compatible with Go’s
stack-based calling convention except for R14 and X15, which will have
to be restored on transitions from ABI0 code to ABIInternal code.
In ABI0, these are undefined, so transitions from ABIInternal to ABI0
can ignore these registers.

*Rationale*: For the current goroutine pointer, we chose a register
that requires an additional REX byte.
While this adds one byte to every function prologue, it is hardly ever
accessed outside the function prologue and we expect making more
single-byte registers available to be a net win.

*Rationale*: We designate X15 as a fixed zero register because
functions often have to bulk zero their stack frames, and this is more
efficient with a designated zero register.
#### Stack layout
|
||||
|
||||
The stack pointer, RSP, grows down and is always aligned to 8 bytes.
|
||||
|
||||
The amd64 architecture does not use a link register.
|
||||
|
||||
A function's stack frame is laid out as follows:
|
||||
|
||||
+------------------------------+
|
||||
| return PC |
|
||||
| RBP on entry |
|
||||
| ... locals ... |
|
||||
| ... outgoing arguments ... |
|
||||
+------------------------------+ ↓ lower addresses
|
||||
|
||||
The "return PC" is pushed as part of the standard amd64 `CALL`
|
||||
operation.
|
||||
On entry, a function subtracts from RSP to open its stack frame and
|
||||
saves the value of RBP directly below the return PC.
|
||||
A leaf function that does not require any stack space may omit the
|
||||
saved RBP.
|
||||
|
||||
The Go ABI's use of RBP as a frame pointer register is compatible with
|
||||
amd64 platform conventions so that Go can inter-operate with platform
|
||||
debuggers and profilers.

#### Flags

The direction flag (D) is always cleared (set to the “forward”
direction) at a call.
The arithmetic status flags are treated like scratch registers and not
preserved across calls.
All other bits in RFLAGS are system flags.

The CPU is always in MMX technology state (not x87 mode).

*Rationale*: Go on amd64 uses the XMM registers and never uses the x87
registers, so it makes sense to assume the CPU is in MMX mode.
Otherwise, any function that used the XMM registers would have to
execute an EMMS instruction before calling another function or
returning (this is the case in the SysV ABI).

At calls, the MXCSR control bits are always set as follows:

| Flag | Bit | Value | Meaning |
| --- | --- | --- | --- |
| FZ | 15 | 0 | Do not flush to zero |
| RC | 14/13 | 0 (RN) | Round to nearest |
| PM | 12 | 1 | Precision masked |
| UM | 11 | 1 | Underflow masked |
| OM | 10 | 1 | Overflow masked |
| ZM | 9 | 1 | Divide-by-zero masked |
| DM | 8 | 1 | Denormal operations masked |
| IM | 7 | 1 | Invalid operations masked |
| DAZ | 6 | 0 | Do not zero de-normals |

The MXCSR status bits are callee-save.

*Rationale*: Having a fixed MXCSR control configuration allows Go
functions to use SSE operations without modifying or saving the MXCSR.
Functions are allowed to modify it between calls (as long as they
restore it), but as of this writing Go code never does.
The above fixed configuration matches the process initialization
control bits specified by the ELF AMD64 ABI.

The x87 floating-point control word is not used by Go on amd64.

## Future directions

### Spill path improvements

The ABI currently reserves spill space for argument registers so the
compiler can statically generate an argument spill path before calling
into `runtime.morestack` to grow the stack.
This ensures there will be sufficient spill space even when the stack
is nearly exhausted and keeps stack growth and stack scanning
essentially unchanged from ABI0.

However, this wastes stack space (the median wastage is 16 bytes per
call), resulting in larger stacks and increased cache footprint.
A better approach would be to reserve stack space only when spilling.
One way to ensure enough space is available to spill would be for
every function to ensure there is enough space for the function's own
frame *as well as* the spill space of all functions it calls.
For most functions, this would change the threshold for the prologue
stack growth check.
For `nosplit` functions, this would change the threshold used in the
linker's static stack size check.

Allocating spill space in the callee rather than the caller may also
allow for faster reflection calls in the common case where a function
takes only register arguments, since it would allow reflection to make
these calls directly without allocating any frame.

The statically-generated spill path also increases code size.
It is possible to instead have a generic spill path in the runtime, as
part of `morestack`.
However, this complicates reserving the spill space, since spilling
all possible register arguments would, in most cases, take
significantly more space than spilling only those used by a particular
function.
Some options are to spill to a temporary space and copy back only the
registers used by the function, or to grow the stack if necessary
before spilling to it (using a temporary space if necessary), or to
use a heap-allocated space if insufficient stack space is available.
These options all add enough complexity that we will have to make this
decision based on the actual code size growth caused by the static
spill paths.

### Clobber sets

As defined, the ABI does not use callee-save registers.
This significantly simplifies the garbage collector and the compiler's
register allocator, but at some performance cost.
A potentially better balance for Go code would be to use *clobber
sets*: for each function, the compiler records the set of registers it
clobbers (including those clobbered by functions it calls) and any
register not clobbered by function F can remain live across calls to
F.
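
A conceptual sketch of how clobber sets could be computed bottom-up
over the static call graph follows; the `RegSet` and `Func` types and
the acyclic-graph assumption are illustrative, not a description of an
actual implementation.

```
package p

// RegSet is a bit set with one bit per machine register.
type RegSet uint64

// Func is a hypothetical per-function summary node.
type Func struct {
	writes  RegSet  // registers written directly by the function body
	callees []*Func // statically known callees
}

// clobberSet unions a function's own register writes with its
// callees' clobber sets, memoizing results. For simplicity this
// sketch assumes an acyclic call graph; recursion would need a
// fixed-point iteration instead.
func clobberSet(f *Func, memo map[*Func]RegSet) RegSet {
	if cs, ok := memo[f]; ok {
		return cs
	}
	cs := f.writes
	for _, callee := range f.callees {
		cs |= clobberSet(callee, memo)
	}
	memo[f] = cs
	return cs
}
```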

This is generally a good fit for Go because Go's package DAG allows
function metadata like the clobber set to flow up the call graph, even
across package boundaries.
Clobber sets would require relatively little change to the garbage
collector, unlike general callee-save registers.
One disadvantage of clobber sets over callee-save registers is that
they don't help with indirect function calls or interface method
calls, since static information isn't available in these cases.

### Large aggregates

Go encourages passing composite values by value, and this simplifies
reasoning about mutation and races.
However, this comes at a performance cost for large composite values.
It may be possible to instead transparently pass large composite
values by reference and delay copying until it is actually necessary.

## Appendix: Register usage analysis

In order to understand the impacts of the above design on register
usage, we
[analyzed](https://github.com/aclements/go-misc/tree/master/abi) the
impact of the above ABI on a large code base: cmd/kubelet from
[Kubernetes](https://github.com/kubernetes/kubernetes) at tag v1.18.8.

The following table shows the impact of different numbers of available
integer and floating-point registers on argument assignment:

```
| | | | stack args | spills | stack total |
| ints | floats | % fit | p50 | p95 | p99 | p50 | p95 | p99 | p50 | p95 | p99 |
| 0 | 0 | 6.3% | 32 | 152 | 256 | 0 | 0 | 0 | 32 | 152 | 256 |
| 0 | 8 | 6.4% | 32 | 152 | 256 | 0 | 0 | 0 | 32 | 152 | 256 |
| 1 | 8 | 21.3% | 24 | 144 | 248 | 8 | 8 | 8 | 32 | 152 | 256 |
| 2 | 8 | 38.9% | 16 | 128 | 224 | 8 | 16 | 16 | 24 | 136 | 240 |
| 3 | 8 | 57.0% | 0 | 120 | 224 | 16 | 24 | 24 | 24 | 136 | 240 |
| 4 | 8 | 73.0% | 0 | 120 | 216 | 16 | 32 | 32 | 24 | 136 | 232 |
| 5 | 8 | 83.3% | 0 | 112 | 216 | 16 | 40 | 40 | 24 | 136 | 232 |
| 6 | 8 | 87.5% | 0 | 112 | 208 | 16 | 48 | 48 | 24 | 136 | 232 |
| 7 | 8 | 89.8% | 0 | 112 | 208 | 16 | 48 | 56 | 24 | 136 | 232 |
| 8 | 8 | 91.3% | 0 | 112 | 200 | 16 | 56 | 64 | 24 | 136 | 232 |
| 9 | 8 | 92.1% | 0 | 112 | 192 | 16 | 56 | 72 | 24 | 136 | 232 |
| 10 | 8 | 92.6% | 0 | 104 | 192 | 16 | 56 | 72 | 24 | 136 | 232 |
| 11 | 8 | 93.1% | 0 | 104 | 184 | 16 | 56 | 80 | 24 | 128 | 232 |
| 12 | 8 | 93.4% | 0 | 104 | 176 | 16 | 56 | 88 | 24 | 128 | 232 |
| 13 | 8 | 94.0% | 0 | 88 | 176 | 16 | 56 | 96 | 24 | 128 | 232 |
| 14 | 8 | 94.4% | 0 | 80 | 152 | 16 | 64 | 104 | 24 | 128 | 232 |
| 15 | 8 | 94.6% | 0 | 80 | 152 | 16 | 64 | 112 | 24 | 128 | 232 |
| 16 | 8 | 94.9% | 0 | 16 | 152 | 16 | 64 | 112 | 24 | 128 | 232 |
| ∞ | 8 | 99.8% | 0 | 0 | 0 | 24 | 112 | 216 | 24 | 120 | 216 |
```

The first two columns show the number of available integer and
floating-point registers.
The first row shows the results for 0 integer and 0 floating-point
registers, which is equivalent to ABI0.
We found that any reasonable number of floating-point registers has
the same effect, so we fixed it at 8 for all other rows.

The “% fit” column gives the fraction of functions where all arguments
and results are register-assigned and no arguments are passed on the
stack.
The three “stack args” columns give the median, 95th and 99th
percentile number of bytes of stack arguments.
The “spills” columns likewise summarize the number of bytes in
on-stack spill space.
And “stack total” summarizes the sum of stack arguments and on-stack
spill slots.
Note that these are three different distributions; for example,
there’s no single function that takes 0 stack argument bytes, 16 spill
bytes, and 24 total stack bytes.

From this, we can see that the fraction of functions that fit entirely
in registers grows very slowly once it reaches about 90%, though
curiously there is a small minority of functions that could benefit
from a huge number of registers.
Making 9 integer registers available on amd64 puts it in this realm.
We also see that the stack space required for most functions is fairly
small.
While the increasing space required for spills largely balances out
the decreasing space required for stack arguments as the number of
available registers increases, there is a general reduction in the
total stack space required with more available registers.
This does, however, suggest that eliminating spill slots in the future
would noticeably reduce stack requirements.

@@ -1,461 +0,0 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package abi

import (
	"cmd/compile/internal/types"
	"cmd/internal/src"
	"fmt"
	"sync"
)

//......................................................................
//
// Public/exported bits of the ABI utilities.
//

// ABIParamResultInfo stores the results of processing a given
// function type to compute stack layout and register assignments. For
// each input and output parameter we capture whether the param was
// register-assigned (and to which register(s)) or the stack offset
// for the param if it is not going to be passed in registers according
// to the rules in the Go internal ABI specification (1.17).
type ABIParamResultInfo struct {
	inparams          []ABIParamAssignment // Includes receiver for method calls. Does NOT include hidden closure pointer.
	outparams         []ABIParamAssignment
	offsetToSpillArea int64
	spillAreaSize     int64
	config            *ABIConfig // to enable String() method
}

func (a *ABIParamResultInfo) InParams() []ABIParamAssignment {
	return a.inparams
}

func (a *ABIParamResultInfo) OutParams() []ABIParamAssignment {
	return a.outparams
}

func (a *ABIParamResultInfo) InParam(i int) ABIParamAssignment {
	return a.inparams[i]
}

func (a *ABIParamResultInfo) OutParam(i int) ABIParamAssignment {
	return a.outparams[i]
}

func (a *ABIParamResultInfo) SpillAreaOffset() int64 {
	return a.offsetToSpillArea
}

func (a *ABIParamResultInfo) SpillAreaSize() int64 {
	return a.spillAreaSize
}

// RegIndex stores the index into the set of machine registers used by
// the ABI on a specific architecture for parameter passing. RegIndex
// values 0 through N-1 (where N is the number of integer registers
// used for param passing according to the ABI rules) describe integer
// registers; values N through N+F-1 (where F is the number of floating
// point registers used) describe floating point registers. Thus if the
// ABI says there are 5 integer registers and 7 floating point
// registers, then a RegIndex value of 4 indicates the 5th integer
// register, and a RegIndex value of 11 indicates the 7th floating
// point register.
type RegIndex uint8

// ABIParamAssignment holds information about how a specific param or
// result will be passed: in registers (in which case 'Registers' is
// populated) or on the stack (in which case 'Offset' is set to a
// non-negative stack offset). The values in 'Registers' are indices (as
// described above), not architected registers.
type ABIParamAssignment struct {
	Type      *types.Type
	Registers []RegIndex
	offset    int32
}

// Offset returns the stack offset for addressing the parameter that "a" describes.
// This will panic if "a" describes a register-allocated parameter.
func (a *ABIParamAssignment) Offset() int32 {
	if len(a.Registers) > 0 {
		panic("Register allocated parameters have no offset")
	}
	return a.offset
}

// SpillOffset returns the offset *within the spill area* for the parameter that "a" describes.
// Registers will be spilled here; if a memory home is needed (for a pointer method e.g.)
// then that will be the address.
// This will panic if "a" describes a stack-allocated parameter.
func (a *ABIParamAssignment) SpillOffset() int32 {
	if len(a.Registers) == 0 {
		panic("Stack-allocated parameters have no spill offset")
	}
	return a.offset
}
// RegAmounts holds a specified number of integer/float registers.
type RegAmounts struct {
	intRegs   int
	floatRegs int
}

// ABIConfig captures the number of registers made available
// by the ABI rules for parameter passing and result returning.
type ABIConfig struct {
	// Do we need anything more than this?
	regAmounts       RegAmounts
	regsForTypeCache map[*types.Type]int
}

// NewABIConfig returns a new ABI configuration for an architecture with
// iRegsCount integer/pointer registers and fRegsCount floating point registers.
func NewABIConfig(iRegsCount, fRegsCount int) *ABIConfig {
	return &ABIConfig{regAmounts: RegAmounts{iRegsCount, fRegsCount}, regsForTypeCache: make(map[*types.Type]int)}
}

// NumParamRegs returns the number of parameter registers used for a given type,
// without regard for the number available.
func (a *ABIConfig) NumParamRegs(t *types.Type) int {
	if n, ok := a.regsForTypeCache[t]; ok {
		return n
	}

	if t.IsScalar() || t.IsPtrShaped() {
		var n int
		if t.IsComplex() {
			n = 2
		} else {
			n = (int(t.Size()) + types.RegSize - 1) / types.RegSize
		}
		a.regsForTypeCache[t] = n
		return n
	}
	typ := t.Kind()
	n := 0
	switch typ {
	case types.TARRAY:
		n = a.NumParamRegs(t.Elem()) * int(t.NumElem())
	case types.TSTRUCT:
		for _, f := range t.FieldSlice() {
			n += a.NumParamRegs(f.Type)
		}
	case types.TSLICE:
		n = a.NumParamRegs(synthSlice)
	case types.TSTRING:
		n = a.NumParamRegs(synthString)
	case types.TINTER:
		n = a.NumParamRegs(synthIface)
	}
	a.regsForTypeCache[t] = n
	return n
}
// ABIAnalyze takes a function type 't' and an ABI rules description
// 'config' and analyzes the function to determine how its parameters
// and results will be passed (in registers or on the stack), returning
// an ABIParamResultInfo object that holds the results of the analysis.
func (config *ABIConfig) ABIAnalyze(t *types.Type) ABIParamResultInfo {
	setup()
	s := assignState{
		rTotal: config.regAmounts,
	}
	result := ABIParamResultInfo{config: config}

	// Receiver
	ft := t.FuncType()
	if t.NumRecvs() != 0 {
		rfsl := ft.Receiver.FieldSlice()
		result.inparams = append(result.inparams,
			s.assignParamOrReturn(rfsl[0].Type, false))
	}

	// Inputs
	ifsl := ft.Params.FieldSlice()
	for _, f := range ifsl {
		result.inparams = append(result.inparams,
			s.assignParamOrReturn(f.Type, false))
	}
	s.stackOffset = types.Rnd(s.stackOffset, int64(types.RegSize))

	// Outputs
	s.rUsed = RegAmounts{}
	ofsl := ft.Results.FieldSlice()
	for _, f := range ofsl {
		result.outparams = append(result.outparams, s.assignParamOrReturn(f.Type, true))
	}
	// The spill area is at a register-aligned offset and its size is rounded up to a register alignment.
	// TODO in theory could align offset only to minimum required by spilled data types.
	result.offsetToSpillArea = alignTo(s.stackOffset, types.RegSize)
	result.spillAreaSize = alignTo(s.spillOffset, types.RegSize)

	return result
}

//......................................................................
//
// Non-public portions.

// regString produces a human-readable version of a RegIndex.
func (c *RegAmounts) regString(r RegIndex) string {
	if int(r) < c.intRegs {
		return fmt.Sprintf("I%d", int(r))
	} else if int(r) < c.intRegs+c.floatRegs {
		return fmt.Sprintf("F%d", int(r)-c.intRegs)
	}
	return fmt.Sprintf("<?>%d", r)
}

// toString renders an ABIParamAssignment in human-readable
// form, suitable for debugging or unit testing.
func (ri *ABIParamAssignment) toString(config *ABIConfig) string {
	regs := "R{"
	offname := "spilloffset" // offset is for spill for register(s)
	if len(ri.Registers) == 0 {
		offname = "offset" // offset is for memory arg
	}
	for _, r := range ri.Registers {
		regs += " " + config.regAmounts.regString(r)
	}
	return fmt.Sprintf("%s } %s: %d typ: %v", regs, offname, ri.offset, ri.Type)
}

// String renders an ABIParamResultInfo in human-readable
// form, suitable for debugging or unit testing.
func (ri *ABIParamResultInfo) String() string {
	res := ""
	for k, p := range ri.inparams {
		res += fmt.Sprintf("IN %d: %s\n", k, p.toString(ri.config))
	}
	for k, r := range ri.outparams {
		res += fmt.Sprintf("OUT %d: %s\n", k, r.toString(ri.config))
	}
	res += fmt.Sprintf("offsetToSpillArea: %d spillAreaSize: %d",
		ri.offsetToSpillArea, ri.spillAreaSize)
	return res
}
// assignState holds intermediate state during the register assigning process
// for a given function signature.
type assignState struct {
	rTotal      RegAmounts // total reg amounts from ABI rules
	rUsed       RegAmounts // regs used by params completely assigned so far
	pUsed       RegAmounts // regs used by the current param (or pieces therein)
	stackOffset int64      // current stack offset
	spillOffset int64      // current spill offset
}

// align returns 'a' rounded up to t's alignment.
func align(a int64, t *types.Type) int64 {
	return alignTo(a, int(t.Align))
}

// alignTo returns 'a' rounded up to t, where t must be 0 or a power of 2.
func alignTo(a int64, t int) int64 {
	if t == 0 {
		return a
	}
	return types.Rnd(a, int64(t))
}
// stackSlot returns a stack offset for a param or result of the
// specified type.
func (state *assignState) stackSlot(t *types.Type) int64 {
	rv := align(state.stackOffset, t)
	state.stackOffset = rv + t.Width
	return rv
}

// allocateRegs returns a set of register indices for a parameter or result
// that we've just determined to be register-assignable. The number of registers
// needed is assumed to be stored in state.pUsed.
func (state *assignState) allocateRegs() []RegIndex {
	regs := []RegIndex{}

	// integer
	for r := state.rUsed.intRegs; r < state.rUsed.intRegs+state.pUsed.intRegs; r++ {
		regs = append(regs, RegIndex(r))
	}
	state.rUsed.intRegs += state.pUsed.intRegs

	// floating
	for r := state.rUsed.floatRegs; r < state.rUsed.floatRegs+state.pUsed.floatRegs; r++ {
		regs = append(regs, RegIndex(r+state.rTotal.intRegs))
	}
	state.rUsed.floatRegs += state.pUsed.floatRegs

	return regs
}

// regAllocate creates a register ABIParamAssignment object for a param
// or result with the specified type, as a final step (this assumes
// that all of the safety/suitability analysis is complete).
func (state *assignState) regAllocate(t *types.Type, isReturn bool) ABIParamAssignment {
	spillLoc := int64(-1)
	if !isReturn {
		// Spill for register-resident t must be aligned for storage of a t.
		spillLoc = align(state.spillOffset, t)
		state.spillOffset = spillLoc + t.Size()
	}
	return ABIParamAssignment{
		Type:      t,
		Registers: state.allocateRegs(),
		offset:    int32(spillLoc),
	}
}

// stackAllocate creates a stack memory ABIParamAssignment object for
// a param or result with the specified type, as a final step (this
// assumes that all of the safety/suitability analysis is complete).
func (state *assignState) stackAllocate(t *types.Type) ABIParamAssignment {
	return ABIParamAssignment{
		Type:   t,
		offset: int32(state.stackSlot(t)),
	}
}

// intUsed returns the number of integer registers consumed
// at a given point within an assignment stage.
func (state *assignState) intUsed() int {
	return state.rUsed.intRegs + state.pUsed.intRegs
}

// floatUsed returns the number of floating point registers consumed at
// a given point within an assignment stage.
func (state *assignState) floatUsed() int {
	return state.rUsed.floatRegs + state.pUsed.floatRegs
}
// regassignIntegral examines a param/result of integral type 't' to
// determine whether it can be register-assigned. Returns TRUE if we
// can register allocate, FALSE otherwise (and updates state
// accordingly).
func (state *assignState) regassignIntegral(t *types.Type) bool {
	regsNeeded := int(types.Rnd(t.Width, int64(types.PtrSize)) / int64(types.PtrSize))
	if t.IsComplex() {
		regsNeeded = 2
	}

	// Floating point and complex.
	if t.IsFloat() || t.IsComplex() {
		if regsNeeded+state.floatUsed() > state.rTotal.floatRegs {
			// not enough regs
			return false
		}
		state.pUsed.floatRegs += regsNeeded
		return true
	}

	// Non-floating point
	if regsNeeded+state.intUsed() > state.rTotal.intRegs {
		// not enough regs
		return false
	}
	state.pUsed.intRegs += regsNeeded
	return true
}

// regassignArray processes an array type (or array component within some
// other enclosing type) to determine if it can be register assigned.
// Returns TRUE if we can register allocate, FALSE otherwise.
func (state *assignState) regassignArray(t *types.Type) bool {
	nel := t.NumElem()
	if nel == 0 {
		return true
	}
	if nel > 1 {
		// Not an array of length 1: stack assign
		return false
	}
	// Visit element
	return state.regassign(t.Elem())
}
// regassignStruct processes a struct type (or struct component within
// some other enclosing type) to determine if it can be register
// assigned. Returns TRUE if we can register allocate, FALSE otherwise.
func (state *assignState) regassignStruct(t *types.Type) bool {
	for _, field := range t.FieldSlice() {
		if !state.regassign(field.Type) {
			return false
		}
	}
	return true
}

// synthOnce ensures that we only create the synth* fake types once.
var synthOnce sync.Once

// synthSlice, synthString, and synthIface are synthesized struct types
// meant to capture the underlying implementations of string/slice/interface.
var synthSlice *types.Type
var synthString *types.Type
var synthIface *types.Type

// setup performs setup for the register assignment utilities, manufacturing
// a small set of synthesized types that we'll need along the way.
func setup() {
	synthOnce.Do(func() {
		fname := types.BuiltinPkg.Lookup
		nxp := src.NoXPos
		unsp := types.Types[types.TUNSAFEPTR]
		ui := types.Types[types.TUINTPTR]
		synthSlice = types.NewStruct(types.NoPkg, []*types.Field{
			types.NewField(nxp, fname("ptr"), unsp),
			types.NewField(nxp, fname("len"), ui),
			types.NewField(nxp, fname("cap"), ui),
		})
		synthString = types.NewStruct(types.NoPkg, []*types.Field{
			types.NewField(nxp, fname("data"), unsp),
			types.NewField(nxp, fname("len"), ui),
		})
		synthIface = types.NewStruct(types.NoPkg, []*types.Field{
			types.NewField(nxp, fname("f1"), unsp),
			types.NewField(nxp, fname("f2"), unsp),
		})
	})
}
// regassign examines a given param type (or component within some
// composite) to determine if it can be register assigned. Returns
// TRUE if we can register allocate, FALSE otherwise.
func (state *assignState) regassign(pt *types.Type) bool {
	typ := pt.Kind()
	if pt.IsScalar() || pt.IsPtrShaped() {
		return state.regassignIntegral(pt)
	}
	switch typ {
	case types.TARRAY:
		return state.regassignArray(pt)
	case types.TSTRUCT:
		return state.regassignStruct(pt)
	case types.TSLICE:
		return state.regassignStruct(synthSlice)
	case types.TSTRING:
		return state.regassignStruct(synthString)
	case types.TINTER:
		return state.regassignStruct(synthIface)
	default:
		panic("not expected")
	}
}

// assignParamOrReturn processes a given receiver, param, or result
// of type 'pt' to determine whether it can be register assigned.
// The result of the analysis is recorded in the result
// ABIParamResultInfo held in 'state'.
func (state *assignState) assignParamOrReturn(pt *types.Type, isReturn bool) ABIParamAssignment {
	state.pUsed = RegAmounts{}
	if pt.Width == types.BADWIDTH {
		panic("should never happen")
	} else if pt.Width == 0 {
		return state.stackAllocate(pt)
	} else if state.regassign(pt) {
		return state.regAllocate(pt, isReturn)
	} else {
		return state.stackAllocate(pt)
	}
}
@@ -5,13 +5,13 @@
 package amd64
 
 import (
-	"cmd/compile/internal/ssagen"
+	"cmd/compile/internal/gc"
 	"cmd/internal/obj/x86"
 )
 
 var leaptr = x86.ALEAQ
 
-func Init(arch *ssagen.ArchInfo) {
+func Init(arch *gc.Arch) {
 	arch.LinkArch = &x86.Linkamd64
 	arch.REGSP = x86.REGSP
 	arch.MAXWIDTH = 1 << 50
 
@@ -5,10 +5,7 @@
 package amd64
 
 import (
-	"cmd/compile/internal/base"
-	"cmd/compile/internal/ir"
-	"cmd/compile/internal/objw"
-	"cmd/compile/internal/types"
+	"cmd/compile/internal/gc"
 	"cmd/internal/obj"
 	"cmd/internal/obj/x86"
 	"cmd/internal/objabi"
@@ -22,8 +19,8 @@ var isPlan9 = objabi.GOOS == "plan9"
 const (
 	dzBlocks    = 16 // number of MOV/ADD blocks
 	dzBlockLen  = 4  // number of clears per block
-	dzBlockSize = 23 // size of instructions in a single block
-	dzMovSize   = 5  // size of single MOV instruction w/ offset
+	dzBlockSize = 19 // size of instructions in a single block
+	dzMovSize   = 4  // size of single MOV instruction w/ offset
 	dzLeaqSize  = 4  // size of single LEAQ instruction
 	dzClearStep = 16 // number of bytes cleared by each MOV instruction
 
@@ -54,7 +51,7 @@ func dzDI(b int64) int64 {
 	return -dzClearStep * (dzBlockLen - tailSteps)
 }
 
-func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
+func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
 	const (
 		ax = 1 << iota
 		x0
@@ -64,67 +61,67 @@ func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
 		return p
 	}
 
-	if cnt%int64(types.RegSize) != 0 {
+	if cnt%int64(gc.Widthreg) != 0 {
 		// should only happen with nacl
-		if cnt%int64(types.PtrSize) != 0 {
-			base.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
+		if cnt%int64(gc.Widthptr) != 0 {
+			gc.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
 		}
 		if *state&ax == 0 {
-			p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+			p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
 			*state |= ax
 		}
-		p = pp.Append(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
-		off += int64(types.PtrSize)
-		cnt -= int64(types.PtrSize)
+		p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
+		off += int64(gc.Widthptr)
+		cnt -= int64(gc.Widthptr)
 	}
 
 	if cnt == 8 {
 		if *state&ax == 0 {
-			p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+			p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
 			*state |= ax
 		}
-		p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
-	} else if !isPlan9 && cnt <= int64(8*types.RegSize) {
+		p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
+	} else if !isPlan9 && cnt <= int64(8*gc.Widthreg) {
 		if *state&x0 == 0 {
-			p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
+			p = pp.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
 			*state |= x0
 		}
 
 		for i := int64(0); i < cnt/16; i++ {
-			p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16)
+			p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16)
 		}
 
 		if cnt%16 != 0 {
-			p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
+			p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
 		}
-	} else if !isPlan9 && (cnt <= int64(128*types.RegSize)) {
+	} else if !isPlan9 && (cnt <= int64(128*gc.Widthreg)) {
 		if *state&x0 == 0 {
-			p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
+			p = pp.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
 			*state |= x0
 		}
-		p = pp.Append(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0)
-		p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt))
-		p.To.Sym = ir.Syms.Duffzero
+		p = pp.Appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0)
+		p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt))
+		p.To.Sym = gc.Duffzero
 
 		if cnt%16 != 0 {
-			p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8))
+			p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8))
 		}
 	} else {
 		if *state&ax == 0 {
-			p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+			p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
 			*state |= ax
 		}
 
-		p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(types.RegSize), obj.TYPE_REG, x86.REG_CX, 0)
-		p = pp.Append(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
-		p = pp.Append(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
-		p = pp.Append(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+		p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0)
+		p = pp.Appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
+		p = pp.Appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+		p = pp.Appendpp(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
 	}
 
 	return p
 }
 
-func ginsnop(pp *objw.Progs) *obj.Prog {
+func ginsnop(pp *gc.Progs) *obj.Prog {
 	// This is a hardware nop (1-byte 0x90) instruction,
 	// even though we describe it as an explicit XCHGL here.
 	// Particularly, this does not zero the high 32 bits
 
@@ -8,18 +8,16 @@ import (
 	"fmt"
 	"math"
 
-	"cmd/compile/internal/base"
-	"cmd/compile/internal/ir"
+	"cmd/compile/internal/gc"
 	"cmd/compile/internal/logopt"
 	"cmd/compile/internal/ssa"
-	"cmd/compile/internal/ssagen"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/x86"
 )
 
 // markMoves marks any MOVXconst ops that need to avoid clobbering flags.
-func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
+func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
 	flive := b.FlagsLiveAtEnd
 	for _, c := range b.ControlValues() {
 		flive = c.Type.IsFlags() || flive
@@ -112,7 +110,7 @@ func moveByType(t *types.Type) obj.As {
 // dest := dest(To) op src(From)
 // and also returns the created obj.Prog so it
 // may be further adjusted (offset, scale, etc).
-func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
+func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
 	p := s.Prog(op)
 	p.From.Type = obj.TYPE_REG
 	p.To.Type = obj.TYPE_REG
@@ -166,35 +164,7 @@ func duff(size int64) (int64, int64) {
 	return off, adj
 }
 
-func getgFromTLS(s *ssagen.State, r int16) {
-	// See the comments in cmd/internal/obj/x86/obj6.go
-	// near CanUse1InsnTLS for a detailed explanation of these instructions.
-	if x86.CanUse1InsnTLS(base.Ctxt) {
-		// MOVQ (TLS), r
-		p := s.Prog(x86.AMOVQ)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = x86.REG_TLS
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-	} else {
-		// MOVQ TLS, r
-		// MOVQ (r)(TLS*1), r
-		p := s.Prog(x86.AMOVQ)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = x86.REG_TLS
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-		q := s.Prog(x86.AMOVQ)
-		q.From.Type = obj.TYPE_MEM
-		q.From.Reg = r
-		q.From.Index = x86.REG_TLS
-		q.From.Scale = 1
-		q.To.Type = obj.TYPE_REG
-		q.To.Reg = r
-	}
-}
-
-func ssaGenValue(s *ssagen.State, v *ssa.Value) {
+func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 	switch v.Op {
 	case ssa.OpAMD64VFMADD231SD:
 		p := s.Prog(v.Op.Asm())
@@ -660,12 +630,12 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 			p.To.Type = obj.TYPE_REG
 			p.To.Reg = o
 		}
-		ssagen.AddAux(&p.From, v)
+		gc.AddAux(&p.From, v)
 	case ssa.OpAMD64LEAQ, ssa.OpAMD64LEAL, ssa.OpAMD64LEAW:
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		ssagen.AddAux(&p.From, v)
+		gc.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpAMD64CMPQ, ssa.OpAMD64CMPL, ssa.OpAMD64CMPW, ssa.OpAMD64CMPB,
@@ -701,7 +671,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		ssagen.AddAux(&p.From, v)
+		gc.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Args[1].Reg()
 	case ssa.OpAMD64CMPQconstload, ssa.OpAMD64CMPLconstload, ssa.OpAMD64CMPWconstload, ssa.OpAMD64CMPBconstload:
@@ -709,20 +679,20 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		ssagen.AddAux2(&p.From, v, sc.Off())
+		gc.AddAux2(&p.From, v, sc.Off())
 		p.To.Type = obj.TYPE_CONST
 		p.To.Offset = sc.Val()
 	case ssa.OpAMD64CMPQloadidx8, ssa.OpAMD64CMPQloadidx1, ssa.OpAMD64CMPLloadidx4, ssa.OpAMD64CMPLloadidx1, ssa.OpAMD64CMPWloadidx2, ssa.OpAMD64CMPWloadidx1, ssa.OpAMD64CMPBloadidx1:
 		p := s.Prog(v.Op.Asm())
 		memIdx(&p.From, v)
-		ssagen.AddAux(&p.From, v)
+		gc.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Args[2].Reg()
 	case ssa.OpAMD64CMPQconstloadidx8, ssa.OpAMD64CMPQconstloadidx1, ssa.OpAMD64CMPLconstloadidx4, ssa.OpAMD64CMPLconstloadidx1, ssa.OpAMD64CMPWconstloadidx2, ssa.OpAMD64CMPWconstloadidx1, ssa.OpAMD64CMPBconstloadidx1:
 		sc := v.AuxValAndOff()
 		p := s.Prog(v.Op.Asm())
 		memIdx(&p.From, v)
-		ssagen.AddAux2(&p.From, v, sc.Off())
+		gc.AddAux2(&p.From, v, sc.Off())
 		p.To.Type = obj.TYPE_CONST
 		p.To.Offset = sc.Val()
 	case ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst:
@@ -762,14 +732,14 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		ssagen.AddAux(&p.From, v)
+		gc.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpAMD64MOVBloadidx1, ssa.OpAMD64MOVWloadidx1, ssa.OpAMD64MOVLloadidx1, ssa.OpAMD64MOVQloadidx1, ssa.OpAMD64MOVSSloadidx1, ssa.OpAMD64MOVSDloadidx1,
 		ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8, ssa.OpAMD64MOVLloadidx8, ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4, ssa.OpAMD64MOVWloadidx2:
 		p := s.Prog(v.Op.Asm())
 		memIdx(&p.From, v)
-		ssagen.AddAux(&p.From, v)
+		gc.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVSSstore, ssa.OpAMD64MOVSDstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore, ssa.OpAMD64MOVOstore,
@@ -781,7 +751,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p.From.Reg = v.Args[1].Reg()
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		ssagen.AddAux(&p.To, v)
+		gc.AddAux(&p.To, v)
 	case ssa.OpAMD64MOVBstoreidx1, ssa.OpAMD64MOVWstoreidx1, ssa.OpAMD64MOVLstoreidx1, ssa.OpAMD64MOVQstoreidx1, ssa.OpAMD64MOVSSstoreidx1, ssa.OpAMD64MOVSDstoreidx1,
 		ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8, ssa.OpAMD64MOVLstoreidx8, ssa.OpAMD64MOVSSstoreidx4, ssa.OpAMD64MOVLstoreidx4, ssa.OpAMD64MOVWstoreidx2,
 		ssa.OpAMD64ADDLmodifyidx1, ssa.OpAMD64ADDLmodifyidx4, ssa.OpAMD64ADDLmodifyidx8, ssa.OpAMD64ADDQmodifyidx1, ssa.OpAMD64ADDQmodifyidx8,
@@ -793,7 +763,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p.From.Type = obj.TYPE_REG
 		p.From.Reg = v.Args[2].Reg()
 		memIdx(&p.To, v)
-		ssagen.AddAux(&p.To, v)
+		gc.AddAux(&p.To, v)
 	case ssa.OpAMD64ADDQconstmodify, ssa.OpAMD64ADDLconstmodify:
 		sc := v.AuxValAndOff()
 		off := sc.Off()
@@ -816,7 +786,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 			p := s.Prog(asm)
 			p.To.Type = obj.TYPE_MEM
 			p.To.Reg = v.Args[0].Reg()
-			ssagen.AddAux2(&p.To, v, off)
+			gc.AddAux2(&p.To, v, off)
 			break
 		}
 		fallthrough
@@ -831,7 +801,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p.From.Offset = val
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		ssagen.AddAux2(&p.To, v, off)
+		gc.AddAux2(&p.To, v, off)
 
 	case ssa.OpAMD64MOVQstoreconst, ssa.OpAMD64MOVLstoreconst, ssa.OpAMD64MOVWstoreconst, ssa.OpAMD64MOVBstoreconst:
 		p := s.Prog(v.Op.Asm())
@@ -840,21 +810,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p.From.Offset = sc.Val()
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		ssagen.AddAux2(&p.To, v, sc.Off())
-	case ssa.OpAMD64MOVOstorezero:
-		if s.ABI != obj.ABIInternal {
-			v.Fatalf("MOVOstorezero can be only used in ABIInternal functions")
-		}
-		if !base.Flag.ABIWrap {
-			// zeroing X15 manually if wrappers are not used
-			opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
-		}
-		p := s.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = x86.REG_X15
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		ssagen.AddAux(&p.To, v)
+		gc.AddAux2(&p.To, v, sc.Off())
 	case ssa.OpAMD64MOVQstoreconstidx1, ssa.OpAMD64MOVQstoreconstidx8, ssa.OpAMD64MOVLstoreconstidx1, ssa.OpAMD64MOVLstoreconstidx4, ssa.OpAMD64MOVWstoreconstidx1, ssa.OpAMD64MOVWstoreconstidx2, ssa.OpAMD64MOVBstoreconstidx1,
 		ssa.OpAMD64ADDLconstmodifyidx1, ssa.OpAMD64ADDLconstmodifyidx4, ssa.OpAMD64ADDLconstmodifyidx8, ssa.OpAMD64ADDQconstmodifyidx1, ssa.OpAMD64ADDQconstmodifyidx8,
 		ssa.OpAMD64ANDLconstmodifyidx1, ssa.OpAMD64ANDLconstmodifyidx4, ssa.OpAMD64ANDLconstmodifyidx8, ssa.OpAMD64ANDQconstmodifyidx1, ssa.OpAMD64ANDQconstmodifyidx8,
@@ -879,7 +835,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 			p.From.Type = obj.TYPE_NONE
 		}
 		memIdx(&p.To, v)
-		ssagen.AddAux2(&p.To, v, sc.Off())
+		gc.AddAux2(&p.To, v, sc.Off())
 	case ssa.OpAMD64MOVLQSX, ssa.OpAMD64MOVWQSX, ssa.OpAMD64MOVBQSX, ssa.OpAMD64MOVLQZX, ssa.OpAMD64MOVWQZX, ssa.OpAMD64MOVBQZX,
 		ssa.OpAMD64CVTTSS2SL, ssa.OpAMD64CVTTSD2SL, ssa.OpAMD64CVTTSS2SQ, ssa.OpAMD64CVTTSD2SQ,
 		ssa.OpAMD64CVTSS2SD, ssa.OpAMD64CVTSD2SS:
@@ -909,7 +865,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[1].Reg()
-		ssagen.AddAux(&p.From, v)
+		gc.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 		if v.Reg() != v.Args[0].Reg() {
@@ -935,20 +891,13 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p.From.Reg = r
 		p.From.Index = i
 
-		ssagen.AddAux(&p.From, v)
+		gc.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 		if v.Reg() != v.Args[0].Reg() {
 			v.Fatalf("input[0] and output not in same register %s", v.LongString())
 		}
 	case ssa.OpAMD64DUFFZERO:
-		if s.ABI != obj.ABIInternal {
-			v.Fatalf("MOVOconst can be only used in ABIInternal functions")
-		}
-		if !base.Flag.ABIWrap {
-			// zeroing X15 manually if wrappers are not used
-			opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
-		}
 		off := duffStart(v.AuxInt)
 		adj := duffAdj(v.AuxInt)
 		var p *obj.Prog
@@ -962,12 +911,18 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		}
 		p = s.Prog(obj.ADUFFZERO)
 		p.To.Type = obj.TYPE_ADDR
-		p.To.Sym = ir.Syms.Duffzero
+		p.To.Sym = gc.Duffzero
 		p.To.Offset = off
+	case ssa.OpAMD64MOVOconst:
+		if v.AuxInt != 0 {
+			v.Fatalf("MOVOconst can only do constant=0")
+		}
+		r := v.Reg()
+		opregreg(s, x86.AXORPS, r, r)
 	case ssa.OpAMD64DUFFCOPY:
 		p := s.Prog(obj.ADUFFCOPY)
 		p.To.Type = obj.TYPE_ADDR
-		p.To.Sym = ir.Syms.Duffcopy
+		p.To.Sym = gc.Duffcopy
 		if v.AuxInt%16 != 0 {
 			v.Fatalf("bad DUFFCOPY AuxInt %v", v.AuxInt)
 		}
@@ -994,7 +949,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 			return
 		}
 		p := s.Prog(loadByType(v.Type))
-		ssagen.AddrAuto(&p.From, v.Args[0])
+		gc.AddrAuto(&p.From, v.Args[0])
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 
@@ -1006,37 +961,44 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p := s.Prog(storeByType(v.Type))
 		p.From.Type = obj.TYPE_REG
 		p.From.Reg = v.Args[0].Reg()
-		ssagen.AddrAuto(&p.To, v)
+		gc.AddrAuto(&p.To, v)
 	case ssa.OpAMD64LoweredHasCPUFeature:
 		p := s.Prog(x86.AMOVBQZX)
 		p.From.Type = obj.TYPE_MEM
-		ssagen.AddAux(&p.From, v)
+		gc.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpAMD64LoweredGetClosurePtr:
 		// Closure pointer is DX.
-		ssagen.CheckLoweredGetClosurePtr(v)
+		gc.CheckLoweredGetClosurePtr(v)
 	case ssa.OpAMD64LoweredGetG:
-		if base.Flag.ABIWrap {
-			v.Fatalf("LoweredGetG should not appear in new ABI")
-		}
 		r := v.Reg()
-		getgFromTLS(s, r)
-	case ssa.OpAMD64CALLstatic:
-		if s.ABI == obj.ABI0 && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABIInternal {
-			// zeroing X15 when entering ABIInternal from ABI0
-			opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
-			// set G register from TLS
-			getgFromTLS(s, x86.REG_R14)
+		// See the comments in cmd/internal/obj/x86/obj6.go
+		// near CanUse1InsnTLS for a detailed explanation of these instructions.
+		if x86.CanUse1InsnTLS(gc.Ctxt) {
+			// MOVQ (TLS), r
+			p := s.Prog(x86.AMOVQ)
+			p.From.Type = obj.TYPE_MEM
+			p.From.Reg = x86.REG_TLS
+			p.To.Type = obj.TYPE_REG
+			p.To.Reg = r
+		} else {
+			// MOVQ TLS, r
+			// MOVQ (r)(TLS*1), r
+			p := s.Prog(x86.AMOVQ)
+			p.From.Type = obj.TYPE_REG
+			p.From.Reg = x86.REG_TLS
+			p.To.Type = obj.TYPE_REG
+			p.To.Reg = r
+			q := s.Prog(x86.AMOVQ)
+			q.From.Type = obj.TYPE_MEM
+			q.From.Reg = r
+			q.From.Index = x86.REG_TLS
+			q.From.Scale = 1
+			q.To.Type = obj.TYPE_REG
+			q.To.Reg = r
 		}
-		s.Call(v)
-		if s.ABI == obj.ABIInternal && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABI0 {
-			// zeroing X15 when entering ABIInternal from ABI0
-			opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
-			// set G register from TLS
-			getgFromTLS(s, x86.REG_R14)
-		}
-	case ssa.OpAMD64CALLclosure, ssa.OpAMD64CALLinter:
+	case ssa.OpAMD64CALLstatic, ssa.OpAMD64CALLclosure, ssa.OpAMD64CALLinter:
 		s.Call(v)
 
 	case ssa.OpAMD64LoweredGetCallerPC:
@@ -1050,12 +1012,12 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 	case ssa.OpAMD64LoweredGetCallerSP:
 		// caller's SP is the address of the first arg
 		mov := x86.AMOVQ
-		if types.PtrSize == 4 {
+		if gc.Widthptr == 4 {
 			mov = x86.AMOVL
 		}
 		p := s.Prog(mov)
 		p.From.Type = obj.TYPE_ADDR
-		p.From.Offset = -base.Ctxt.FixedFrameSize() // 0 on amd64, just to be consistent with other architectures
+		p.From.Offset = -gc.Ctxt.FixedFrameSize() // 0 on amd64, just to be consistent with other architectures
 		p.From.Name = obj.NAME_PARAM
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
@@ -1065,14 +1027,14 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
 		// arg0 is in DI. Set sym to match where regalloc put arg1.
-		p.To.Sym = ssagen.GCWriteBarrierReg[v.Args[1].Reg()]
+		p.To.Sym = gc.GCWriteBarrierReg[v.Args[1].Reg()]
 
 	case ssa.OpAMD64LoweredPanicBoundsA, ssa.OpAMD64LoweredPanicBoundsB, ssa.OpAMD64LoweredPanicBoundsC:
 		p := s.Prog(obj.ACALL)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
-		s.UseArgs(int64(2 * types.PtrSize)) // space used in callee args area by assembly stubs
+		p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
+		s.UseArgs(int64(2 * gc.Widthptr)) // space used in callee args area by assembly stubs
 
 	case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL,
 		ssa.OpAMD64BSWAPQ, ssa.OpAMD64BSWAPL,
@@ -1153,7 +1115,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p := s.Prog(v.Op.Asm())
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		ssagen.AddAux(&p.To, v)
+		gc.AddAux(&p.To, v)
 
 	case ssa.OpAMD64SETNEF:
 		p := s.Prog(v.Op.Asm())
@@ -1202,14 +1164,14 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		if logopt.Enabled() {
 			logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
 		}
-		if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
-			base.WarnfAt(v.Pos, "generated nil check")
+		if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+			gc.Warnl(v.Pos, "generated nil check")
 		}
 	case ssa.OpAMD64MOVBatomicload, ssa.OpAMD64MOVLatomicload, ssa.OpAMD64MOVQatomicload:
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		ssagen.AddAux(&p.From, v)
+		gc.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg0()
 	case ssa.OpAMD64XCHGB, ssa.OpAMD64XCHGL, ssa.OpAMD64XCHGQ:
@@ -1222,7 +1184,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p.From.Reg = r
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[1].Reg()
-		ssagen.AddAux(&p.To, v)
+		gc.AddAux(&p.To, v)
 	case ssa.OpAMD64XADDLlock, ssa.OpAMD64XADDQlock:
 		r := v.Reg0()
 		if r != v.Args[0].Reg() {
@@ -1234,7 +1196,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p.From.Reg = r
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[1].Reg()
-		ssagen.AddAux(&p.To, v)
+		gc.AddAux(&p.To, v)
 	case ssa.OpAMD64CMPXCHGLlock, ssa.OpAMD64CMPXCHGQlock:
 		if v.Args[1].Reg() != x86.REG_AX {
 			v.Fatalf("input[1] not in AX %s", v.LongString())
@@ -1245,7 +1207,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p.From.Reg = v.Args[2].Reg()
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		ssagen.AddAux(&p.To, v)
+		gc.AddAux(&p.To, v)
 		p = s.Prog(x86.ASETEQ)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg0()
@@ -1256,20 +1218,20 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p.From.Reg = v.Args[1].Reg()
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		ssagen.AddAux(&p.To, v)
+		gc.AddAux(&p.To, v)
 	case ssa.OpClobber:
 		p := s.Prog(x86.AMOVL)
 		p.From.Type = obj.TYPE_CONST
 		p.From.Offset = 0xdeaddead
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = x86.REG_SP
-		ssagen.AddAux(&p.To, v)
+		gc.AddAux(&p.To, v)
 		p = s.Prog(x86.AMOVL)
 		p.From.Type = obj.TYPE_CONST
 		p.From.Offset = 0xdeaddead
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = x86.REG_SP
-		ssagen.AddAux(&p.To, v)
+		gc.AddAux(&p.To, v)
 		p.To.Offset += 4
 	default:
 		v.Fatalf("genValue not implemented: %s", v.LongString())
@@ -1295,22 +1257,22 @@ var blockJump = [...]struct {
 	ssa.BlockAMD64NAN: {x86.AJPS, x86.AJPC},
 }
 
-var eqfJumps = [2][2]ssagen.IndexJump{
+var eqfJumps = [2][2]gc.IndexJump{
 	{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPS, Index: 1}}, // next == b.Succs[0]
 	{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPC, Index: 0}}, // next == b.Succs[1]
 }
-var nefJumps = [2][2]ssagen.IndexJump{
+var nefJumps = [2][2]gc.IndexJump{
 	{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPC, Index: 1}}, // next == b.Succs[0]
 	{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPS, Index: 0}}, // next == b.Succs[1]
 }
 
-func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
+func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
 	switch b.Kind {
 	case ssa.BlockPlain:
 		if b.Succs[0].Block() != next {
 			p := s.Prog(obj.AJMP)
 			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
 		}
 	case ssa.BlockDefer:
 		// defer returns in rax:
@@ -1323,22 +1285,16 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
 		p.To.Reg = x86.REG_AX
 		p = s.Prog(x86.AJNE)
 		p.To.Type = obj.TYPE_BRANCH
-		s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
 		if b.Succs[0].Block() != next {
 			p := s.Prog(obj.AJMP)
 			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
 		}
 	case ssa.BlockExit:
 	case ssa.BlockRet:
 		s.Prog(obj.ARET)
 	case ssa.BlockRetJmp:
-		if s.ABI == obj.ABI0 && b.Aux.(*obj.LSym).ABI() == obj.ABIInternal {
-			// zeroing X15 when entering ABIInternal from ABI0
-			opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
-			// set G register from TLS
-			getgFromTLS(s, x86.REG_R14)
-		}
 		p := s.Prog(obj.ARET)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
 
@@ -5,13 +5,13 @@
 package arm
 
 import (
+	"cmd/compile/internal/gc"
 	"cmd/compile/internal/ssa"
-	"cmd/compile/internal/ssagen"
 	"cmd/internal/obj/arm"
 	"cmd/internal/objabi"
 )
 
-func Init(arch *ssagen.ArchInfo) {
+func Init(arch *gc.Arch) {
 	arch.LinkArch = &arm.Linkarm
 	arch.REGSP = arm.REGSP
 	arch.MAXWIDTH = (1 << 32) - 1
@@ -20,7 +20,7 @@ func Init(arch *ssagen.ArchInfo) {
 	arch.Ginsnop = ginsnop
 	arch.Ginsnopdefer = ginsnop
 
-	arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
+	arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
 	arch.SSAGenValue = ssaGenValue
 	arch.SSAGenBlock = ssaGenBlock
 }
 
@@ -5,51 +5,49 @@
|
||||
package arm
|
||||
|
||||
import (
|
||||
"cmd/compile/internal/ir"
|
||||
"cmd/compile/internal/objw"
|
||||
"cmd/compile/internal/types"
|
||||
"cmd/compile/internal/gc"
|
||||
"cmd/internal/obj"
|
||||
"cmd/internal/obj/arm"
|
||||
)
|
||||
|
||||
func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, r0 *uint32) *obj.Prog {
|
||||
func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, r0 *uint32) *obj.Prog {
|
||||
if cnt == 0 {
|
||||
return p
|
||||
}
|
||||
if *r0 == 0 {
|
||||
p = pp.Append(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
|
||||
p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
|
||||
*r0 = 1
|
||||
}
|
||||
|
||||
if cnt < int64(4*types.PtrSize) {
|
||||
for i := int64(0); i < cnt; i += int64(types.PtrSize) {
|
||||
p = pp.Append(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, 4+off+i)
|
||||
if cnt < int64(4*gc.Widthptr) {
|
||||
for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
|
||||
p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, 4+off+i)
|
||||
}
|
||||
} else if cnt <= int64(128*types.PtrSize) {
|
||||
p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
|
||||
} else if cnt <= int64(128*gc.Widthptr) {
|
||||
p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
|
||||
p.Reg = arm.REGSP
|
||||
p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
|
||||
p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
|
||||
p.To.Name = obj.NAME_EXTERN
|
||||
p.To.Sym = ir.Syms.Duffzero
|
||||
p.To.Offset = 4 * (128 - cnt/int64(types.PtrSize))
|
||||
p.To.Sym = gc.Duffzero
|
||||
p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
|
||||
} else {
|
||||
p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
|
||||
p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
|
||||
p.Reg = arm.REGSP
|
||||
p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, arm.REG_R2, 0)
|
||||
p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, arm.REG_R2, 0)
|
||||
p.Reg = arm.REG_R1
|
||||
p = pp.Append(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
|
||||
p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
|
||||
p1 := p
|
||||
p.Scond |= arm.C_PBIT
|
||||
p = pp.Append(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
|
||||
p = pp.Appendpp(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
|
||||
p.Reg = arm.REG_R2
|
||||
p = pp.Append(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
|
||||
p.To.SetTarget(p1)
|
||||
p = pp.Appendpp(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
|
||||
gc.Patch(p, p1)
|
||||
}
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func ginsnop(pp *objw.Progs) *obj.Prog {
|
||||
func ginsnop(pp *gc.Progs) *obj.Prog {
|
||||
p := pp.Prog(arm.AAND)
|
||||
p.From.Type = obj.TYPE_REG
|
||||
p.From.Reg = arm.REG_R0
|
||||
|
||||
@@ -9,11 +9,9 @@ import (
|
||||
"math"
|
||||
"math/bits"
|
||||
|
||||
"cmd/compile/internal/base"
|
||||
"cmd/compile/internal/ir"
|
||||
"cmd/compile/internal/gc"
|
||||
"cmd/compile/internal/logopt"
|
||||
"cmd/compile/internal/ssa"
|
||||
"cmd/compile/internal/ssagen"
|
||||
"cmd/compile/internal/types"
|
||||
"cmd/internal/obj"
|
||||
"cmd/internal/obj/arm"
|
||||
@@ -93,7 +91,7 @@ func makeshift(reg int16, typ int64, s int64) shift {
|
||||
}
|
||||
|
||||
// genshift generates a Prog for r = r0 op (r1 shifted by n)
|
||||
func genshift(s *ssagen.State, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
|
||||
func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
|
||||
p := s.Prog(as)
|
||||
p.From.Type = obj.TYPE_SHIFT
|
||||
p.From.Offset = int64(makeshift(r1, typ, n))
|
||||
@@ -111,7 +109,7 @@ func makeregshift(r1 int16, typ int64, r2 int16) shift {
|
||||
}
|
||||
|
||||
// genregshift generates a Prog for r = r0 op (r1 shifted by r2)
|
||||
func genregshift(s *ssagen.State, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
|
||||
func genregshift(s *gc.SSAGenState, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
|
||||
p := s.Prog(as)
|
||||
p.From.Type = obj.TYPE_SHIFT
|
||||
p.From.Offset = int64(makeregshift(r1, typ, r2))
|
||||
@@ -145,7 +143,7 @@ func getBFC(v uint32) (uint32, uint32) {
|
||||
return 0xffffffff, 0
|
||||
}
|
||||
|
||||
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
|
||||
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
|
||||
switch v.Op {
|
||||
case ssa.OpCopy, ssa.OpARMMOVWreg:
|
||||
if v.Type.IsMemory() {
|
||||
@@ -183,7 +181,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
|
||||
return
|
||||
}
|
||||
p := s.Prog(loadByType(v.Type))
|
||||
ssagen.AddrAuto(&p.From, v.Args[0])
|
||||
gc.AddrAuto(&p.From, v.Args[0])
|
||||
p.To.Type = obj.TYPE_REG
|
||||
p.To.Reg = v.Reg()
|
||||
case ssa.OpStoreReg:
|
||||
@@ -194,7 +192,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
|
||||
p := s.Prog(storeByType(v.Type))
|
||||
p.From.Type = obj.TYPE_REG
|
||||
p.From.Reg = v.Args[0].Reg()
|
||||
ssagen.AddrAuto(&p.To, v)
|
||||
gc.AddrAuto(&p.To, v)
|
||||
case ssa.OpARMADD,
|
||||
ssa.OpARMADC,
|
||||
ssa.OpARMSUB,
|
||||
@@ -545,10 +543,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
|
||||
v.Fatalf("aux is of unknown type %T", v.Aux)
|
||||
case *obj.LSym:
|
||||
wantreg = "SB"
|
||||
ssagen.AddAux(&p.From, v)
|
||||
case *ir.Name:
|
||||
gc.AddAux(&p.From, v)
|
||||
case *gc.Node:
|
||||
wantreg = "SP"
|
||||
ssagen.AddAux(&p.From, v)
|
||||
gc.AddAux(&p.From, v)
|
||||
case nil:
|
||||
// No sym, just MOVW $off(SP), R
|
||||
wantreg = "SP"
|
||||
@@ -568,7 +566,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
|
||||
p := s.Prog(v.Op.Asm())
|
||||
p.From.Type = obj.TYPE_MEM
|
||||
p.From.Reg = v.Args[0].Reg()
|
||||
ssagen.AddAux(&p.From, v)
|
||||
gc.AddAux(&p.From, v)
|
||||
p.To.Type = obj.TYPE_REG
|
||||
p.To.Reg = v.Reg()
|
||||
case ssa.OpARMMOVBstore,
|
||||
@@ -581,7 +579,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
|
||||
p.From.Reg = v.Args[1].Reg()
|
||||
p.To.Type = obj.TYPE_MEM
|
||||
p.To.Reg = v.Args[0].Reg()
|
||||
ssagen.AddAux(&p.To, v)
|
||||
gc.AddAux(&p.To, v)
|
||||
case ssa.OpARMMOVWloadidx, ssa.OpARMMOVBUloadidx, ssa.OpARMMOVBloadidx, ssa.OpARMMOVHUloadidx, ssa.OpARMMOVHloadidx:
|
||||
// this is just shift 0 bits
|
||||
fallthrough
|
||||
@@ -702,7 +700,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
|
||||
p := s.Prog(obj.ACALL)
|
||||
p.To.Type = obj.TYPE_MEM
|
||||
p.To.Name = obj.NAME_EXTERN
|
||||
p.To.Sym = ir.Syms.Udiv
|
||||
p.To.Sym = gc.Udiv
|
||||
case ssa.OpARMLoweredWB:
|
||||
p := s.Prog(obj.ACALL)
|
||||
p.To.Type = obj.TYPE_MEM
|
||||
@@ -712,39 +710,39 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
|
||||
p := s.Prog(obj.ACALL)
|
||||
p.To.Type = obj.TYPE_MEM
|
||||
p.To.Name = obj.NAME_EXTERN
|
||||
p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
|
||||
p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
|
||||
s.UseArgs(8) // space used in callee args area by assembly stubs
|
||||
case ssa.OpARMLoweredPanicExtendA, ssa.OpARMLoweredPanicExtendB, ssa.OpARMLoweredPanicExtendC:
|
||||
p := s.Prog(obj.ACALL)
|
||||
p.To.Type = obj.TYPE_MEM
|
||||
p.To.Name = obj.NAME_EXTERN
|
||||
p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt]
|
||||
p.To.Sym = gc.ExtendCheckFunc[v.AuxInt]
|
||||
s.UseArgs(12) // space used in callee args area by assembly stubs
|
||||
case ssa.OpARMDUFFZERO:
|
||||
p := s.Prog(obj.ADUFFZERO)
|
||||
p.To.Type = obj.TYPE_MEM
|
||||
p.To.Name = obj.NAME_EXTERN
|
||||
p.To.Sym = ir.Syms.Duffzero
|
||||
p.To.Sym = gc.Duffzero
|
||||
p.To.Offset = v.AuxInt
|
||||
case ssa.OpARMDUFFCOPY:
|
||||
p := s.Prog(obj.ADUFFCOPY)
|
||||
p.To.Type = obj.TYPE_MEM
|
||||
p.To.Name = obj.NAME_EXTERN
|
||||
p.To.Sym = ir.Syms.Duffcopy
|
||||
p.To.Sym = gc.Duffcopy
|
||||
p.To.Offset = v.AuxInt
|
||||
case ssa.OpARMLoweredNilCheck:
|
||||
// Issue a load which will fault if arg is nil.
|
||||
p := s.Prog(arm.AMOVB)
|
||||
p.From.Type = obj.TYPE_MEM
|
||||
p.From.Reg = v.Args[0].Reg()
|
||||
ssagen.AddAux(&p.From, v)
|
||||
gc.AddAux(&p.From, v)
|
||||
p.To.Type = obj.TYPE_REG
|
||||
p.To.Reg = arm.REGTMP
|
||||
if logopt.Enabled() {
|
||||
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
|
||||
}
|
||||
if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
|
||||
base.WarnfAt(v.Pos, "generated nil check")
|
||||
if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
|
||||
gc.Warnl(v.Pos, "generated nil check")
|
||||
}
|
||||
case ssa.OpARMLoweredZero:
|
||||
// MOVW.P Rarg2, 4(R1)
|
||||
@@ -779,7 +777,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
|
||||
p2.Reg = arm.REG_R1
|
||||
p3 := s.Prog(arm.ABLE)
|
||||
p3.To.Type = obj.TYPE_BRANCH
|
||||
p3.To.SetTarget(p)
|
||||
gc.Patch(p3, p)
|
||||
case ssa.OpARMLoweredMove:
|
||||
// MOVW.P 4(R1), Rtmp
|
||||
// MOVW.P Rtmp, 4(R2)
|
||||
@@ -820,7 +818,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
|
||||
p3.Reg = arm.REG_R1
|
||||
p4 := s.Prog(arm.ABLE)
|
||||
p4.To.Type = obj.TYPE_BRANCH
|
||||
p4.To.SetTarget(p)
|
||||
gc.Patch(p4, p)
|
||||
case ssa.OpARMEqual,
|
||||
ssa.OpARMNotEqual,
|
||||
ssa.OpARMLessThan,
|
||||
@@ -846,12 +844,12 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
|
||||
p.To.Reg = v.Reg()
|
||||
case ssa.OpARMLoweredGetClosurePtr:
|
||||
// Closure pointer is R7 (arm.REGCTXT).
|
||||
ssagen.CheckLoweredGetClosurePtr(v)
|
||||
gc.CheckLoweredGetClosurePtr(v)
|
||||
case ssa.OpARMLoweredGetCallerSP:
|
||||
// caller's SP is FixedFrameSize below the address of the first arg
|
||||
p := s.Prog(arm.AMOVW)
|
||||
p.From.Type = obj.TYPE_ADDR
|
||||
p.From.Offset = -base.Ctxt.FixedFrameSize()
|
||||
p.From.Offset = -gc.Ctxt.FixedFrameSize()
|
||||
p.From.Name = obj.NAME_PARAM
|
||||
p.To.Type = obj.TYPE_REG
|
||||
p.To.Reg = v.Reg()
|
||||
@@ -901,24 +899,24 @@ var blockJump = map[ssa.BlockKind]struct {
|
||||
}
|
||||
|
||||
// To model a 'LEnoov' ('<=' without overflow checking) branching
|
||||
var leJumps = [2][2]ssagen.IndexJump{
|
||||
var leJumps = [2][2]gc.IndexJump{
|
||||
{{Jump: arm.ABEQ, Index: 0}, {Jump: arm.ABPL, Index: 1}}, // next == b.Succs[0]
|
||||
{{Jump: arm.ABMI, Index: 0}, {Jump: arm.ABEQ, Index: 0}}, // next == b.Succs[1]
|
||||
}
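// A rough editorial sketch (not part of either revision) of how an
// IndexJump pair lowers: with next == b.Succs[0], leJumps row 0 above
// emits
//
//	BEQ <Succs[0]>   // Z set: equal, so <= holds
//	BPL <Succs[1]>   // N clear and not equal: > holds
//
// and execution falls through to next for the remaining negative
// (i.e. <) case.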

// To model a 'GTnoov' ('>' without overflow checking) branching
var gtJumps = [2][2]ssagen.IndexJump{
var gtJumps = [2][2]gc.IndexJump{
{{Jump: arm.ABMI, Index: 1}, {Jump: arm.ABEQ, Index: 1}}, // next == b.Succs[0]
{{Jump: arm.ABEQ, Index: 1}, {Jump: arm.ABPL, Index: 0}}, // next == b.Succs[1]
}

func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}

case ssa.BlockDefer:
@@ -931,11 +929,11 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
p.Reg = arm.REG_R0
p = s.Prog(arm.ABNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}

case ssa.BlockExit:

@@ -5,12 +5,12 @@
package arm64

import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/internal/obj/arm64"
)

func Init(arch *ssagen.ArchInfo) {
func Init(arch *gc.Arch) {
arch.LinkArch = &arm64.Linkarm64
arch.REGSP = arm64.REGSP
arch.MAXWIDTH = 1 << 50
@@ -20,7 +20,7 @@ func Init(arch *ssagen.ArchInfo) {
arch.Ginsnop = ginsnop
arch.Ginsnopdefer = ginsnop

arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock
}

@@ -5,9 +5,7 @@
package arm64

import (
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/types"
"cmd/compile/internal/gc"
"cmd/internal/obj"
"cmd/internal/obj/arm64"
"cmd/internal/objabi"
@@ -24,52 +22,52 @@ func padframe(frame int64) int64 {
return frame
}

func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
if cnt == 0 {
return p
}
if cnt < int64(4*types.PtrSize) {
for i := int64(0); i < cnt; i += int64(types.PtrSize) {
p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off+i)
if cnt < int64(4*gc.Widthptr) {
for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off+i)
}
} else if cnt <= int64(128*types.PtrSize) && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend
if cnt%(2*int64(types.PtrSize)) != 0 {
p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off)
off += int64(types.PtrSize)
cnt -= int64(types.PtrSize)
} else if cnt <= int64(128*gc.Widthptr) && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend
if cnt%(2*int64(gc.Widthptr)) != 0 {
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off)
off += int64(gc.Widthptr)
cnt -= int64(gc.Widthptr)
}
p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REG_R20, 0)
p = pp.Append(p, arm64.AADD, obj.TYPE_CONST, 0, 8+off, obj.TYPE_REG, arm64.REG_R20, 0)
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REG_R20, 0)
p = pp.Appendpp(p, arm64.AADD, obj.TYPE_CONST, 0, 8+off, obj.TYPE_REG, arm64.REG_R20, 0)
p.Reg = arm64.REG_R20
p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p.To.Name = obj.NAME_EXTERN
p.To.Sym = ir.Syms.Duffzero
p.To.Offset = 4 * (64 - cnt/(2*int64(types.PtrSize)))
p.To.Sym = gc.Duffzero
p.To.Offset = 4 * (64 - cnt/(2*int64(gc.Widthptr)))
} else {
// Not using REGTMP, so this is async preemptible (async preemption clobbers REGTMP).
// We are at the function entry, where no register is live, so it is okay to clobber
// other registers
const rtmp = arm64.REG_R20
p = pp.Append(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, rtmp, 0)
p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
p = pp.Append(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT1, 0)
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, rtmp, 0)
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
p = pp.Appendpp(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT1, 0)
p.Reg = arm64.REGRT1
p = pp.Append(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, rtmp, 0)
p = pp.Append(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT2, 0)
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, rtmp, 0)
p = pp.Appendpp(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT2, 0)
p.Reg = arm64.REGRT1
p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(types.PtrSize))
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(gc.Widthptr))
p.Scond = arm64.C_XPRE
p1 := p
p = pp.Append(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0)
p = pp.Appendpp(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0)
p.Reg = arm64.REGRT2
p = pp.Append(p, arm64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
p.To.SetTarget(p1)
p = pp.Appendpp(p, arm64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
gc.Patch(p, p1)
}

return p
}

func ginsnop(pp *objw.Progs) *obj.Prog {
func ginsnop(pp *gc.Progs) *obj.Prog {
p := pp.Prog(arm64.AHINT)
p.From.Type = obj.TYPE_CONST
return p

@@ -7,11 +7,9 @@ package arm64
import (
"math"

"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/gc"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/arm64"
@@ -83,7 +81,7 @@ func makeshift(reg int16, typ int64, s int64) int64 {
}

// genshift generates a Prog for r = r0 op (r1 shifted by n)
func genshift(s *ssagen.State, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
p := s.Prog(as)
p.From.Type = obj.TYPE_SHIFT
p.From.Offset = makeshift(r1, typ, n)
@@ -112,7 +110,7 @@ func genIndexedOperand(v *ssa.Value) obj.Addr {
return mop
}

func ssaGenValue(s *ssagen.State, v *ssa.Value) {
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
switch v.Op {
case ssa.OpCopy, ssa.OpARM64MOVDreg:
if v.Type.IsMemory() {
@@ -150,7 +148,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
return
}
p := s.Prog(loadByType(v.Type))
ssagen.AddrAuto(&p.From, v.Args[0])
gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpStoreReg:
@@ -161,7 +159,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
ssagen.AddrAuto(&p.To, v)
gc.AddrAuto(&p.To, v)
case ssa.OpARM64ADD,
ssa.OpARM64SUB,
ssa.OpARM64AND,
@@ -395,10 +393,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
v.Fatalf("aux is of unknown type %T", v.Aux)
case *obj.LSym:
wantreg = "SB"
ssagen.AddAux(&p.From, v)
case *ir.Name:
gc.AddAux(&p.From, v)
case *gc.Node:
wantreg = "SP"
ssagen.AddAux(&p.From, v)
gc.AddAux(&p.From, v)
case nil:
// No sym, just MOVD $off(SP), R
wantreg = "SP"
@@ -419,7 +417,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
ssagen.AddAux(&p.From, v)
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpARM64MOVBloadidx,
@@ -446,7 +444,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
ssagen.AddAux(&p.From, v)
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
case ssa.OpARM64MOVBstore,
@@ -463,7 +461,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
ssagen.AddAux(&p.To, v)
gc.AddAux(&p.To, v)
case ssa.OpARM64MOVBstoreidx,
ssa.OpARM64MOVHstoreidx,
ssa.OpARM64MOVWstoreidx,
@@ -484,7 +482,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.From.Offset = int64(v.Args[2].Reg())
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
ssagen.AddAux(&p.To, v)
gc.AddAux(&p.To, v)
case ssa.OpARM64MOVBstorezero,
ssa.OpARM64MOVHstorezero,
ssa.OpARM64MOVWstorezero,
@@ -494,7 +492,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.From.Reg = arm64.REGZERO
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
ssagen.AddAux(&p.To, v)
gc.AddAux(&p.To, v)
case ssa.OpARM64MOVBstorezeroidx,
ssa.OpARM64MOVHstorezeroidx,
ssa.OpARM64MOVWstorezeroidx,
@@ -513,7 +511,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.From.Offset = int64(arm64.REGZERO)
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
ssagen.AddAux(&p.To, v)
gc.AddAux(&p.To, v)
case ssa.OpARM64BFI,
ssa.OpARM64BFXIL:
r := v.Reg()
@@ -582,7 +580,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p2.From.Type = obj.TYPE_REG
p2.From.Reg = arm64.REGTMP
p2.To.Type = obj.TYPE_BRANCH
p2.To.SetTarget(p)
gc.Patch(p2, p)
case ssa.OpARM64LoweredAtomicExchange64Variant,
ssa.OpARM64LoweredAtomicExchange32Variant:
swap := arm64.ASWPALD
@@ -636,7 +634,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p3.From.Type = obj.TYPE_REG
p3.From.Reg = arm64.REGTMP
p3.To.Type = obj.TYPE_BRANCH
p3.To.SetTarget(p)
gc.Patch(p3, p)
case ssa.OpARM64LoweredAtomicAdd64Variant,
ssa.OpARM64LoweredAtomicAdd32Variant:
// LDADDAL Rarg1, (Rarg0), Rout
@@ -700,13 +698,13 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p4.From.Type = obj.TYPE_REG
p4.From.Reg = arm64.REGTMP
p4.To.Type = obj.TYPE_BRANCH
p4.To.SetTarget(p)
gc.Patch(p4, p)
p5 := s.Prog(arm64.ACSET)
p5.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
p5.From.Reg = arm64.COND_EQ
p5.To.Type = obj.TYPE_REG
p5.To.Reg = out
p2.To.SetTarget(p5)
gc.Patch(p2, p5)
case ssa.OpARM64LoweredAtomicCas64Variant,
ssa.OpARM64LoweredAtomicCas32Variant:
// Rarg0: ptr
@@ -794,7 +792,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p3.From.Type = obj.TYPE_REG
p3.From.Reg = arm64.REGTMP
p3.To.Type = obj.TYPE_BRANCH
p3.To.SetTarget(p)
gc.Patch(p3, p)
case ssa.OpARM64LoweredAtomicAnd8Variant,
ssa.OpARM64LoweredAtomicAnd32Variant:
atomic_clear := arm64.ALDCLRALW
@@ -961,7 +959,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p := s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = ir.Syms.Duffzero
p.To.Sym = gc.Duffzero
p.To.Offset = v.AuxInt
case ssa.OpARM64LoweredZero:
// STP.P (ZR,ZR), 16(R16)
@@ -982,12 +980,12 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p2.Reg = arm64.REG_R16
p3 := s.Prog(arm64.ABLE)
p3.To.Type = obj.TYPE_BRANCH
p3.To.SetTarget(p)
gc.Patch(p3, p)
case ssa.OpARM64DUFFCOPY:
p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = ir.Syms.Duffcopy
p.To.Sym = gc.Duffcopy
p.To.Offset = v.AuxInt
case ssa.OpARM64LoweredMove:
// MOVD.P 8(R16), Rtmp
@@ -1015,7 +1013,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p3.Reg = arm64.REG_R16
p4 := s.Prog(arm64.ABLE)
p4.To.Type = obj.TYPE_BRANCH
p4.To.SetTarget(p)
gc.Patch(p4, p)
case ssa.OpARM64CALLstatic, ssa.OpARM64CALLclosure, ssa.OpARM64CALLinter:
s.Call(v)
case ssa.OpARM64LoweredWB:
@@ -1027,21 +1025,21 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
s.UseArgs(16) // space used in callee args area by assembly stubs
case ssa.OpARM64LoweredNilCheck:
// Issue a load which will fault if arg is nil.
p := s.Prog(arm64.AMOVB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
ssagen.AddAux(&p.From, v)
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = arm64.REGTMP
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Line==1 in generated wrappers
base.WarnfAt(v.Pos, "generated nil check")
if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Line==1 in generated wrappers
gc.Warnl(v.Pos, "generated nil check")
}
case ssa.OpARM64Equal,
ssa.OpARM64NotEqual,
@@ -1069,12 +1067,12 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Reg = v.Reg()
case ssa.OpARM64LoweredGetClosurePtr:
// Closure pointer is R26 (arm64.REGCTXT).
ssagen.CheckLoweredGetClosurePtr(v)
gc.CheckLoweredGetClosurePtr(v)
case ssa.OpARM64LoweredGetCallerSP:
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(arm64.AMOVD)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = -base.Ctxt.FixedFrameSize()
p.From.Offset = -gc.Ctxt.FixedFrameSize()
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@@ -1144,24 +1142,24 @@ var blockJump = map[ssa.BlockKind]struct {
}

// To model a 'LEnoov' ('<=' without overflow checking) branching
var leJumps = [2][2]ssagen.IndexJump{
var leJumps = [2][2]gc.IndexJump{
{{Jump: arm64.ABEQ, Index: 0}, {Jump: arm64.ABPL, Index: 1}}, // next == b.Succs[0]
{{Jump: arm64.ABMI, Index: 0}, {Jump: arm64.ABEQ, Index: 0}}, // next == b.Succs[1]
}

// To model a 'GTnoov' ('>' without overflow checking) branching
var gtJumps = [2][2]ssagen.IndexJump{
var gtJumps = [2][2]gc.IndexJump{
{{Jump: arm64.ABMI, Index: 1}, {Jump: arm64.ABEQ, Index: 1}}, // next == b.Succs[0]
{{Jump: arm64.ABEQ, Index: 1}, {Jump: arm64.ABPL, Index: 0}}, // next == b.Succs[1]
}

func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}

case ssa.BlockDefer:
@@ -1174,11 +1172,11 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
p.Reg = arm64.REG_R0
p = s.Prog(arm64.ABNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}

case ssa.BlockExit:

@@ -1,194 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Debug arguments, set by -d flag.

package base

import (
"fmt"
"log"
"os"
"reflect"
"strconv"
"strings"

"cmd/internal/objabi"
)

// Debug holds the parsed debugging configuration values.
var Debug = DebugFlags{
Fieldtrack: &objabi.Fieldtrack_enabled,
}

// DebugFlags defines the debugging configuration values (see var Debug).
// Each struct field is a different value, named for the lower-case of the field name.
// Each field must be an int or string and must have a `help` struct tag.
//
// The -d option takes a comma-separated list of settings.
// Each setting is name=value; for ints, name is short for name=1.
type DebugFlags struct {
Append int `help:"print information about append compilation"`
Checkptr int `help:"instrument unsafe pointer conversions"`
Closure int `help:"print information about closure compilation"`
DclStack int `help:"run internal dclstack check"`
Defer int `help:"print information about defer compilation"`
DisableNil int `help:"disable nil checks"`
DumpPtrs int `help:"show Node pointers values in dump output"`
DwarfInl int `help:"print information about DWARF inlined function creation"`
Export int `help:"print export data"`
Fieldtrack *int `help:"enable field tracking"`
GCProg int `help:"print dump of GC programs"`
Libfuzzer int `help:"enable coverage instrumentation for libfuzzer"`
LocationLists int `help:"print information about DWARF location list creation"`
Nil int `help:"print information about nil checks"`
PCTab string `help:"print named pc-value table"`
Panic int `help:"show all compiler panics"`
Slice int `help:"print information about slice compilation"`
SoftFloat int `help:"force compiler to emit soft-float code"`
TypeAssert int `help:"print information about type assertion inlining"`
TypecheckInl int `help:"eager typechecking of inline function bodies"`
WB int `help:"print information about write barriers"`
ABIWrap int `help:"print information about ABI wrapper generation"`

any bool // set when any of the values have been set
}

// Any reports whether any of the debug flags have been set.
func (d *DebugFlags) Any() bool { return d.any }

type debugField struct {
name string
help string
val interface{} // *int or *string
}

var debugTab []debugField

func init() {
v := reflect.ValueOf(&Debug).Elem()
t := v.Type()
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
if f.Name == "any" {
continue
}
name := strings.ToLower(f.Name)
help := f.Tag.Get("help")
if help == "" {
panic(fmt.Sprintf("base.Debug.%s is missing help text", f.Name))
}
ptr := v.Field(i).Addr().Interface()
switch ptr.(type) {
default:
panic(fmt.Sprintf("base.Debug.%s has invalid type %v (must be int or string)", f.Name, f.Type))
case *int, *string:
// ok
case **int:
ptr = *ptr.(**int) // record the *int itself
}
debugTab = append(debugTab, debugField{name, help, ptr})
}
}

// DebugSSA is called to set a -d ssa/... option.
// If nil, those options are reported as invalid options.
// If DebugSSA returns a non-empty string, that text is reported as a compiler error.
var DebugSSA func(phase, flag string, val int, valString string) string

// parseDebug parses the -d debug string argument.
func parseDebug(debugstr string) {
// parse -d argument
if debugstr == "" {
return
}
Debug.any = true
Split:
for _, name := range strings.Split(debugstr, ",") {
if name == "" {
continue
}
// display help about the -d option itself and quit
if name == "help" {
fmt.Print(debugHelpHeader)
maxLen := len("ssa/help")
for _, t := range debugTab {
if len(t.name) > maxLen {
maxLen = len(t.name)
}
}
for _, t := range debugTab {
fmt.Printf("\t%-*s\t%s\n", maxLen, t.name, t.help)
}
// ssa options have their own help
fmt.Printf("\t%-*s\t%s\n", maxLen, "ssa/help", "print help about SSA debugging")
fmt.Print(debugHelpFooter)
os.Exit(0)
}
val, valstring, haveInt := 1, "", true
if i := strings.IndexAny(name, "=:"); i >= 0 {
var err error
name, valstring = name[:i], name[i+1:]
val, err = strconv.Atoi(valstring)
if err != nil {
val, haveInt = 1, false
}
}
for _, t := range debugTab {
if t.name != name {
continue
}
switch vp := t.val.(type) {
case nil:
// Ignore
case *string:
*vp = valstring
case *int:
if !haveInt {
log.Fatalf("invalid debug value %v", name)
}
*vp = val
default:
panic("bad debugtab type")
}
continue Split
}
// special case for ssa for now
if DebugSSA != nil && strings.HasPrefix(name, "ssa/") {
// expect form ssa/phase/flag
// e.g. -d=ssa/generic_cse/time
// _ in phase name also matches space
phase := name[4:]
flag := "debug" // default flag is debug
if i := strings.Index(phase, "/"); i >= 0 {
flag = phase[i+1:]
phase = phase[:i]
}
err := DebugSSA(phase, flag, val, valstring)
if err != "" {
log.Fatalf(err)
}
continue Split
}
log.Fatalf("unknown debug key -d %s\n", name)
}
}
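// Illustrative example (editorial sketch, not in either revision): each
// comma-separated entry above is split on '=' or ':', and a bare name
// counts as 1, so
//
//	parseDebug("checkptr=2,nil")
//
// leaves Debug.Checkptr == 2 and Debug.Nil == 1.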

const debugHelpHeader = `usage: -d arg[,arg]* and arg is <key>[=<value>]

<key> is one of:

`

const debugHelpFooter = `
<value> is key-specific.

Key "checkptr" supports values:
"0": instrumentation disabled
"1": conversions involving unsafe.Pointer are instrumented
"2": conversions to unsafe.Pointer force heap allocation

Key "pctab" supports values:
"pctospadj", "pctofile", "pctoline", "pctoinline", "pctopcdata"
`
@@ -1,459 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package base

import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"reflect"
"runtime"
"strings"

"cmd/internal/objabi"
"cmd/internal/sys"
)

func usage() {
fmt.Fprintf(os.Stderr, "usage: compile [options] file.go...\n")
objabi.Flagprint(os.Stderr)
Exit(2)
}

// Flag holds the parsed command-line flags.
// See ParseFlags for non-zero defaults.
var Flag CmdFlags

// A CountFlag is a counting integer flag.
// It accepts -name=value to set the value directly,
// but it also accepts -name with no =value to increment the count.
type CountFlag int
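// Illustrative example (editorial sketch, not in either revision):
// LowerM below is a CountFlag, so both spellings set the same count:
//
//	compile -m -m file.go   // Flag.LowerM == 2
//	compile -m=2 file.go    // Flag.LowerM == 2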

// CmdFlags defines the command-line flags (see var Flag).
// Each struct field is a different flag, by default named for the lower-case of the field name.
// If the flag name is a single letter, the default flag name is left upper-case.
// If the flag name is "Lower" followed by a single letter, the default flag name is the lower-case of the last letter.
//
// If this default flag name can't be made right, the `flag` struct tag can be used to replace it,
// but this should be done only in exceptional circumstances: it helps everyone if the flag name
// is obvious from the field name when the flag is used elsewhere in the compiler sources.
// The `flag:"-"` struct tag makes a field invisible to the flag logic and should also be used sparingly.
//
// Each field must have a `help` struct tag giving the flag help message.
//
// The allowed field types are bool, int, string, pointers to those (for values stored elsewhere),
// CountFlag (for a counting flag), and func(string) (for a flag that uses special code for parsing).
type CmdFlags struct {
// Single letters
B CountFlag "help:\"disable bounds checking\""
C CountFlag "help:\"disable printing of columns in error messages\""
D string "help:\"set relative `path` for local imports\""
E CountFlag "help:\"debug symbol export\""
I func(string) "help:\"add `directory` to import search path\""
K CountFlag "help:\"debug missing line numbers\""
L CountFlag "help:\"show full file names in error messages\""
N CountFlag "help:\"disable optimizations\""
S CountFlag "help:\"print assembly listing\""
// V is added by objabi.AddVersionFlag
W CountFlag "help:\"debug parse tree after type checking\""

LowerC int "help:\"concurrency during compilation (1 means no concurrency)\""
LowerD func(string) "help:\"enable debugging settings; try -d help\""
LowerE CountFlag "help:\"no limit on number of errors reported\""
LowerH CountFlag "help:\"halt on error\""
LowerJ CountFlag "help:\"debug runtime-initialized variables\""
LowerL CountFlag "help:\"disable inlining\""
LowerM CountFlag "help:\"print optimization decisions\""
LowerO string "help:\"write output to `file`\""
LowerP *string "help:\"set expected package import `path`\"" // &Ctxt.Pkgpath, set below
LowerR CountFlag "help:\"debug generated wrappers\""
LowerT bool "help:\"enable tracing for debugging the compiler\""
LowerW CountFlag "help:\"debug type checking\""
LowerV *bool "help:\"increase debug verbosity\""

// Special characters
Percent int "flag:\"%\" help:\"debug non-static initializers\""
CompilingRuntime bool "flag:\"+\" help:\"compiling runtime\""

// Longer names
ABIWrap bool "help:\"enable generation of ABI wrappers\""
ABIWrapLimit int "help:\"emit at most N ABI wrappers (for debugging)\""
AsmHdr string "help:\"write assembly header to `file`\""
Bench string "help:\"append benchmark times to `file`\""
BlockProfile string "help:\"write block profile to `file`\""
BuildID string "help:\"record `id` as the build id in the export metadata\""
CPUProfile string "help:\"write cpu profile to `file`\""
Complete bool "help:\"compiling complete package (no C or assembly)\""
Dwarf bool "help:\"generate DWARF symbols\""
DwarfBASEntries *bool "help:\"use base address selection entries in DWARF\"" // &Ctxt.UseBASEntries, set below
DwarfLocationLists *bool "help:\"add location lists to DWARF in optimized mode\"" // &Ctxt.Flag_locationlists, set below
Dynlink *bool "help:\"support references to Go symbols defined in other shared libraries\"" // &Ctxt.Flag_dynlink, set below
EmbedCfg func(string) "help:\"read go:embed configuration from `file`\""
GenDwarfInl int "help:\"generate DWARF inline info records\"" // 0=disabled, 1=funcs, 2=funcs+formals/locals
GoVersion string "help:\"required version of the runtime\""
ImportCfg func(string) "help:\"read import configuration from `file`\""
ImportMap func(string) "help:\"add `definition` of the form source=actual to import map\""
InstallSuffix string "help:\"set pkg directory `suffix`\""
JSON string "help:\"version,file for JSON compiler/optimizer detail output\""
Lang string "help:\"Go language version source code expects\""
LinkObj string "help:\"write linker-specific object to `file`\""
LinkShared *bool "help:\"generate code that will be linked against Go shared libraries\"" // &Ctxt.Flag_linkshared, set below
Live CountFlag "help:\"debug liveness analysis\""
MSan bool "help:\"build code compatible with C/C++ memory sanitizer\""
MemProfile string "help:\"write memory profile to `file`\""
MemProfileRate int64 "help:\"set runtime.MemProfileRate to `rate`\""
MutexProfile string "help:\"write mutex profile to `file`\""
NoLocalImports bool "help:\"reject local (relative) imports\""
Pack bool "help:\"write to file.a instead of file.o\""
Race bool "help:\"enable race detector\""
Shared *bool "help:\"generate code that can be linked into a shared library\"" // &Ctxt.Flag_shared, set below
SmallFrames bool "help:\"reduce the size limit for stack allocated objects\"" // small stacks, to diagnose GC latency; see golang.org/issue/27732
Spectre string "help:\"enable spectre mitigations in `list` (all, index, ret)\""
Std bool "help:\"compiling standard library\""
SymABIs string "help:\"read symbol ABIs from `file`\""
TraceProfile string "help:\"write an execution trace to `file`\""
TrimPath string "help:\"remove `prefix` from recorded source file paths\""
WB bool "help:\"enable write barrier\"" // TODO: remove

// Configuration derived from flags; not a flag itself.
Cfg struct {
Embed struct { // set by -embedcfg
Patterns map[string][]string
Files map[string]string
}
ImportDirs []string // appended to by -I
ImportMap map[string]string // set by -importmap OR -importcfg
PackageFile map[string]string // set by -importcfg; nil means not in use
SpectreIndex bool // set by -spectre=index or -spectre=all
// Whether we are adding any sort of code instrumentation, such as
// when the race detector is enabled.
Instrumenting bool
}
}

// ParseFlags parses the command-line flags into Flag.
func ParseFlags() {
Flag.I = addImportDir

Flag.LowerC = 1
Flag.LowerD = parseDebug
Flag.LowerP = &Ctxt.Pkgpath
Flag.LowerV = &Ctxt.Debugvlog

Flag.ABIWrap = objabi.Regabi_enabled != 0
Flag.Dwarf = objabi.GOARCH != "wasm"
Flag.DwarfBASEntries = &Ctxt.UseBASEntries
Flag.DwarfLocationLists = &Ctxt.Flag_locationlists
*Flag.DwarfLocationLists = true
Flag.Dynlink = &Ctxt.Flag_dynlink
Flag.EmbedCfg = readEmbedCfg
Flag.GenDwarfInl = 2
Flag.ImportCfg = readImportCfg
Flag.ImportMap = addImportMap
Flag.LinkShared = &Ctxt.Flag_linkshared
Flag.Shared = &Ctxt.Flag_shared
Flag.WB = true

Flag.Cfg.ImportMap = make(map[string]string)

objabi.AddVersionFlag() // -V
registerFlags()
objabi.Flagparse(usage)

if Flag.MSan && !sys.MSanSupported(objabi.GOOS, objabi.GOARCH) {
log.Fatalf("%s/%s does not support -msan", objabi.GOOS, objabi.GOARCH)
}
if Flag.Race && !sys.RaceDetectorSupported(objabi.GOOS, objabi.GOARCH) {
log.Fatalf("%s/%s does not support -race", objabi.GOOS, objabi.GOARCH)
}
if (*Flag.Shared || *Flag.Dynlink || *Flag.LinkShared) && !Ctxt.Arch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X) {
log.Fatalf("%s/%s does not support -shared", objabi.GOOS, objabi.GOARCH)
}
parseSpectre(Flag.Spectre) // left as string for RecordFlags

Ctxt.Flag_shared = Ctxt.Flag_dynlink || Ctxt.Flag_shared
Ctxt.Flag_optimize = Flag.N == 0
Ctxt.Debugasm = int(Flag.S)

if flag.NArg() < 1 {
usage()
}

if Flag.GoVersion != "" && Flag.GoVersion != runtime.Version() {
fmt.Printf("compile: version %q does not match go tool version %q\n", runtime.Version(), Flag.GoVersion)
Exit(2)
}

if Flag.LowerO == "" {
p := flag.Arg(0)
if i := strings.LastIndex(p, "/"); i >= 0 {
p = p[i+1:]
}
if runtime.GOOS == "windows" {
if i := strings.LastIndex(p, `\`); i >= 0 {
p = p[i+1:]
}
}
if i := strings.LastIndex(p, "."); i >= 0 {
p = p[:i]
}
suffix := ".o"
if Flag.Pack {
suffix = ".a"
}
Flag.LowerO = p + suffix
}

if Flag.Race && Flag.MSan {
log.Fatal("cannot use both -race and -msan")
}
if Flag.Race || Flag.MSan {
// -race and -msan imply -d=checkptr for now.
Debug.Checkptr = 1
}

if Flag.CompilingRuntime && Flag.N != 0 {
log.Fatal("cannot disable optimizations while compiling runtime")
}
if Flag.LowerC < 1 {
log.Fatalf("-c must be at least 1, got %d", Flag.LowerC)
}
if Flag.LowerC > 1 && !concurrentBackendAllowed() {
log.Fatalf("cannot use concurrent backend compilation with provided flags; invoked as %v", os.Args)
}

if Flag.CompilingRuntime {
// Runtime can't use -d=checkptr, at least not yet.
Debug.Checkptr = 0

// Fuzzing the runtime isn't interesting either.
Debug.Libfuzzer = 0
}

// set via a -d flag
Ctxt.Debugpcln = Debug.PCTab
}

// registerFlags adds flag registrations for all the fields in Flag.
// See the comment on type CmdFlags for the rules.
func registerFlags() {
var (
boolType = reflect.TypeOf(bool(false))
intType = reflect.TypeOf(int(0))
stringType = reflect.TypeOf(string(""))
ptrBoolType = reflect.TypeOf(new(bool))
ptrIntType = reflect.TypeOf(new(int))
ptrStringType = reflect.TypeOf(new(string))
countType = reflect.TypeOf(CountFlag(0))
funcType = reflect.TypeOf((func(string))(nil))
)

v := reflect.ValueOf(&Flag).Elem()
t := v.Type()
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
if f.Name == "Cfg" {
continue
}

var name string
if len(f.Name) == 1 {
name = f.Name
} else if len(f.Name) == 6 && f.Name[:5] == "Lower" && 'A' <= f.Name[5] && f.Name[5] <= 'Z' {
name = string(rune(f.Name[5] + 'a' - 'A'))
} else {
name = strings.ToLower(f.Name)
}
if tag := f.Tag.Get("flag"); tag != "" {
name = tag
}

help := f.Tag.Get("help")
if help == "" {
panic(fmt.Sprintf("base.Flag.%s is missing help text", f.Name))
}

if k := f.Type.Kind(); (k == reflect.Ptr || k == reflect.Func) && v.Field(i).IsNil() {
panic(fmt.Sprintf("base.Flag.%s is uninitialized %v", f.Name, f.Type))
}

switch f.Type {
case boolType:
p := v.Field(i).Addr().Interface().(*bool)
flag.BoolVar(p, name, *p, help)
case intType:
p := v.Field(i).Addr().Interface().(*int)
flag.IntVar(p, name, *p, help)
case stringType:
p := v.Field(i).Addr().Interface().(*string)
flag.StringVar(p, name, *p, help)
case ptrBoolType:
p := v.Field(i).Interface().(*bool)
flag.BoolVar(p, name, *p, help)
case ptrIntType:
p := v.Field(i).Interface().(*int)
flag.IntVar(p, name, *p, help)
case ptrStringType:
p := v.Field(i).Interface().(*string)
flag.StringVar(p, name, *p, help)
case countType:
p := (*int)(v.Field(i).Addr().Interface().(*CountFlag))
objabi.Flagcount(name, help, p)
case funcType:
f := v.Field(i).Interface().(func(string))
objabi.Flagfn1(name, help, f)
}
}
}
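// Illustrative example (editorial sketch, not in either revision) of the
// naming rules applied above: field S registers as -S, LowerM registers
// as -m, and Percent registers as -% via its flag:"%" struct tag.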

// concurrentFlagOk reports whether the current compiler flags
// are compatible with concurrent compilation.
func concurrentFlagOk() bool {
// TODO(rsc): Many of these are fine. Remove them.
return Flag.Percent == 0 &&
Flag.E == 0 &&
Flag.K == 0 &&
Flag.L == 0 &&
Flag.LowerH == 0 &&
Flag.LowerJ == 0 &&
Flag.LowerM == 0 &&
Flag.LowerR == 0
}

func concurrentBackendAllowed() bool {
if !concurrentFlagOk() {
return false
}

// Debug.S by itself is ok, because all printing occurs
// while writing the object file, and that is non-concurrent.
// Adding Debug_vlog, however, causes Debug.S to also print
// while flushing the plist, which happens concurrently.
if Ctxt.Debugvlog || Debug.Any() || Flag.Live > 0 {
return false
}
// TODO: Test and delete this condition.
if objabi.Fieldtrack_enabled != 0 {
return false
}
// TODO: fix races and enable the following flags
if Ctxt.Flag_shared || Ctxt.Flag_dynlink || Flag.Race {
return false
}
return true
}

func addImportDir(dir string) {
if dir != "" {
Flag.Cfg.ImportDirs = append(Flag.Cfg.ImportDirs, dir)
}
}

func addImportMap(s string) {
if Flag.Cfg.ImportMap == nil {
Flag.Cfg.ImportMap = make(map[string]string)
}
if strings.Count(s, "=") != 1 {
log.Fatal("-importmap argument must be of the form source=actual")
}
i := strings.Index(s, "=")
source, actual := s[:i], s[i+1:]
if source == "" || actual == "" {
log.Fatal("-importmap argument must be of the form source=actual; source and actual must be non-empty")
}
Flag.Cfg.ImportMap[source] = actual
}

func readImportCfg(file string) {
if Flag.Cfg.ImportMap == nil {
Flag.Cfg.ImportMap = make(map[string]string)
}
Flag.Cfg.PackageFile = map[string]string{}
data, err := ioutil.ReadFile(file)
if err != nil {
log.Fatalf("-importcfg: %v", err)
}

for lineNum, line := range strings.Split(string(data), "\n") {
lineNum++ // 1-based
line = strings.TrimSpace(line)
if line == "" || strings.HasPrefix(line, "#") {
continue
}

var verb, args string
if i := strings.Index(line, " "); i < 0 {
verb = line
} else {
verb, args = line[:i], strings.TrimSpace(line[i+1:])
}
var before, after string
if i := strings.Index(args, "="); i >= 0 {
before, after = args[:i], args[i+1:]
}
switch verb {
default:
log.Fatalf("%s:%d: unknown directive %q", file, lineNum, verb)
case "importmap":
if before == "" || after == "" {
log.Fatalf(`%s:%d: invalid importmap: syntax is "importmap old=new"`, file, lineNum)
}
Flag.Cfg.ImportMap[before] = after
case "packagefile":
if before == "" || after == "" {
log.Fatalf(`%s:%d: invalid packagefile: syntax is "packagefile path=filename"`, file, lineNum)
}
Flag.Cfg.PackageFile[before] = after
}
}
}

func readEmbedCfg(file string) {
data, err := ioutil.ReadFile(file)
if err != nil {
log.Fatalf("-embedcfg: %v", err)
}
if err := json.Unmarshal(data, &Flag.Cfg.Embed); err != nil {
log.Fatalf("%s: %v", file, err)
}
if Flag.Cfg.Embed.Patterns == nil {
log.Fatalf("%s: invalid embedcfg: missing Patterns", file)
}
if Flag.Cfg.Embed.Files == nil {
log.Fatalf("%s: invalid embedcfg: missing Files", file)
}
}

// parseSpectre parses the spectre configuration from the string s.
func parseSpectre(s string) {
for _, f := range strings.Split(s, ",") {
f = strings.TrimSpace(f)
switch f {
default:
log.Fatalf("unknown setting -spectre=%s", f)
case "":
// nothing
case "all":
Flag.Cfg.SpectreIndex = true
Ctxt.Retpoline = true
case "index":
Flag.Cfg.SpectreIndex = true
case "ret":
Ctxt.Retpoline = true
}
}

if Flag.Cfg.SpectreIndex {
switch objabi.GOARCH {
case "amd64":
// ok
default:
log.Fatalf("GOARCH=%s does not support -spectre=index", objabi.GOARCH)
}
}
}
@@ -1,36 +0,0 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package base

import (
"cmd/internal/obj"
)

var Ctxt *obj.Link

// TODO(mdempsky): These should probably be obj.Link methods.

// PkgLinksym returns the linker symbol for name within the given
// package prefix. For user packages, prefix should be the package
// path encoded with objabi.PathToPrefix.
func PkgLinksym(prefix, name string, abi obj.ABI) *obj.LSym {
if name == "_" {
// TODO(mdempsky): Cleanup callers and Fatalf instead.
return linksym(prefix, "_", abi)
}
return linksym(prefix, prefix+"."+name, abi)
}

// Linkname returns the linker symbol for the given name as it might
// appear within a //go:linkname directive.
func Linkname(name string, abi obj.ABI) *obj.LSym {
return linksym("_", name, abi)
}

// linksym is an internal helper function for implementing the above
// exported APIs.
func linksym(pkg, name string, abi obj.ABI) *obj.LSym {
return Ctxt.LookupABIInit(name, abi, func(r *obj.LSym) { r.Pkg = pkg })
}
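// Illustrative example (editorial sketch, not in either revision),
// assuming a package path that objabi.PathToPrefix leaves unchanged:
//
//	s := PkgLinksym("example.com/pkg", "Foo", obj.ABIInternal)
//	// s.Name == "example.com/pkg.Foo", with s.Pkg == "example.com/pkg"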
@@ -1,264 +0,0 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package base

import (
"fmt"
"os"
"runtime/debug"
"sort"
"strings"

"cmd/internal/objabi"
"cmd/internal/src"
)

// An errorMsg is a queued error message, waiting to be printed.
type errorMsg struct {
pos src.XPos
msg string
}

// Pos is the current source position being processed,
// printed by Errorf, ErrorfLang, Fatalf, and Warnf.
var Pos src.XPos

var (
errorMsgs []errorMsg
numErrors int // number of entries in errorMsgs that are errors (as opposed to warnings)
numSyntaxErrors int
)

// Errors returns the number of errors reported.
func Errors() int {
return numErrors
}

// SyntaxErrors returns the number of syntax errors reported.
func SyntaxErrors() int {
return numSyntaxErrors
}

// addErrorMsg adds a new errorMsg (which may be a warning) to errorMsgs.
func addErrorMsg(pos src.XPos, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
// Only add the position if we know the position.
// See issue golang.org/issue/11361.
if pos.IsKnown() {
msg = fmt.Sprintf("%v: %s", FmtPos(pos), msg)
}
errorMsgs = append(errorMsgs, errorMsg{
pos: pos,
msg: msg + "\n",
})
}

// FmtPos formats pos as a file:line string.
func FmtPos(pos src.XPos) string {
if Ctxt == nil {
return "???"
}
return Ctxt.OutermostPos(pos).Format(Flag.C == 0, Flag.L == 1)
}

// byPos sorts errors by source position.
type byPos []errorMsg

func (x byPos) Len() int { return len(x) }
func (x byPos) Less(i, j int) bool { return x[i].pos.Before(x[j].pos) }
func (x byPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

// FlushErrors sorts errors seen so far by line number, prints them to stdout,
// and empties the errors array.
func FlushErrors() {
if Ctxt != nil && Ctxt.Bso != nil {
Ctxt.Bso.Flush()
}
if len(errorMsgs) == 0 {
return
}
sort.Stable(byPos(errorMsgs))
for i, err := range errorMsgs {
if i == 0 || err.msg != errorMsgs[i-1].msg {
fmt.Printf("%s", err.msg)
}
}
errorMsgs = errorMsgs[:0]
}
|
||||
|
||||
// lasterror keeps track of the most recently issued error,
|
||||
// to avoid printing multiple error messages on the same line.
|
||||
var lasterror struct {
|
||||
syntax src.XPos // source position of last syntax error
|
||||
other src.XPos // source position of last non-syntax error
|
||||
msg string // error message of last non-syntax error
|
||||
}
|
||||
|
||||
// sameline reports whether two positions a, b are on the same line.
|
||||
func sameline(a, b src.XPos) bool {
|
||||
p := Ctxt.PosTable.Pos(a)
|
||||
q := Ctxt.PosTable.Pos(b)
|
||||
return p.Base() == q.Base() && p.Line() == q.Line()
|
||||
}
|
||||
|
||||
// Errorf reports a formatted error at the current line.
|
||||
func Errorf(format string, args ...interface{}) {
|
||||
ErrorfAt(Pos, format, args...)
|
||||
}
|
||||
|
||||
// ErrorfAt reports a formatted error message at pos.
|
||||
func ErrorfAt(pos src.XPos, format string, args ...interface{}) {
|
||||
msg := fmt.Sprintf(format, args...)
|
||||
|
||||
if strings.HasPrefix(msg, "syntax error") {
|
||||
numSyntaxErrors++
|
||||
// only one syntax error per line, no matter what error
|
||||
if sameline(lasterror.syntax, pos) {
|
||||
return
|
||||
}
|
||||
lasterror.syntax = pos
|
||||
} else {
|
||||
// only one of multiple equal non-syntax errors per line
|
||||
// (FlushErrors shows only one of them, so we filter them
|
||||
// here as best as we can (they may not appear in order)
|
||||
// so that we don't count them here and exit early, and
|
||||
// then have nothing to show for.)
|
||||
if sameline(lasterror.other, pos) && lasterror.msg == msg {
|
||||
return
|
||||
}
|
||||
lasterror.other = pos
|
||||
lasterror.msg = msg
|
||||
}
|
||||
|
||||
addErrorMsg(pos, "%s", msg)
|
||||
numErrors++
|
||||
|
||||
hcrash()
|
||||
if numErrors >= 10 && Flag.LowerE == 0 {
|
||||
FlushErrors()
|
||||
fmt.Printf("%v: too many errors\n", FmtPos(pos))
|
||||
ErrorExit()
|
||||
}
|
||||
}
|
||||
|
||||
// ErrorfVers reports that a language feature (format, args) requires a later version of Go.
|
||||
func ErrorfVers(lang string, format string, args ...interface{}) {
|
||||
Errorf("%s requires %s or later (-lang was set to %s; check go.mod)", fmt.Sprintf(format, args...), lang, Flag.Lang)
|
||||
}
|
||||
|
||||
// UpdateErrorDot is a clumsy hack that rewrites the last error,
|
||||
// if it was "LINE: undefined: NAME", to be "LINE: undefined: NAME in EXPR".
|
||||
// It is used to give better error messages for dot (selector) expressions.
|
||||
func UpdateErrorDot(line string, name, expr string) {
|
||||
if len(errorMsgs) == 0 {
|
||||
return
|
||||
}
|
||||
e := &errorMsgs[len(errorMsgs)-1]
|
||||
if strings.HasPrefix(e.msg, line) && e.msg == fmt.Sprintf("%v: undefined: %v\n", line, name) {
|
||||
e.msg = fmt.Sprintf("%v: undefined: %v in %v\n", line, name, expr)
|
||||
}
|
||||
}
|
||||
|
||||
// Warnf reports a formatted warning at the current line.
|
||||
// In general the Go compiler does NOT generate warnings,
|
||||
// so this should be used only when the user has opted in
|
||||
// to additional output by setting a particular flag.
|
||||
func Warn(format string, args ...interface{}) {
|
||||
WarnfAt(Pos, format, args...)
|
||||
}
|
||||
|
||||
// WarnfAt reports a formatted warning at pos.
|
||||
// In general the Go compiler does NOT generate warnings,
|
||||
// so this should be used only when the user has opted in
|
||||
// to additional output by setting a particular flag.
|
||||
func WarnfAt(pos src.XPos, format string, args ...interface{}) {
|
||||
addErrorMsg(pos, format, args...)
|
||||
if Flag.LowerM != 0 {
|
||||
FlushErrors()
|
||||
}
|
||||
}
|
||||
|
||||
// Fatalf reports a fatal error - an internal problem - at the current line and exits.
|
||||
// If other errors have already been printed, then Fatalf just quietly exits.
|
||||
// (The internal problem may have been caused by incomplete information
|
||||
// after the already-reported errors, so best to let users fix those and
|
||||
// try again without being bothered about a spurious internal error.)
|
||||
//
|
||||
// But if no errors have been printed, or if -d panic has been specified,
|
||||
// Fatalf prints the error as an "internal compiler error". In a released build,
|
||||
// it prints an error asking to file a bug report. In development builds, it
|
||||
// prints a stack trace.
|
||||
//
|
||||
// If -h has been specified, Fatalf panics to force the usual runtime info dump.
|
||||
func Fatalf(format string, args ...interface{}) {
|
||||
FatalfAt(Pos, format, args...)
|
||||
}
|
||||
|
||||
// FatalfAt reports a fatal error - an internal problem - at pos and exits.
|
||||
// If other errors have already been printed, then FatalfAt just quietly exits.
|
||||
// (The internal problem may have been caused by incomplete information
|
||||
// after the already-reported errors, so best to let users fix those and
|
||||
// try again without being bothered about a spurious internal error.)
|
||||
//
|
||||
// But if no errors have been printed, or if -d panic has been specified,
|
||||
// FatalfAt prints the error as an "internal compiler error". In a released build,
|
||||
// it prints an error asking to file a bug report. In development builds, it
|
||||
// prints a stack trace.
|
||||
//
|
||||
// If -h has been specified, FatalfAt panics to force the usual runtime info dump.
|
||||
func FatalfAt(pos src.XPos, format string, args ...interface{}) {
|
||||
FlushErrors()
|
||||
|
||||
if Debug.Panic != 0 || numErrors == 0 {
|
||||
fmt.Printf("%v: internal compiler error: ", FmtPos(pos))
|
||||
fmt.Printf(format, args...)
|
||||
fmt.Printf("\n")
|
||||
|
||||
// If this is a released compiler version, ask for a bug report.
|
||||
if strings.HasPrefix(objabi.Version, "go") {
|
||||
fmt.Printf("\n")
|
||||
fmt.Printf("Please file a bug report including a short program that triggers the error.\n")
|
||||
fmt.Printf("https://golang.org/issue/new\n")
|
||||
} else {
|
||||
// Not a release; dump a stack trace, too.
|
||||
fmt.Println()
|
||||
os.Stdout.Write(debug.Stack())
|
||||
fmt.Println()
|
||||
}
|
||||
}
|
||||
|
||||
hcrash()
|
||||
ErrorExit()
|
||||
}
|
||||
|
||||
// hcrash crashes the compiler when -h is set, to find out where a message is generated.
|
||||
func hcrash() {
|
||||
if Flag.LowerH != 0 {
|
||||
FlushErrors()
|
||||
if Flag.LowerO != "" {
|
||||
os.Remove(Flag.LowerO)
|
||||
}
|
||||
panic("-h")
|
||||
}
|
||||
}
|
||||
|
||||
// ErrorExit handles an error-status exit.
|
||||
// It flushes any pending errors, removes the output file, and exits.
|
||||
func ErrorExit() {
|
||||
FlushErrors()
|
||||
if Flag.LowerO != "" {
|
||||
os.Remove(Flag.LowerO)
|
||||
}
|
||||
os.Exit(2)
|
||||
}
|
||||
|
||||
// ExitIfErrors calls ErrorExit if any errors have been reported.
|
||||
func ExitIfErrors() {
|
||||
if Errors() > 0 {
|
||||
ErrorExit()
|
||||
}
|
||||
}
|
||||
|
||||
var AutogeneratedPos src.XPos
|
||||
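A minimal sketch of the reporting flow this file provides to the rest of the compiler (hypothetical call site, compiler-internal only):

	base.Pos = n.Pos()                      // position used by subsequent reports
	base.Errorf("invalid operation: %v", n) // queued; duplicates on one line are filtered
	// ... later, at a phase boundary:
	base.ExitIfErrors() // FlushErrors, remove the -o output file, os.Exit(2)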
@@ -1,190 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package bitvec

import (
	"math/bits"

	"cmd/compile/internal/base"
)

const (
	wordBits  = 32
	wordMask  = wordBits - 1
	wordShift = 5
)

// A BitVec is a bit vector.
type BitVec struct {
	N int32    // number of bits in vector
	B []uint32 // words holding bits
}

func New(n int32) BitVec {
	nword := (n + wordBits - 1) / wordBits
	return BitVec{n, make([]uint32, nword)}
}

type Bulk struct {
	words []uint32
	nbit  int32
	nword int32
}

func NewBulk(nbit int32, count int32) Bulk {
	nword := (nbit + wordBits - 1) / wordBits
	size := int64(nword) * int64(count)
	if int64(int32(size*4)) != size*4 {
		base.Fatalf("NewBulk too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
	}
	return Bulk{
		words: make([]uint32, size),
		nbit:  nbit,
		nword: nword,
	}
}

func (b *Bulk) Next() BitVec {
	out := BitVec{b.nbit, b.words[:b.nword]}
	b.words = b.words[b.nword:]
	return out
}

func (bv1 BitVec) Eq(bv2 BitVec) bool {
	if bv1.N != bv2.N {
		base.Fatalf("bvequal: lengths %d and %d are not equal", bv1.N, bv2.N)
	}
	for i, x := range bv1.B {
		if x != bv2.B[i] {
			return false
		}
	}
	return true
}

func (dst BitVec) Copy(src BitVec) {
	copy(dst.B, src.B)
}

func (bv BitVec) Get(i int32) bool {
	if i < 0 || i >= bv.N {
		base.Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.N)
	}
	mask := uint32(1 << uint(i%wordBits))
	return bv.B[i>>wordShift]&mask != 0
}

func (bv BitVec) Set(i int32) {
	if i < 0 || i >= bv.N {
		base.Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.N)
	}
	mask := uint32(1 << uint(i%wordBits))
	bv.B[i/wordBits] |= mask
}

func (bv BitVec) Unset(i int32) {
	if i < 0 || i >= bv.N {
		base.Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.N)
	}
	mask := uint32(1 << uint(i%wordBits))
	bv.B[i/wordBits] &^= mask
}

// Next returns the smallest index >= i for which Get reports true.
// If there is no such index, Next returns -1.
func (bv BitVec) Next(i int32) int32 {
	if i >= bv.N {
		return -1
	}

	// Jump i ahead to next word with bits.
	if bv.B[i>>wordShift]>>uint(i&wordMask) == 0 {
		i &^= wordMask
		i += wordBits
		for i < bv.N && bv.B[i>>wordShift] == 0 {
			i += wordBits
		}
	}

	if i >= bv.N {
		return -1
	}

	// Find 1 bit.
	w := bv.B[i>>wordShift] >> uint(i&wordMask)
	i += int32(bits.TrailingZeros32(w))

	return i
}

func (bv BitVec) IsEmpty() bool {
	for _, x := range bv.B {
		if x != 0 {
			return false
		}
	}
	return true
}

func (bv BitVec) Not() {
	for i, x := range bv.B {
		bv.B[i] = ^x
	}
}

// Or is union: dst = src1 | src2.
func (dst BitVec) Or(src1, src2 BitVec) {
	if len(src1.B) == 0 {
		return
	}
	_, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop

	for i, x := range src1.B {
		dst.B[i] = x | src2.B[i]
	}
}

// And is intersection: dst = src1 & src2.
func (dst BitVec) And(src1, src2 BitVec) {
	if len(src1.B) == 0 {
		return
	}
	_, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop

	for i, x := range src1.B {
		dst.B[i] = x & src2.B[i]
	}
}

// AndNot is difference: dst = src1 &^ src2.
func (dst BitVec) AndNot(src1, src2 BitVec) {
	if len(src1.B) == 0 {
		return
	}
	_, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop

	for i, x := range src1.B {
		dst.B[i] = x &^ src2.B[i]
	}
}

func (bv BitVec) String() string {
	s := make([]byte, 2+bv.N)
	copy(s, "#*")
	for i := int32(0); i < bv.N; i++ {
		ch := byte('0')
		if bv.Get(i) {
			ch = '1'
		}
		s[2+i] = ch
	}
	return string(s)
}

func (bv BitVec) Clear() {
	for i := range bv.B {
		bv.B[i] = 0
	}
}
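A usage sketch of the iteration and bulk-allocation APIs, grounded in the code above (the package is internal, so only importable within the Go tree):

	bv := bitvec.New(100)
	bv.Set(3)
	bv.Set(97)
	for i := bv.Next(0); i >= 0; i = bv.Next(i + 1) {
		fmt.Println(i) // prints 3, then 97
	}

	// Bulk amortizes allocation when many same-length vectors are
	// needed, e.g. one liveness vector per basic block:
	bulk := bitvec.NewBulk(100, 10)
	live := bulk.Next() // first of ten vectors sharing one backing slice
	_ = live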
@@ -1,152 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package deadcode

import (
	"go/constant"

	"cmd/compile/internal/base"
	"cmd/compile/internal/ir"
)

func Func(fn *ir.Func) {
	stmts(&fn.Body)

	if len(fn.Body) == 0 {
		return
	}

	for _, n := range fn.Body {
		if len(n.Init()) > 0 {
			return
		}
		switch n.Op() {
		case ir.OIF:
			n := n.(*ir.IfStmt)
			if !ir.IsConst(n.Cond, constant.Bool) || len(n.Body) > 0 || len(n.Else) > 0 {
				return
			}
		case ir.OFOR:
			n := n.(*ir.ForStmt)
			if !ir.IsConst(n.Cond, constant.Bool) || ir.BoolVal(n.Cond) {
				return
			}
		default:
			return
		}
	}

	fn.Body = []ir.Node{ir.NewBlockStmt(base.Pos, nil)}
}

func stmts(nn *ir.Nodes) {
	var lastLabel = -1
	for i, n := range *nn {
		if n != nil && n.Op() == ir.OLABEL {
			lastLabel = i
		}
	}
	for i, n := range *nn {
		// Cut is set to true when all nodes after i'th position
		// should be removed.
		// In other words, it marks whole slice "tail" as dead.
		cut := false
		if n == nil {
			continue
		}
		if n.Op() == ir.OIF {
			n := n.(*ir.IfStmt)
			n.Cond = expr(n.Cond)
			if ir.IsConst(n.Cond, constant.Bool) {
				var body ir.Nodes
				if ir.BoolVal(n.Cond) {
					n.Else = ir.Nodes{}
					body = n.Body
				} else {
					n.Body = ir.Nodes{}
					body = n.Else
				}
				// If "then" or "else" branch ends with panic or return statement,
				// it is safe to remove all statements after this node.
				// isterminating is not used to avoid goto-related complications.
				// We must be careful not to deadcode-remove labels, as they
				// might be the target of a goto. See issue 28616.
				if body := body; len(body) != 0 {
					switch body[len(body)-1].Op() {
					case ir.ORETURN, ir.OTAILCALL, ir.OPANIC:
						if i > lastLabel {
							cut = true
						}
					}
				}
			}
		}

		if len(n.Init()) != 0 {
			stmts(n.(ir.InitNode).PtrInit())
		}
		switch n.Op() {
		case ir.OBLOCK:
			n := n.(*ir.BlockStmt)
			stmts(&n.List)
		case ir.OFOR:
			n := n.(*ir.ForStmt)
			stmts(&n.Body)
		case ir.OIF:
			n := n.(*ir.IfStmt)
			stmts(&n.Body)
			stmts(&n.Else)
		case ir.ORANGE:
			n := n.(*ir.RangeStmt)
			stmts(&n.Body)
		case ir.OSELECT:
			n := n.(*ir.SelectStmt)
			for _, cas := range n.Cases {
				stmts(&cas.Body)
			}
		case ir.OSWITCH:
			n := n.(*ir.SwitchStmt)
			for _, cas := range n.Cases {
				stmts(&cas.Body)
			}
		}

		if cut {
			*nn = (*nn)[:i+1]
			break
		}
	}
}

func expr(n ir.Node) ir.Node {
	// Perform dead-code elimination on short-circuited boolean
	// expressions involving constants with the intent of
	// producing a constant 'if' condition.
	switch n.Op() {
	case ir.OANDAND:
		n := n.(*ir.LogicalExpr)
		n.X = expr(n.X)
		n.Y = expr(n.Y)
		if ir.IsConst(n.X, constant.Bool) {
			if ir.BoolVal(n.X) {
				return n.Y // true && x => x
			} else {
				return n.X // false && x => false
			}
		}
	case ir.OOROR:
		n := n.(*ir.LogicalExpr)
		n.X = expr(n.X)
		n.Y = expr(n.Y)
		if ir.IsConst(n.X, constant.Bool) {
			if ir.BoolVal(n.X) {
				return n.X // true || x => true
			} else {
				return n.Y // false || x => x
			}
		}
	}
	return n
}
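What the pass accomplishes, sketched at the source level (the pass actually rewrites the compiler's IR; hypothetical input):

	const debug = false

	if debug && expensiveCheck() { // expr folds the condition to false,
		dump() // so stmts empties this body
	}
	// A branch folded to a constant that ends in return or panic also
	// cuts the now-unreachable tail of the statement list, unless a
	// label (a potential goto target) follows; see issue 28616 above.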
@@ -1,85 +0,0 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package devirtualize implements a simple "devirtualization"
// optimization pass, which replaces interface method calls with
// direct concrete-type method calls where possible.
package devirtualize

import (
	"cmd/compile/internal/base"
	"cmd/compile/internal/ir"
	"cmd/compile/internal/typecheck"
	"cmd/compile/internal/types"
)

// Func devirtualizes calls within fn where possible.
func Func(fn *ir.Func) {
	ir.CurFunc = fn
	ir.VisitList(fn.Body, func(n ir.Node) {
		if call, ok := n.(*ir.CallExpr); ok {
			Call(call)
		}
	})
}

// Call devirtualizes the given call if possible.
func Call(call *ir.CallExpr) {
	if call.Op() != ir.OCALLINTER {
		return
	}
	sel := call.X.(*ir.SelectorExpr)
	r := ir.StaticValue(sel.X)
	if r.Op() != ir.OCONVIFACE {
		return
	}
	recv := r.(*ir.ConvExpr)

	typ := recv.X.Type()
	if typ.IsInterface() {
		return
	}

	dt := ir.NewTypeAssertExpr(sel.Pos(), sel.X, nil)
	dt.SetType(typ)
	x := typecheck.Callee(ir.NewSelectorExpr(sel.Pos(), ir.OXDOT, dt, sel.Sel))
	switch x.Op() {
	case ir.ODOTMETH:
		x := x.(*ir.SelectorExpr)
		if base.Flag.LowerM != 0 {
			base.WarnfAt(call.Pos(), "devirtualizing %v to %v", sel, typ)
		}
		call.SetOp(ir.OCALLMETH)
		call.X = x
	case ir.ODOTINTER:
		// Promoted method from embedded interface-typed field (#42279).
		x := x.(*ir.SelectorExpr)
		if base.Flag.LowerM != 0 {
			base.WarnfAt(call.Pos(), "partially devirtualizing %v to %v", sel, typ)
		}
		call.SetOp(ir.OCALLINTER)
		call.X = x
	default:
		// TODO(mdempsky): Turn back into Fatalf after more testing.
		if base.Flag.LowerM != 0 {
			base.WarnfAt(call.Pos(), "failed to devirtualize %v (%v)", x, x.Op())
		}
		return
	}

	// Duplicated logic from typecheck for function call return
	// value types.
	//
	// Receiver parameter size may have changed; need to update
	// call.Type to get correct stack offsets for result
	// parameters.
	types.CheckSize(x.Type())
	switch ft := x.Type(); ft.NumResults() {
	case 0:
	case 1:
		call.SetType(ft.Results().Field(0).Type)
	default:
		call.SetType(ft.Results())
	}
}
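The rewrite at source level, sketched (the pass operates on IR; hypothetical code):

	var w io.Writer = new(bytes.Buffer) // concrete type visible via ir.StaticValue
	w.Write(p)                          // interface call: OCALLINTER

	// After devirtualization the call compiles as if written:
	w.(*bytes.Buffer).Write(p) // direct method call: OCALLMETH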
@@ -1,458 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package dwarfgen

import (
	"bytes"
	"flag"
	"fmt"
	"sort"

	"cmd/compile/internal/base"
	"cmd/compile/internal/ir"
	"cmd/compile/internal/reflectdata"
	"cmd/compile/internal/ssa"
	"cmd/compile/internal/ssagen"
	"cmd/compile/internal/types"
	"cmd/internal/dwarf"
	"cmd/internal/obj"
	"cmd/internal/objabi"
	"cmd/internal/src"
)

func Info(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) {
	fn := curfn.(*ir.Func)

	if fn.Nname != nil {
		expect := fn.Linksym()
		if fnsym.ABI() == obj.ABI0 {
			expect = fn.LinksymABI(obj.ABI0)
		}
		if fnsym != expect {
			base.Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
		}
	}

	// Back when there were two different *Funcs for a function, this code
	// was not consistent about whether a particular *Node being processed
	// was an ODCLFUNC or ONAME node. Partly this is because inlined function
	// bodies have no ODCLFUNC node, which was its own inconsistency.
	// In any event, the handling of the two different nodes for DWARF purposes
	// was subtly different, likely in unintended ways. CL 272253 merged the
	// two nodes' Func fields, so that code sees the same *Func whether it is
	// holding the ODCLFUNC or the ONAME. This resulted in changes in the
	// DWARF output. To preserve the existing DWARF output and leave an
	// intentional change for a future CL, this code does the following when
	// fn.Op == ONAME:
	//
	// 1. Disallow use of createComplexVars in createDwarfVars.
	//    It was not possible to reach that code for an ONAME before,
	//    because the DebugInfo was set only on the ODCLFUNC Func.
	//    Calling into it in the ONAME case causes an index out of bounds panic.
	//
	// 2. Do not populate apdecls. fn.Func.Dcl was in the ODCLFUNC Func,
	//    not the ONAME Func. Populating apdecls for the ONAME case results
	//    in selected being populated after createSimpleVars is called in
	//    createDwarfVars, and then that causes the loop to skip all the entries
	//    in dcl, meaning that the RecordAutoType calls don't happen.
	//
	// These two adjustments keep toolstash -cmp working for now.
	// Deciding the right answer is, as they say, future work.
	//
	// We can tell the difference between the old ODCLFUNC and ONAME
	// cases by looking at the infosym.Name. If it's empty, DebugInfo is
	// being called from (*obj.Link).populateDWARF, which used to use
	// the ODCLFUNC. If it's non-empty (the name will end in $abstract),
	// DebugInfo is being called from (*obj.Link).DwarfAbstractFunc,
	// which used to use the ONAME form.
	isODCLFUNC := infosym.Name == ""

	var apdecls []*ir.Name
	// Populate decls for fn.
	if isODCLFUNC {
		for _, n := range fn.Dcl {
			if n.Op() != ir.ONAME { // might be OTYPE or OLITERAL
				continue
			}
			switch n.Class {
			case ir.PAUTO:
				if !n.Used() {
					// Text == nil -> generating abstract function
					if fnsym.Func().Text != nil {
						base.Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)")
					}
					continue
				}
			case ir.PPARAM, ir.PPARAMOUT:
			default:
				continue
			}
			apdecls = append(apdecls, n)
			fnsym.Func().RecordAutoType(reflectdata.TypeLinksym(n.Type()))
		}
	}

	decls, dwarfVars := createDwarfVars(fnsym, isODCLFUNC, fn, apdecls)

	// For each type referenced by the function's auto vars but not
	// already referenced by a dwarf var, attach an R_USETYPE relocation to
	// the function symbol to ensure that the type is included in DWARF
	// processing during linking.
	typesyms := []*obj.LSym{}
	for t := range fnsym.Func().Autot {
		typesyms = append(typesyms, t)
	}
	sort.Sort(obj.BySymName(typesyms))
	for _, sym := range typesyms {
		r := obj.Addrel(infosym)
		r.Sym = sym
		r.Type = objabi.R_USETYPE
	}
	fnsym.Func().Autot = nil

	var varScopes []ir.ScopeID
	for _, decl := range decls {
		pos := declPos(decl)
		varScopes = append(varScopes, findScope(fn.Marks, pos))
	}

	scopes := assembleScopes(fnsym, fn, dwarfVars, varScopes)
	var inlcalls dwarf.InlCalls
	if base.Flag.GenDwarfInl > 0 {
		inlcalls = assembleInlines(fnsym, dwarfVars)
	}
	return scopes, inlcalls
}

func declPos(decl *ir.Name) src.XPos {
	return decl.Canonical().Pos()
}

// createDwarfVars processes fn, returning a list of DWARF variables and the
// Nodes they represent.
func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var) {
	// Collect a raw list of DWARF vars.
	var vars []*dwarf.Var
	var decls []*ir.Name
	var selected ir.NameSet
	if base.Ctxt.Flag_locationlists && base.Ctxt.Flag_optimize && fn.DebugInfo != nil && complexOK {
		decls, vars, selected = createComplexVars(fnsym, fn)
	} else {
		decls, vars, selected = createSimpleVars(fnsym, apDecls)
	}

	dcl := apDecls
	if fnsym.WasInlined() {
		dcl = preInliningDcls(fnsym)
	}

	// If optimization is enabled, the list above will typically be
	// missing some of the original pre-optimization variables in the
	// function (they may have been promoted to registers, folded into
	// constants, dead-coded away, etc). Input arguments not eligible
	// for SSA optimization are also missing. Here we add back in entries
	// for selected missing vars. Note that the recipe below creates a
	// conservative location. The idea here is that we want to
	// communicate to the user that "yes, there is a variable named X
	// in this function, but no, I don't have enough information to
	// reliably report its contents."
	// For non-SSA-able arguments, however, the correct information
	// is known -- they have a single home on the stack.
	for _, n := range dcl {
		if selected.Has(n) {
			continue
		}
		c := n.Sym().Name[0]
		if c == '.' || n.Type().IsUntyped() {
			continue
		}
		if n.Class == ir.PPARAM && !ssagen.TypeOK(n.Type()) {
			// SSA-able args get location lists, and may move in and
			// out of registers, so those are handled elsewhere.
			// Autos and named output params seem to get handled
			// with VARDEF, which creates location lists.
			// Args not of SSA-able type are treated here; they
			// are homed on the stack in a single place for the
			// entire call.
			vars = append(vars, createSimpleVar(fnsym, n))
			decls = append(decls, n)
			continue
		}
		typename := dwarf.InfoPrefix + types.TypeSymName(n.Type())
		decls = append(decls, n)
		abbrev := dwarf.DW_ABRV_AUTO_LOCLIST
		isReturnValue := (n.Class == ir.PPARAMOUT)
		if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT {
			abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
		}
		if n.Esc() == ir.EscHeap {
			// The variable in question has been promoted to the heap.
			// Its address is in n.Heapaddr.
			// TODO(thanm): generate a better location expression
		}
		inlIndex := 0
		if base.Flag.GenDwarfInl > 1 {
			if n.InlFormal() || n.InlLocal() {
				inlIndex = posInlIndex(n.Pos()) + 1
				if n.InlFormal() {
					abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
				}
			}
		}
		declpos := base.Ctxt.InnermostPos(n.Pos())
		vars = append(vars, &dwarf.Var{
			Name:          n.Sym().Name,
			IsReturnValue: isReturnValue,
			Abbrev:        abbrev,
			StackOffset:   int32(n.FrameOffset()),
			Type:          base.Ctxt.Lookup(typename),
			DeclFile:      declpos.RelFilename(),
			DeclLine:      declpos.RelLine(),
			DeclCol:       declpos.Col(),
			InlIndex:      int32(inlIndex),
			ChildIndex:    -1,
		})
		// Record the Go type to ensure that it gets emitted by the linker.
		fnsym.Func().RecordAutoType(reflectdata.TypeLinksym(n.Type()))
	}

	return decls, vars
}

// preInliningDcls: given a function that was inlined at some point during the
// compilation, return a sorted list of nodes corresponding to the
// autos/locals in that function prior to inlining. If this is a
// function that is not local to the package being compiled, then the
// names of the variables may have been "versioned" to avoid conflicts
// with local vars; disregard this versioning when sorting.
func preInliningDcls(fnsym *obj.LSym) []*ir.Name {
	fn := base.Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*ir.Func)
	var rdcl []*ir.Name
	for _, n := range fn.Inl.Dcl {
		c := n.Sym().Name[0]
		// Avoid reporting "_" parameters, since if there are more than
		// one, it can result in a collision later on, as in #23179.
		if unversion(n.Sym().Name) == "_" || c == '.' || n.Type().IsUntyped() {
			continue
		}
		rdcl = append(rdcl, n)
	}
	return rdcl
}

// createSimpleVars creates a DWARF entry for every variable declared in the
// function, claiming that they are permanently on the stack.
func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var, ir.NameSet) {
	var vars []*dwarf.Var
	var decls []*ir.Name
	var selected ir.NameSet
	for _, n := range apDecls {
		if ir.IsAutoTmp(n) {
			continue
		}

		decls = append(decls, n)
		vars = append(vars, createSimpleVar(fnsym, n))
		selected.Add(n)
	}
	return decls, vars, selected
}

func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var {
	var abbrev int
	var offs int64

	switch n.Class {
	case ir.PAUTO:
		offs = n.FrameOffset()
		abbrev = dwarf.DW_ABRV_AUTO
		if base.Ctxt.FixedFrameSize() == 0 {
			offs -= int64(types.PtrSize)
		}
		if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
			// There is a word space for FP on ARM64 even if the frame pointer is disabled
			offs -= int64(types.PtrSize)
		}

	case ir.PPARAM, ir.PPARAMOUT:
		abbrev = dwarf.DW_ABRV_PARAM
		offs = n.FrameOffset() + base.Ctxt.FixedFrameSize()
	default:
		base.Fatalf("createSimpleVar unexpected class %v for node %v", n.Class, n)
	}

	typename := dwarf.InfoPrefix + types.TypeSymName(n.Type())
	delete(fnsym.Func().Autot, reflectdata.TypeLinksym(n.Type()))
	inlIndex := 0
	if base.Flag.GenDwarfInl > 1 {
		if n.InlFormal() || n.InlLocal() {
			inlIndex = posInlIndex(n.Pos()) + 1
			if n.InlFormal() {
				abbrev = dwarf.DW_ABRV_PARAM
			}
		}
	}
	declpos := base.Ctxt.InnermostPos(declPos(n))
	return &dwarf.Var{
		Name:          n.Sym().Name,
		IsReturnValue: n.Class == ir.PPARAMOUT,
		IsInlFormal:   n.InlFormal(),
		Abbrev:        abbrev,
		StackOffset:   int32(offs),
		Type:          base.Ctxt.Lookup(typename),
		DeclFile:      declpos.RelFilename(),
		DeclLine:      declpos.RelLine(),
		DeclCol:       declpos.Col(),
		InlIndex:      int32(inlIndex),
		ChildIndex:    -1,
	}
}

// createComplexVars creates recomposed DWARF vars with location lists,
// suitable for describing optimized code.
func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Name, []*dwarf.Var, ir.NameSet) {
	debugInfo := fn.DebugInfo.(*ssa.FuncDebug)

	// Produce a DWARF variable entry for each user variable.
	var decls []*ir.Name
	var vars []*dwarf.Var
	var ssaVars ir.NameSet

	for varID, dvar := range debugInfo.Vars {
		n := dvar
		ssaVars.Add(n)
		for _, slot := range debugInfo.VarSlots[varID] {
			ssaVars.Add(debugInfo.Slots[slot].N)
		}

		if dvar := createComplexVar(fnsym, fn, ssa.VarID(varID)); dvar != nil {
			decls = append(decls, n)
			vars = append(vars, dvar)
		}
	}

	return decls, vars, ssaVars
}

// createComplexVar builds a single DWARF variable entry and location list.
func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var {
	debug := fn.DebugInfo.(*ssa.FuncDebug)
	n := debug.Vars[varID]

	var abbrev int
	switch n.Class {
	case ir.PAUTO:
		abbrev = dwarf.DW_ABRV_AUTO_LOCLIST
	case ir.PPARAM, ir.PPARAMOUT:
		abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
	default:
		return nil
	}

	gotype := reflectdata.TypeLinksym(n.Type())
	delete(fnsym.Func().Autot, gotype)
	typename := dwarf.InfoPrefix + gotype.Name[len("type."):]
	inlIndex := 0
	if base.Flag.GenDwarfInl > 1 {
		if n.InlFormal() || n.InlLocal() {
			inlIndex = posInlIndex(n.Pos()) + 1
			if n.InlFormal() {
				abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
			}
		}
	}
	declpos := base.Ctxt.InnermostPos(n.Pos())
	dvar := &dwarf.Var{
		Name:          n.Sym().Name,
		IsReturnValue: n.Class == ir.PPARAMOUT,
		IsInlFormal:   n.InlFormal(),
		Abbrev:        abbrev,
		Type:          base.Ctxt.Lookup(typename),
		// The stack offset is used as a sorting key, so for decomposed
		// variables just give it the first one. It's not used otherwise.
		// This won't work well if the first slot hasn't been assigned a stack
		// location, but it's not obvious how to do better.
		StackOffset: ssagen.StackOffset(debug.Slots[debug.VarSlots[varID][0]]),
		DeclFile:    declpos.RelFilename(),
		DeclLine:    declpos.RelLine(),
		DeclCol:     declpos.Col(),
		InlIndex:    int32(inlIndex),
		ChildIndex:  -1,
	}
	list := debug.LocationLists[varID]
	if len(list) != 0 {
		dvar.PutLocationList = func(listSym, startPC dwarf.Sym) {
			debug.PutLocationList(list, base.Ctxt, listSym.(*obj.LSym), startPC.(*obj.LSym))
		}
	}
	return dvar
}

// RecordFlags records the specified command-line flags to be placed
// in the DWARF info.
func RecordFlags(flags ...string) {
	if base.Ctxt.Pkgpath == "" {
		// We can't record the flags if we don't know what the
		// package name is.
		return
	}

	type BoolFlag interface {
		IsBoolFlag() bool
	}
	type CountFlag interface {
		IsCountFlag() bool
	}
	var cmd bytes.Buffer
	for _, name := range flags {
		f := flag.Lookup(name)
		if f == nil {
			continue
		}
		getter := f.Value.(flag.Getter)
		if getter.String() == f.DefValue {
			// Flag has default value, so omit it.
			continue
		}
		if bf, ok := f.Value.(BoolFlag); ok && bf.IsBoolFlag() {
			val, ok := getter.Get().(bool)
			if ok && val {
				fmt.Fprintf(&cmd, " -%s", f.Name)
				continue
			}
		}
		if cf, ok := f.Value.(CountFlag); ok && cf.IsCountFlag() {
			val, ok := getter.Get().(int)
			if ok && val == 1 {
				fmt.Fprintf(&cmd, " -%s", f.Name)
				continue
			}
		}
		fmt.Fprintf(&cmd, " -%s=%v", f.Name, getter.Get())
	}

	if cmd.Len() == 0 {
		return
	}
	s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "producer." + base.Ctxt.Pkgpath)
	s.Type = objabi.SDWARFCUINFO
	// Sometimes (for example when building tests) we can link
	// together two package main archives. So allow dups.
	s.Set(obj.AttrDuplicateOK, true)
	base.Ctxt.Data = append(base.Ctxt.Data, s)
	s.P = cmd.Bytes()[1:]
}

// RecordPackageName records the name of the package being
// compiled, so that the linker can save it in the compile unit's DIE.
func RecordPackageName() {
	s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "packagename." + base.Ctxt.Pkgpath)
	s.Type = objabi.SDWARFCUINFO
	// Sometimes (for example when building tests) we can link
	// together two package main archives. So allow dups.
	s.Set(obj.AttrDuplicateOK, true)
	base.Ctxt.Data = append(base.Ctxt.Data, s)
	s.P = []byte(types.LocalPkg.Name)
}
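An illustration of which path a variable takes through createDwarfVars (hypothetical function, not from this file):

	func sum(xs []int) (total int) {
		for _, x := range xs {
			total += x // optimized builds: location-list entry via createComplexVars
		}
		return total
	}
	// With -N (and so no SSA debug info), createSimpleVars instead claims
	// xs and total live permanently on the stack (DW_ABRV_PARAM / DW_ABRV_AUTO).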
@@ -1,94 +0,0 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package dwarfgen

import (
	"cmd/compile/internal/base"
	"cmd/compile/internal/ir"
	"cmd/internal/src"
)

// A ScopeMarker tracks scope nesting and boundaries for later use
// during DWARF generation.
type ScopeMarker struct {
	parents []ir.ScopeID
	marks   []ir.Mark
}

// checkPos validates the given position and returns the current scope.
func (m *ScopeMarker) checkPos(pos src.XPos) ir.ScopeID {
	if !pos.IsKnown() {
		base.Fatalf("unknown scope position")
	}

	if len(m.marks) == 0 {
		return 0
	}

	last := &m.marks[len(m.marks)-1]
	if xposBefore(pos, last.Pos) {
		base.FatalfAt(pos, "non-monotonic scope positions\n\t%v: previous scope position", base.FmtPos(last.Pos))
	}
	return last.Scope
}

// Push records a transition to a new child scope of the current scope.
func (m *ScopeMarker) Push(pos src.XPos) {
	current := m.checkPos(pos)

	m.parents = append(m.parents, current)
	child := ir.ScopeID(len(m.parents))

	m.marks = append(m.marks, ir.Mark{Pos: pos, Scope: child})
}

// Pop records a transition back to the current scope's parent.
func (m *ScopeMarker) Pop(pos src.XPos) {
	current := m.checkPos(pos)

	parent := m.parents[current-1]

	m.marks = append(m.marks, ir.Mark{Pos: pos, Scope: parent})
}

// Unpush removes the current scope, which must be empty.
func (m *ScopeMarker) Unpush() {
	i := len(m.marks) - 1
	current := m.marks[i].Scope

	if current != ir.ScopeID(len(m.parents)) {
		base.FatalfAt(m.marks[i].Pos, "current scope is not empty")
	}

	m.parents = m.parents[:current-1]
	m.marks = m.marks[:i]
}

// WriteTo writes the recorded scope marks to the given function,
// and resets the marker for reuse.
func (m *ScopeMarker) WriteTo(fn *ir.Func) {
	m.compactMarks()

	fn.Parents = make([]ir.ScopeID, len(m.parents))
	copy(fn.Parents, m.parents)
	m.parents = m.parents[:0]

	fn.Marks = make([]ir.Mark, len(m.marks))
	copy(fn.Marks, m.marks)
	m.marks = m.marks[:0]
}

func (m *ScopeMarker) compactMarks() {
	n := 0
	for _, next := range m.marks {
		if n > 0 && next.Pos == m.marks[n-1].Pos {
			m.marks[n-1].Scope = next.Scope
			continue
		}
		m.marks[n] = next
		n++
	}
	m.marks = m.marks[:n]
}
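How caller code would drive this type, sketched (hypothetical positions; usable only within cmd/compile):

	var m dwarfgen.ScopeMarker
	m.Push(openBracePos) // entering a block: records a new child scope
	// ... declarations and statements inside the block ...
	m.Pop(closeBracePos) // leaving the block: back to the parent scope
	m.WriteTo(fn)        // store compacted Parents/Marks on the *ir.Func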
File diff suppressed because it is too large
959
src/cmd/compile/internal/gc/alg.go
Normal file
@@ -0,0 +1,959 @@
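For orientation before the listing: a sketch (hypothetical types, not part of the file) of the classifications algtype1, defined below, computes:

	type P struct{ X, Y int64 } // AMEM; algtype refines 16-byte memory to AMEM128
	type S struct{ N string }   // one-field struct: same as its field, ASTRING
	type F struct{ Fn func() }  // ANOEQ: functions are not comparable
	type A [4]string            // ASPECIAL: needs generated .eq/.hash functions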
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/compile/internal/types"
	"cmd/internal/obj"
	"fmt"
	"sort"
)

// AlgKind describes the kind of algorithms used for comparing and
// hashing a Type.
type AlgKind int

//go:generate stringer -type AlgKind -trimprefix A

const (
	// These values are known by runtime.
	ANOEQ AlgKind = iota
	AMEM0
	AMEM8
	AMEM16
	AMEM32
	AMEM64
	AMEM128
	ASTRING
	AINTER
	ANILINTER
	AFLOAT32
	AFLOAT64
	ACPLX64
	ACPLX128

	// Type can be compared/hashed as regular memory.
	AMEM AlgKind = 100

	// Type needs special comparison/hashing functions.
	ASPECIAL AlgKind = -1
)

// IsComparable reports whether t is a comparable type.
func IsComparable(t *types.Type) bool {
	a, _ := algtype1(t)
	return a != ANOEQ
}

// IsRegularMemory reports whether t can be compared/hashed as regular memory.
func IsRegularMemory(t *types.Type) bool {
	a, _ := algtype1(t)
	return a == AMEM
}

// IncomparableField returns an incomparable Field of struct Type t, if any.
func IncomparableField(t *types.Type) *types.Field {
	for _, f := range t.FieldSlice() {
		if !IsComparable(f.Type) {
			return f
		}
	}
	return nil
}

// EqCanPanic reports whether == on type t could panic (has an interface somewhere).
// t must be comparable.
func EqCanPanic(t *types.Type) bool {
	switch t.Etype {
	default:
		return false
	case TINTER:
		return true
	case TARRAY:
		return EqCanPanic(t.Elem())
	case TSTRUCT:
		for _, f := range t.FieldSlice() {
			if !f.Sym.IsBlank() && EqCanPanic(f.Type) {
				return true
			}
		}
		return false
	}
}

// algtype is like algtype1, except it returns the fixed-width AMEMxx variants
// instead of the general AMEM kind when possible.
func algtype(t *types.Type) AlgKind {
	a, _ := algtype1(t)
	if a == AMEM {
		switch t.Width {
		case 0:
			return AMEM0
		case 1:
			return AMEM8
		case 2:
			return AMEM16
		case 4:
			return AMEM32
		case 8:
			return AMEM64
		case 16:
			return AMEM128
		}
	}

	return a
}

// algtype1 returns the AlgKind used for comparing and hashing Type t.
// If it returns ANOEQ, it also returns the component type of t that
// makes it incomparable.
func algtype1(t *types.Type) (AlgKind, *types.Type) {
	if t.Broke() {
		return AMEM, nil
	}
	if t.Noalg() {
		return ANOEQ, t
	}

	switch t.Etype {
	case TANY, TFORW:
		// will be defined later.
		return ANOEQ, t

	case TINT8, TUINT8, TINT16, TUINT16,
		TINT32, TUINT32, TINT64, TUINT64,
		TINT, TUINT, TUINTPTR,
		TBOOL, TPTR,
		TCHAN, TUNSAFEPTR:
		return AMEM, nil

	case TFUNC, TMAP:
		return ANOEQ, t

	case TFLOAT32:
		return AFLOAT32, nil

	case TFLOAT64:
		return AFLOAT64, nil

	case TCOMPLEX64:
		return ACPLX64, nil

	case TCOMPLEX128:
		return ACPLX128, nil

	case TSTRING:
		return ASTRING, nil

	case TINTER:
		if t.IsEmptyInterface() {
			return ANILINTER, nil
		}
		return AINTER, nil

	case TSLICE:
		return ANOEQ, t

	case TARRAY:
		a, bad := algtype1(t.Elem())
		switch a {
		case AMEM:
			return AMEM, nil
		case ANOEQ:
			return ANOEQ, bad
		}

		switch t.NumElem() {
		case 0:
			// We checked above that the element type is comparable.
			return AMEM, nil
		case 1:
			// Single-element array is same as its lone element.
			return a, nil
		}

		return ASPECIAL, nil

	case TSTRUCT:
		fields := t.FieldSlice()

		// One-field struct is same as that one field alone.
		if len(fields) == 1 && !fields[0].Sym.IsBlank() {
			return algtype1(fields[0].Type)
		}

		ret := AMEM
		for i, f := range fields {
			// All fields must be comparable.
			a, bad := algtype1(f.Type)
			if a == ANOEQ {
				return ANOEQ, bad
			}

			// Blank fields, padded fields, fields with non-memory
			// equality need special compare.
			if a != AMEM || f.Sym.IsBlank() || ispaddedfield(t, i) {
				ret = ASPECIAL
			}
		}

		return ret, nil
	}

	Fatalf("algtype1: unexpected type %v", t)
	return 0, nil
}

// genhash returns a symbol which is the closure used to compute
// the hash of a value of type t.
// Note: the generated function must match runtime.typehash exactly.
func genhash(t *types.Type) *obj.LSym {
	switch algtype(t) {
	default:
		// genhash is only called for types that have equality
		Fatalf("genhash %v", t)
	case AMEM0:
		return sysClosure("memhash0")
	case AMEM8:
		return sysClosure("memhash8")
	case AMEM16:
		return sysClosure("memhash16")
	case AMEM32:
		return sysClosure("memhash32")
	case AMEM64:
		return sysClosure("memhash64")
	case AMEM128:
		return sysClosure("memhash128")
	case ASTRING:
		return sysClosure("strhash")
	case AINTER:
		return sysClosure("interhash")
	case ANILINTER:
		return sysClosure("nilinterhash")
	case AFLOAT32:
		return sysClosure("f32hash")
	case AFLOAT64:
		return sysClosure("f64hash")
	case ACPLX64:
		return sysClosure("c64hash")
	case ACPLX128:
		return sysClosure("c128hash")
	case AMEM:
		// For other sizes of plain memory, we build a closure
		// that calls memhash_varlen. The size of the memory is
		// encoded in the first slot of the closure.
		closure := typeLookup(fmt.Sprintf(".hashfunc%d", t.Width)).Linksym()
		if len(closure.P) > 0 { // already generated
			return closure
		}
		if memhashvarlen == nil {
			memhashvarlen = sysfunc("memhash_varlen")
		}
		ot := 0
		ot = dsymptr(closure, ot, memhashvarlen, 0)
		ot = duintptr(closure, ot, uint64(t.Width)) // size encoded in closure
		ggloblsym(closure, int32(ot), obj.DUPOK|obj.RODATA)
		return closure
	case ASPECIAL:
		break
	}

	closure := typesymprefix(".hashfunc", t).Linksym()
	if len(closure.P) > 0 { // already generated
		return closure
	}

	// Generate hash functions for subtypes.
	// There are cases where we might not use these hashes,
	// but in that case they will get dead-code eliminated.
	// (And the closure generated by genhash will also get
	// dead-code eliminated, as we call the subtype hashers
	// directly.)
	switch t.Etype {
	case types.TARRAY:
		genhash(t.Elem())
	case types.TSTRUCT:
		for _, f := range t.FieldSlice() {
			genhash(f.Type)
		}
	}

	sym := typesymprefix(".hash", t)
	if Debug.r != 0 {
		fmt.Printf("genhash %v %v %v\n", closure, sym, t)
	}

	lineno = autogeneratedPos // less confusing than end of input
	dclcontext = PEXTERN

	// func sym(p *T, h uintptr) uintptr
	tfn := nod(OTFUNC, nil, nil)
	tfn.List.Set2(
		namedfield("p", types.NewPtr(t)),
		namedfield("h", types.Types[TUINTPTR]),
	)
	tfn.Rlist.Set1(anonfield(types.Types[TUINTPTR]))

	fn := dclfunc(sym, tfn)
	np := asNode(tfn.Type.Params().Field(0).Nname)
	nh := asNode(tfn.Type.Params().Field(1).Nname)

	switch t.Etype {
	case types.TARRAY:
		// An array of pure memory would be handled by the
		// standard algorithm, so the element type must not be
		// pure memory.
		hashel := hashfor(t.Elem())

		n := nod(ORANGE, nil, nod(ODEREF, np, nil))
		ni := newname(lookup("i"))
		ni.Type = types.Types[TINT]
		n.List.Set1(ni)
		n.SetColas(true)
		colasdefn(n.List.Slice(), n)
		ni = n.List.First()

		// h = hashel(&p[i], h)
		call := nod(OCALL, hashel, nil)

		nx := nod(OINDEX, np, ni)
		nx.SetBounded(true)
		na := nod(OADDR, nx, nil)
		call.List.Append(na)
		call.List.Append(nh)
		n.Nbody.Append(nod(OAS, nh, call))

		fn.Nbody.Append(n)

	case types.TSTRUCT:
		// Walk the struct using memhash for runs of AMEM
		// and calling specific hash functions for the others.
		for i, fields := 0, t.FieldSlice(); i < len(fields); {
			f := fields[i]

			// Skip blank fields.
			if f.Sym.IsBlank() {
				i++
				continue
			}

			// Hash non-memory fields with appropriate hash function.
			if !IsRegularMemory(f.Type) {
				hashel := hashfor(f.Type)
				call := nod(OCALL, hashel, nil)
				nx := nodSym(OXDOT, np, f.Sym) // TODO: fields from other packages?
				na := nod(OADDR, nx, nil)
				call.List.Append(na)
				call.List.Append(nh)
				fn.Nbody.Append(nod(OAS, nh, call))
				i++
				continue
			}

			// Otherwise, hash a maximal length run of raw memory.
			size, next := memrun(t, i)

			// h = hashel(&p.first, size, h)
			hashel := hashmem(f.Type)
			call := nod(OCALL, hashel, nil)
			nx := nodSym(OXDOT, np, f.Sym) // TODO: fields from other packages?
			na := nod(OADDR, nx, nil)
			call.List.Append(na)
			call.List.Append(nh)
			call.List.Append(nodintconst(size))
			fn.Nbody.Append(nod(OAS, nh, call))

			i = next
		}
	}

	r := nod(ORETURN, nil, nil)
	r.List.Append(nh)
	fn.Nbody.Append(r)

	if Debug.r != 0 {
		dumplist("genhash body", fn.Nbody)
	}

	funcbody()

	fn.Func.SetDupok(true)
	fn = typecheck(fn, ctxStmt)

	Curfn = fn
	typecheckslice(fn.Nbody.Slice(), ctxStmt)
	Curfn = nil

	if debug_dclstack != 0 {
		testdclstack()
	}

	fn.Func.SetNilCheckDisabled(true)
	xtop = append(xtop, fn)

	// Build closure. It doesn't close over any variables, so
	// it contains just the function pointer.
	dsymptr(closure, 0, sym.Linksym(), 0)
	ggloblsym(closure, int32(Widthptr), obj.DUPOK|obj.RODATA)

	return closure
}

func hashfor(t *types.Type) *Node {
	var sym *types.Sym

	switch a, _ := algtype1(t); a {
	case AMEM:
		Fatalf("hashfor with AMEM type")
	case AINTER:
		sym = Runtimepkg.Lookup("interhash")
	case ANILINTER:
		sym = Runtimepkg.Lookup("nilinterhash")
	case ASTRING:
		sym = Runtimepkg.Lookup("strhash")
	case AFLOAT32:
		sym = Runtimepkg.Lookup("f32hash")
	case AFLOAT64:
		sym = Runtimepkg.Lookup("f64hash")
	case ACPLX64:
		sym = Runtimepkg.Lookup("c64hash")
	case ACPLX128:
		sym = Runtimepkg.Lookup("c128hash")
	default:
		// Note: the caller of hashfor ensured that this symbol
		// exists and has a body by calling genhash for t.
		sym = typesymprefix(".hash", t)
	}

	n := newname(sym)
	setNodeNameFunc(n)
	n.Type = functype(nil, []*Node{
		anonfield(types.NewPtr(t)),
		anonfield(types.Types[TUINTPTR]),
	}, []*Node{
		anonfield(types.Types[TUINTPTR]),
	})
	return n
}

// sysClosure returns a closure which will call the
// given runtime function (with no closed-over variables).
func sysClosure(name string) *obj.LSym {
	s := sysvar(name + "·f")
	if len(s.P) == 0 {
		f := sysfunc(name)
		dsymptr(s, 0, f, 0)
		ggloblsym(s, int32(Widthptr), obj.DUPOK|obj.RODATA)
	}
	return s
}

// geneq returns a symbol which is the closure used to compute
// equality for two objects of type t.
func geneq(t *types.Type) *obj.LSym {
	switch algtype(t) {
	case ANOEQ:
		// The runtime will panic if it tries to compare
		// a type with a nil equality function.
		return nil
	case AMEM0:
		return sysClosure("memequal0")
	case AMEM8:
		return sysClosure("memequal8")
	case AMEM16:
		return sysClosure("memequal16")
	case AMEM32:
		return sysClosure("memequal32")
	case AMEM64:
		return sysClosure("memequal64")
	case AMEM128:
		return sysClosure("memequal128")
	case ASTRING:
		return sysClosure("strequal")
	case AINTER:
		return sysClosure("interequal")
	case ANILINTER:
		return sysClosure("nilinterequal")
	case AFLOAT32:
		return sysClosure("f32equal")
	case AFLOAT64:
		return sysClosure("f64equal")
	case ACPLX64:
		return sysClosure("c64equal")
	case ACPLX128:
		return sysClosure("c128equal")
	case AMEM:
		// make equality closure. The size of the type
		// is encoded in the closure.
		closure := typeLookup(fmt.Sprintf(".eqfunc%d", t.Width)).Linksym()
		if len(closure.P) != 0 {
			return closure
		}
		if memequalvarlen == nil {
			memequalvarlen = sysvar("memequal_varlen") // asm func
		}
		ot := 0
		ot = dsymptr(closure, ot, memequalvarlen, 0)
		ot = duintptr(closure, ot, uint64(t.Width))
		ggloblsym(closure, int32(ot), obj.DUPOK|obj.RODATA)
		return closure
	case ASPECIAL:
		break
	}

	closure := typesymprefix(".eqfunc", t).Linksym()
	if len(closure.P) > 0 { // already generated
		return closure
	}
	sym := typesymprefix(".eq", t)
	if Debug.r != 0 {
		fmt.Printf("geneq %v\n", t)
	}

	// Autogenerate code for equality of structs and arrays.

	lineno = autogeneratedPos // less confusing than end of input
	dclcontext = PEXTERN

	// func sym(p, q *T) bool
	tfn := nod(OTFUNC, nil, nil)
	tfn.List.Set2(
		namedfield("p", types.NewPtr(t)),
		namedfield("q", types.NewPtr(t)),
	)
	tfn.Rlist.Set1(namedfield("r", types.Types[TBOOL]))

	fn := dclfunc(sym, tfn)
	np := asNode(tfn.Type.Params().Field(0).Nname)
	nq := asNode(tfn.Type.Params().Field(1).Nname)
	nr := asNode(tfn.Type.Results().Field(0).Nname)

	// Label to jump to if an equality test fails.
	neq := autolabel(".neq")

	// We reach here only for types that have equality but
	// cannot be handled by the standard algorithms,
	// so t must be either an array or a struct.
	switch t.Etype {
	default:
		Fatalf("geneq %v", t)

	case TARRAY:
		nelem := t.NumElem()

		// checkAll generates code to check the equality of all array elements.
		// If unroll is greater than nelem, checkAll generates:
		//
		// if eq(p[0], q[0]) && eq(p[1], q[1]) && ... {
		// } else {
		//   return
		// }
		//
		// And so on.
		//
		// Otherwise it generates:
		//
		// for i := 0; i < nelem; i++ {
		//   if eq(p[i], q[i]) {
		//   } else {
		//     goto neq
		//   }
		// }
		//
		// TODO(josharian): consider doing some loop unrolling
		// for larger nelem as well, processing a few elements at a time in a loop.
		checkAll := func(unroll int64, last bool, eq func(pi, qi *Node) *Node) {
			// checkIdx generates a node to check for equality at index i.
			checkIdx := func(i *Node) *Node {
				// pi := p[i]
				pi := nod(OINDEX, np, i)
				pi.SetBounded(true)
				pi.Type = t.Elem()
				// qi := q[i]
				qi := nod(OINDEX, nq, i)
				qi.SetBounded(true)
				qi.Type = t.Elem()
				return eq(pi, qi)
			}

			if nelem <= unroll {
				if last {
					// Do last comparison in a different manner.
					nelem--
				}
				// Generate a series of checks.
				for i := int64(0); i < nelem; i++ {
					// if check {} else { goto neq }
					nif := nod(OIF, checkIdx(nodintconst(i)), nil)
					nif.Rlist.Append(nodSym(OGOTO, nil, neq))
					fn.Nbody.Append(nif)
				}
				if last {
					fn.Nbody.Append(nod(OAS, nr, checkIdx(nodintconst(nelem))))
				}
			} else {
				// Generate a for loop.
				// for i := 0; i < nelem; i++
				i := temp(types.Types[TINT])
				init := nod(OAS, i, nodintconst(0))
				cond := nod(OLT, i, nodintconst(nelem))
				post := nod(OAS, i, nod(OADD, i, nodintconst(1)))
				loop := nod(OFOR, cond, post)
				loop.Ninit.Append(init)
				// if eq(pi, qi) {} else { goto neq }
				nif := nod(OIF, checkIdx(i), nil)
				nif.Rlist.Append(nodSym(OGOTO, nil, neq))
				loop.Nbody.Append(nif)
				fn.Nbody.Append(loop)
				if last {
					fn.Nbody.Append(nod(OAS, nr, nodbool(true)))
				}
			}
		}

		switch t.Elem().Etype {
		case TSTRING:
			// Do two loops. First, check that all the lengths match (cheap).
			// Second, check that all the contents match (expensive).
			// TODO: when the array size is small, unroll the length match checks.
			checkAll(3, false, func(pi, qi *Node) *Node {
				// Compare lengths.
				eqlen, _ := eqstring(pi, qi)
				return eqlen
			})
			checkAll(1, true, func(pi, qi *Node) *Node {
				// Compare contents.
				_, eqmem := eqstring(pi, qi)
				return eqmem
			})
		case TFLOAT32, TFLOAT64:
			checkAll(2, true, func(pi, qi *Node) *Node {
				// p[i] == q[i]
				return nod(OEQ, pi, qi)
			})
		// TODO: pick apart structs, do them piecemeal too
		default:
			checkAll(1, true, func(pi, qi *Node) *Node {
				// p[i] == q[i]
				return nod(OEQ, pi, qi)
			})
		}

	case TSTRUCT:
		// Build a list of conditions to satisfy.
		// The conditions are a list-of-lists. Conditions are reorderable
		// within each inner list. The outer lists must be evaluated in order.
		var conds [][]*Node
		conds = append(conds, []*Node{})
		and := func(n *Node) {
			i := len(conds) - 1
			conds[i] = append(conds[i], n)
		}

		// Walk the struct using memequal for runs of AMEM
		// and calling specific equality tests for the others.
		for i, fields := 0, t.FieldSlice(); i < len(fields); {
			f := fields[i]

			// Skip blank-named fields.
			if f.Sym.IsBlank() {
				i++
				continue
			}

			// Compare non-memory fields with field equality.
			if !IsRegularMemory(f.Type) {
				if EqCanPanic(f.Type) {
					// Enforce ordering by starting a new set of reorderable conditions.
					conds = append(conds, []*Node{})
				}
				p := nodSym(OXDOT, np, f.Sym)
				q := nodSym(OXDOT, nq, f.Sym)
				switch {
				case f.Type.IsString():
					eqlen, eqmem := eqstring(p, q)
					and(eqlen)
					and(eqmem)
				default:
					and(nod(OEQ, p, q))
				}
				if EqCanPanic(f.Type) {
					// Also enforce ordering after something that can panic.
					conds = append(conds, []*Node{})
				}
				i++
				continue
			}

			// Find maximal length run of memory-only fields.
			size, next := memrun(t, i)

			// TODO(rsc): All the calls to newname are wrong for
			// cross-package unexported fields.
			if s := fields[i:next]; len(s) <= 2 {
				// Two or fewer fields: use plain field equality.
				for _, f := range s {
					and(eqfield(np, nq, f.Sym))
				}
			} else {
				// More than two fields: use memequal.
				and(eqmem(np, nq, f.Sym, size))
			}
			i = next
		}

		// Sort conditions to put runtime calls last.
		// Preserve the rest of the ordering.
		var flatConds []*Node
		for _, c := range conds {
			isCall := func(n *Node) bool {
				return n.Op == OCALL || n.Op == OCALLFUNC
			}
			sort.SliceStable(c, func(i, j int) bool {
				return !isCall(c[i]) && isCall(c[j])
			})
			flatConds = append(flatConds, c...)
		}

		if len(flatConds) == 0 {
			fn.Nbody.Append(nod(OAS, nr, nodbool(true)))
|
||||
} else {
|
||||
for _, c := range flatConds[:len(flatConds)-1] {
|
||||
// if cond {} else { goto neq }
|
||||
n := nod(OIF, c, nil)
|
||||
n.Rlist.Append(nodSym(OGOTO, nil, neq))
|
||||
fn.Nbody.Append(n)
|
||||
}
|
||||
fn.Nbody.Append(nod(OAS, nr, flatConds[len(flatConds)-1]))
|
||||
}
|
||||
}
|
||||
|
||||
// ret:
|
||||
// return
|
||||
ret := autolabel(".ret")
|
||||
fn.Nbody.Append(nodSym(OLABEL, nil, ret))
|
||||
fn.Nbody.Append(nod(ORETURN, nil, nil))
|
||||
|
||||
// neq:
|
||||
// r = false
|
||||
// return (or goto ret)
|
||||
fn.Nbody.Append(nodSym(OLABEL, nil, neq))
|
||||
fn.Nbody.Append(nod(OAS, nr, nodbool(false)))
|
||||
if EqCanPanic(t) || hasCall(fn) {
|
||||
// Epilogue is large, so share it with the equal case.
|
||||
fn.Nbody.Append(nodSym(OGOTO, nil, ret))
|
||||
} else {
|
||||
// Epilogue is small, so don't bother sharing.
|
||||
fn.Nbody.Append(nod(ORETURN, nil, nil))
|
||||
}
|
||||
// TODO(khr): the epilogue size detection condition above isn't perfect.
|
||||
// We should really do a generic CL that shares epilogues across
|
||||
// the board. See #24936.
|
||||
|
||||
if Debug.r != 0 {
|
||||
dumplist("geneq body", fn.Nbody)
|
||||
}
|
||||
|
||||
funcbody()
|
||||
|
||||
fn.Func.SetDupok(true)
|
||||
fn = typecheck(fn, ctxStmt)
|
||||
|
||||
Curfn = fn
|
||||
typecheckslice(fn.Nbody.Slice(), ctxStmt)
|
||||
Curfn = nil
|
||||
|
||||
if debug_dclstack != 0 {
|
||||
testdclstack()
|
||||
}
|
||||
|
||||
// Disable checknils while compiling this code.
|
||||
// We are comparing a struct or an array,
|
||||
// neither of which can be nil, and our comparisons
|
||||
// are shallow.
|
||||
fn.Func.SetNilCheckDisabled(true)
|
||||
xtop = append(xtop, fn)
|
||||
|
||||
// Generate a closure which points at the function we just generated.
|
||||
dsymptr(closure, 0, sym.Linksym(), 0)
|
||||
ggloblsym(closure, int32(Widthptr), obj.DUPOK|obj.RODATA)
|
||||
return closure
|
||||
}
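
// As an illustration only, for a hypothetical type (not part of this source)
//
//	type T struct {
//		a, b int64
//		s    string
//	}
//
// the generated .eq function behaves roughly like
//
//	func eqT(p, q *T) (r bool) {
//		return p.a == q.a && p.b == q.b &&
//			len(p.s) == len(q.s) &&
//			memequal(p.s.ptr, q.s.ptr, len(p.s))
//	}
//
// with the runtime memequal call sorted last so the cheap word
// comparisons can fail first.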

func hasCall(n *Node) bool {
	if n.Op == OCALL || n.Op == OCALLFUNC {
		return true
	}
	if n.Left != nil && hasCall(n.Left) {
		return true
	}
	if n.Right != nil && hasCall(n.Right) {
		return true
	}
	for _, x := range n.Ninit.Slice() {
		if hasCall(x) {
			return true
		}
	}
	for _, x := range n.Nbody.Slice() {
		if hasCall(x) {
			return true
		}
	}
	for _, x := range n.List.Slice() {
		if hasCall(x) {
			return true
		}
	}
	for _, x := range n.Rlist.Slice() {
		if hasCall(x) {
			return true
		}
	}
	return false
}

// eqfield returns the node
// 	p.field == q.field
func eqfield(p *Node, q *Node, field *types.Sym) *Node {
	nx := nodSym(OXDOT, p, field)
	ny := nodSym(OXDOT, q, field)
	ne := nod(OEQ, nx, ny)
	return ne
}

// eqstring returns the nodes
// 	len(s) == len(t)
// and
// 	memequal(s.ptr, t.ptr, len(s))
// which can be used to construct string equality comparison.
// eqlen must be evaluated before eqmem, and shortcircuiting is required.
func eqstring(s, t *Node) (eqlen, eqmem *Node) {
	s = conv(s, types.Types[TSTRING])
	t = conv(t, types.Types[TSTRING])
	sptr := nod(OSPTR, s, nil)
	tptr := nod(OSPTR, t, nil)
	slen := conv(nod(OLEN, s, nil), types.Types[TUINTPTR])
	tlen := conv(nod(OLEN, t, nil), types.Types[TUINTPTR])

	fn := syslook("memequal")
	fn = substArgTypes(fn, types.Types[TUINT8], types.Types[TUINT8])
	call := nod(OCALL, fn, nil)
	call.List.Append(sptr, tptr, slen.copy())
	call = typecheck(call, ctxExpr|ctxMultiOK)

	cmp := nod(OEQ, slen, tlen)
	cmp = typecheck(cmp, ctxExpr)
	cmp.Type = types.Types[TBOOL]
	return cmp, call
}
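
// For example, comparing "go" and "gopher" fails the eqlen test
// (2 != 6), and because the caller must short-circuit, the memequal
// call in eqmem is never made; only equal-length strings reach the
// byte comparison.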

// eqinterface returns the nodes
// 	s.tab == t.tab (or s.typ == t.typ, as appropriate)
// and
// 	ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate)
// which can be used to construct interface equality comparison.
// eqtab must be evaluated before eqdata, and shortcircuiting is required.
func eqinterface(s, t *Node) (eqtab, eqdata *Node) {
	if !types.Identical(s.Type, t.Type) {
		Fatalf("eqinterface %v %v", s.Type, t.Type)
	}
	// func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool)
	// func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool)
	var fn *Node
	if s.Type.IsEmptyInterface() {
		fn = syslook("efaceeq")
	} else {
		fn = syslook("ifaceeq")
	}

	stab := nod(OITAB, s, nil)
	ttab := nod(OITAB, t, nil)
	sdata := nod(OIDATA, s, nil)
	tdata := nod(OIDATA, t, nil)
	sdata.Type = types.Types[TUNSAFEPTR]
	tdata.Type = types.Types[TUNSAFEPTR]
	sdata.SetTypecheck(1)
	tdata.SetTypecheck(1)

	call := nod(OCALL, fn, nil)
	call.List.Append(stab, sdata, tdata)
	call = typecheck(call, ctxExpr|ctxMultiOK)

	cmp := nod(OEQ, stab, ttab)
	cmp = typecheck(cmp, ctxExpr)
	cmp.Type = types.Types[TBOOL]
	return cmp, call
}
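
// For example, when both operands hold the same dynamic type, the cheap
// eqtab pointer comparison succeeds and eqdata then calls ifaceeq or
// efaceeq to compare the data; it is that runtime call which panics with
// "comparing uncomparable type" for uncomparable dynamic types, which is
// one reason eqtab must be evaluated first.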

// eqmem returns the node
// 	memequal(&p.field, &q.field [, size])
func eqmem(p *Node, q *Node, field *types.Sym, size int64) *Node {
	nx := nod(OADDR, nodSym(OXDOT, p, field), nil)
	ny := nod(OADDR, nodSym(OXDOT, q, field), nil)
	nx = typecheck(nx, ctxExpr)
	ny = typecheck(ny, ctxExpr)

	fn, needsize := eqmemfunc(size, nx.Type.Elem())
	call := nod(OCALL, fn, nil)
	call.List.Append(nx)
	call.List.Append(ny)
	if needsize {
		call.List.Append(nodintconst(size))
	}

	return call
}

func eqmemfunc(size int64, t *types.Type) (fn *Node, needsize bool) {
	switch size {
	default:
		fn = syslook("memequal")
		needsize = true
	case 1, 2, 4, 8, 16:
		buf := fmt.Sprintf("memequal%d", int(size)*8)
		fn = syslook(buf)
	}

	fn = substArgTypes(fn, t, t)
	return fn, needsize
}
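
// For example, a 16-byte field run selects the fixed-size helper
// memequal128(p, q), while a 24-byte run falls back to the generic
// memequal(p, q, 24), signalled by needsize = true.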

// memrun finds runs of struct fields for which memory-only algs are appropriate.
// t is the parent struct type, and start is the field index at which to start the run.
// size is the length in bytes of the memory included in the run.
// next is the index just after the end of the memory run.
func memrun(t *types.Type, start int) (size int64, next int) {
	next = start
	for {
		next++
		if next == t.NumFields() {
			break
		}
		// Stop run after a padded field.
		if ispaddedfield(t, next-1) {
			break
		}
		// Also, stop before a blank or non-memory field.
		if f := t.Field(next); f.Sym.IsBlank() || !IsRegularMemory(f.Type) {
			break
		}
	}
	return t.Field(next-1).End() - t.Field(start).Offset, next
}
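
// For a hypothetical struct (illustration only, not part of this source)
//
//	type T struct {
//		a, b int32
//		s    string
//	}
//
// memrun(T, 0) returns size = 8 and next = 2: a and b form a single
// 8-byte memory-only run, and the run stops before s because strings
// are not regular memory.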

// ispaddedfield reports whether the i'th field of struct type t is followed
// by padding.
func ispaddedfield(t *types.Type, i int) bool {
	if !t.IsStruct() {
		Fatalf("ispaddedfield called non-struct %v", t)
	}
	end := t.Width
	if i+1 < t.NumFields() {
		end = t.Field(i + 1).Offset
	}
	return t.Field(i).End() != end
}
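
// For example, on a 64-bit target, in
//
//	type S struct {
//		a byte
//		b int64
//	}
//
// ispaddedfield(S, 0) reports true: a ends at offset 1 but b starts at
// offset 8, so seven bytes of padding follow a.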
@@ -1,6 +1,6 @@
-// Code generated by "stringer -type AlgKind -trimprefix A alg.go"; DO NOT EDIT.
+// Code generated by "stringer -type AlgKind -trimprefix A"; DO NOT EDIT.
 
-package types
+package gc
 
 import "strconv"
 
@@ -2,64 +2,18 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package types
+package gc
 
 import (
 	"bytes"
+	"cmd/compile/internal/types"
 	"fmt"
 	"sort"
-
-	"cmd/compile/internal/base"
-	"cmd/internal/src"
 )
 
-var PtrSize int
-
-var RegSize int
-
-// Slices in the runtime are represented by three components:
-//
-// type slice struct {
-// 	ptr unsafe.Pointer
-// 	len int
-// 	cap int
-// }
-//
-// Strings in the runtime are represented by two components:
-//
-// type string struct {
-// 	ptr unsafe.Pointer
-// 	len int
-// }
-//
-// These variables are the offsets of fields and sizes of these structs.
-var (
-	SlicePtrOffset int64
-	SliceLenOffset int64
-	SliceCapOffset int64
-
-	SliceSize  int64
-	StringSize int64
-)
-
-var SkipSizeForTracing bool
-
-// typePos returns the position associated with t.
-// This is where t was declared or where it appeared as a type expression.
-func typePos(t *Type) src.XPos {
-	if pos := t.Pos(); pos.IsKnown() {
-		return pos
-	}
-	base.Fatalf("bad type: %v", t)
-	panic("unreachable")
-}
-
-// MaxWidth is the maximum size of a value on the target architecture.
-var MaxWidth int64
-
-// CalcSizeDisabled indicates whether it is safe
-// to calculate Types' widths and alignments. See CalcSize.
-var CalcSizeDisabled bool
+// sizeCalculationDisabled indicates whether it is safe
+// to calculate Types' widths and alignments. See dowidth.
+var sizeCalculationDisabled bool
 
 // machine size and rounding alignment is dictated around
 // the size of a pointer, set in betypeinit (see ../amd64/galign.go).
@@ -67,25 +21,25 @@ var defercalc int
 
 func Rnd(o int64, r int64) int64 {
 	if r < 1 || r > 8 || r&(r-1) != 0 {
-		base.Fatalf("rnd %d", r)
+		Fatalf("rnd %d", r)
 	}
 	return (o + r - 1) &^ (r - 1)
 }
 
 // expandiface computes the method set for interface type t by
 // expanding embedded interfaces.
-func expandiface(t *Type) {
-	seen := make(map[*Sym]*Field)
-	var methods []*Field
+func expandiface(t *types.Type) {
+	seen := make(map[*types.Sym]*types.Field)
+	var methods []*types.Field
 
-	addMethod := func(m *Field, explicit bool) {
+	addMethod := func(m *types.Field, explicit bool) {
 		switch prev := seen[m.Sym]; {
 		case prev == nil:
 			seen[m.Sym] = m
-		case AllowsGoVersion(t.Pkg(), 1, 14) && !explicit && Identical(m.Type, prev.Type):
+		case langSupported(1, 14, t.Pkg()) && !explicit && types.Identical(m.Type, prev.Type):
 			return
 		default:
-			base.ErrorfAt(m.Pos, "duplicate method %s", m.Sym.Name)
+			yyerrorl(m.Pos, "duplicate method %s", m.Sym.Name)
 		}
 		methods = append(methods, m)
 	}
@@ -95,7 +49,7 @@ func expandiface(t *Type) {
 			continue
 		}
 
-		CheckSize(m.Type)
+		checkwidth(m.Type)
 		addMethod(m, true)
 	}
 
@@ -105,7 +59,7 @@ func expandiface(t *Type) {
 		}
 
 		if !m.Type.IsInterface() {
-			base.ErrorfAt(m.Pos, "interface contains embedded non-interface %v", m.Type)
+			yyerrorl(m.Pos, "interface contains embedded non-interface %v", m.Type)
 			m.SetBroke(true)
 			t.SetBroke(true)
 			// Add to fields so that error messages
@@ -120,27 +74,30 @@ func expandiface(t *Type) {
 		// (including broken ones, if any) and add to t's
 		// method set.
 		for _, t1 := range m.Type.Fields().Slice() {
-			// Use m.Pos rather than t1.Pos to preserve embedding position.
-			f := NewField(m.Pos, t1.Sym, t1.Type)
+			f := types.NewField()
+			f.Pos = m.Pos // preserve embedding position
+			f.Sym = t1.Sym
+			f.Type = t1.Type
+			f.SetBroke(t1.Broke())
 			addMethod(f, false)
 		}
 	}
 
-	sort.Sort(MethodsByName(methods))
+	sort.Sort(methcmp(methods))
 
-	if int64(len(methods)) >= MaxWidth/int64(PtrSize) {
-		base.ErrorfAt(typePos(t), "interface too large")
+	if int64(len(methods)) >= thearch.MAXWIDTH/int64(Widthptr) {
+		yyerrorl(typePos(t), "interface too large")
 	}
 	for i, m := range methods {
-		m.Offset = int64(i) * int64(PtrSize)
+		m.Offset = int64(i) * int64(Widthptr)
 	}
 
-	// Access fields directly to avoid recursively calling CalcSize
+	// Access fields directly to avoid recursively calling dowidth
 	// within Type.Fields().
-	t.Extra.(*Interface).Fields.Set(methods)
+	t.Extra.(*types.Interface).Fields.Set(methods)
 }
 
-func calcStructOffset(errtype *Type, t *Type, o int64, flag int) int64 {
+func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 {
 	starto := o
 	maxalign := int32(flag)
 	if maxalign < 1 {
@@ -154,7 +111,7 @@ func calcStructOffset(errtype *Type, t *Type, o int64, flag int) int64 {
 			continue
 		}
 
-		CalcSize(f.Type)
+		dowidth(f.Type)
 		if int32(f.Type.Align) > maxalign {
 			maxalign = int32(f.Type.Align)
 		}
@@ -162,33 +119,38 @@ func calcStructOffset(errtype *Type, t *Type, o int64, flag int) int64 {
 			o = Rnd(o, int64(f.Type.Align))
 		}
 		f.Offset = o
-		if f.Nname != nil {
+		if n := asNode(f.Nname); n != nil {
 			// addrescapes has similar code to update these offsets.
-			// Usually addrescapes runs after calcStructOffset,
+			// Usually addrescapes runs after widstruct,
 			// in which case we could drop this,
 			// but function closure functions are the exception.
-			// NOTE(rsc): This comment may be stale.
-			// It's possible the ordering has changed and this is
-			// now the common case. I'm not sure.
-			f.Nname.(VarObject).RecordFrameOffset(o)
+			if n.Name.Param.Stackcopy != nil {
+				n.Name.Param.Stackcopy.Xoffset = o
+				n.Xoffset = 0
+			} else {
+				n.Xoffset = o
+			}
 		}
 
 		w := f.Type.Width
 		if w < 0 {
-			base.Fatalf("invalid width %d", f.Type.Width)
+			Fatalf("invalid width %d", f.Type.Width)
 		}
 		if w == 0 {
 			lastzero = o
 		}
 		o += w
-		maxwidth := MaxWidth
+		maxwidth := thearch.MAXWIDTH
 		// On 32-bit systems, reflect tables impose an additional constraint
 		// that each field start offset must fit in 31 bits.
 		if maxwidth < 1<<32 {
 			maxwidth = 1<<31 - 1
 		}
 		if o >= maxwidth {
-			base.ErrorfAt(typePos(errtype), "type %L too large", errtype)
+			yyerrorl(typePos(errtype), "type %L too large", errtype)
 			o = 8 // small but nonzero
 		}
 	}
@@ -220,22 +182,15 @@ func calcStructOffset(errtype *Type, t *Type, o int64, flag int) int64 {
 // path points to a slice used for tracking the sequence of types
 // visited. Using a pointer to a slice allows the slice capacity to
 // grow and limit reallocations.
-func findTypeLoop(t *Type, path *[]*Type) bool {
+func findTypeLoop(t *types.Type, path *[]*types.Type) bool {
 	// We implement a simple DFS loop-finding algorithm. This
 	// could be faster, but type cycles are rare.
 
-	if t.Sym() != nil {
+	if t.Sym != nil {
 		// Declared type. Check for loops and otherwise
 		// recurse on the type expression used in the type
 		// declaration.
 
-		// Type imported from package, so it can't be part of
-		// a type loop (otherwise that package should have
-		// failed to compile).
-		if t.Sym().Pkg != LocalPkg {
-			return false
-		}
-
 		for i, x := range *path {
 			if x == t {
 				*path = (*path)[i:]
@@ -244,14 +199,14 @@ func findTypeLoop(t *Type, path *[]*Type) bool {
 		}
 
 		*path = append(*path, t)
-		if findTypeLoop(t.Obj().(TypeObject).TypeDefn(), path) {
+		if p := asNode(t.Nod).Name.Param; p != nil && findTypeLoop(p.Ntype.Type, path) {
 			return true
 		}
 		*path = (*path)[:len(*path)-1]
 	} else {
 		// Anonymous type. Recurse on contained types.
 
-		switch t.Kind() {
+		switch t.Etype {
 		case TARRAY:
 			if findTypeLoop(t.Elem(), path) {
 				return true
@@ -276,14 +231,14 @@ func findTypeLoop(t *Type, path *[]*Type) bool {
 	return false
 }
 
-func reportTypeLoop(t *Type) {
+func reportTypeLoop(t *types.Type) {
 	if t.Broke() {
 		return
 	}
 
-	var l []*Type
+	var l []*types.Type
 	if !findTypeLoop(t, &l) {
-		base.Fatalf("failed to find type loop for: %v", t)
+		Fatalf("failed to find type loop for: %v", t)
 	}
 
 	// Rotate loop so that the earliest type declaration is first.
@@ -298,26 +253,25 @@ func reportTypeLoop(t *Type) {
 	var msg bytes.Buffer
 	fmt.Fprintf(&msg, "invalid recursive type %v\n", l[0])
 	for _, t := range l {
-		fmt.Fprintf(&msg, "\t%v: %v refers to\n", base.FmtPos(typePos(t)), t)
+		fmt.Fprintf(&msg, "\t%v: %v refers to\n", linestr(typePos(t)), t)
 		t.SetBroke(true)
 	}
-	fmt.Fprintf(&msg, "\t%v: %v", base.FmtPos(typePos(l[0])), l[0])
-	base.ErrorfAt(typePos(l[0]), msg.String())
+	fmt.Fprintf(&msg, "\t%v: %v", linestr(typePos(l[0])), l[0])
+	yyerrorl(typePos(l[0]), msg.String())
 }
 
-// CalcSize calculates and stores the size and alignment for t.
-// If CalcSizeDisabled is set, and the size/alignment
+// dowidth calculates and stores the size and alignment for t.
+// If sizeCalculationDisabled is set, and the size/alignment
 // have not already been calculated, it calls Fatal.
 // This is used to prevent data races in the back end.
-func CalcSize(t *Type) {
-	// Calling CalcSize when typecheck tracing enabled is not safe.
+func dowidth(t *types.Type) {
+	// Calling dowidth when typecheck tracing enabled is not safe.
 	// See issue #33658.
-	if base.EnableTrace && SkipSizeForTracing {
+	if enableTrace && skipDowidthForTracing {
 		return
 	}
-	if PtrSize == 0 {
-		// Assume this is a test.
-		return
+	if Widthptr == 0 {
+		Fatalf("dowidth without betypeinit")
 	}
 
 	if t == nil {
@@ -335,13 +289,13 @@ func CalcSize(t *Type) {
 		return
 	}
 
-	if CalcSizeDisabled {
+	if sizeCalculationDisabled {
 		if t.Broke() {
 			// break infinite recursion from Fatal call below
 			return
 		}
 		t.SetBroke(true)
-		base.Fatalf("width not calculated: %v", t)
+		Fatalf("width not calculated: %v", t)
 	}
 
 	// break infinite recursion if the broken recursive type
@@ -350,33 +304,33 @@ func CalcSize(t *Type) {
 		return
 	}
 
-	// defer CheckSize calls until after we're done
-	DeferCheckSize()
+	// defer checkwidth calls until after we're done
+	defercheckwidth()
 
-	lno := base.Pos
-	if pos := t.Pos(); pos.IsKnown() {
-		base.Pos = pos
+	lno := lineno
+	if asNode(t.Nod) != nil {
+		lineno = asNode(t.Nod).Pos
 	}
 
 	t.Width = -2
 	t.Align = 0 // 0 means use t.Width, below
 
-	et := t.Kind()
+	et := t.Etype
 	switch et {
 	case TFUNC, TCHAN, TMAP, TSTRING:
 		break
 
-	// SimType == 0 during bootstrap
+	// simtype == 0 during bootstrap
 	default:
-		if SimType[t.Kind()] != 0 {
-			et = SimType[t.Kind()]
+		if simtype[t.Etype] != 0 {
+			et = simtype[t.Etype]
 		}
 	}
 
 	var w int64
 	switch et {
 	default:
-		base.Fatalf("CalcSize: unknown type: %v", t)
+		Fatalf("dowidth: unknown type: %v", t)
 
 	// compiler-specific stuff
 	case TINT8, TUINT8, TBOOL:
@@ -391,7 +345,7 @@ func CalcSize(t *Type) {
 
 	case TINT64, TUINT64, TFLOAT64:
 		w = 8
-		t.Align = uint8(RegSize)
+		t.Align = uint8(Widthreg)
 
 	case TCOMPLEX64:
 		w = 8
@@ -399,68 +353,68 @@ func CalcSize(t *Type) {
 
 	case TCOMPLEX128:
 		w = 16
-		t.Align = uint8(RegSize)
+		t.Align = uint8(Widthreg)
 
 	case TPTR:
-		w = int64(PtrSize)
-		CheckSize(t.Elem())
+		w = int64(Widthptr)
+		checkwidth(t.Elem())
 
 	case TUNSAFEPTR:
-		w = int64(PtrSize)
+		w = int64(Widthptr)
 
 	case TINTER: // implemented as 2 pointers
-		w = 2 * int64(PtrSize)
-		t.Align = uint8(PtrSize)
+		w = 2 * int64(Widthptr)
+		t.Align = uint8(Widthptr)
 		expandiface(t)
 
 	case TCHAN: // implemented as pointer
-		w = int64(PtrSize)
+		w = int64(Widthptr)
 
-		CheckSize(t.Elem())
+		checkwidth(t.Elem())
 
 		// make fake type to check later to
 		// trigger channel argument check.
-		t1 := NewChanArgs(t)
-		CheckSize(t1)
+		t1 := types.NewChanArgs(t)
+		checkwidth(t1)
 
 	case TCHANARGS:
 		t1 := t.ChanArgs()
-		CalcSize(t1) // just in case
+		dowidth(t1) // just in case
 		if t1.Elem().Width >= 1<<16 {
-			base.ErrorfAt(typePos(t1), "channel element type too large (>64kB)")
+			yyerrorl(typePos(t1), "channel element type too large (>64kB)")
 		}
 		w = 1 // anything will do
 
 	case TMAP: // implemented as pointer
-		w = int64(PtrSize)
-		CheckSize(t.Elem())
-		CheckSize(t.Key())
+		w = int64(Widthptr)
+		checkwidth(t.Elem())
+		checkwidth(t.Key())
 
 	case TFORW: // should have been filled in
		reportTypeLoop(t)
 		w = 1 // anything will do
 
 	case TANY:
-		// not a real type; should be replaced before use.
-		base.Fatalf("CalcSize any")
+		// dummy type; should be replaced before use.
+		Fatalf("dowidth any")
 
 	case TSTRING:
-		if StringSize == 0 {
-			base.Fatalf("early CalcSize string")
+		if sizeofString == 0 {
+			Fatalf("early dowidth string")
 		}
-		w = StringSize
-		t.Align = uint8(PtrSize)
+		w = sizeofString
+		t.Align = uint8(Widthptr)
 
 	case TARRAY:
 		if t.Elem() == nil {
 			break
 		}
 
-		CalcSize(t.Elem())
+		dowidth(t.Elem())
 		if t.Elem().Width != 0 {
-			cap := (uint64(MaxWidth) - 1) / uint64(t.Elem().Width)
+			cap := (uint64(thearch.MAXWIDTH) - 1) / uint64(t.Elem().Width)
 			if uint64(t.NumElem()) > cap {
-				base.ErrorfAt(typePos(t), "type %L larger than address space", t)
+				yyerrorl(typePos(t), "type %L larger than address space", t)
 			}
 		}
 		w = t.NumElem() * t.Elem().Width
@@ -470,62 +424,55 @@ func CalcSize(t *Type) {
 		if t.Elem() == nil {
 			break
 		}
-		w = SliceSize
-		CheckSize(t.Elem())
-		t.Align = uint8(PtrSize)
+		w = sizeofSlice
+		checkwidth(t.Elem())
+		t.Align = uint8(Widthptr)
 
 	case TSTRUCT:
 		if t.IsFuncArgStruct() {
-			base.Fatalf("CalcSize fn struct %v", t)
+			Fatalf("dowidth fn struct %v", t)
 		}
-		w = calcStructOffset(t, t, 0, 1)
+		w = widstruct(t, t, 0, 1)
 
 	// make fake type to check later to
 	// trigger function argument computation.
 	case TFUNC:
-		t1 := NewFuncArgs(t)
-		CheckSize(t1)
-		w = int64(PtrSize) // width of func type is pointer
+		t1 := types.NewFuncArgs(t)
+		checkwidth(t1)
+		w = int64(Widthptr) // width of func type is pointer
 
 	// function is 3 cated structures;
 	// compute their widths as side-effect.
 	case TFUNCARGS:
 		t1 := t.FuncArgs()
-		w = calcStructOffset(t1, t1.Recvs(), 0, 0)
-		w = calcStructOffset(t1, t1.Params(), w, RegSize)
-		w = calcStructOffset(t1, t1.Results(), w, RegSize)
-		t1.Extra.(*Func).Argwid = w
-		if w%int64(RegSize) != 0 {
-			base.Warn("bad type %v %d\n", t1, w)
+		w = widstruct(t1, t1.Recvs(), 0, 0)
+		w = widstruct(t1, t1.Params(), w, Widthreg)
+		w = widstruct(t1, t1.Results(), w, Widthreg)
+		t1.Extra.(*types.Func).Argwid = w
+		if w%int64(Widthreg) != 0 {
+			Warn("bad type %v %d\n", t1, w)
 		}
 		t.Align = 1
 	}
 
-	if PtrSize == 4 && w != int64(int32(w)) {
-		base.ErrorfAt(typePos(t), "type %v too large", t)
+	if Widthptr == 4 && w != int64(int32(w)) {
+		yyerrorl(typePos(t), "type %v too large", t)
 	}
 
 	t.Width = w
 	if t.Align == 0 {
 		if w == 0 || w > 8 || w&(w-1) != 0 {
-			base.Fatalf("invalid alignment for %v", t)
+			Fatalf("invalid alignment for %v", t)
 		}
 		t.Align = uint8(w)
 	}
 
-	base.Pos = lno
+	lineno = lno
 
-	ResumeCheckSize()
+	resumecheckwidth()
 }
 
-// CalcStructSize calculates the size of s,
-// filling in s.Width and s.Align,
-// even if size calculation is otherwise disabled.
-func CalcStructSize(s *Type) {
-	s.Width = calcStructOffset(s, s, 0, 1) // sets align
-}
-
-// when a type's width should be known, we call CheckSize
+// when a type's width should be known, we call checkwidth
 // to compute it. during a declaration like
 //
 //	type T *struct { next T }
@@ -534,16 +481,16 @@ func CalcStructSize(s *Type) {
 // until after T has been initialized to be a pointer to that struct.
 // similarly, during import processing structs may be used
 // before their definition. in those situations, calling
-// DeferCheckSize() stops width calculations until
-// ResumeCheckSize() is called, at which point all the
-// CalcSizes that were deferred are executed.
-// CalcSize should only be called when the type's size
-// is needed immediately. CheckSize makes sure the
+// defercheckwidth() stops width calculations until
+// resumecheckwidth() is called, at which point all the
+// checkwidths that were deferred are executed.
+// dowidth should only be called when the type's size
+// is needed immediately. checkwidth makes sure the
 // size is evaluated eventually.
 
-var deferredTypeStack []*Type
+var deferredTypeStack []*types.Type
 
-func CheckSize(t *Type) {
+func checkwidth(t *types.Type) {
 	if t == nil {
 		return
 	}
@@ -551,11 +498,11 @@ func CheckSize(t *Type) {
 	// function arg structs should not be checked
 	// outside of the enclosing function.
 	if t.IsFuncArgStruct() {
-		base.Fatalf("CheckSize %v", t)
+		Fatalf("checkwidth %v", t)
 	}
 
 	if defercalc == 0 {
-		CalcSize(t)
+		dowidth(t)
 		return
 	}
 
@@ -566,68 +513,19 @@ func CheckSize(t *Type) {
 	}
 }
 
-func DeferCheckSize() {
+func defercheckwidth() {
 	defercalc++
 }
 
-func ResumeCheckSize() {
+func resumecheckwidth() {
 	if defercalc == 1 {
 		for len(deferredTypeStack) > 0 {
 			t := deferredTypeStack[len(deferredTypeStack)-1]
 			deferredTypeStack = deferredTypeStack[:len(deferredTypeStack)-1]
 			t.SetDeferwidth(false)
-			CalcSize(t)
+			dowidth(t)
 		}
 	}
 
 	defercalc--
 }
-
-// PtrDataSize returns the length in bytes of the prefix of t
-// containing pointer data. Anything after this offset is scalar data.
-func PtrDataSize(t *Type) int64 {
-	if !t.HasPointers() {
-		return 0
-	}
-
-	switch t.Kind() {
-	case TPTR,
-		TUNSAFEPTR,
-		TFUNC,
-		TCHAN,
-		TMAP:
-		return int64(PtrSize)
-
-	case TSTRING:
-		// struct { byte *str; intgo len; }
-		return int64(PtrSize)
-
-	case TINTER:
-		// struct { Itab *tab; void *data; } or
-		// struct { Type *type; void *data; }
-		// Note: see comment in typebits.Set
-		return 2 * int64(PtrSize)
-
-	case TSLICE:
-		// struct { byte *array; uintgo len; uintgo cap; }
-		return int64(PtrSize)
-
-	case TARRAY:
-		// haspointers already eliminated t.NumElem() == 0.
-		return (t.NumElem()-1)*t.Elem().Width + PtrDataSize(t.Elem())
-
-	case TSTRUCT:
-		// Find the last field that has pointers.
-		var lastPtrField *Field
-		for _, t1 := range t.Fields().Slice() {
-			if t1.Type.HasPointers() {
-				lastPtrField = t1
-			}
-		}
-		return lastPtrField.Offset + PtrDataSize(lastPtrField.Type)
-
-	default:
-		base.Fatalf("PtrDataSize: unexpected type, %v", t)
-		return 0
-	}
-}
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package test
+package gc
 
 import "testing"
 
src/cmd/compile/internal/gc/bexport.go (new file)
@@ -0,0 +1,177 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/compile/internal/types"
)

type exporter struct {
	marked map[*types.Type]bool // types already seen by markType
}

// markType recursively visits types reachable from t to identify
// functions whose inline bodies may be needed.
func (p *exporter) markType(t *types.Type) {
	if p.marked[t] {
		return
	}
	p.marked[t] = true

	// If this is a named type, mark all of its associated
	// methods. Skip interface types because t.Methods contains
	// only their unexpanded method set (i.e., exclusive of
	// interface embeddings), and the switch statement below
	// handles their full method set.
	if t.Sym != nil && t.Etype != TINTER {
		for _, m := range t.Methods().Slice() {
			if types.IsExported(m.Sym.Name) {
				p.markType(m.Type)
			}
		}
	}

	// Recursively mark any types that can be produced given a
	// value of type t: dereferencing a pointer; indexing or
	// iterating over an array, slice, or map; receiving from a
	// channel; accessing a struct field or interface method; or
	// calling a function.
	//
	// Notably, we don't mark function parameter types, because
	// the user already needs some way to construct values of
	// those types.
	switch t.Etype {
	case TPTR, TARRAY, TSLICE:
		p.markType(t.Elem())

	case TCHAN:
		if t.ChanDir().CanRecv() {
			p.markType(t.Elem())
		}

	case TMAP:
		p.markType(t.Key())
		p.markType(t.Elem())

	case TSTRUCT:
		for _, f := range t.FieldSlice() {
			if types.IsExported(f.Sym.Name) || f.Embedded != 0 {
				p.markType(f.Type)
			}
		}

	case TFUNC:
		// If t is the type of a function or method, then
		// t.Nname() is its ONAME. Mark its inline body and
		// any recursively called functions for export.
		inlFlood(asNode(t.Nname()))

		for _, f := range t.Results().FieldSlice() {
			p.markType(f.Type)
		}

	case TINTER:
		for _, f := range t.FieldSlice() {
			if types.IsExported(f.Sym.Name) {
				p.markType(f.Type)
			}
		}
	}
}
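
// For example, given a hypothetical exported declaration
//
//	func NewBuf() *Buf
//
// markType reaches *Buf and then Buf, marking Buf's exported methods and
// fields (flooding inline bodies for any function types it meets along
// the way), while parameter types are deliberately left unmarked.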

// ----------------------------------------------------------------------------
// Export format

// Tags. Must be < 0.
const (
	// Objects
	packageTag = -(iota + 1)
	constTag
	typeTag
	varTag
	funcTag
	endTag

	// Types
	namedTag
	arrayTag
	sliceTag
	dddTag
	structTag
	pointerTag
	signatureTag
	interfaceTag
	mapTag
	chanTag

	// Values
	falseTag
	trueTag
	int64Tag
	floatTag
	fractionTag // not used by gc
	complexTag
	stringTag
	nilTag
	unknownTag // not used by gc (only appears in packages with errors)

	// Type aliases
	aliasTag
)
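
// Since the tags count down from -1, packageTag is -1, constTag is -2,
// and so on through aliasTag at -26.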

var predecl []*types.Type // initialized lazily

func predeclared() []*types.Type {
	if predecl == nil {
		// initialize lazily to be sure that all
		// elements have been initialized before
		predecl = []*types.Type{
			// basic types
			types.Types[TBOOL],
			types.Types[TINT],
			types.Types[TINT8],
			types.Types[TINT16],
			types.Types[TINT32],
			types.Types[TINT64],
			types.Types[TUINT],
			types.Types[TUINT8],
			types.Types[TUINT16],
			types.Types[TUINT32],
			types.Types[TUINT64],
			types.Types[TUINTPTR],
			types.Types[TFLOAT32],
			types.Types[TFLOAT64],
			types.Types[TCOMPLEX64],
			types.Types[TCOMPLEX128],
			types.Types[TSTRING],

			// basic type aliases
			types.Bytetype,
			types.Runetype,

			// error
			types.Errortype,

			// untyped types
			types.UntypedBool,
			types.UntypedInt,
			types.UntypedRune,
			types.UntypedFloat,
			types.UntypedComplex,
			types.UntypedString,
			types.Types[TNIL],

			// package unsafe
			types.Types[TUNSAFEPTR],

			// invalid type (package contains errors)
			types.Types[Txxx],

			// any type, for builtin export data
			types.Types[TANY],
		}
	}
	return predecl
}
src/cmd/compile/internal/gc/bimport.go (new file)
@@ -0,0 +1,24 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/internal/src"
)

// numImport tracks how often a package with a given name is imported.
// It is used to provide a better error message (by using the package
// path to disambiguate) if a package that appears multiple times with
// the same name appears in an error message.
var numImport = make(map[string]int)

func npos(pos src.XPos, n *Node) *Node {
	n.Pos = pos
	return n
}

func builtinCall(op Op) *Node {
	return nod(OCALL, mkname(builtinpkg.Lookup(goopnames[op])), nil)
}
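
// For example, builtinCall(OAPPEND) yields an OCALL node whose callee is
// the name "append" looked up in the builtin package.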
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package ir
+package gc
 
 type bitset8 uint8
 
@@ -14,18 +14,6 @@ func (f *bitset8) set(mask uint8, b bool) {
 	}
 }
 
-func (f bitset8) get2(shift uint8) uint8 {
-	return uint8(f>>shift) & 3
-}
-
-// set2 sets two bits in f using the bottom two bits of b.
-func (f *bitset8) set2(shift uint8, b uint8) {
-	// Clear old bits.
-	*(*uint8)(f) &^= 3 << shift
-	// Set new bits.
-	*(*uint8)(f) |= uint8(b&3) << shift
-}
-
 type bitset16 uint16
 
 func (f *bitset16) set(mask uint16, b bool) {
@@ -6,11 +6,8 @@
 
 package gc
 
-import (
-	"cmd/compile/internal/base"
-	"runtime"
-)
+import "runtime"
 
 func startMutexProfiling() {
-	base.Fatalf("mutex profiling unavailable in version %v", runtime.Version())
+	Fatalf("mutex profiling unavailable in version %v", runtime.Version())
 }
 
src/cmd/compile/internal/gc/builtin.go (new file)
@@ -0,0 +1,340 @@
// Code generated by mkbuiltin.go. DO NOT EDIT.

package gc

import "cmd/compile/internal/types"

var runtimeDecls = [...]struct {
	name string
	tag  int
	typ  int
}{
	{"newobject", funcTag, 4},
	{"mallocgc", funcTag, 8},
	{"panicdivide", funcTag, 9},
	{"panicshift", funcTag, 9},
	{"panicmakeslicelen", funcTag, 9},
	{"panicmakeslicecap", funcTag, 9},
	{"throwinit", funcTag, 9},
	{"panicwrap", funcTag, 9},
	{"gopanic", funcTag, 11},
	{"gorecover", funcTag, 14},
	{"goschedguarded", funcTag, 9},
	{"goPanicIndex", funcTag, 16},
	{"goPanicIndexU", funcTag, 18},
	{"goPanicSliceAlen", funcTag, 16},
	{"goPanicSliceAlenU", funcTag, 18},
	{"goPanicSliceAcap", funcTag, 16},
	{"goPanicSliceAcapU", funcTag, 18},
	{"goPanicSliceB", funcTag, 16},
	{"goPanicSliceBU", funcTag, 18},
	{"goPanicSlice3Alen", funcTag, 16},
	{"goPanicSlice3AlenU", funcTag, 18},
	{"goPanicSlice3Acap", funcTag, 16},
	{"goPanicSlice3AcapU", funcTag, 18},
	{"goPanicSlice3B", funcTag, 16},
	{"goPanicSlice3BU", funcTag, 18},
	{"goPanicSlice3C", funcTag, 16},
	{"goPanicSlice3CU", funcTag, 18},
	{"printbool", funcTag, 19},
	{"printfloat", funcTag, 21},
	{"printint", funcTag, 23},
	{"printhex", funcTag, 25},
	{"printuint", funcTag, 25},
	{"printcomplex", funcTag, 27},
	{"printstring", funcTag, 29},
	{"printpointer", funcTag, 30},
	{"printuintptr", funcTag, 31},
	{"printiface", funcTag, 30},
	{"printeface", funcTag, 30},
	{"printslice", funcTag, 30},
	{"printnl", funcTag, 9},
	{"printsp", funcTag, 9},
	{"printlock", funcTag, 9},
	{"printunlock", funcTag, 9},
	{"concatstring2", funcTag, 34},
	{"concatstring3", funcTag, 35},
	{"concatstring4", funcTag, 36},
	{"concatstring5", funcTag, 37},
	{"concatstrings", funcTag, 39},
	{"cmpstring", funcTag, 40},
	{"intstring", funcTag, 43},
	{"slicebytetostring", funcTag, 44},
	{"slicebytetostringtmp", funcTag, 45},
	{"slicerunetostring", funcTag, 48},
	{"stringtoslicebyte", funcTag, 50},
	{"stringtoslicerune", funcTag, 53},
	{"slicecopy", funcTag, 54},
	{"decoderune", funcTag, 55},
	{"countrunes", funcTag, 56},
	{"convI2I", funcTag, 57},
	{"convT16", funcTag, 58},
	{"convT32", funcTag, 58},
	{"convT64", funcTag, 58},
	{"convTstring", funcTag, 58},
	{"convTslice", funcTag, 58},
	{"convT2E", funcTag, 59},
	{"convT2Enoptr", funcTag, 59},
	{"convT2I", funcTag, 59},
	{"convT2Inoptr", funcTag, 59},
	{"assertE2I", funcTag, 57},
	{"assertE2I2", funcTag, 60},
	{"assertI2I", funcTag, 57},
	{"assertI2I2", funcTag, 60},
	{"panicdottypeE", funcTag, 61},
	{"panicdottypeI", funcTag, 61},
	{"panicnildottype", funcTag, 62},
	{"ifaceeq", funcTag, 64},
	{"efaceeq", funcTag, 64},
	{"fastrand", funcTag, 66},
	{"makemap64", funcTag, 68},
	{"makemap", funcTag, 69},
	{"makemap_small", funcTag, 70},
	{"mapaccess1", funcTag, 71},
	{"mapaccess1_fast32", funcTag, 72},
	{"mapaccess1_fast64", funcTag, 72},
	{"mapaccess1_faststr", funcTag, 72},
	{"mapaccess1_fat", funcTag, 73},
	{"mapaccess2", funcTag, 74},
	{"mapaccess2_fast32", funcTag, 75},
	{"mapaccess2_fast64", funcTag, 75},
	{"mapaccess2_faststr", funcTag, 75},
	{"mapaccess2_fat", funcTag, 76},
	{"mapassign", funcTag, 71},
	{"mapassign_fast32", funcTag, 72},
	{"mapassign_fast32ptr", funcTag, 72},
	{"mapassign_fast64", funcTag, 72},
	{"mapassign_fast64ptr", funcTag, 72},
	{"mapassign_faststr", funcTag, 72},
	{"mapiterinit", funcTag, 77},
	{"mapdelete", funcTag, 77},
	{"mapdelete_fast32", funcTag, 78},
	{"mapdelete_fast64", funcTag, 78},
	{"mapdelete_faststr", funcTag, 78},
	{"mapiternext", funcTag, 79},
	{"mapclear", funcTag, 80},
	{"makechan64", funcTag, 82},
	{"makechan", funcTag, 83},
	{"chanrecv1", funcTag, 85},
	{"chanrecv2", funcTag, 86},
	{"chansend1", funcTag, 88},
	{"closechan", funcTag, 30},
	{"writeBarrier", varTag, 90},
	{"typedmemmove", funcTag, 91},
	{"typedmemclr", funcTag, 92},
	{"typedslicecopy", funcTag, 93},
	{"selectnbsend", funcTag, 94},
	{"selectnbrecv", funcTag, 95},
	{"selectnbrecv2", funcTag, 97},
	{"selectsetpc", funcTag, 98},
	{"selectgo", funcTag, 99},
	{"block", funcTag, 9},
	{"makeslice", funcTag, 100},
	{"makeslice64", funcTag, 101},
	{"makeslicecopy", funcTag, 102},
	{"growslice", funcTag, 104},
	{"memmove", funcTag, 105},
	{"memclrNoHeapPointers", funcTag, 106},
	{"memclrHasPointers", funcTag, 106},
	{"memequal", funcTag, 107},
	{"memequal0", funcTag, 108},
	{"memequal8", funcTag, 108},
	{"memequal16", funcTag, 108},
	{"memequal32", funcTag, 108},
	{"memequal64", funcTag, 108},
	{"memequal128", funcTag, 108},
	{"f32equal", funcTag, 109},
	{"f64equal", funcTag, 109},
	{"c64equal", funcTag, 109},
	{"c128equal", funcTag, 109},
	{"strequal", funcTag, 109},
	{"interequal", funcTag, 109},
	{"nilinterequal", funcTag, 109},
	{"memhash", funcTag, 110},
	{"memhash0", funcTag, 111},
	{"memhash8", funcTag, 111},
	{"memhash16", funcTag, 111},
	{"memhash32", funcTag, 111},
	{"memhash64", funcTag, 111},
	{"memhash128", funcTag, 111},
	{"f32hash", funcTag, 111},
	{"f64hash", funcTag, 111},
	{"c64hash", funcTag, 111},
	{"c128hash", funcTag, 111},
	{"strhash", funcTag, 111},
	{"interhash", funcTag, 111},
	{"nilinterhash", funcTag, 111},
	{"int64div", funcTag, 112},
	{"uint64div", funcTag, 113},
	{"int64mod", funcTag, 112},
	{"uint64mod", funcTag, 113},
	{"float64toint64", funcTag, 114},
	{"float64touint64", funcTag, 115},
	{"float64touint32", funcTag, 116},
	{"int64tofloat64", funcTag, 117},
	{"uint64tofloat64", funcTag, 118},
	{"uint32tofloat64", funcTag, 119},
	{"complex128div", funcTag, 120},
	{"racefuncenter", funcTag, 31},
	{"racefuncenterfp", funcTag, 9},
	{"racefuncexit", funcTag, 9},
	{"raceread", funcTag, 31},
	{"racewrite", funcTag, 31},
	{"racereadrange", funcTag, 121},
	{"racewriterange", funcTag, 121},
	{"msanread", funcTag, 121},
	{"msanwrite", funcTag, 121},
	{"msanmove", funcTag, 122},
	{"checkptrAlignment", funcTag, 123},
	{"checkptrArithmetic", funcTag, 125},
	{"libfuzzerTraceCmp1", funcTag, 127},
	{"libfuzzerTraceCmp2", funcTag, 129},
	{"libfuzzerTraceCmp4", funcTag, 130},
	{"libfuzzerTraceCmp8", funcTag, 131},
	{"libfuzzerTraceConstCmp1", funcTag, 127},
	{"libfuzzerTraceConstCmp2", funcTag, 129},
	{"libfuzzerTraceConstCmp4", funcTag, 130},
	{"libfuzzerTraceConstCmp8", funcTag, 131},
	{"x86HasPOPCNT", varTag, 6},
	{"x86HasSSE41", varTag, 6},
	{"x86HasFMA", varTag, 6},
	{"armHasVFPv4", varTag, 6},
	{"arm64HasATOMICS", varTag, 6},
}
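
// For example, the first entry records that runtime.newobject has the
// signature described by typs[4] below, i.e. func(*byte) *any in
// builtin-declaration terms.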

func runtimeTypes() []*types.Type {
	var typs [132]*types.Type
	typs[0] = types.Bytetype
	typs[1] = types.NewPtr(typs[0])
	typs[2] = types.Types[TANY]
	typs[3] = types.NewPtr(typs[2])
	typs[4] = functype(nil, []*Node{anonfield(typs[1])}, []*Node{anonfield(typs[3])})
	typs[5] = types.Types[TUINTPTR]
	typs[6] = types.Types[TBOOL]
	typs[7] = types.Types[TUNSAFEPTR]
	typs[8] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[1]), anonfield(typs[6])}, []*Node{anonfield(typs[7])})
	typs[9] = functype(nil, nil, nil)
	typs[10] = types.Types[TINTER]
	typs[11] = functype(nil, []*Node{anonfield(typs[10])}, nil)
	typs[12] = types.Types[TINT32]
	typs[13] = types.NewPtr(typs[12])
	typs[14] = functype(nil, []*Node{anonfield(typs[13])}, []*Node{anonfield(typs[10])})
	typs[15] = types.Types[TINT]
	typs[16] = functype(nil, []*Node{anonfield(typs[15]), anonfield(typs[15])}, nil)
	typs[17] = types.Types[TUINT]
	typs[18] = functype(nil, []*Node{anonfield(typs[17]), anonfield(typs[15])}, nil)
	typs[19] = functype(nil, []*Node{anonfield(typs[6])}, nil)
	typs[20] = types.Types[TFLOAT64]
	typs[21] = functype(nil, []*Node{anonfield(typs[20])}, nil)
	typs[22] = types.Types[TINT64]
	typs[23] = functype(nil, []*Node{anonfield(typs[22])}, nil)
	typs[24] = types.Types[TUINT64]
	typs[25] = functype(nil, []*Node{anonfield(typs[24])}, nil)
	typs[26] = types.Types[TCOMPLEX128]
	typs[27] = functype(nil, []*Node{anonfield(typs[26])}, nil)
	typs[28] = types.Types[TSTRING]
	typs[29] = functype(nil, []*Node{anonfield(typs[28])}, nil)
	typs[30] = functype(nil, []*Node{anonfield(typs[2])}, nil)
	typs[31] = functype(nil, []*Node{anonfield(typs[5])}, nil)
	typs[32] = types.NewArray(typs[0], 32)
	typs[33] = types.NewPtr(typs[32])
	typs[34] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
	typs[35] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
	typs[36] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
	typs[37] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
	typs[38] = types.NewSlice(typs[28])
	typs[39] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[38])}, []*Node{anonfield(typs[28])})
	typs[40] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[15])})
	typs[41] = types.NewArray(typs[0], 4)
	typs[42] = types.NewPtr(typs[41])
	typs[43] = functype(nil, []*Node{anonfield(typs[42]), anonfield(typs[22])}, []*Node{anonfield(typs[28])})
	typs[44] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
	typs[45] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
	typs[46] = types.Runetype
	typs[47] = types.NewSlice(typs[46])
	typs[48] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[47])}, []*Node{anonfield(typs[28])})
	typs[49] = types.NewSlice(typs[0])
	typs[50] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28])}, []*Node{anonfield(typs[49])})
	typs[51] = types.NewArray(typs[46], 32)
	typs[52] = types.NewPtr(typs[51])
	typs[53] = functype(nil, []*Node{anonfield(typs[52]), anonfield(typs[28])}, []*Node{anonfield(typs[47])})
	typs[54] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []*Node{anonfield(typs[15])})
	typs[55] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[15])}, []*Node{anonfield(typs[46]), anonfield(typs[15])})
	typs[56] = functype(nil, []*Node{anonfield(typs[28])}, []*Node{anonfield(typs[15])})
	typs[57] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2])})
	typs[58] = functype(nil, []*Node{anonfield(typs[2])}, []*Node{anonfield(typs[7])})
	typs[59] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, []*Node{anonfield(typs[2])})
	typs[60] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2]), anonfield(typs[6])})
	typs[61] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
	typs[62] = functype(nil, []*Node{anonfield(typs[1])}, nil)
	typs[63] = types.NewPtr(typs[5])
	typs[64] = functype(nil, []*Node{anonfield(typs[63]), anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
	typs[65] = types.Types[TUINT32]
	typs[66] = functype(nil, nil, []*Node{anonfield(typs[65])})
	typs[67] = types.NewMap(typs[2], typs[2])
	typs[68] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []*Node{anonfield(typs[67])})
	typs[69] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []*Node{anonfield(typs[67])})
	typs[70] = functype(nil, nil, []*Node{anonfield(typs[67])})
	typs[71] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*Node{anonfield(typs[3])})
	typs[72] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*Node{anonfield(typs[3])})
	typs[73] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3])})
	typs[74] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
	typs[75] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
	typs[76] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
	typs[77] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, nil)
	typs[78] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, nil)
	typs[79] = functype(nil, []*Node{anonfield(typs[3])}, nil)
	typs[80] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67])}, nil)
	typs[81] = types.NewChan(typs[2], types.Cboth)
	typs[82] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22])}, []*Node{anonfield(typs[81])})
	typs[83] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[81])})
	typs[84] = types.NewChan(typs[2], types.Crecv)
	typs[85] = functype(nil, []*Node{anonfield(typs[84]), anonfield(typs[3])}, nil)
	typs[86] = functype(nil, []*Node{anonfield(typs[84]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
	typs[87] = types.NewChan(typs[2], types.Csend)
	typs[88] = functype(nil, []*Node{anonfield(typs[87]), anonfield(typs[3])}, nil)
	typs[89] = types.NewArray(typs[0], 3)
	typs[90] = tostruct([]*Node{namedfield("enabled", typs[6]), namedfield("pad", typs[89]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])})
	typs[91] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
	typs[92] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, nil)
	typs[93] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []*Node{anonfield(typs[15])})
	typs[94] = functype(nil, []*Node{anonfield(typs[87]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
	typs[95] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[84])}, []*Node{anonfield(typs[6])})
	typs[96] = types.NewPtr(typs[6])
	typs[97] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[96]), anonfield(typs[84])}, []*Node{anonfield(typs[6])})
	typs[98] = functype(nil, []*Node{anonfield(typs[63])}, nil)
	typs[99] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[63]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []*Node{anonfield(typs[15]), anonfield(typs[6])})
	typs[100] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[7])})
	typs[101] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[7])})
	typs[102] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []*Node{anonfield(typs[7])})
	typs[103] = types.NewSlice(typs[2])
	typs[104] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[103]), anonfield(typs[15])}, []*Node{anonfield(typs[103])})
	typs[105] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil)
	typs[106] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, nil)
	typs[107] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []*Node{anonfield(typs[6])})
	typs[108] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
	typs[109] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
	typs[110] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
	typs[111] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
	typs[112] = functype(nil, []*Node{anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[22])})
	typs[113] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, []*Node{anonfield(typs[24])})
	typs[114] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[22])})
	typs[115] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[24])})
	typs[116] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[65])})
	typs[117] = functype(nil, []*Node{anonfield(typs[22])}, []*Node{anonfield(typs[20])})
	typs[118] = functype(nil, []*Node{anonfield(typs[24])}, []*Node{anonfield(typs[20])})
	typs[119] = functype(nil, []*Node{anonfield(typs[65])}, []*Node{anonfield(typs[20])})
	typs[120] = functype(nil, []*Node{anonfield(typs[26]), anonfield(typs[26])}, []*Node{anonfield(typs[26])})
	typs[121] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[5])}, nil)
	typs[122] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[5]), anonfield(typs[5])}, nil)
	typs[123] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[1]), anonfield(typs[5])}, nil)
	typs[124] = types.NewSlice(typs[7])
	typs[125] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[124])}, nil)
	typs[126] = types.Types[TUINT8]
	typs[127] = functype(nil, []*Node{anonfield(typs[126]), anonfield(typs[126])}, nil)
	typs[128] = types.Types[TUINT16]
	typs[129] = functype(nil, []*Node{anonfield(typs[128]), anonfield(typs[128])}, nil)
	typs[130] = functype(nil, []*Node{anonfield(typs[65]), anonfield(typs[65])}, nil)
	typs[131] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, nil)
	return typs[:]
}
|
||||
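Each functype call above assembles a runtime builtin's signature from a nil receiver, a list of anonymous parameter fields, and a list of anonymous result fields. A minimal sketch of the pattern, assuming (for illustration only) that typs[1] is the *byte type descriptor, typs[22] is int64, and typs[81] is the bidirectional channel type built just above:

	// Hypothetical reading of an entry like typs[82]:
	// roughly func(t *byte, size int64) chan any. The index-to-type
	// mapping in this sketch is an assumption, not taken from the table.
	sig := functype(nil,
		[]*Node{anonfield(typs[1]), anonfield(typs[22])}, // parameter fields
		[]*Node{anonfield(typs[81])},                     // result field
	)
	_ = sig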
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package typecheck
package gc_test

import (
	"bytes"
278 src/cmd/compile/internal/gc/bv.go Normal file
@@ -0,0 +1,278 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"math/bits"
)

const (
	wordBits  = 32
	wordMask  = wordBits - 1
	wordShift = 5
)

// A bvec is a bit vector.
type bvec struct {
	n int32    // number of bits in vector
	b []uint32 // words holding bits
}

func bvalloc(n int32) bvec {
	nword := (n + wordBits - 1) / wordBits
	return bvec{n, make([]uint32, nword)}
}

type bulkBvec struct {
	words []uint32
	nbit  int32
	nword int32
}

func bvbulkalloc(nbit int32, count int32) bulkBvec {
	nword := (nbit + wordBits - 1) / wordBits
	size := int64(nword) * int64(count)
	if int64(int32(size*4)) != size*4 {
		Fatalf("bvbulkalloc too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
	}
	return bulkBvec{
		words: make([]uint32, size),
		nbit:  nbit,
		nword: nword,
	}
}

func (b *bulkBvec) next() bvec {
	out := bvec{b.nbit, b.words[:b.nword]}
	b.words = b.words[b.nword:]
	return out
}

func (bv1 bvec) Eq(bv2 bvec) bool {
	if bv1.n != bv2.n {
		Fatalf("bvequal: lengths %d and %d are not equal", bv1.n, bv2.n)
	}
	for i, x := range bv1.b {
		if x != bv2.b[i] {
			return false
		}
	}
	return true
}

func (dst bvec) Copy(src bvec) {
	copy(dst.b, src.b)
}

func (bv bvec) Get(i int32) bool {
	if i < 0 || i >= bv.n {
		Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.n)
	}
	mask := uint32(1 << uint(i%wordBits))
	return bv.b[i>>wordShift]&mask != 0
}

func (bv bvec) Set(i int32) {
	if i < 0 || i >= bv.n {
		Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.n)
	}
	mask := uint32(1 << uint(i%wordBits))
	bv.b[i/wordBits] |= mask
}

func (bv bvec) Unset(i int32) {
	if i < 0 || i >= bv.n {
		Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.n)
	}
	mask := uint32(1 << uint(i%wordBits))
	bv.b[i/wordBits] &^= mask
}

// bvnext returns the smallest index >= i for which bvget(bv, i) == 1.
// If there is no such index, bvnext returns -1.
func (bv bvec) Next(i int32) int32 {
	if i >= bv.n {
		return -1
	}

	// Jump i ahead to next word with bits.
	if bv.b[i>>wordShift]>>uint(i&wordMask) == 0 {
		i &^= wordMask
		i += wordBits
		for i < bv.n && bv.b[i>>wordShift] == 0 {
			i += wordBits
		}
	}

	if i >= bv.n {
		return -1
	}

	// Find 1 bit.
	w := bv.b[i>>wordShift] >> uint(i&wordMask)
	i += int32(bits.TrailingZeros32(w))

	return i
}

func (bv bvec) IsEmpty() bool {
	for _, x := range bv.b {
		if x != 0 {
			return false
		}
	}
	return true
}

func (bv bvec) Not() {
	for i, x := range bv.b {
		bv.b[i] = ^x
	}
}

// union
func (dst bvec) Or(src1, src2 bvec) {
	if len(src1.b) == 0 {
		return
	}
	_, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop

	for i, x := range src1.b {
		dst.b[i] = x | src2.b[i]
	}
}

// intersection
func (dst bvec) And(src1, src2 bvec) {
	if len(src1.b) == 0 {
		return
	}
	_, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop

	for i, x := range src1.b {
		dst.b[i] = x & src2.b[i]
	}
}

// difference
func (dst bvec) AndNot(src1, src2 bvec) {
	if len(src1.b) == 0 {
		return
	}
	_, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop

	for i, x := range src1.b {
		dst.b[i] = x &^ src2.b[i]
	}
}

func (bv bvec) String() string {
	s := make([]byte, 2+bv.n)
	copy(s, "#*")
	for i := int32(0); i < bv.n; i++ {
		ch := byte('0')
		if bv.Get(i) {
			ch = '1'
		}
		s[2+i] = ch
	}
	return string(s)
}

func (bv bvec) Clear() {
	for i := range bv.b {
		bv.b[i] = 0
	}
}

// FNV-1 hash function constants.
const (
	H0 = 2166136261
	Hp = 16777619
)

func hashbitmap(h uint32, bv bvec) uint32 {
	n := int((bv.n + 31) / 32)
	for i := 0; i < n; i++ {
		w := bv.b[i]
		h = (h * Hp) ^ (w & 0xff)
		h = (h * Hp) ^ ((w >> 8) & 0xff)
		h = (h * Hp) ^ ((w >> 16) & 0xff)
		h = (h * Hp) ^ ((w >> 24) & 0xff)
	}

	return h
}

// bvecSet is a set of bvecs, in initial insertion order.
type bvecSet struct {
	index []int  // hash -> uniq index. -1 indicates empty slot.
	uniq  []bvec // unique bvecs, in insertion order
}

func (m *bvecSet) grow() {
	// Allocate new index.
	n := len(m.index) * 2
	if n == 0 {
		n = 32
	}
	newIndex := make([]int, n)
	for i := range newIndex {
		newIndex[i] = -1
	}

	// Rehash into newIndex.
	for i, bv := range m.uniq {
		h := hashbitmap(H0, bv) % uint32(len(newIndex))
		for {
			j := newIndex[h]
			if j < 0 {
				newIndex[h] = i
				break
			}
			h++
			if h == uint32(len(newIndex)) {
				h = 0
			}
		}
	}
	m.index = newIndex
}

// add adds bv to the set and returns its index in m.extractUniqe.
// The caller must not modify bv after this.
func (m *bvecSet) add(bv bvec) int {
	if len(m.uniq)*4 >= len(m.index) {
		m.grow()
	}

	index := m.index
	h := hashbitmap(H0, bv) % uint32(len(index))
	for {
		j := index[h]
		if j < 0 {
			// New bvec.
			index[h] = len(m.uniq)
			m.uniq = append(m.uniq, bv)
			return len(m.uniq) - 1
		}
		jlive := m.uniq[j]
		if bv.Eq(jlive) {
			// Existing bvec.
			return j
		}

		h++
		if h == uint32(len(index)) {
			h = 0
		}
	}
}

// extractUniqe returns this slice of unique bit vectors in m, as
// indexed by the result of bvecSet.add.
func (m *bvecSet) extractUniqe() []bvec {
	return m.uniq
}
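The indexing arithmetic above packs bit i into word i>>wordShift, at position i&wordMask within that word. A self-contained sketch of that arithmetic, independent of the compiler internals:

	package main

	import "fmt"

	const (
		wordBits  = 32
		wordMask  = wordBits - 1
		wordShift = 5
	)

	type bitvec struct {
		n int32
		b []uint32
	}

	func alloc(n int32) bitvec {
		return bitvec{n, make([]uint32, (n+wordBits-1)/wordBits)}
	}

	func (bv bitvec) set(i int32)      { bv.b[i>>wordShift] |= 1 << uint(i&wordMask) }
	func (bv bitvec) get(i int32) bool { return bv.b[i>>wordShift]&(1<<uint(i&wordMask)) != 0 }

	func main() {
		bv := alloc(100) // 100 bits -> 4 words
		bv.set(3)
		bv.set(64) // lands in word 2 (64>>5)
		fmt.Println(bv.get(3), bv.get(4), bv.get(64)) // true false true
	}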
@@ -1,6 +1,6 @@
// Code generated by "stringer -type=Class name.go"; DO NOT EDIT.
// Code generated by "stringer -type=Class"; DO NOT EDIT.

package ir
package gc

import "strconv"

594 src/cmd/compile/internal/gc/closure.go Normal file
@@ -0,0 +1,594 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/compile/internal/syntax"
	"cmd/compile/internal/types"
	"fmt"
)

func (p *noder) funcLit(expr *syntax.FuncLit) *Node {
	xtype := p.typeExpr(expr.Type)
	ntype := p.typeExpr(expr.Type)

	xfunc := p.nod(expr, ODCLFUNC, nil, nil)
	xfunc.Func.SetIsHiddenClosure(Curfn != nil)
	xfunc.Func.Nname = newfuncnamel(p.pos(expr), nblank.Sym) // filled in by typecheckclosure
	xfunc.Func.Nname.Name.Param.Ntype = xtype
	xfunc.Func.Nname.Name.Defn = xfunc

	clo := p.nod(expr, OCLOSURE, nil, nil)
	clo.Func.Ntype = ntype

	xfunc.Func.Closure = clo
	clo.Func.Closure = xfunc

	p.funcBody(xfunc, expr.Body)

	// closure-specific variables are hanging off the
	// ordinary ones in the symbol table; see oldname.
	// unhook them.
	// make the list of pointers for the closure call.
	for _, v := range xfunc.Func.Cvars.Slice() {
		// Unlink from v1; see comment in syntax.go type Param for these fields.
		v1 := v.Name.Defn
		v1.Name.Param.Innermost = v.Name.Param.Outer

		// If the closure usage of v is not dense,
		// we need to make it dense; now that we're out
		// of the function in which v appeared,
		// look up v.Sym in the enclosing function
		// and keep it around for use in the compiled code.
		//
		// That is, suppose we just finished parsing the innermost
		// closure f4 in this code:
		//
		//	func f() {
		//		v := 1
		//		func() { // f2
		//			use(v)
		//			func() { // f3
		//				func() { // f4
		//					use(v)
		//				}()
		//			}()
		//		}()
		//	}
		//
		// At this point v.Outer is f2's v; there is no f3's v.
		// To construct the closure f4 from within f3,
		// we need to use f3's v and in this case we need to create f3's v.
		// We are now in the context of f3, so calling oldname(v.Sym)
		// obtains f3's v, creating it if necessary (as it is in the example).
		//
		// capturevars will decide whether to use v directly or &v.
		v.Name.Param.Outer = oldname(v.Sym)
	}

	return clo
}

// typecheckclosure typechecks an OCLOSURE node. It also creates the named
// function associated with the closure.
// TODO: This creation of the named function should probably really be done in a
// separate pass from type-checking.
func typecheckclosure(clo *Node, top int) {
	xfunc := clo.Func.Closure
	// Set current associated iota value, so iota can be used inside
	// function in ConstSpec, see issue #22344
	if x := getIotaValue(); x >= 0 {
		xfunc.SetIota(x)
	}

	clo.Func.Ntype = typecheck(clo.Func.Ntype, ctxType)
	clo.Type = clo.Func.Ntype.Type
	clo.Func.Top = top

	// Do not typecheck xfunc twice, otherwise, we will end up pushing
	// xfunc to xtop multiple times, causing initLSym called twice.
	// See #30709
	if xfunc.Typecheck() == 1 {
		return
	}

	for _, ln := range xfunc.Func.Cvars.Slice() {
		n := ln.Name.Defn
		if !n.Name.Captured() {
			n.Name.SetCaptured(true)
			if n.Name.Decldepth == 0 {
				Fatalf("typecheckclosure: var %S does not have decldepth assigned", n)
			}

			// Ignore assignments to the variable in straightline code
			// preceding the first capturing by a closure.
			if n.Name.Decldepth == decldepth {
				n.Name.SetAssigned(false)
			}
		}
	}

	xfunc.Func.Nname.Sym = closurename(Curfn)
	setNodeNameFunc(xfunc.Func.Nname)
	xfunc = typecheck(xfunc, ctxStmt)

	// Type check the body now, but only if we're inside a function.
	// At top level (in a variable initialization: curfn==nil) we're not
	// ready to type check code yet; we'll check it later, because the
	// underlying closure function we create is added to xtop.
	if Curfn != nil && clo.Type != nil {
		oldfn := Curfn
		Curfn = xfunc
		olddd := decldepth
		decldepth = 1
		typecheckslice(xfunc.Nbody.Slice(), ctxStmt)
		decldepth = olddd
		Curfn = oldfn
	}

	xtop = append(xtop, xfunc)
}

// globClosgen is like Func.Closgen, but for the global scope.
var globClosgen int

// closurename generates a new unique name for a closure within
// outerfunc.
func closurename(outerfunc *Node) *types.Sym {
	outer := "glob."
	prefix := "func"
	gen := &globClosgen

	if outerfunc != nil {
		if outerfunc.Func.Closure != nil {
			prefix = ""
		}

		outer = outerfunc.funcname()

		// There may be multiple functions named "_". In those
		// cases, we can't use their individual Closgens as it
		// would lead to name clashes.
		if !outerfunc.Func.Nname.isBlank() {
			gen = &outerfunc.Func.Closgen
		}
	}

	*gen++
	return lookup(fmt.Sprintf("%s.%s%d", outer, prefix, *gen))
}
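closurename's scheme (outer function name, optional "func" prefix, per-function counter) is visible at run time through the name baked into the binary; a small standalone check:

	package main

	import (
		"fmt"
		"reflect"
		"runtime"
	)

	func f() func() {
		return func() {} // named f.func1 by closurename; main.f.func1 once linked
	}

	func main() {
		fn := f()
		name := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name()
		fmt.Println(name) // typically "main.f.func1"
	}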

// capturevarscomplete is set to true when the capturevars phase is done.
var capturevarscomplete bool

// capturevars is called in a separate phase after all typechecking is done.
// It decides whether each variable captured by a closure should be captured
// by value or by reference.
// We use value capturing for values <= 128 bytes that are never reassigned
// after capturing (effectively constant).
func capturevars(xfunc *Node) {
	lno := lineno
	lineno = xfunc.Pos

	clo := xfunc.Func.Closure
	cvars := xfunc.Func.Cvars.Slice()
	out := cvars[:0]
	for _, v := range cvars {
		if v.Type == nil {
			// If v.Type is nil, it means v looked like it
			// was going to be used in the closure, but
			// isn't. This happens in struct literals like
			// s{f: x} where we can't distinguish whether
			// f is a field identifier or expression until
			// resolving s.
			continue
		}
		out = append(out, v)

		// type check the & of closed variables outside the closure,
		// so that the outer frame also grabs them and knows they escape.
		dowidth(v.Type)

		outer := v.Name.Param.Outer
		outermost := v.Name.Defn

		// out parameters will be assigned to implicitly upon return.
		if outermost.Class() != PPARAMOUT && !outermost.Name.Addrtaken() && !outermost.Name.Assigned() && v.Type.Width <= 128 {
			v.Name.SetByval(true)
		} else {
			outermost.Name.SetAddrtaken(true)
			outer = nod(OADDR, outer, nil)
		}

		if Debug.m > 1 {
			var name *types.Sym
			if v.Name.Curfn != nil && v.Name.Curfn.Func.Nname != nil {
				name = v.Name.Curfn.Func.Nname.Sym
			}
			how := "ref"
			if v.Name.Byval() {
				how = "value"
			}
			Warnl(v.Pos, "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym, outermost.Name.Addrtaken(), outermost.Name.Assigned(), int32(v.Type.Width))
		}

		outer = typecheck(outer, ctxExpr)
		clo.Func.Enter.Append(outer)
	}

	xfunc.Func.Cvars.Set(out)
	lineno = lno
}
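In source terms, the "<= 128 bytes and never reassigned" rule above plays out as in this sketch; the by-value/by-reference split is invisible to program semantics and only changes what the closure records:

	package main

	import "fmt"

	func counters() (get func() int, bump func()) {
		base := 10 // only read after capture: small and effectively constant, so capturable by value
		n := 0     // reassigned inside a closure: must be captured by reference
		get = func() int { return base + n }
		bump = func() { n++ }
		return
	}

	func main() {
		get, bump := counters()
		bump()
		bump()
		fmt.Println(get()) // 12
	}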

// transformclosure is called in a separate phase after escape analysis.
// It transforms closure bodies to properly reference captured variables.
func transformclosure(xfunc *Node) {
	lno := lineno
	lineno = xfunc.Pos
	clo := xfunc.Func.Closure

	if clo.Func.Top&ctxCallee != 0 {
		// If the closure is directly called, we transform it to a plain function call
		// with variables passed as args. This avoids allocation of a closure object.
		// Here we do only a part of the transformation. Walk of OCALLFUNC(OCLOSURE)
		// will complete the transformation later.
		// For illustration, the following closure:
		//	func(a int) {
		//		println(byval)
		//		byref++
		//	}(42)
		// becomes:
		//	func(byval int, &byref *int, a int) {
		//		println(byval)
		//		(*&byref)++
		//	}(byval, &byref, 42)

		// f is ONAME of the actual function.
		f := xfunc.Func.Nname

		// We are going to insert captured variables before input args.
		var params []*types.Field
		var decls []*Node
		for _, v := range xfunc.Func.Cvars.Slice() {
			if !v.Name.Byval() {
				// If v of type T is captured by reference,
				// we introduce function param &v *T
				// and v remains PAUTOHEAP with &v heapaddr
				// (accesses will implicitly deref &v).
				addr := newname(lookup("&" + v.Sym.Name))
				addr.Type = types.NewPtr(v.Type)
				v.Name.Param.Heapaddr = addr
				v = addr
			}

			v.SetClass(PPARAM)
			decls = append(decls, v)

			fld := types.NewField()
			fld.Nname = asTypesNode(v)
			fld.Type = v.Type
			fld.Sym = v.Sym
			params = append(params, fld)
		}

		if len(params) > 0 {
			// Prepend params and decls.
			f.Type.Params().SetFields(append(params, f.Type.Params().FieldSlice()...))
			xfunc.Func.Dcl = append(decls, xfunc.Func.Dcl...)
		}

		dowidth(f.Type)
		xfunc.Type = f.Type // update type of ODCLFUNC
	} else {
		// The closure is not called, so it is going to stay as closure.
		var body []*Node
		offset := int64(Widthptr)
		for _, v := range xfunc.Func.Cvars.Slice() {
			// cv refers to the field inside of closure OSTRUCTLIT.
			cv := nod(OCLOSUREVAR, nil, nil)

			cv.Type = v.Type
			if !v.Name.Byval() {
				cv.Type = types.NewPtr(v.Type)
			}
			offset = Rnd(offset, int64(cv.Type.Align))
			cv.Xoffset = offset
			offset += cv.Type.Width

			if v.Name.Byval() && v.Type.Width <= int64(2*Widthptr) {
				// If it is a small variable captured by value, downgrade it to PAUTO.
				v.SetClass(PAUTO)
				xfunc.Func.Dcl = append(xfunc.Func.Dcl, v)
				body = append(body, nod(OAS, v, cv))
			} else {
				// Declare variable holding addresses taken from closure
				// and initialize in entry prologue.
				addr := newname(lookup("&" + v.Sym.Name))
				addr.Type = types.NewPtr(v.Type)
				addr.SetClass(PAUTO)
				addr.Name.SetUsed(true)
				addr.Name.Curfn = xfunc
				xfunc.Func.Dcl = append(xfunc.Func.Dcl, addr)
				v.Name.Param.Heapaddr = addr
				if v.Name.Byval() {
					cv = nod(OADDR, cv, nil)
				}
				body = append(body, nod(OAS, addr, cv))
			}
		}

		if len(body) > 0 {
			typecheckslice(body, ctxStmt)
			xfunc.Func.Enter.Set(body)
			xfunc.Func.SetNeedctxt(true)
		}
	}

	lineno = lno
}

// hasemptycvars reports whether closure clo has an
// empty list of captured vars.
func hasemptycvars(clo *Node) bool {
	xfunc := clo.Func.Closure
	return xfunc.Func.Cvars.Len() == 0
}

// closuredebugruntimecheck applies boilerplate checks for debug flags
// and compiling runtime
func closuredebugruntimecheck(clo *Node) {
	if Debug_closure > 0 {
		xfunc := clo.Func.Closure
		if clo.Esc == EscHeap {
			Warnl(clo.Pos, "heap closure, captured vars = %v", xfunc.Func.Cvars)
		} else {
			Warnl(clo.Pos, "stack closure, captured vars = %v", xfunc.Func.Cvars)
		}
	}
	if compiling_runtime && clo.Esc == EscHeap {
		yyerrorl(clo.Pos, "heap-allocated closure, not allowed in runtime")
	}
}

// closureType returns the struct type used to hold all the information
// needed in the closure for clo (clo must be a OCLOSURE node).
// The address of a variable of the returned type can be cast to a func.
func closureType(clo *Node) *types.Type {
	// Create closure in the form of a composite literal.
	// supposing the closure captures an int i and a string s
	// and has one float64 argument and no results,
	// the generated code looks like:
	//
	//	clos = &struct{.F uintptr; i *int; s *string}{func.1, &i, &s}
	//
	// The use of the struct provides type information to the garbage
	// collector so that it can walk the closure. We could use (in this case)
	// [3]unsafe.Pointer instead, but that would leave the gc in the dark.
	// The information appears in the binary in the form of type descriptors;
	// the struct is unnamed so that closures in multiple packages with the
	// same struct type can share the descriptor.
	fields := []*Node{
		namedfield(".F", types.Types[TUINTPTR]),
	}
	for _, v := range clo.Func.Closure.Func.Cvars.Slice() {
		typ := v.Type
		if !v.Name.Byval() {
			typ = types.NewPtr(typ)
		}
		fields = append(fields, symfield(v.Sym, typ))
	}
	typ := tostruct(fields)
	typ.SetNoalg(true)
	return typ
}

func walkclosure(clo *Node, init *Nodes) *Node {
	xfunc := clo.Func.Closure

	// If no closure vars, don't bother wrapping.
	if hasemptycvars(clo) {
		if Debug_closure > 0 {
			Warnl(clo.Pos, "closure converted to global")
		}
		return xfunc.Func.Nname
	}
	closuredebugruntimecheck(clo)

	typ := closureType(clo)

	clos := nod(OCOMPLIT, nil, typenod(typ))
	clos.Esc = clo.Esc
	clos.List.Set(append([]*Node{nod(OCFUNC, xfunc.Func.Nname, nil)}, clo.Func.Enter.Slice()...))

	clos = nod(OADDR, clos, nil)
	clos.Esc = clo.Esc

	// Force type conversion from *struct to the func type.
	clos = convnop(clos, clo.Type)

	// non-escaping temp to use, if any.
	if x := prealloc[clo]; x != nil {
		if !types.Identical(typ, x.Type) {
			panic("closure type does not match order's assigned type")
		}
		clos.Left.Right = x
		delete(prealloc, clo)
	}

	return walkexpr(clos, init)
}

func typecheckpartialcall(fn *Node, sym *types.Sym) {
	switch fn.Op {
	case ODOTINTER, ODOTMETH:
		break

	default:
		Fatalf("invalid typecheckpartialcall")
	}

	// Create top-level function.
	xfunc := makepartialcall(fn, fn.Type, sym)
	fn.Func = xfunc.Func
	fn.Func.SetWrapper(true)
	fn.Right = newname(sym)
	fn.Op = OCALLPART
	fn.Type = xfunc.Type
}

// makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed
// for partial calls.
func makepartialcall(fn *Node, t0 *types.Type, meth *types.Sym) *Node {
	rcvrtype := fn.Left.Type
	sym := methodSymSuffix(rcvrtype, meth, "-fm")

	if sym.Uniq() {
		return asNode(sym.Def)
	}
	sym.SetUniq(true)

	savecurfn := Curfn
	saveLineNo := lineno
	Curfn = nil

	// Set line number equal to the line number where the method is declared.
	var m *types.Field
	if lookdot0(meth, rcvrtype, &m, false) == 1 && m.Pos.IsKnown() {
		lineno = m.Pos
	}
	// Note: !m.Pos.IsKnown() happens for method expressions where
	// the method is implicitly declared. The Error method of the
	// built-in error type is one such method. We leave the line
	// number at the use of the method expression in this
	// case. See issue 29389.

	tfn := nod(OTFUNC, nil, nil)
	tfn.List.Set(structargs(t0.Params(), true))
	tfn.Rlist.Set(structargs(t0.Results(), false))

	xfunc := dclfunc(sym, tfn)
	xfunc.Func.SetDupok(true)
	xfunc.Func.SetNeedctxt(true)

	tfn.Type.SetPkg(t0.Pkg())

	// Declare and initialize variable holding receiver.

	cv := nod(OCLOSUREVAR, nil, nil)
	cv.Type = rcvrtype
	cv.Xoffset = Rnd(int64(Widthptr), int64(cv.Type.Align))

	ptr := newname(lookup(".this"))
	declare(ptr, PAUTO)
	ptr.Name.SetUsed(true)
	var body []*Node
	if rcvrtype.IsPtr() || rcvrtype.IsInterface() {
		ptr.Type = rcvrtype
		body = append(body, nod(OAS, ptr, cv))
	} else {
		ptr.Type = types.NewPtr(rcvrtype)
		body = append(body, nod(OAS, ptr, nod(OADDR, cv, nil)))
	}

	call := nod(OCALL, nodSym(OXDOT, ptr, meth), nil)
	call.List.Set(paramNnames(tfn.Type))
	call.SetIsDDD(tfn.Type.IsVariadic())
	if t0.NumResults() != 0 {
		n := nod(ORETURN, nil, nil)
		n.List.Set1(call)
		call = n
	}
	body = append(body, call)

	xfunc.Nbody.Set(body)
	funcbody()

	xfunc = typecheck(xfunc, ctxStmt)
	// Need to typecheck the body of the just-generated wrapper.
	// typecheckslice() requires that Curfn is set when processing an ORETURN.
	Curfn = xfunc
	typecheckslice(xfunc.Nbody.Slice(), ctxStmt)
	sym.Def = asTypesNode(xfunc)
	xtop = append(xtop, xfunc)
	Curfn = savecurfn
	lineno = saveLineNo

	return xfunc
}

// partialCallType returns the struct type used to hold all the information
// needed in the closure for n (n must be a OCALLPART node).
// The address of a variable of the returned type can be cast to a func.
func partialCallType(n *Node) *types.Type {
	t := tostruct([]*Node{
		namedfield("F", types.Types[TUINTPTR]),
		namedfield("R", n.Left.Type),
	})
	t.SetNoalg(true)
	return t
}

func walkpartialcall(n *Node, init *Nodes) *Node {
	// Create closure in the form of a composite literal.
	// For x.M with receiver (x) type T, the generated code looks like:
	//
	//	clos = &struct{F uintptr; R T}{T.M·f, x}
	//
	// Like walkclosure above.

	if n.Left.Type.IsInterface() {
		// Trigger panic for method on nil interface now.
		// Otherwise it happens in the wrapper and is confusing.
		n.Left = cheapexpr(n.Left, init)
		n.Left = walkexpr(n.Left, nil)

		tab := nod(OITAB, n.Left, nil)
		tab = typecheck(tab, ctxExpr)

		c := nod(OCHECKNIL, tab, nil)
		c.SetTypecheck(1)
		init.Append(c)
	}

	typ := partialCallType(n)

	clos := nod(OCOMPLIT, nil, typenod(typ))
	clos.Esc = n.Esc
	clos.List.Set2(nod(OCFUNC, n.Func.Nname, nil), n.Left)

	clos = nod(OADDR, clos, nil)
	clos.Esc = n.Esc

	// Force type conversion from *struct to the func type.
	clos = convnop(clos, n.Type)

	// non-escaping temp to use, if any.
	if x := prealloc[n]; x != nil {
		if !types.Identical(typ, x.Type) {
			panic("partial call type does not match order's assigned type")
		}
		clos.Left.Right = x
		delete(prealloc, n)
	}

	return walkexpr(clos, init)
}

// callpartMethod returns the *types.Field representing the method
// referenced by method value n.
func callpartMethod(n *Node) *types.Field {
	if n.Op != OCALLPART {
		Fatalf("expected OCALLPART, got %v", n)
	}

	// TODO(mdempsky): Optimize this. If necessary,
	// makepartialcall could save m for us somewhere.
	var m *types.Field
	if lookdot0(n.Right.Sym, n.Left.Type, &m, false) != 1 {
		Fatalf("failed to find field for OCALLPART")
	}

	return m
}
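This is the machinery behind ordinary Go method values: makepartialcall emits the *-fm wrapper, and partialCallType describes the struct holding the func pointer plus the captured receiver. A small standalone sketch of the observable behavior:

	package main

	import "fmt"

	type counter struct{ n int }

	func (c *counter) inc() { c.n++ }

	func main() {
		c := &counter{}
		inc := c.inc // method value: compiled via a (*counter).inc-fm wrapper closing over c
		inc()
		inc()
		fmt.Println(c.n) // 2: the receiver was captured when the method value was made
	}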
@@ -1,147 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"internal/race"
	"math/rand"
	"sort"
	"sync"

	"cmd/compile/internal/base"
	"cmd/compile/internal/ir"
	"cmd/compile/internal/liveness"
	"cmd/compile/internal/ssagen"
	"cmd/compile/internal/typecheck"
	"cmd/compile/internal/types"
	"cmd/compile/internal/walk"
)

// "Portable" code generation.

var (
	compilequeue []*ir.Func // functions waiting to be compiled
)

func enqueueFunc(fn *ir.Func) {
	if ir.CurFunc != nil {
		base.FatalfAt(fn.Pos(), "enqueueFunc %v inside %v", fn, ir.CurFunc)
	}

	if ir.FuncName(fn) == "_" {
		// Skip compiling blank functions.
		// Frontend already reported any spec-mandated errors (#29870).
		return
	}

	if clo := fn.OClosure; clo != nil && !ir.IsTrivialClosure(clo) {
		return // we'll get this as part of its enclosing function
	}

	if len(fn.Body) == 0 {
		// Initialize ABI wrappers if necessary.
		ssagen.InitLSym(fn, false)
		liveness.WriteFuncMap(fn)
		return
	}

	errorsBefore := base.Errors()

	todo := []*ir.Func{fn}
	for len(todo) > 0 {
		next := todo[len(todo)-1]
		todo = todo[:len(todo)-1]

		prepareFunc(next)
		todo = append(todo, next.Closures...)
	}

	if base.Errors() > errorsBefore {
		return
	}

	// Enqueue just fn itself. compileFunctions will handle
	// scheduling compilation of its closures after it's done.
	compilequeue = append(compilequeue, fn)
}

// prepareFunc handles any remaining frontend compilation tasks that
// aren't yet safe to perform concurrently.
func prepareFunc(fn *ir.Func) {
	// Set up the function's LSym early to avoid data races with the assemblers.
	// Do this before walk, as walk needs the LSym to set attributes/relocations
	// (e.g. in MarkTypeUsedInInterface).
	ssagen.InitLSym(fn, true)

	// Calculate parameter offsets.
	types.CalcSize(fn.Type())

	typecheck.DeclContext = ir.PAUTO
	ir.CurFunc = fn
	walk.Walk(fn)
	ir.CurFunc = nil // enforce no further uses of CurFunc
	typecheck.DeclContext = ir.PEXTERN
}

// compileFunctions compiles all functions in compilequeue.
// It fans out nBackendWorkers to do the work
// and waits for them to complete.
func compileFunctions() {
	if len(compilequeue) == 0 {
		return
	}

	if race.Enabled {
		// Randomize compilation order to try to shake out races.
		tmp := make([]*ir.Func, len(compilequeue))
		perm := rand.Perm(len(compilequeue))
		for i, v := range perm {
			tmp[v] = compilequeue[i]
		}
		copy(compilequeue, tmp)
	} else {
		// Compile the longest functions first,
		// since they're most likely to be the slowest.
		// This helps avoid stragglers.
		sort.Slice(compilequeue, func(i, j int) bool {
			return len(compilequeue[i].Body) > len(compilequeue[j].Body)
		})
	}

	// We queue up a goroutine per function that needs to be
	// compiled, but require them to grab an available worker ID
	// before doing any substantial work to limit parallelism.
	workerIDs := make(chan int, base.Flag.LowerC)
	for i := 0; i < base.Flag.LowerC; i++ {
		workerIDs <- i
	}

	var wg sync.WaitGroup
	var asyncCompile func(*ir.Func)
	asyncCompile = func(fn *ir.Func) {
		wg.Add(1)
		go func() {
			worker := <-workerIDs
			ssagen.Compile(fn, worker)
			workerIDs <- worker

			// Done compiling fn. Schedule its closures for compilation.
			for _, closure := range fn.Closures {
				asyncCompile(closure)
			}
			wg.Done()
		}()
	}

	types.CalcSizeDisabled = true // not safe to calculate sizes concurrently
	base.Ctxt.InParallel = true
	for _, fn := range compilequeue {
		asyncCompile(fn)
	}
	compilequeue = nil
	wg.Wait()
	base.Ctxt.InParallel = false
	types.CalcSizeDisabled = false
}
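The worker-ID channel above is a standard way to spawn one goroutine per task while capping how many do substantial work at once. A minimal standalone sketch of the same pattern:

	package main

	import (
		"fmt"
		"sync"
	)

	func main() {
		const c = 4 // analogous to base.Flag.LowerC
		workerIDs := make(chan int, c)
		for i := 0; i < c; i++ {
			workerIDs <- i
		}

		var wg sync.WaitGroup
		for task := 0; task < 20; task++ {
			wg.Add(1)
			go func(task int) {
				defer wg.Done()
				worker := <-workerIDs // block until a worker slot is free
				fmt.Printf("worker %d: task %d\n", worker, task)
				workerIDs <- worker // return the slot
			}(task)
		}
		wg.Wait()
	}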
1323 src/cmd/compile/internal/gc/const.go Normal file
File diff suppressed because it is too large
@@ -1,7 +1,7 @@
// run
// Code generated by gen/constFoldGen.go. DO NOT EDIT.

package test
package gc

import "testing"

1185 src/cmd/compile/internal/gc/dcl.go Normal file
File diff suppressed because it is too large
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package test
package gc

import (
	"internal/testenv"
@@ -18,7 +18,7 @@ func TestDeps(t *testing.T) {
	}
	for _, dep := range strings.Fields(strings.Trim(string(out), "[]")) {
		switch dep {
		case "go/build", "go/scanner":
		case "go/build", "go/token":
			t.Errorf("undesired dependency on %q", dep)
		}
	}
@@ -6,23 +6,21 @@
// for debugging purposes. The code is customized for Node graphs
// and may be used for an alternative view of the node structure.

package ir
package gc

import (
	"cmd/compile/internal/types"
	"cmd/internal/src"
	"fmt"
	"io"
	"os"
	"reflect"
	"regexp"

	"cmd/compile/internal/base"
	"cmd/compile/internal/types"
	"cmd/internal/src"
)

// dump is like fdump but prints to stderr.
func DumpAny(root interface{}, filter string, depth int) {
	FDumpAny(os.Stderr, root, filter, depth)
func dump(root interface{}, filter string, depth int) {
	fdump(os.Stderr, root, filter, depth)
}

// fdump prints the structure of a rooted data structure
@@ -42,7 +40,7 @@ func DumpAny(root interface{}, filter string, depth int) {
// rather than their type; struct fields with zero values or
// non-matching field names are omitted, and "…" means recursion
// depth has been reached or struct fields have been omitted.
func FDumpAny(w io.Writer, root interface{}, filter string, depth int) {
func fdump(w io.Writer, root interface{}, filter string, depth int) {
	if root == nil {
		fmt.Fprintln(w, "nil")
		return
@@ -140,9 +138,19 @@ func (p *dumper) dump(x reflect.Value, depth int) {
		return
	}

	if pos, ok := x.Interface().(src.XPos); ok {
		p.printf("%s", base.FmtPos(pos))
	// special cases
	switch v := x.Interface().(type) {
	case Nodes:
		// unpack Nodes since reflect cannot look inside
		// due to the unexported field in its struct
		x = reflect.ValueOf(v.Slice())

	case src.XPos:
		p.printf("%s", linestr(v))
		return

	case *types.Node:
		x = reflect.ValueOf(asNode(v))
	}

	switch x.Kind() {
@@ -195,7 +203,7 @@ func (p *dumper) dump(x reflect.Value, depth int) {
	isNode := false
	if n, ok := x.Interface().(Node); ok {
		isNode = true
		p.printf("%s %s {", n.Op().String(), p.addr(x))
		p.printf("%s %s {", n.Op.String(), p.addr(x))
	} else {
		p.printf("%s {", typ)
	}
@@ -222,7 +230,7 @@ func (p *dumper) dump(x reflect.Value, depth int) {
			omitted = true
			continue // exclude zero-valued fields
		}
		if n, ok := x.Interface().(Nodes); ok && len(n) == 0 {
		if n, ok := x.Interface().(Nodes); ok && n.Len() == 0 {
			omitted = true
			continue // exclude empty Nodes slices
		}
@@ -2,17 +2,14 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package dwarfgen
package gc

import (
	"fmt"
	"strings"

	"cmd/compile/internal/base"
	"cmd/compile/internal/ir"
	"cmd/internal/dwarf"
	"cmd/internal/obj"
	"cmd/internal/src"
	"fmt"
	"strings"
)

// To identify variables by original source position.
@@ -29,8 +26,8 @@ type varPos struct {
func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
	var inlcalls dwarf.InlCalls

	if base.Debug.DwarfInl != 0 {
		base.Ctxt.Logf("assembling DWARF inlined routine info for %v\n", fnsym.Name)
	if Debug_gendwarfinl != 0 {
		Ctxt.Logf("assembling DWARF inlined routine info for %v\n", fnsym.Name)
	}

	// This maps inline index (from Ctxt.InlTree) to index in inlcalls.Calls
@@ -109,7 +106,7 @@ func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
		}
		m = makePreinlineDclMap(fnsym)
	} else {
		ifnlsym := base.Ctxt.InlTree.InlinedFunction(int(ii - 1))
		ifnlsym := Ctxt.InlTree.InlinedFunction(int(ii - 1))
		m = makePreinlineDclMap(ifnlsym)
	}

@@ -184,7 +181,7 @@ func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
	}

	// Debugging
	if base.Debug.DwarfInl != 0 {
	if Debug_gendwarfinl != 0 {
		dumpInlCalls(inlcalls)
		dumpInlVars(dwVars)
	}
@@ -207,17 +204,16 @@ func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
// late in the compilation when it is determined that we need an
// abstract function DIE for an inlined routine imported from a
// previously compiled package.
func AbstractFunc(fn *obj.LSym) {
	ifn := base.Ctxt.DwFixups.GetPrecursorFunc(fn)
func genAbstractFunc(fn *obj.LSym) {
	ifn := Ctxt.DwFixups.GetPrecursorFunc(fn)
	if ifn == nil {
		base.Ctxt.Diag("failed to locate precursor fn for %v", fn)
		Ctxt.Diag("failed to locate precursor fn for %v", fn)
		return
	}
	_ = ifn.(*ir.Func)
	if base.Debug.DwarfInl != 0 {
		base.Ctxt.Logf("DwarfAbstractFunc(%v)\n", fn.Name)
	if Debug_gendwarfinl != 0 {
		Ctxt.Logf("DwarfAbstractFunc(%v)\n", fn.Name)
	}
	base.Ctxt.DwarfAbstractFunc(ifn, fn, base.Ctxt.Pkgpath)
	Ctxt.DwarfAbstractFunc(ifn, fn, myimportpath)
}

// Undo any versioning performed when a name was written
@@ -239,15 +235,16 @@ func makePreinlineDclMap(fnsym *obj.LSym) map[varPos]int {
	dcl := preInliningDcls(fnsym)
	m := make(map[varPos]int)
	for i, n := range dcl {
		pos := base.Ctxt.InnermostPos(n.Pos())
		pos := Ctxt.InnermostPos(n.Pos)
		vp := varPos{
			DeclName: unversion(n.Sym().Name),
			DeclName: unversion(n.Sym.Name),
			DeclFile: pos.RelFilename(),
			DeclLine: pos.RelLine(),
			DeclCol:  pos.Col(),
		}
		if _, found := m[vp]; found {
			base.Fatalf("child dcl collision on symbol %s within %v\n", n.Sym().Name, fnsym.Name)
			// We can see collisions (variables with the same name/file/line/col)
			// in obfuscated or machine-generated code -- see issue 44378 for an
			// example. Skip duplicates in such cases, since it is unlikely that
			// a human will be debugging such code.
			continue
		}
		m[vp] = i
	}
@@ -264,17 +261,17 @@ func insertInlCall(dwcalls *dwarf.InlCalls, inlIdx int, imap map[int]int) int {
	// is one. We do this first so that parents appear before their
	// children in the resulting table.
	parCallIdx := -1
	parInlIdx := base.Ctxt.InlTree.Parent(inlIdx)
	parInlIdx := Ctxt.InlTree.Parent(inlIdx)
	if parInlIdx >= 0 {
		parCallIdx = insertInlCall(dwcalls, parInlIdx, imap)
	}

	// Create new entry for this inline
	inlinedFn := base.Ctxt.InlTree.InlinedFunction(inlIdx)
	callXPos := base.Ctxt.InlTree.CallPos(inlIdx)
	absFnSym := base.Ctxt.DwFixups.AbsFuncDwarfSym(inlinedFn)
	pb := base.Ctxt.PosTable.Pos(callXPos).Base()
	callFileSym := base.Ctxt.Lookup(pb.SymFilename())
	inlinedFn := Ctxt.InlTree.InlinedFunction(inlIdx)
	callXPos := Ctxt.InlTree.CallPos(inlIdx)
	absFnSym := Ctxt.DwFixups.AbsFuncDwarfSym(inlinedFn)
	pb := Ctxt.PosTable.Pos(callXPos).Base()
	callFileSym := Ctxt.Lookup(pb.SymFilename())
	ic := dwarf.InlCall{
		InlIndex: inlIdx,
		CallFile: callFileSym,
@@ -302,7 +299,7 @@ func insertInlCall(dwcalls *dwarf.InlCalls, inlIdx int, imap map[int]int) int {
// the index for a node from the inlined body of D will refer to the
// call to D from C. Whew.
func posInlIndex(xpos src.XPos) int {
	pos := base.Ctxt.PosTable.Pos(xpos)
	pos := Ctxt.PosTable.Pos(xpos)
	if b := pos.Base(); b != nil {
		ii := b.InliningIndex()
		if ii >= 0 {
@@ -328,7 +325,7 @@ func addRange(calls []dwarf.InlCall, start, end int64, ii int, imap map[int]int)
	// Append range to correct inlined call
	callIdx, found := imap[ii]
	if !found {
		base.Fatalf("can't find inlIndex %d in imap for prog at %d\n", ii, start)
		Fatalf("can't find inlIndex %d in imap for prog at %d\n", ii, start)
	}
	call := &calls[callIdx]
	call.Ranges = append(call.Ranges, dwarf.Range{Start: start, End: end})
@@ -336,23 +333,23 @@ func addRange(calls []dwarf.InlCall, start, end int64, ii int, imap map[int]int)

func dumpInlCall(inlcalls dwarf.InlCalls, idx, ilevel int) {
	for i := 0; i < ilevel; i++ {
		base.Ctxt.Logf("  ")
		Ctxt.Logf("  ")
	}
	ic := inlcalls.Calls[idx]
	callee := base.Ctxt.InlTree.InlinedFunction(ic.InlIndex)
	base.Ctxt.Logf(" %d: II:%d (%s) V: (", idx, ic.InlIndex, callee.Name)
	callee := Ctxt.InlTree.InlinedFunction(ic.InlIndex)
	Ctxt.Logf(" %d: II:%d (%s) V: (", idx, ic.InlIndex, callee.Name)
	for _, f := range ic.InlVars {
		base.Ctxt.Logf(" %v", f.Name)
		Ctxt.Logf(" %v", f.Name)
	}
	base.Ctxt.Logf(" ) C: (")
	Ctxt.Logf(" ) C: (")
	for _, k := range ic.Children {
		base.Ctxt.Logf(" %v", k)
		Ctxt.Logf(" %v", k)
	}
	base.Ctxt.Logf(" ) R:")
	Ctxt.Logf(" ) R:")
	for _, r := range ic.Ranges {
		base.Ctxt.Logf(" [%d,%d)", r.Start, r.End)
		Ctxt.Logf(" [%d,%d)", r.Start, r.End)
	}
	base.Ctxt.Logf("\n")
	Ctxt.Logf("\n")
	for _, k := range ic.Children {
		dumpInlCall(inlcalls, k, ilevel+1)
	}
@@ -377,7 +374,7 @@ func dumpInlVars(dwvars []*dwarf.Var) {
		if dwv.IsInAbstract {
			ia = 1
		}
		base.Ctxt.Logf("V%d: %s CI:%d II:%d IA:%d %s\n", i, dwv.Name, dwv.ChildIndex, dwv.InlIndex-1, ia, typ)
		Ctxt.Logf("V%d: %s CI:%d II:%d IA:%d %s\n", i, dwv.Name, dwv.ChildIndex, dwv.InlIndex-1, ia, typ)
	}
}

@@ -414,7 +411,7 @@ func checkInlCall(funcName string, inlCalls dwarf.InlCalls, funcSize int64, idx,

	// Callee
	ic := inlCalls.Calls[idx]
	callee := base.Ctxt.InlTree.InlinedFunction(ic.InlIndex).Name
	callee := Ctxt.InlTree.InlinedFunction(ic.InlIndex).Name
	calleeRanges := ic.Ranges

	// Caller
@@ -422,14 +419,14 @@ func checkInlCall(funcName string, inlCalls dwarf.InlCalls, funcSize int64, idx,
	parentRanges := []dwarf.Range{dwarf.Range{Start: int64(0), End: funcSize}}
	if parentIdx != -1 {
		pic := inlCalls.Calls[parentIdx]
		caller = base.Ctxt.InlTree.InlinedFunction(pic.InlIndex).Name
		caller = Ctxt.InlTree.InlinedFunction(pic.InlIndex).Name
		parentRanges = pic.Ranges
	}

	// Callee ranges contained in caller ranges?
	c, m := rangesContainsAll(parentRanges, calleeRanges)
	if !c {
		base.Fatalf("** malformed inlined routine range in %s: caller %s callee %s II=%d %s\n", funcName, caller, callee, idx, m)
		Fatalf("** malformed inlined routine range in %s: caller %s callee %s II=%d %s\n", funcName, caller, callee, idx, m)
	}

	// Now visit kids
256 src/cmd/compile/internal/gc/embed.go Normal file
@@ -0,0 +1,256 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/compile/internal/syntax"
	"cmd/compile/internal/types"
	"cmd/internal/obj"
	"encoding/json"
	"io/ioutil"
	"log"
	"path"
	"sort"
	"strconv"
	"strings"
)

var embedlist []*Node

var embedCfg struct {
	Patterns map[string][]string
	Files    map[string]string
}

func readEmbedCfg(file string) {
	data, err := ioutil.ReadFile(file)
	if err != nil {
		log.Fatalf("-embedcfg: %v", err)
	}
	if err := json.Unmarshal(data, &embedCfg); err != nil {
		log.Fatalf("%s: %v", file, err)
	}
	if embedCfg.Patterns == nil {
		log.Fatalf("%s: invalid embedcfg: missing Patterns", file)
	}
	if embedCfg.Files == nil {
		log.Fatalf("%s: invalid embedcfg: missing Files", file)
	}
}

const (
	embedUnknown = iota
	embedBytes
	embedString
	embedFiles
)

func varEmbed(p *noder, names []*Node, typ *Node, exprs []*Node, embeds []PragmaEmbed) {
	haveEmbed := false
	for _, decl := range p.file.DeclList {
		imp, ok := decl.(*syntax.ImportDecl)
		if !ok {
			// imports always come first
			break
		}
		path, _ := strconv.Unquote(imp.Path.Value)
		if path == "embed" {
			haveEmbed = true
			break
		}
	}

	pos := embeds[0].Pos
	if !haveEmbed {
		p.yyerrorpos(pos, "invalid go:embed: missing import \"embed\"")
		return
	}
	if len(names) > 1 {
		p.yyerrorpos(pos, "go:embed cannot apply to multiple vars")
		return
	}
	if len(exprs) > 0 {
		p.yyerrorpos(pos, "go:embed cannot apply to var with initializer")
		return
	}
	if typ == nil {
		// Should not happen, since len(exprs) == 0 now.
		p.yyerrorpos(pos, "go:embed cannot apply to var without type")
		return
	}
	if dclcontext != PEXTERN {
		p.yyerrorpos(pos, "go:embed cannot apply to var inside func")
		return
	}

	var list []irEmbed
	for _, e := range embeds {
		list = append(list, irEmbed{Pos: p.makeXPos(e.Pos), Patterns: e.Patterns})
	}
	v := names[0]
	v.Name.Param.SetEmbedList(list)
	embedlist = append(embedlist, v)
}

func embedFileList(v *Node, kind int) []string {
	// Build list of files to store.
	have := make(map[string]bool)
	var list []string
	for _, e := range v.Name.Param.EmbedList() {
		for _, pattern := range e.Patterns {
			files, ok := embedCfg.Patterns[pattern]
			if !ok {
				yyerrorl(e.Pos, "invalid go:embed: build system did not map pattern: %s", pattern)
			}
			for _, file := range files {
				if embedCfg.Files[file] == "" {
					yyerrorl(e.Pos, "invalid go:embed: build system did not map file: %s", file)
					continue
				}
				if !have[file] {
					have[file] = true
					list = append(list, file)
				}
				if kind == embedFiles {
					for dir := path.Dir(file); dir != "." && !have[dir]; dir = path.Dir(dir) {
						have[dir] = true
						list = append(list, dir+"/")
					}
				}
			}
		}
	}
	sort.Slice(list, func(i, j int) bool {
		return embedFileLess(list[i], list[j])
	})

	if kind == embedString || kind == embedBytes {
		if len(list) > 1 {
			yyerrorl(v.Pos, "invalid go:embed: multiple files for type %v", v.Type)
			return nil
		}
	}

	return list
}

// embedKind determines the kind of embedding variable.
func embedKind(typ *types.Type) int {
	if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == localpkg && myimportpath == "embed")) {
		return embedFiles
	}
	if typ.Etype == types.TSTRING {
		return embedString
	}
	if typ.Etype == types.TSLICE && typ.Elem().Etype == types.TUINT8 {
		return embedBytes
	}
	return embedUnknown
}
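The three accepted kinds correspond to ordinary //go:embed declarations; a sketch (the file name hello.txt is a placeholder that must exist next to the source for this to build):

	package main

	import (
		"embed"
		"fmt"
	)

	//go:embed hello.txt
	var s string // embedString

	//go:embed hello.txt
	var b []byte // embedBytes

	//go:embed hello.txt
	var f embed.FS // embedFiles

	func main() {
		fmt.Println(s, len(b))
		data, _ := f.ReadFile("hello.txt")
		fmt.Println(string(data))
	}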

func embedFileNameSplit(name string) (dir, elem string, isDir bool) {
	if name[len(name)-1] == '/' {
		isDir = true
		name = name[:len(name)-1]
	}
	i := len(name) - 1
	for i >= 0 && name[i] != '/' {
		i--
	}
	if i < 0 {
		return ".", name, isDir
	}
	return name[:i], name[i+1:], isDir
}

// embedFileLess implements the sort order for a list of embedded files.
// See the comment inside ../../../../embed/embed.go's Files struct for rationale.
func embedFileLess(x, y string) bool {
	xdir, xelem, _ := embedFileNameSplit(x)
	ydir, yelem, _ := embedFileNameSplit(y)
	return xdir < ydir || xdir == ydir && xelem < yelem
}
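embedFileNameSplit is pure string logic and easy to check by hand; a standalone demonstration using a copy of the function:

	package main

	import "fmt"

	// Copy of embedFileNameSplit above, for a standalone demonstration.
	func embedFileNameSplit(name string) (dir, elem string, isDir bool) {
		if name[len(name)-1] == '/' {
			isDir = true
			name = name[:len(name)-1]
		}
		i := len(name) - 1
		for i >= 0 && name[i] != '/' {
			i--
		}
		if i < 0 {
			return ".", name, isDir
		}
		return name[:i], name[i+1:], isDir
	}

	func main() {
		for _, name := range []string{"c.txt", "a/b/c.txt", "a/b/"} {
			dir, elem, isDir := embedFileNameSplit(name)
			fmt.Println(name, "->", dir, elem, isDir)
		}
		// c.txt -> . c.txt false
		// a/b/c.txt -> a/b c.txt false
		// a/b/ -> a b true   (trailing slash marks a directory)
	}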

func dumpembeds() {
	for _, v := range embedlist {
		initEmbed(v)
	}
}

// initEmbed emits the init data for a //go:embed variable,
// which is either a string, a []byte, or an embed.FS.
func initEmbed(v *Node) {
	commentPos := v.Name.Param.EmbedList()[0].Pos
	if !langSupported(1, 16, localpkg) {
		lno := lineno
		lineno = commentPos
		yyerrorv("go1.16", "go:embed")
		lineno = lno
		return
	}
	if embedCfg.Patterns == nil {
		yyerrorl(commentPos, "invalid go:embed: build system did not supply embed configuration")
		return
	}
	kind := embedKind(v.Type)
	if kind == embedUnknown {
		yyerrorl(v.Pos, "go:embed cannot apply to var of type %v", v.Type)
		return
	}

	files := embedFileList(v, kind)
	switch kind {
	case embedString, embedBytes:
		file := files[0]
		fsym, size, err := fileStringSym(v.Pos, embedCfg.Files[file], kind == embedString, nil)
		if err != nil {
			yyerrorl(v.Pos, "embed %s: %v", file, err)
		}
		sym := v.Sym.Linksym()
		off := 0
		off = dsymptr(sym, off, fsym, 0)       // data string
		off = duintptr(sym, off, uint64(size)) // len
		if kind == embedBytes {
			duintptr(sym, off, uint64(size)) // cap for slice
		}

	case embedFiles:
		slicedata := Ctxt.Lookup(`"".` + v.Sym.Name + `.files`)
		off := 0
		// []files pointed at by Files
		off = dsymptr(slicedata, off, slicedata, 3*Widthptr) // []file, pointing just past slice
		off = duintptr(slicedata, off, uint64(len(files)))
		off = duintptr(slicedata, off, uint64(len(files)))

		// embed/embed.go type file is:
		//	name string
		//	data string
		//	hash [16]byte
		// Emit one of these per file in the set.
		const hashSize = 16
		hash := make([]byte, hashSize)
		for _, file := range files {
			off = dsymptr(slicedata, off, stringsym(v.Pos, file), 0) // file string
			off = duintptr(slicedata, off, uint64(len(file)))
			if strings.HasSuffix(file, "/") {
				// entry for directory - no data
				off = duintptr(slicedata, off, 0)
				off = duintptr(slicedata, off, 0)
				off += hashSize
			} else {
				fsym, size, err := fileStringSym(v.Pos, embedCfg.Files[file], true, hash)
				if err != nil {
					yyerrorl(v.Pos, "embed %s: %v", file, err)
				}
				off = dsymptr(slicedata, off, fsym, 0) // data string
				off = duintptr(slicedata, off, uint64(size))
				off = int(slicedata.WriteBytes(Ctxt, int64(off), hash))
			}
		}
		ggloblsym(slicedata, int32(off), obj.RODATA|obj.LOCAL)
		sym := v.Sym.Linksym()
		dsymptr(sym, 0, slicedata, 0)
	}
}
472 src/cmd/compile/internal/gc/esc.go Normal file
@@ -0,0 +1,472 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/compile/internal/types"
	"fmt"
)

func escapes(all []*Node) {
	visitBottomUp(all, escapeFuncs)
}

const (
	EscFuncUnknown = 0 + iota
	EscFuncPlanned
	EscFuncStarted
	EscFuncTagged
)

func min8(a, b int8) int8 {
	if a < b {
		return a
	}
	return b
}

func max8(a, b int8) int8 {
	if a > b {
		return a
	}
	return b
}

const (
	EscUnknown = iota
	EscNone    // Does not escape to heap, result, or parameters.
	EscHeap    // Reachable from the heap
	EscNever   // By construction will not escape.
)

// funcSym returns fn.Func.Nname.Sym if no nils are encountered along the way.
func funcSym(fn *Node) *types.Sym {
	if fn == nil || fn.Func.Nname == nil {
		return nil
	}
	return fn.Func.Nname.Sym
}

// Mark labels that have no backjumps to them as not increasing e.loopdepth.
// Walk hasn't generated (goto|label).Left.Sym.Label yet, so we'll cheat
// and set it to one of the following two. Then in esc we'll clear it again.
var (
	looping    Node
	nonlooping Node
)

func isSliceSelfAssign(dst, src *Node) bool {
	// Detect the following special case.
	//
	//	func (b *Buffer) Foo() {
	//		n, m := ...
	//		b.buf = b.buf[n:m]
	//	}
	//
	// This assignment is a no-op for escape analysis,
	// it does not store any new pointers into b that were not already there.
	// However, without this special case b will escape, because we assign to OIND/ODOTPTR.
	// Here we assume that the statement will not contain calls,
	// that is, that order will move any calls to init.
	// Otherwise base ONAME value could change between the moments
	// when we evaluate it for dst and for src.

	// dst is ONAME dereference.
	if dst.Op != ODEREF && dst.Op != ODOTPTR || dst.Left.Op != ONAME {
		return false
	}
	// src is a slice operation.
	switch src.Op {
	case OSLICE, OSLICE3, OSLICESTR:
		// OK.
	case OSLICEARR, OSLICE3ARR:
		// Since arrays are embedded into containing object,
		// slice of non-pointer array will introduce a new pointer into b that was not already there
		// (pointer to b itself). After such assignment, if b contents escape,
		// b escapes as well. If we ignore such OSLICEARR, we will conclude
		// that b does not escape when b contents do.
		//
		// Pointer to an array is OK since it's not stored inside b directly.
		// For slicing an array (not pointer to array), there is an implicit OADDR.
		// We check that to determine non-pointer array slicing.
		if src.Left.Op == OADDR {
			return false
		}
	default:
		return false
	}
	// slice is applied to ONAME dereference.
	if src.Left.Op != ODEREF && src.Left.Op != ODOTPTR || src.Left.Left.Op != ONAME {
		return false
	}
	// dst and src reference the same base ONAME.
	return dst.Left == src.Left.Left
}
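An illustrative sketch of the pattern the function above recognizes (hypothetical example, not from the Go tree): trimming b.buf in place stores no new pointers into *b, so this assignment alone should not force b to escape, which can be observed with `go build -gcflags=-m`.

package demo

type Buffer struct {
	buf []byte
}

func (b *Buffer) Trim(n, m int) {
	b.buf = b.buf[n:m] // ODOTPTR dst, OSLICE src, same base ONAME: ignored by escape analysis
}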

// isSelfAssign reports whether assignment from src to dst can
// be ignored by the escape analysis as it's effectively a self-assignment.
func isSelfAssign(dst, src *Node) bool {
	if isSliceSelfAssign(dst, src) {
		return true
	}

	// Detect trivial assignments that assign back to the same object.
	//
	// It covers these cases:
	//	val.x = val.y
	//	val.x[i] = val.y[j]
	//	val.x1.x2 = val.x1.y2
	//	... etc
	//
	// These assignments do not change assigned object lifetime.

	if dst == nil || src == nil || dst.Op != src.Op {
		return false
	}

	switch dst.Op {
	case ODOT, ODOTPTR:
		// Safe trailing accessors that are permitted to differ.
	case OINDEX:
		if mayAffectMemory(dst.Right) || mayAffectMemory(src.Right) {
			return false
		}
	default:
		return false
	}

	// The expression prefix must be both "safe" and identical.
	return samesafeexpr(dst.Left, src.Left)
}

// mayAffectMemory reports whether evaluation of n may affect the program's
// memory state. If the expression can't affect memory state, then it can be
// safely ignored by the escape analysis.
func mayAffectMemory(n *Node) bool {
	// We may want to use a list of "memory safe" ops instead of generally
	// "side-effect free", which would include all calls and other ops that can
	// allocate or change global state. For now, it's safer to start with the latter.
	//
	// We're ignoring things like division by zero, index out of range,
	// and nil pointer dereference here.
	switch n.Op {
	case ONAME, OCLOSUREVAR, OLITERAL:
		return false

	// Left+Right group.
	case OINDEX, OADD, OSUB, OOR, OXOR, OMUL, OLSH, ORSH, OAND, OANDNOT, ODIV, OMOD:
		return mayAffectMemory(n.Left) || mayAffectMemory(n.Right)

	// Left group.
	case ODOT, ODOTPTR, ODEREF, OCONVNOP, OCONV, OLEN, OCAP,
		ONOT, OBITNOT, OPLUS, ONEG, OALIGNOF, OOFFSETOF, OSIZEOF:
		return mayAffectMemory(n.Left)

	default:
		return true
	}
}

// heapAllocReason returns the reason the given Node must be heap
// allocated, or the empty string if it doesn't.
func heapAllocReason(n *Node) string {
	if n.Type == nil {
		return ""
	}

	// Parameters are always passed via the stack.
	if n.Op == ONAME && (n.Class() == PPARAM || n.Class() == PPARAMOUT) {
		return ""
	}

	if n.Type.Width > maxStackVarSize {
		return "too large for stack"
	}

	if (n.Op == ONEW || n.Op == OPTRLIT) && n.Type.Elem().Width >= maxImplicitStackVarSize {
		return "too large for stack"
	}

	if n.Op == OCLOSURE && closureType(n).Size() >= maxImplicitStackVarSize {
		return "too large for stack"
	}
	if n.Op == OCALLPART && partialCallType(n).Size() >= maxImplicitStackVarSize {
		return "too large for stack"
	}

	if n.Op == OMAKESLICE {
		r := n.Right
		if r == nil {
			r = n.Left
		}
		if !smallintconst(r) {
			return "non-constant size"
		}
		if t := n.Type; t.Elem().Width != 0 && r.Int64Val() >= maxImplicitStackVarSize/t.Elem().Width {
			return "too large for stack"
		}
	}

	return ""
}
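An illustrative sketch of locals that trip the limits checked in heapAllocReason above (hypothetical example, not from the Go tree; the exact diagnostic wording is an assumption). Built with `go build -gcflags=-m`, both should be reported as heap allocated.

package demo

func big() byte {
	var a [20 << 20]byte // > maxStackVarSize (10MB): "too large for stack"
	return a[0]
}

func sized(n int) int {
	s := make([]int, n) // non-constant size: heap allocated
	return len(s)
}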

// addrescapes tags node n as having had its address taken
// by "increasing" the "value" of n.Esc to EscHeap.
// Storage is allocated as necessary to allow the address
// to be taken.
func addrescapes(n *Node) {
	switch n.Op {
	default:
		// Unexpected Op, probably due to a previous type error. Ignore.

	case ODEREF, ODOTPTR:
		// Nothing to do.

	case ONAME:
		if n == nodfp {
			break
		}

		// if this is a tmpname (PAUTO), it was tagged by tmpname as not escaping.
		// on PPARAM it means something different.
		if n.Class() == PAUTO && n.Esc == EscNever {
			break
		}

		// If a closure reference escapes, mark the outer variable as escaping.
		if n.Name.IsClosureVar() {
			addrescapes(n.Name.Defn)
			break
		}

		if n.Class() != PPARAM && n.Class() != PPARAMOUT && n.Class() != PAUTO {
			break
		}

		// This is a plain parameter or local variable that needs to move to the heap,
		// but possibly for the function outside the one we're compiling.
		// That is, if we have:
		//
		//	func f(x int) {
		//		func() {
		//			global = &x
		//		}
		//	}
		//
		// then we're analyzing the inner closure but we need to move x to the
		// heap in f, not in the inner closure. Flip over to f before calling moveToHeap.
		oldfn := Curfn
		Curfn = n.Name.Curfn
		if Curfn.Func.Closure != nil && Curfn.Op == OCLOSURE {
			Curfn = Curfn.Func.Closure
		}
		ln := lineno
		lineno = Curfn.Pos
		moveToHeap(n)
		Curfn = oldfn
		lineno = ln

	// ODOTPTR has already been introduced,
	// so these are the non-pointer ODOT and OINDEX.
	// In &x[0], if x is a slice, then x does not
	// escape--the pointer inside x does, but that
	// is always a heap pointer anyway.
	case ODOT, OINDEX, OPAREN, OCONVNOP:
		if !n.Left.Type.IsSlice() {
			addrescapes(n.Left)
		}
	}
}

// moveToHeap records the parameter or local variable n as moved to the heap.
func moveToHeap(n *Node) {
	if Debug.r != 0 {
		Dump("MOVE", n)
	}
	if compiling_runtime {
		yyerror("%v escapes to heap, not allowed in runtime", n)
	}
	if n.Class() == PAUTOHEAP {
		Dump("n", n)
		Fatalf("double move to heap")
	}

	// Allocate a local stack variable to hold the pointer to the heap copy.
	// temp will add it to the function declaration list automatically.
	heapaddr := temp(types.NewPtr(n.Type))
	heapaddr.Sym = lookup("&" + n.Sym.Name)
	heapaddr.Orig.Sym = heapaddr.Sym
	heapaddr.Pos = n.Pos

	// Unset AutoTemp to persist the &foo variable name through SSA to
	// liveness analysis.
	// TODO(mdempsky/drchase): Cleaner solution?
	heapaddr.Name.SetAutoTemp(false)

	// Parameters have a local stack copy used at function start/end
	// in addition to the copy in the heap that may live longer than
	// the function.
	if n.Class() == PPARAM || n.Class() == PPARAMOUT {
		if n.Xoffset == BADWIDTH {
			Fatalf("addrescapes before param assignment")
		}

		// We rewrite n below to be a heap variable (indirection of heapaddr).
		// Preserve a copy so we can still write code referring to the original,
		// and substitute that copy into the function declaration list
		// so that analyses of the local (on-stack) variables use it.
		stackcopy := newname(n.Sym)
		stackcopy.Type = n.Type
		stackcopy.Xoffset = n.Xoffset
		stackcopy.SetClass(n.Class())
		stackcopy.Name.Param.Heapaddr = heapaddr
		if n.Class() == PPARAMOUT {
			// Make sure the pointer to the heap copy is kept live throughout the function.
			// The function could panic at any point, and then a defer could recover.
			// Thus, we need the pointer to the heap copy always available so the
			// post-deferreturn code can copy the return value back to the stack.
			// See issue 16095.
			heapaddr.Name.SetIsOutputParamHeapAddr(true)
		}
		n.Name.Param.Stackcopy = stackcopy

		// Substitute the stackcopy into the function variable list so that
		// liveness and other analyses use the underlying stack slot
		// and not the now-pseudo-variable n.
		found := false
		for i, d := range Curfn.Func.Dcl {
			if d == n {
				Curfn.Func.Dcl[i] = stackcopy
				found = true
				break
			}
			// Parameters are before locals, so can stop early.
			// This limits the search even in functions with many local variables.
			if d.Class() == PAUTO {
				break
			}
		}
		if !found {
			Fatalf("cannot find %v in local variable list", n)
		}
		Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
	}

	// Modify n in place so that uses of n now mean indirection of the heapaddr.
	n.SetClass(PAUTOHEAP)
	n.Xoffset = 0
	n.Name.Param.Heapaddr = heapaddr
	n.Esc = EscHeap
	if Debug.m != 0 {
		Warnl(n.Pos, "moved to heap: %v", n)
	}
}
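An illustrative sketch of a parameter that moveToHeap would rewrite (hypothetical example, not from the Go tree): x's address outlives f, so x becomes a PAUTOHEAP variable with a stack-resident &x pointer, and `go build -gcflags=-m` reports "moved to heap: x".

package demo

func f(x int) *int {
	return &x // x moves to the heap; an on-stack copy remains for function entry/exit
}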

// This special tag is applied to uintptr variables
// that we believe may hold unsafe.Pointers for
// calls into assembly functions.
const unsafeUintptrTag = "unsafe-uintptr"

// This special tag is applied to uintptr parameters of functions
// marked go:uintptrescapes.
const uintptrEscapesTag = "uintptr-escapes"
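A hedged sketch of the kind of declarations these two tags annotate (hypothetical names; the assembly bodies backing the stubs are not shown; not part of the diff):

package demo

// A body-less function's uintptr argument is assumed to hold a live
// pointer across the call ("unsafe-uintptr"), as for syscall.Syscall.
func rawSyscall(trap, a1 uintptr) uintptr

// With the pragma, pointers converted to uintptr arguments are instead
// forced to escape ("uintptr-escapes").
//
//go:uintptrescapes
func keepsPointer(p uintptr)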

func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string {
	name := func() string {
		if f.Sym != nil {
			return f.Sym.Name
		}
		return fmt.Sprintf("arg#%d", narg)
	}

	if fn.Nbody.Len() == 0 {
		// Assume that uintptr arguments must be held live across the call.
		// This is most important for syscall.Syscall.
		// See golang.org/issue/13372.
		// This really doesn't have much to do with escape analysis per se,
		// but we are reusing the ability to annotate an individual function
		// argument and pass those annotations along to importing code.
		if f.Type.IsUintptr() {
			if Debug.m != 0 {
				Warnl(f.Pos, "assuming %v is unsafe uintptr", name())
			}
			return unsafeUintptrTag
		}

		if !f.Type.HasPointers() { // don't bother tagging for scalars
			return ""
		}

		var esc EscLeaks

		// External functions are assumed unsafe, unless
		// //go:noescape is given before the declaration.
		if fn.Func.Pragma&Noescape != 0 {
			if Debug.m != 0 && f.Sym != nil {
				Warnl(f.Pos, "%v does not escape", name())
			}
		} else {
			if Debug.m != 0 && f.Sym != nil {
				Warnl(f.Pos, "leaking param: %v", name())
			}
			esc.AddHeap(0)
		}

		return esc.Encode()
	}

	if fn.Func.Pragma&UintptrEscapes != 0 {
		if f.Type.IsUintptr() {
			if Debug.m != 0 {
				Warnl(f.Pos, "marking %v as escaping uintptr", name())
			}
			return uintptrEscapesTag
		}
		if f.IsDDD() && f.Type.Elem().IsUintptr() {
			// final argument is ...uintptr.
			if Debug.m != 0 {
				Warnl(f.Pos, "marking %v as escaping ...uintptr", name())
			}
			return uintptrEscapesTag
		}
	}

	if !f.Type.HasPointers() { // don't bother tagging for scalars
		return ""
	}

	// Unnamed parameters are unused and therefore do not escape.
	if f.Sym == nil || f.Sym.IsBlank() {
		var esc EscLeaks
		return esc.Encode()
	}

	n := asNode(f.Nname)
	loc := e.oldLoc(n)
	esc := loc.paramEsc
	esc.Optimize()

	if Debug.m != 0 && !loc.escapes {
		if esc.Empty() {
			Warnl(f.Pos, "%v does not escape", name())
		}
		if x := esc.Heap(); x >= 0 {
			if x == 0 {
				Warnl(f.Pos, "leaking param: %v", name())
			} else {
				// TODO(mdempsky): Mention level=x like below?
				Warnl(f.Pos, "leaking param content: %v", name())
			}
		}
		for i := 0; i < numEscResults; i++ {
			if x := esc.Result(i); x >= 0 {
				res := fn.Type.Results().Field(i).Sym
				Warnl(f.Pos, "leaking param: %v to result %v level=%d", name(), res, x)
			}
		}
	}

	return esc.Encode()
}
1539
src/cmd/compile/internal/gc/escape.go
Normal file
File diff suppressed because it is too large
@@ -5,68 +5,225 @@
package gc

import (
	"cmd/compile/internal/base"
	"cmd/compile/internal/inline"
	"cmd/compile/internal/ir"
	"cmd/compile/internal/typecheck"
	"cmd/compile/internal/types"
	"cmd/internal/bio"
	"cmd/internal/src"
	"fmt"
	"go/constant"
)

var (
	Debug_export int // if set, print debugging information about export data
)

func exportf(bout *bio.Writer, format string, args ...interface{}) {
	fmt.Fprintf(bout, format, args...)
	if base.Debug.Export != 0 {
	if Debug_export != 0 {
		fmt.Printf(format, args...)
	}
}

func dumpexport(bout *bio.Writer) {
	p := &exporter{marked: make(map[*types.Type]bool)}
	for _, n := range typecheck.Target.Exports {
		p.markObject(n)
var asmlist []*Node

// exportsym marks n for export (or reexport).
func exportsym(n *Node) {
	if n.Sym.OnExportList() {
		return
	}
	n.Sym.SetOnExportList(true)

	if Debug.E != 0 {
		fmt.Printf("export symbol %v\n", n.Sym)
	}

	exportlist = append(exportlist, n)
}

func initname(s string) bool {
	return s == "init"
}

func autoexport(n *Node, ctxt Class) {
	if n.Sym.Pkg != localpkg {
		return
	}
	if (ctxt != PEXTERN && ctxt != PFUNC) || dclcontext != PEXTERN {
		return
	}
	if n.Type != nil && n.Type.IsKind(TFUNC) && n.IsMethod() {
		return
	}

	if types.IsExported(n.Sym.Name) || initname(n.Sym.Name) {
		exportsym(n)
	}
	if asmhdr != "" && !n.Sym.Asm() {
		n.Sym.SetAsm(true)
		asmlist = append(asmlist, n)
	}
}

func dumpexport(bout *bio.Writer) {
	// The linker also looks for the $$ marker - use char after $$ to distinguish format.
	exportf(bout, "\n$$B\n") // indicate binary export format
	off := bout.Offset()
	typecheck.WriteExports(bout.Writer)
	iexport(bout.Writer)
	size := bout.Offset() - off
	exportf(bout, "\n$$\n")

	if base.Debug.Export != 0 {
		fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", base.Ctxt.Pkgpath, size)
	if Debug_export != 0 {
		fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", myimportpath, size)
	}
}

func importsym(ipkg *types.Pkg, s *types.Sym, op Op) *Node {
	n := asNode(s.PkgDef())
	if n == nil {
		// iimport should have created a stub ONONAME
		// declaration for all imported symbols. The exception
		// is declarations for Runtimepkg, which are populated
		// by loadsys instead.
		if s.Pkg != Runtimepkg {
			Fatalf("missing ONONAME for %v\n", s)
		}

		n = dclname(s)
		s.SetPkgDef(asTypesNode(n))
		s.Importdef = ipkg
	}
	if n.Op != ONONAME && n.Op != op {
		redeclare(lineno, s, fmt.Sprintf("during import %q", ipkg.Path))
	}
	return n
}

// importtype returns the named type declared by symbol s.
// If no such type has been declared yet, a forward declaration is returned.
// ipkg is the package being imported
func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *types.Type {
	n := importsym(ipkg, s, OTYPE)
	if n.Op != OTYPE {
		t := types.New(TFORW)
		t.Sym = s
		t.Nod = asTypesNode(n)

		n.Op = OTYPE
		n.Pos = pos
		n.Type = t
		n.SetClass(PEXTERN)
	}

	t := n.Type
	if t == nil {
		Fatalf("importtype %v", s)
	}
	return t
}

// importobj declares symbol s as an imported object representable by op.
// ipkg is the package being imported
func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op Op, ctxt Class, t *types.Type) *Node {
	n := importsym(ipkg, s, op)
	if n.Op != ONONAME {
		if n.Op == op && (n.Class() != ctxt || !types.Identical(n.Type, t)) {
			redeclare(lineno, s, fmt.Sprintf("during import %q", ipkg.Path))
		}
		return nil
	}

	n.Op = op
	n.Pos = pos
	n.SetClass(ctxt)
	if ctxt == PFUNC {
		n.Sym.SetFunc(true)
	}
	n.Type = t
	return n
}

// importconst declares symbol s as an imported constant with type t and value val.
// ipkg is the package being imported
func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val Val) {
	n := importobj(ipkg, pos, s, OLITERAL, PEXTERN, t)
	if n == nil { // TODO: Check that value matches.
		return
	}

	n.SetVal(val)

	if Debug.E != 0 {
		fmt.Printf("import const %v %L = %v\n", s, t, val)
	}
}

// importfunc declares symbol s as an imported function with type t.
// ipkg is the package being imported
func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
	n := importobj(ipkg, pos, s, ONAME, PFUNC, t)
	if n == nil {
		return
	}

	n.Func = new(Func)
	t.SetNname(asTypesNode(n))

	if Debug.E != 0 {
		fmt.Printf("import func %v%S\n", s, t)
	}
}

// importvar declares symbol s as an imported variable with type t.
// ipkg is the package being imported
func importvar(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
	n := importobj(ipkg, pos, s, ONAME, PEXTERN, t)
	if n == nil {
		return
	}

	if Debug.E != 0 {
		fmt.Printf("import var %v %L\n", s, t)
	}
}

// importalias declares symbol s as an imported type alias with type t.
// ipkg is the package being imported
func importalias(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
	n := importobj(ipkg, pos, s, OTYPE, PEXTERN, t)
	if n == nil {
		return
	}

	if Debug.E != 0 {
		fmt.Printf("import type %v = %L\n", s, t)
	}
}

func dumpasmhdr() {
	b, err := bio.Create(base.Flag.AsmHdr)
	b, err := bio.Create(asmhdr)
	if err != nil {
		base.Fatalf("%v", err)
		Fatalf("%v", err)
	}
	fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", types.LocalPkg.Name)
	for _, n := range typecheck.Target.Asms {
		if n.Sym().IsBlank() {
	fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", localpkg.Name)
	for _, n := range asmlist {
		if n.Sym.IsBlank() {
			continue
		}
		switch n.Op() {
		case ir.OLITERAL:
			t := n.Val().Kind()
			if t == constant.Float || t == constant.Complex {
		switch n.Op {
		case OLITERAL:
			t := n.Val().Ctype()
			if t == CTFLT || t == CTCPLX {
				break
			}
			fmt.Fprintf(b, "#define const_%s %#v\n", n.Sym().Name, n.Val())
			fmt.Fprintf(b, "#define const_%s %#v\n", n.Sym.Name, n.Val())

		case ir.OTYPE:
			t := n.Type()
		case OTYPE:
			t := n.Type
			if !t.IsStruct() || t.StructType().Map != nil || t.IsFuncArgStruct() {
				break
			}
			fmt.Fprintf(b, "#define %s__size %d\n", n.Sym().Name, int(t.Width))
			fmt.Fprintf(b, "#define %s__size %d\n", n.Sym.Name, int(t.Width))
			for _, f := range t.Fields().Slice() {
				if !f.Sym.IsBlank() {
					fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym().Name, f.Sym.Name, int(f.Offset))
					fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym.Name, f.Sym.Name, int(f.Offset))
				}
			}
		}
@@ -74,83 +231,3 @@ func dumpasmhdr() {

	b.Close()
}

type exporter struct {
	marked map[*types.Type]bool // types already seen by markType
}

// markObject visits a reachable object.
func (p *exporter) markObject(n ir.Node) {
	if n.Op() == ir.ONAME {
		n := n.(*ir.Name)
		if n.Class == ir.PFUNC {
			inline.Inline_Flood(n, typecheck.Export)
		}
	}

	p.markType(n.Type())
}

// markType recursively visits types reachable from t to identify
// functions whose inline bodies may be needed.
func (p *exporter) markType(t *types.Type) {
	if p.marked[t] {
		return
	}
	p.marked[t] = true

	// If this is a named type, mark all of its associated
	// methods. Skip interface types because t.Methods contains
	// only their unexpanded method set (i.e., exclusive of
	// interface embeddings), and the switch statement below
	// handles their full method set.
	if t.Sym() != nil && t.Kind() != types.TINTER {
		for _, m := range t.Methods().Slice() {
			if types.IsExported(m.Sym.Name) {
				p.markObject(ir.AsNode(m.Nname))
			}
		}
	}

	// Recursively mark any types that can be produced given a
	// value of type t: dereferencing a pointer; indexing or
	// iterating over an array, slice, or map; receiving from a
	// channel; accessing a struct field or interface method; or
	// calling a function.
	//
	// Notably, we don't mark function parameter types, because
	// the user already needs some way to construct values of
	// those types.
	switch t.Kind() {
	case types.TPTR, types.TARRAY, types.TSLICE:
		p.markType(t.Elem())

	case types.TCHAN:
		if t.ChanDir().CanRecv() {
			p.markType(t.Elem())
		}

	case types.TMAP:
		p.markType(t.Key())
		p.markType(t.Elem())

	case types.TSTRUCT:
		for _, f := range t.FieldSlice() {
			if types.IsExported(f.Sym.Name) || f.Embedded != 0 {
				p.markType(f.Type)
			}
		}

	case types.TFUNC:
		for _, f := range t.Results().FieldSlice() {
			p.markType(f.Type)
		}

	case types.TINTER:
		for _, f := range t.FieldSlice() {
			if types.IsExported(f.Sym.Name) {
				p.markType(f.Type)
			}
		}
	}
}
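An illustrative sketch of why markType chases the methods of named types (hypothetical package, not from the Go tree): an importer can reach T.Get through the exported variable V, so Get's inline body may be needed downstream.

package a

type T struct{ n int }

// Get's inline body may be needed by importers, which can reach T through
// V, so markType marks it when walking V's type.
func (t T) Get() int { return t.n }

var V T // markObject(V) -> markType(T) -> markObject(T.Get)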

@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package test
package gc

import (
	"internal/testenv"
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package test
package gc

import (
	"math"
1986
src/cmd/compile/internal/gc/fmt.go
Normal file
File diff suppressed because it is too large
86
src/cmd/compile/internal/gc/gen.go
Normal file
@@ -0,0 +1,86 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/compile/internal/types"
	"cmd/internal/obj"
	"cmd/internal/src"
	"strconv"
)

// sysfunc looks up Go function name in package runtime. This function
// must follow the internal calling convention.
func sysfunc(name string) *obj.LSym {
	s := Runtimepkg.Lookup(name)
	s.SetFunc(true)
	return s.Linksym()
}

// sysvar looks up a variable (or assembly function) name in package
// runtime. If this is a function, it may have a special calling
// convention.
func sysvar(name string) *obj.LSym {
	return Runtimepkg.Lookup(name).Linksym()
}

// isParamStackCopy reports whether this is the on-stack copy of a
// function parameter that moved to the heap.
func (n *Node) isParamStackCopy() bool {
	return n.Op == ONAME && (n.Class() == PPARAM || n.Class() == PPARAMOUT) && n.Name.Param.Heapaddr != nil
}

// isParamHeapCopy reports whether this is the on-heap copy of
// a function parameter that moved to the heap.
func (n *Node) isParamHeapCopy() bool {
	return n.Op == ONAME && n.Class() == PAUTOHEAP && n.Name.Param.Stackcopy != nil
}

// autotmpname returns the name for an autotmp variable numbered n.
func autotmpname(n int) string {
	// Give each tmp a different name so that they can be registerized.
	// Add a preceding . to avoid clashing with legal names.
	const prefix = ".autotmp_"
	// Start with a buffer big enough to hold a large n.
	b := []byte(prefix + "      ")[:len(prefix)]
	b = strconv.AppendInt(b, int64(n), 10)
	return types.InternString(b)
}

// make a new Node off the books
func tempAt(pos src.XPos, curfn *Node, t *types.Type) *Node {
	if curfn == nil {
		Fatalf("no curfn for tempAt")
	}
	if curfn.Func.Closure != nil && curfn.Op == OCLOSURE {
		Dump("tempAt", curfn)
		Fatalf("adding tempAt to wrong closure function")
	}
	if t == nil {
		Fatalf("tempAt called with nil type")
	}

	s := &types.Sym{
		Name: autotmpname(len(curfn.Func.Dcl)),
		Pkg:  localpkg,
	}
	n := newnamel(pos, s)
	s.Def = asTypesNode(n)
	n.Type = t
	n.SetClass(PAUTO)
	n.Esc = EscNever
	n.Name.Curfn = curfn
	n.Name.SetUsed(true)
	n.Name.SetAutoTemp(true)
	curfn.Func.Dcl = append(curfn.Func.Dcl, n)

	dowidth(t)

	return n.Orig
}

func temp(t *types.Type) *Node {
	return tempAt(lineno, Curfn, t)
}
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package test
package gc

import (
	"bytes"
349
src/cmd/compile/internal/gc/go.go
Normal file
@@ -0,0 +1,349 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/compile/internal/ssa"
	"cmd/compile/internal/types"
	"cmd/internal/obj"
	"cmd/internal/src"
	"sync"
)

const (
	BADWIDTH = types.BADWIDTH
)

var (
	// maximum size variable which we will allocate on the stack.
	// This limit is for explicit variable declarations like "var x T" or "x := ...".
	// Note: the flag smallframes can update this value.
	maxStackVarSize = int64(10 * 1024 * 1024)

	// maximum size of implicit variables that we will allocate on the stack.
	//	p := new(T)          allocating T on the stack
	//	p := &T{}            allocating T on the stack
	//	s := make([]T, n)    allocating [n]T on the stack
	//	s := []byte("...")   allocating [n]byte on the stack
	// Note: the flag smallframes can update this value.
	maxImplicitStackVarSize = int64(64 * 1024)

	// smallArrayBytes is the maximum size of an array which is considered small.
	// Small arrays will be initialized directly with a sequence of constant stores.
	// Large arrays will be initialized by copying from a static temp.
	// 256 bytes was chosen to minimize generated code + statictmp size.
	smallArrayBytes = int64(256)
)

// isRuntimePkg reports whether p is package runtime.
func isRuntimePkg(p *types.Pkg) bool {
	if compiling_runtime && p == localpkg {
		return true
	}
	return p.Path == "runtime"
}

// isReflectPkg reports whether p is package reflect.
func isReflectPkg(p *types.Pkg) bool {
	if p == localpkg {
		return myimportpath == "reflect"
	}
	return p.Path == "reflect"
}

// The Class of a variable/function describes the "storage class"
// of a variable or function. During parsing, storage classes are
// called declaration contexts.
type Class uint8

//go:generate stringer -type=Class
const (
	Pxxx      Class = iota // no class; used during ssa conversion to indicate pseudo-variables
	PEXTERN                // global variables
	PAUTO                  // local variables
	PAUTOHEAP              // local variables or parameters moved to heap
	PPARAM                 // input arguments
	PPARAMOUT              // output results
	PFUNC                  // global functions

	// Careful: Class is stored in three bits in Node.flags.
	_ = uint((1 << 3) - iota) // static assert for iota <= (1 << 3)
)

// Slices in the runtime are represented by three components:
//
//	type slice struct {
//		ptr unsafe.Pointer
//		len int
//		cap int
//	}
//
// Strings in the runtime are represented by two components:
//
//	type string struct {
//		ptr unsafe.Pointer
//		len int
//	}
//
// These variables are the offsets of fields and sizes of these structs.
var (
	slicePtrOffset int64
	sliceLenOffset int64
	sliceCapOffset int64

	sizeofSlice  int64
	sizeofString int64
)
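An illustrative sketch of the runtime layouts those offsets describe, checked from user code (not part of the diff): on a 64-bit target a slice header is three words and a string header is two.

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	var s []int
	var str string
	fmt.Println(unsafe.Sizeof(s), unsafe.Sizeof(str)) // 24 16 on 64-bit targets
}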

var pragcgobuf [][]string

var outfile string
var linkobj string

// nerrors is the number of compiler errors reported
// since the last call to saveerrors.
var nerrors int

// nsavederrors is the total number of compiler errors
// reported before the last call to saveerrors.
var nsavederrors int

var nsyntaxerrors int

var decldepth int32

var nolocalimports bool

// gc debug flags
type DebugFlags struct {
	P, B, C, E,
	K, L, N, S,
	W, e, h, j,
	l, m, r, w int
}

var Debug DebugFlags

var debugstr string

var Debug_checknil int
var Debug_typeassert int

var localpkg *types.Pkg // package being compiled

var inimport bool // set during import

var itabpkg *types.Pkg // fake pkg for itab entries

var itablinkpkg *types.Pkg // fake package for runtime itab entries

var Runtimepkg *types.Pkg // fake package runtime

var racepkg *types.Pkg // package runtime/race

var msanpkg *types.Pkg // package runtime/msan

var unsafepkg *types.Pkg // package unsafe

var trackpkg *types.Pkg // fake package for field tracking

var mappkg *types.Pkg // fake package for map zero value

var gopkg *types.Pkg // pseudo-package for method symbols on anonymous receiver types

var zerosize int64

var myimportpath string

var localimport string

var asmhdr string

var simtype [NTYPE]types.EType

var (
	isInt     [NTYPE]bool
	isFloat   [NTYPE]bool
	isComplex [NTYPE]bool
	issimple  [NTYPE]bool
)

var (
	okforeq    [NTYPE]bool
	okforadd   [NTYPE]bool
	okforand   [NTYPE]bool
	okfornone  [NTYPE]bool
	okforcmp   [NTYPE]bool
	okforbool  [NTYPE]bool
	okforcap   [NTYPE]bool
	okforlen   [NTYPE]bool
	okforarith [NTYPE]bool
	okforconst [NTYPE]bool
)

var (
	okfor [OEND][]bool
	iscmp [OEND]bool
)

var minintval [NTYPE]*Mpint

var maxintval [NTYPE]*Mpint

var minfltval [NTYPE]*Mpflt

var maxfltval [NTYPE]*Mpflt

var xtop []*Node

var exportlist []*Node

var importlist []*Node // imported functions and methods with inlinable bodies

var (
	funcsymsmu sync.Mutex // protects funcsyms and associated package lookups (see func funcsym)
	funcsyms   []*types.Sym
)

var dclcontext Class // PEXTERN/PAUTO

var Curfn *Node

var Widthptr int

var Widthreg int

var nblank *Node

var typecheckok bool

var compiling_runtime bool

// Compiling the standard library
var compiling_std bool

var use_writebarrier bool

var pure_go bool

var flag_installsuffix string

var flag_race bool

var flag_msan bool

var flagDWARF bool

// Whether we are adding any sort of code instrumentation, such as
// when the race detector is enabled.
var instrumenting bool

// Whether we are tracking lexical scopes for DWARF.
var trackScopes bool

// Controls generation of DWARF inlined instance records. Zero
// disables, 1 emits inlined routines but suppresses var info,
// and 2 emits inlined routines with tracking of formals/locals.
var genDwarfInline int

var debuglive int

var Ctxt *obj.Link

var writearchive bool

var nodfp *Node

var disable_checknil int

var autogeneratedPos src.XPos

// interface to back end

type Arch struct {
	LinkArch *obj.LinkArch

	REGSP     int
	MAXWIDTH  int64
	SoftFloat bool

	PadFrame func(int64) int64

	// ZeroRange zeroes a range of memory on stack. It is only inserted
	// at function entry, and it is ok to clobber registers.
	ZeroRange func(*Progs, *obj.Prog, int64, int64, *uint32) *obj.Prog

	Ginsnop      func(*Progs) *obj.Prog
	Ginsnopdefer func(*Progs) *obj.Prog // special ginsnop for deferreturn

	// SSAMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
	SSAMarkMoves func(*SSAGenState, *ssa.Block)

	// SSAGenValue emits Prog(s) for the Value.
	SSAGenValue func(*SSAGenState, *ssa.Value)

	// SSAGenBlock emits end-of-block Progs. SSAGenValue should be called
	// for all values in the block before SSAGenBlock.
	SSAGenBlock func(s *SSAGenState, b, next *ssa.Block)
}

var thearch Arch

var (
	staticuint64s,
	zerobase *Node

	assertE2I,
	assertE2I2,
	assertI2I,
	assertI2I2,
	deferproc,
	deferprocStack,
	Deferreturn,
	Duffcopy,
	Duffzero,
	gcWriteBarrier,
	goschedguarded,
	growslice,
	msanread,
	msanwrite,
	msanmove,
	newobject,
	newproc,
	panicdivide,
	panicshift,
	panicdottypeE,
	panicdottypeI,
	panicnildottype,
	panicoverflow,
	raceread,
	racereadrange,
	racewrite,
	racewriterange,
	x86HasPOPCNT,
	x86HasSSE41,
	x86HasFMA,
	armHasVFPv4,
	arm64HasATOMICS,
	typedmemclr,
	typedmemmove,
	Udiv,
	writeBarrier,
	zerobaseSym *obj.LSym

	BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym
	ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym

	// Wasm
	WasmMove,
	WasmZero,
	WasmDiv,
	WasmTruncS,
	WasmTruncU,
	SigPanic *obj.LSym
)

// GCWriteBarrierReg maps from registers to gcWriteBarrier implementation LSyms.
var GCWriteBarrierReg map[int16]*obj.LSym
333
src/cmd/compile/internal/gc/gsubr.go
Normal file
@@ -0,0 +1,333 @@
// Derived from Inferno utils/6c/txt.c
// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6c/txt.c
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
// Portions Copyright © 1997-1999 Vita Nuova Limited
// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
// Portions Copyright © 2004,2006 Bruce Ellis
// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
// Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package gc

import (
	"cmd/compile/internal/ssa"
	"cmd/internal/obj"
	"cmd/internal/objabi"
	"cmd/internal/src"
)

var sharedProgArray = new([10000]obj.Prog) // *T instead of T to work around issue 19839

// Progs accumulates Progs for a function and converts them into machine code.
type Progs struct {
	Text      *obj.Prog  // ATEXT Prog for this function
	next      *obj.Prog  // next Prog
	pc        int64      // virtual PC; count of Progs
	pos       src.XPos   // position to use for new Progs
	curfn     *Node      // fn these Progs are for
	progcache []obj.Prog // local progcache
	cacheidx  int        // first free element of progcache

	nextLive LivenessIndex // liveness index for the next Prog
	prevLive LivenessIndex // last emitted liveness index
}

// newProgs returns a new Progs for fn.
// worker indicates which of the backend workers will use the Progs.
func newProgs(fn *Node, worker int) *Progs {
	pp := new(Progs)
	if Ctxt.CanReuseProgs() {
		sz := len(sharedProgArray) / nBackendWorkers
		pp.progcache = sharedProgArray[sz*worker : sz*(worker+1)]
	}
	pp.curfn = fn

	// prime the pump
	pp.next = pp.NewProg()
	pp.clearp(pp.next)

	pp.pos = fn.Pos
	pp.settext(fn)
	// PCDATA tables implicitly start with index -1.
	pp.prevLive = LivenessIndex{-1, false}
	pp.nextLive = pp.prevLive
	return pp
}

func (pp *Progs) NewProg() *obj.Prog {
	var p *obj.Prog
	if pp.cacheidx < len(pp.progcache) {
		p = &pp.progcache[pp.cacheidx]
		pp.cacheidx++
	} else {
		p = new(obj.Prog)
	}
	p.Ctxt = Ctxt
	return p
}

// Flush converts from pp to machine code.
func (pp *Progs) Flush() {
	plist := &obj.Plist{Firstpc: pp.Text, Curfn: pp.curfn}
	obj.Flushplist(Ctxt, plist, pp.NewProg, myimportpath)
}

// Free clears pp and any associated resources.
func (pp *Progs) Free() {
	if Ctxt.CanReuseProgs() {
		// Clear progs to enable GC and avoid abuse.
		s := pp.progcache[:pp.cacheidx]
		for i := range s {
			s[i] = obj.Prog{}
		}
	}
	// Clear pp to avoid abuse.
	*pp = Progs{}
}

// Prog adds a Prog with instruction As to pp.
func (pp *Progs) Prog(as obj.As) *obj.Prog {
	if pp.nextLive.StackMapValid() && pp.nextLive.stackMapIndex != pp.prevLive.stackMapIndex {
		// Emit stack map index change.
		idx := pp.nextLive.stackMapIndex
		pp.prevLive.stackMapIndex = idx
		p := pp.Prog(obj.APCDATA)
		Addrconst(&p.From, objabi.PCDATA_StackMapIndex)
		Addrconst(&p.To, int64(idx))
	}
	if pp.nextLive.isUnsafePoint != pp.prevLive.isUnsafePoint {
		// Emit unsafe-point marker.
		pp.prevLive.isUnsafePoint = pp.nextLive.isUnsafePoint
		p := pp.Prog(obj.APCDATA)
		Addrconst(&p.From, objabi.PCDATA_UnsafePoint)
		if pp.nextLive.isUnsafePoint {
			Addrconst(&p.To, objabi.PCDATA_UnsafePointUnsafe)
		} else {
			Addrconst(&p.To, objabi.PCDATA_UnsafePointSafe)
		}
	}

	p := pp.next
	pp.next = pp.NewProg()
	pp.clearp(pp.next)
	p.Link = pp.next

	if !pp.pos.IsKnown() && Debug.K != 0 {
		Warn("prog: unknown position (line 0)")
	}

	p.As = as
	p.Pos = pp.pos
	if pp.pos.IsStmt() == src.PosIsStmt {
		// Clear IsStmt for later Progs at this pos provided that as can be marked as a stmt
		if ssa.LosesStmtMark(as) {
			return p
		}
		pp.pos = pp.pos.WithNotStmt()
	}
	return p
}

func (pp *Progs) clearp(p *obj.Prog) {
	obj.Nopout(p)
	p.As = obj.AEND
	p.Pc = pp.pc
	pp.pc++
}

func (pp *Progs) Appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16, foffset int64, ttype obj.AddrType, treg int16, toffset int64) *obj.Prog {
	q := pp.NewProg()
	pp.clearp(q)
	q.As = as
	q.Pos = p.Pos
	q.From.Type = ftype
	q.From.Reg = freg
	q.From.Offset = foffset
	q.To.Type = ttype
	q.To.Reg = treg
	q.To.Offset = toffset
	q.Link = p.Link
	p.Link = q
	return q
}

func (pp *Progs) settext(fn *Node) {
	if pp.Text != nil {
		Fatalf("Progs.settext called twice")
	}
	ptxt := pp.Prog(obj.ATEXT)
	pp.Text = ptxt

	fn.Func.lsym.Func().Text = ptxt
	ptxt.From.Type = obj.TYPE_MEM
	ptxt.From.Name = obj.NAME_EXTERN
	ptxt.From.Sym = fn.Func.lsym
}

// initLSym defines f's obj.LSym and initializes it based on the
// properties of f. This includes setting the symbol flags and ABI and
// creating and initializing related DWARF symbols.
//
// initLSym must be called exactly once per function and must be
// called for both functions with bodies and functions without bodies.
func (f *Func) initLSym(hasBody bool) {
	if f.lsym != nil {
		Fatalf("Func.initLSym called twice")
	}

	if nam := f.Nname; !nam.isBlank() {
		f.lsym = nam.Sym.Linksym()
		if f.Pragma&Systemstack != 0 {
			f.lsym.Set(obj.AttrCFunc, true)
		}

		var aliasABI obj.ABI
		needABIAlias := false
		defABI, hasDefABI := symabiDefs[f.lsym.Name]
		if hasDefABI && defABI == obj.ABI0 {
			// Symbol is defined as ABI0. Create an
			// Internal -> ABI0 wrapper.
			f.lsym.SetABI(obj.ABI0)
			needABIAlias, aliasABI = true, obj.ABIInternal
		} else {
			// No ABI override. Check that the symbol is
			// using the expected ABI.
			want := obj.ABIInternal
			if f.lsym.ABI() != want {
				Fatalf("function symbol %s has the wrong ABI %v, expected %v", f.lsym.Name, f.lsym.ABI(), want)
			}
		}

		isLinknameExported := nam.Sym.Linkname != "" && (hasBody || hasDefABI)
		if abi, ok := symabiRefs[f.lsym.Name]; (ok && abi == obj.ABI0) || isLinknameExported {
			// Either 1) this symbol is definitely
			// referenced as ABI0 from this package; or 2)
			// this symbol is defined in this package but
			// given a linkname, indicating that it may be
			// referenced from another package. Create an
			// ABI0 -> Internal wrapper so it can be
			// called as ABI0. In case 2, it's important
			// that we know it's defined in this package
			// since other packages may "pull" symbols
			// using linkname and we don't want to create
			// duplicate ABI wrappers.
			if f.lsym.ABI() != obj.ABI0 {
				needABIAlias, aliasABI = true, obj.ABI0
			}
		}

		if needABIAlias {
			// These LSyms have the same name as the
			// native function, so we create them directly
			// rather than looking them up. The uniqueness
			// of f.lsym ensures uniqueness of asym.
			asym := &obj.LSym{
				Name: f.lsym.Name,
				Type: objabi.SABIALIAS,
				R:    []obj.Reloc{{Sym: f.lsym}}, // 0 size, so "informational"
			}
			asym.SetABI(aliasABI)
			asym.Set(obj.AttrDuplicateOK, true)
			Ctxt.ABIAliases = append(Ctxt.ABIAliases, asym)
		}
	}

	if !hasBody {
		// For body-less functions, we only create the LSym.
		return
	}

	var flag int
	if f.Dupok() {
		flag |= obj.DUPOK
	}
	if f.Wrapper() {
		flag |= obj.WRAPPER
	}
	if f.Needctxt() {
		flag |= obj.NEEDCTXT
	}
	if f.Pragma&Nosplit != 0 {
		flag |= obj.NOSPLIT
	}
	if f.ReflectMethod() {
		flag |= obj.REFLECTMETHOD
	}

	// Clumsy but important.
	// See test/recover.go for test cases and src/reflect/value.go
	// for the actual functions being considered.
	if myimportpath == "reflect" {
		switch f.Nname.Sym.Name {
		case "callReflect", "callMethod":
			flag |= obj.WRAPPER
		}
	}

	Ctxt.InitTextSym(f.lsym, flag)
}

func ggloblnod(nam *Node) {
	s := nam.Sym.Linksym()
	s.Gotype = ngotype(nam).Linksym()
	flags := 0
	if nam.Name.Readonly() {
		flags = obj.RODATA
	}
	if nam.Type != nil && !nam.Type.HasPointers() {
		flags |= obj.NOPTR
	}
	Ctxt.Globl(s, nam.Type.Width, flags)
	if nam.Name.LibfuzzerExtraCounter() {
		s.Type = objabi.SLIBFUZZER_EXTRA_COUNTER
	}
	if nam.Sym.Linkname != "" {
		// Make sure linkname'd symbol is non-package. When a symbol is
		// both imported and linkname'd, s.Pkg may not set to "_" in
		// types.Sym.Linksym because LSym already exists. Set it here.
		s.Pkg = "_"
	}
}

func ggloblsym(s *obj.LSym, width int32, flags int16) {
	if flags&obj.LOCAL != 0 {
		s.Set(obj.AttrLocal, true)
		flags &^= obj.LOCAL
	}
	Ctxt.Globl(s, int64(width), int(flags))
}

func Addrconst(a *obj.Addr, v int64) {
	a.Sym = nil
	a.Type = obj.TYPE_CONST
	a.Offset = v
}

func Patch(p *obj.Prog, to *obj.Prog) {
	if p.To.Type != obj.TYPE_BRANCH {
		Fatalf("patch: not a branch")
	}
	p.To.SetTarget(to)
	p.To.Offset = to.Pc
}
File diff suppressed because it is too large
@@ -2,13 +2,15 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package test

import "testing"
package gc

// Test to make sure we make copies of the values we
// put in interfaces.

import (
	"testing"
)

var x int

func TestEfaceConv1(t *testing.T) {
1117
src/cmd/compile/internal/gc/iimport.go
Normal file
File diff suppressed because it is too large
109
src/cmd/compile/internal/gc/init.go
Normal file
@@ -0,0 +1,109 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/compile/internal/types"
	"cmd/internal/obj"
)

// A function named init is a special case.
// It is called by the initialization before main is run.
// To make it unique within a package and also uncallable,
// the name, normally "pkg.init", is altered to "pkg.init.0".
var renameinitgen int

// Dummy function for autotmps generated during typechecking.
var dummyInitFn = nod(ODCLFUNC, nil, nil)

func renameinit() *types.Sym {
	s := lookupN("init.", renameinitgen)
	renameinitgen++
	return s
}

// fninit makes an initialization record for the package.
// See runtime/proc.go:initTask for its layout.
// The 3 tasks for initialization are:
//  1) Initialize all of the packages the current package depends on.
//  2) Initialize all the variables that have initializers.
//  3) Run any init functions.
func fninit(n []*Node) {
	nf := initOrder(n)

	var deps []*obj.LSym // initTask records for packages the current package depends on
	var fns []*obj.LSym  // functions to call for package initialization

	// Find imported packages with init tasks.
	for _, s := range types.InitSyms {
		deps = append(deps, s.Linksym())
	}

	// Make a function that contains all the initialization statements.
	if len(nf) > 0 {
		lineno = nf[0].Pos // prolog/epilog gets line number of first init stmt
		initializers := lookup("init")
		fn := dclfunc(initializers, nod(OTFUNC, nil, nil))
		for _, dcl := range dummyInitFn.Func.Dcl {
			dcl.Name.Curfn = fn
		}
		fn.Func.Dcl = append(fn.Func.Dcl, dummyInitFn.Func.Dcl...)
		dummyInitFn.Func.Dcl = nil

		fn.Nbody.Set(nf)
		funcbody()

		fn = typecheck(fn, ctxStmt)
		Curfn = fn
		typecheckslice(nf, ctxStmt)
		Curfn = nil
		xtop = append(xtop, fn)
		fns = append(fns, initializers.Linksym())
	}
	if dummyInitFn.Func.Dcl != nil {
		// We only generate temps using dummyInitFn if there
		// are package-scope initialization statements, so
		// something's weird if we get here.
		Fatalf("dummyInitFn still has declarations")
	}
	dummyInitFn = nil

	// Record user init functions.
	for i := 0; i < renameinitgen; i++ {
		s := lookupN("init.", i)
		fn := asNode(s.Def).Name.Defn
		// Skip init functions with empty bodies.
		if fn.Nbody.Len() == 1 && fn.Nbody.First().Op == OEMPTY {
			continue
		}
		fns = append(fns, s.Linksym())
	}

	if len(deps) == 0 && len(fns) == 0 && localpkg.Name != "main" && localpkg.Name != "runtime" {
		return // nothing to initialize
	}

	// Make an .inittask structure.
	sym := lookup(".inittask")
	nn := newname(sym)
	nn.Type = types.Types[TUINT8] // dummy type
	nn.SetClass(PEXTERN)
	sym.Def = asTypesNode(nn)
	exportsym(nn)
	lsym := sym.Linksym()
	ot := 0
	ot = duintptr(lsym, ot, 0) // state: not initialized yet
	ot = duintptr(lsym, ot, uint64(len(deps)))
	ot = duintptr(lsym, ot, uint64(len(fns)))
	for _, d := range deps {
		ot = dsymptr(lsym, ot, d, 0)
	}
	for _, f := range fns {
		ot = dsymptr(lsym, ot, f, 0)
	}
	// An initTask has pointers, but none into the Go heap.
	// It's not quite read only, the state field must be modifiable.
	ggloblsym(lsym, int32(ot), obj.NOPTR)
}
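An illustrative sketch of the record fninit emits, as a Go struct mirroring the runtime/proc.go initTask layout it references (field names are assumptions based on the comments above; not part of the diff):

package demo

type initTaskLayout struct {
	state uintptr // 0 = not initialized yet
	ndeps uintptr
	nfns  uintptr
	// followed in memory by ndeps dependency-task pointers
	// and nfns init-function pointers
}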
@@ -2,16 +2,12 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package pkginit
package gc

import (
	"bytes"
	"container/heap"
	"fmt"

	"cmd/compile/internal/base"
	"cmd/compile/internal/ir"
	"cmd/compile/internal/staticinit"
)

// Package initialization
@@ -64,57 +60,56 @@ const (
type InitOrder struct {
	// blocking maps initialization assignments to the assignments
	// that depend on it.
	blocking map[ir.Node][]ir.Node
	blocking map[*Node][]*Node

	// ready is the queue of Pending initialization assignments
	// that are ready for initialization.
	ready declOrder

	order map[ir.Node]int
}

// initOrder computes initialization order for a list l of
// package-level declarations (in declaration order) and outputs the
// corresponding list of statements to include in the init() function
// body.
func initOrder(l []ir.Node) []ir.Node {
	s := staticinit.Schedule{
		Plans: make(map[ir.Node]*staticinit.Plan),
		Temps: make(map[ir.Node]*ir.Name),
func initOrder(l []*Node) []*Node {
	s := InitSchedule{
		initplans: make(map[*Node]*InitPlan),
		inittemps: make(map[*Node]*Node),
	}
	o := InitOrder{
		blocking: make(map[ir.Node][]ir.Node),
		order:    make(map[ir.Node]int),
		blocking: make(map[*Node][]*Node),
	}

	// Process all package-level assignment in declaration order.
	for _, n := range l {
		switch n.Op() {
		case ir.OAS, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
		switch n.Op {
		case OAS, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
			o.processAssign(n)
			o.flushReady(s.StaticInit)
		case ir.ODCLCONST, ir.ODCLFUNC, ir.ODCLTYPE:
			o.flushReady(s.staticInit)
		case ODCLCONST, ODCLFUNC, ODCLTYPE:
			// nop
		default:
			base.Fatalf("unexpected package-level statement: %v", n)
			Fatalf("unexpected package-level statement: %v", n)
		}
	}

	// Check that all assignments are now Done; if not, there must
	// have been a dependency cycle.
	for _, n := range l {
		switch n.Op() {
		case ir.OAS, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
			if o.order[n] != orderDone {
		switch n.Op {
		case OAS, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
			if n.Initorder() != InitDone {
				// If there have already been errors
				// printed, those errors may have
				// confused us and there might not be
				// a loop. Let the user fix those
				// first.
				base.ExitIfErrors()
				if nerrors > 0 {
					errorexit()
				}

				o.findInitLoopAndExit(firstLHS(n), new([]*ir.Name), new(ir.NameSet))
				base.Fatalf("initialization unfinished, but failed to identify loop")
				findInitLoopAndExit(firstLHS(n), new([]*Node), make(map[*Node]bool))
				Fatalf("initialization unfinished, but failed to identify loop")
			}
		}
	}
@@ -122,56 +117,58 @@ func initOrder(l []ir.Node) []ir.Node {
	// Invariant consistency check. If this is non-zero, then we
	// should have found a cycle above.
	if len(o.blocking) != 0 {
		base.Fatalf("expected empty map: %v", o.blocking)
		Fatalf("expected empty map: %v", o.blocking)
	}

	return s.Out
	return s.out
}

func (o *InitOrder) processAssign(n ir.Node) {
	if _, ok := o.order[n]; ok {
		base.Fatalf("unexpected state: %v, %v", n, o.order[n])
func (o *InitOrder) processAssign(n *Node) {
	if n.Initorder() != InitNotStarted || n.Xoffset != BADWIDTH {
		Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Xoffset)
	}
	o.order[n] = 0

	n.SetInitorder(InitPending)
	n.Xoffset = 0

	// Compute number of variable dependencies and build the
	// inverse dependency ("blocking") graph.
	for dep := range collectDeps(n, true) {
		defn := dep.Defn
		defn := dep.Name.Defn
		// Skip dependencies on functions (PFUNC) and
		// variables already initialized (InitDone).
		if dep.Class != ir.PEXTERN || o.order[defn] == orderDone {
		if dep.Class() != PEXTERN || defn.Initorder() == InitDone {
			continue
		}
		o.order[n]++
		n.Xoffset++
		o.blocking[defn] = append(o.blocking[defn], n)
	}

	if o.order[n] == 0 {
	if n.Xoffset == 0 {
		heap.Push(&o.ready, n)
	}
}

const orderDone = -1000

// flushReady repeatedly applies initialize to the earliest (in
// declaration order) assignment ready for initialization and updates
// the inverse dependency ("blocking") graph.
func (o *InitOrder) flushReady(initialize func(ir.Node)) {
func (o *InitOrder) flushReady(initialize func(*Node)) {
	for o.ready.Len() != 0 {
		n := heap.Pop(&o.ready).(ir.Node)
		if order, ok := o.order[n]; !ok || order != 0 {
			base.Fatalf("unexpected state: %v, %v, %v", n, ok, order)
		n := heap.Pop(&o.ready).(*Node)
		if n.Initorder() != InitPending || n.Xoffset != 0 {
			Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Xoffset)
		}

		initialize(n)
		o.order[n] = orderDone
		n.SetInitorder(InitDone)
		n.Xoffset = BADWIDTH

		blocked := o.blocking[n]
		delete(o.blocking, n)

		for _, m := range blocked {
			if o.order[m]--; o.order[m] == 0 {
			m.Xoffset--
			if m.Xoffset == 0 {
				heap.Push(&o.ready, m)
			}
		}
@@ -184,7 +181,7 @@ func (o *InitOrder) flushReady(initialize func(ir.Node)) {
|
||||
// path points to a slice used for tracking the sequence of
|
||||
// variables/functions visited. Using a pointer to a slice allows the
|
||||
// slice capacity to grow and limit reallocations.
|
||||
func (o *InitOrder) findInitLoopAndExit(n *ir.Name, path *[]*ir.Name, ok *ir.NameSet) {
|
||||
func findInitLoopAndExit(n *Node, path *[]*Node, ok map[*Node]bool) {
|
||||
for i, x := range *path {
|
||||
if x == n {
|
||||
reportInitLoopAndExit((*path)[i:])
|
||||
@@ -194,25 +191,24 @@ func (o *InitOrder) findInitLoopAndExit(n *ir.Name, path *[]*ir.Name, ok *ir.Nam
|
||||
|
||||
// There might be multiple loops involving n; by sorting
|
||||
// references, we deterministically pick the one reported.
|
||||
refers := collectDeps(n.Defn, false).Sorted(func(ni, nj *ir.Name) bool {
|
||||
return ni.Pos().Before(nj.Pos())
|
||||
refers := collectDeps(n.Name.Defn, false).Sorted(func(ni, nj *Node) bool {
|
||||
return ni.Pos.Before(nj.Pos)
|
||||
})
|
||||
|
||||
*path = append(*path, n)
|
||||
for _, ref := range refers {
|
||||
// Short-circuit variables that were initialized.
|
||||
if ref.Class == ir.PEXTERN && o.order[ref.Defn] == orderDone || ok.Has(ref) {
|
||||
if ref.Class() == PEXTERN && ref.Name.Defn.Initorder() == InitDone || ok[ref] {
|
||||
continue
|
||||
}
|
||||
|
||||
o.findInitLoopAndExit(ref, path, ok)
|
||||
findInitLoopAndExit(ref, path, ok)
|
||||
}
|
||||
|
||||
// n is not involved in a cycle.
|
||||
// Record that fact to avoid checking it again when reached another way,
|
||||
// or else this traversal will take exponential time traversing all paths
|
||||
// through the part of the package's call graph implicated in the cycle.
|
||||
ok.Add(n)
|
||||
ok[n] = true
|
||||
|
||||
*path = (*path)[:len(*path)-1]
|
||||
}
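
The loop-reporting path above is easiest to see with a concrete cycle. A minimal package that fails to compile with the "initialization loop" error assembled below (hypothetical example):

package p

// The compiler rejects this with an error roughly of the form:
//	initialization loop:
//		a refers to
//		b refers to
//		c refers to
//		a
var a = b
var b = c
var c = a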
@@ -220,12 +216,12 @@ func (o *InitOrder) findInitLoopAndExit(n *ir.Name, path *[]*ir.Nam
// reportInitLoopAndExit reports an initialization loop as an error
// and exits. However, if l is not actually an initialization loop, it
// simply returns instead.
func reportInitLoopAndExit(l []*ir.Name) {
func reportInitLoopAndExit(l []*Node) {
	// Rotate loop so that the earliest variable declaration is at
	// the start.
	i := -1
	for j, n := range l {
		if n.Class == ir.PEXTERN && (i == -1 || n.Pos().Before(l[i].Pos())) {
		if n.Class() == PEXTERN && (i == -1 || n.Pos.Before(l[i].Pos)) {
			i = j
		}
	}
@@ -243,75 +239,69 @@ func reportInitLoopAndExit(l []*ir.Name) {
	var msg bytes.Buffer
	fmt.Fprintf(&msg, "initialization loop:\n")
	for _, n := range l {
		fmt.Fprintf(&msg, "\t%v: %v refers to\n", ir.Line(n), n)
		fmt.Fprintf(&msg, "\t%v: %v refers to\n", n.Line(), n)
	}
	fmt.Fprintf(&msg, "\t%v: %v", ir.Line(l[0]), l[0])
	fmt.Fprintf(&msg, "\t%v: %v", l[0].Line(), l[0])

	base.ErrorfAt(l[0].Pos(), msg.String())
	base.ErrorExit()
	yyerrorl(l[0].Pos, msg.String())
	errorexit()
}

// collectDeps returns all of the package-level functions and
// variables that declaration n depends on. If transitive is true,
// then it also includes the transitive dependencies of any depended
// upon functions (but not variables).
func collectDeps(n ir.Node, transitive bool) ir.NameSet {
func collectDeps(n *Node, transitive bool) NodeSet {
	d := initDeps{transitive: transitive}
	switch n.Op() {
	case ir.OAS:
		n := n.(*ir.AssignStmt)
		d.inspect(n.Y)
	case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
		n := n.(*ir.AssignListStmt)
		d.inspect(n.Rhs[0])
	case ir.ODCLFUNC:
		n := n.(*ir.Func)
		d.inspectList(n.Body)
	switch n.Op {
	case OAS:
		d.inspect(n.Right)
	case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
		d.inspect(n.Right)
	case ODCLFUNC:
		d.inspectList(n.Nbody)
	default:
		base.Fatalf("unexpected Op: %v", n.Op())
		Fatalf("unexpected Op: %v", n.Op)
	}
	return d.seen
}

type initDeps struct {
	transitive bool
	seen ir.NameSet
	cvisit func(ir.Node)
	seen NodeSet
}

func (d *initDeps) cachedVisit() func(ir.Node) {
	if d.cvisit == nil {
		d.cvisit = d.visit // cache closure
	}
	return d.cvisit
}

func (d *initDeps) inspect(n ir.Node) { ir.Visit(n, d.cachedVisit()) }
func (d *initDeps) inspectList(l ir.Nodes) { ir.VisitList(l, d.cachedVisit()) }
func (d *initDeps) inspect(n *Node) { inspect(n, d.visit) }
func (d *initDeps) inspectList(l Nodes) { inspectList(l, d.visit) }

// visit calls foundDep on any package-level functions or variables
// referenced by n, if any.
func (d *initDeps) visit(n ir.Node) {
	switch n.Op() {
	case ir.ONAME:
		n := n.(*ir.Name)
		switch n.Class {
		case ir.PEXTERN, ir.PFUNC:
func (d *initDeps) visit(n *Node) bool {
	switch n.Op {
	case ONAME:
		if n.isMethodExpression() {
			d.foundDep(asNode(n.Type.FuncType().Nname))
			return false
		}

		switch n.Class() {
		case PEXTERN, PFUNC:
			d.foundDep(n)
		}

	case ir.OCLOSURE:
		n := n.(*ir.ClosureExpr)
		d.inspectList(n.Func.Body)
	case OCLOSURE:
		d.inspectList(n.Func.Closure.Nbody)

	case ir.ODOTMETH, ir.OCALLPART, ir.OMETHEXPR:
		d.foundDep(ir.MethodExprName(n))
	case ODOTMETH, OCALLPART:
		d.foundDep(asNode(n.Type.FuncType().Nname))
	}

	return true
}

// foundDep records that we've found a dependency on n by adding it to
// seen.
func (d *initDeps) foundDep(n *ir.Name) {
func (d *initDeps) foundDep(n *Node) {
	// Can happen with method expressions involving interface
	// types; e.g., fixedbugs/issue4495.go.
	if n == nil {
@@ -320,7 +310,7 @@ func (d *initDeps) foundDep(n *ir.Name) {

	// Names without definitions aren't interesting as far as
	// initialization ordering goes.
	if n.Defn == nil {
	if n.Name.Defn == nil {
		return
	}

@@ -328,8 +318,8 @@ func (d *initDeps) foundDep(n *ir.Name) {
		return
	}
	d.seen.Add(n)
	if d.transitive && n.Class == ir.PFUNC {
		d.inspectList(n.Defn.(*ir.Func).Body)
	if d.transitive && n.Class() == PFUNC {
		d.inspectList(n.Name.Defn.Nbody)
	}
}

@@ -340,15 +330,13 @@ func (d *initDeps) foundDep(n *ir.Name) {
// an OAS node's Pos may not be unique. For example, given the
// declaration "var a, b = f(), g()", "a" must be ordered before "b",
// but both OAS nodes use the "=" token's position as their Pos.
type declOrder []ir.Node
type declOrder []*Node

func (s declOrder) Len() int { return len(s) }
func (s declOrder) Less(i, j int) bool {
	return firstLHS(s[i]).Pos().Before(firstLHS(s[j]).Pos())
}
func (s declOrder) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s declOrder) Len() int { return len(s) }
func (s declOrder) Less(i, j int) bool { return firstLHS(s[i]).Pos.Before(firstLHS(s[j]).Pos) }
func (s declOrder) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

func (s *declOrder) Push(x interface{}) { *s = append(*s, x.(ir.Node)) }
func (s *declOrder) Push(x interface{}) { *s = append(*s, x.(*Node)) }
func (s *declOrder) Pop() interface{} {
	n := (*s)[len(*s)-1]
	*s = (*s)[:len(*s)-1]
@@ -357,16 +345,14 @@ func (s *declOrder) Pop() interface{} {

// firstLHS returns the first expression on the left-hand side of
// assignment n.
func firstLHS(n ir.Node) *ir.Name {
	switch n.Op() {
	case ir.OAS:
		n := n.(*ir.AssignStmt)
		return n.X.Name()
	case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2RECV, ir.OAS2MAPR:
		n := n.(*ir.AssignListStmt)
		return n.Lhs[0].Name()
func firstLHS(n *Node) *Node {
	switch n.Op {
	case OAS:
		return n.Left
	case OAS2DOTTYPE, OAS2FUNC, OAS2RECV, OAS2MAPR:
		return n.List.First()
	}

	base.Fatalf("unexpected Op: %v", n.Op())
	Fatalf("unexpected Op: %v", n.Op)
	return nil
}
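
declOrder above is the standard container/heap pattern: Len/Less/Swap define the ordering, Push/Pop manage the backing slice, and heap.Pop always returns the least element according to Less. The same pattern over plain ints, as an illustrative standalone sketch:

package main

import "container/heap"

type intHeap []int

func (h intHeap) Len() int            { return len(h) }
func (h intHeap) Less(i, j int) bool  { return h[i] < h[j] }
func (h intHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *intHeap) Push(x interface{}) { *h = append(*h, x.(int)) }
func (h *intHeap) Pop() interface{} {
	old := *h
	n := len(old) - 1
	x := old[n]
	*h = old[:n]
	return x
}

func main() {
	h := &intHeap{5, 2, 8}
	heap.Init(h)
	heap.Push(h, 1)
	println(heap.Pop(h).(int)) // 1: smallest element comes out first
}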
1507 src/cmd/compile/internal/gc/inl.go Normal file
File diff suppressed because it is too large
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package test
package gc

import (
	"bufio"
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package test
package gc

import (
	"internal/testenv"
@@ -2,17 +2,24 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package noder
package gc

import (
	"fmt"
	"strings"

	"cmd/compile/internal/ir"
	"cmd/compile/internal/syntax"
	"cmd/internal/objabi"
	"cmd/internal/src"
	"fmt"
	"strings"
)

// lineno is the source position at the start of the most recently lexed token.
// TODO(gri) rename and eventually remove
var lineno src.XPos

func makePos(base *src.PosBase, line, col uint) src.XPos {
	return Ctxt.PosTable.XPos(src.MakePos(base, line, col))
}

func isSpace(c rune) bool {
	return c == ' ' || c == '\t' || c == '\n' || c == '\r'
}
@@ -21,52 +28,78 @@ func isQuoted(s string) bool {
	return len(s) >= 2 && s[0] == '"' && s[len(s)-1] == '"'
}

const (
	funcPragmas = ir.Nointerface |
		ir.Noescape |
		ir.Norace |
		ir.Nosplit |
		ir.Noinline |
		ir.NoCheckPtr |
		ir.RegisterParams | // TODO remove after register abi is working
		ir.CgoUnsafeArgs |
		ir.UintptrEscapes |
		ir.Systemstack |
		ir.Nowritebarrier |
		ir.Nowritebarrierrec |
		ir.Yeswritebarrierrec
type PragmaFlag int16

	typePragmas = ir.NotInHeap
const (
	// Func pragmas.
	Nointerface PragmaFlag = 1 << iota
	Noescape       // func parameters don't escape
	Norace         // func must not have race detector annotations
	Nosplit        // func should not execute on separate stack
	Noinline       // func should not be inlined
	NoCheckPtr     // func should not be instrumented by checkptr
	CgoUnsafeArgs  // treat a pointer to one arg as a pointer to them all
	UintptrEscapes // pointers converted to uintptr escape

	// Runtime-only func pragmas.
	// See ../../../../runtime/README.md for detailed descriptions.
	Systemstack        // func must run on system stack
	Nowritebarrier     // emit compiler error instead of write barrier
	Nowritebarrierrec  // error on write barrier in this or recursive callees
	Yeswritebarrierrec // cancels Nowritebarrierrec in this function and callees

	// Runtime and cgo type pragmas
	NotInHeap // values of this type must not be heap allocated

	// Go command pragmas
	GoBuildPragma
)
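
Both the old PragmaFlag constants and the new ir equivalents rely on the same 1<<iota bitmask idiom: each pragma owns one bit, so pragma sets combine with | and are tested with &. A compact sketch of the idiom with illustrative names:

package main

type flag int16

const (
	flagA flag = 1 << iota // 1
	flagB                  // 2
	flagC                  // 4
)

func main() {
	set := flagA | flagC
	println(set&flagB != 0) // false: flagB is not in the set
	println(set&flagC != 0) // true
}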

func pragmaFlag(verb string) ir.PragmaFlag {
const (
	FuncPragmas = Nointerface |
		Noescape |
		Norace |
		Nosplit |
		Noinline |
		NoCheckPtr |
		CgoUnsafeArgs |
		UintptrEscapes |
		Systemstack |
		Nowritebarrier |
		Nowritebarrierrec |
		Yeswritebarrierrec

	TypePragmas = NotInHeap
)

func pragmaFlag(verb string) PragmaFlag {
	switch verb {
	case "go:build":
		return ir.GoBuildPragma
		return GoBuildPragma
	case "go:nointerface":
		if objabi.Fieldtrack_enabled != 0 {
			return ir.Nointerface
			return Nointerface
		}
	case "go:noescape":
		return ir.Noescape
		return Noescape
	case "go:norace":
		return ir.Norace
		return Norace
	case "go:nosplit":
		return ir.Nosplit | ir.NoCheckPtr // implies NoCheckPtr (see #34972)
		return Nosplit | NoCheckPtr // implies NoCheckPtr (see #34972)
	case "go:noinline":
		return ir.Noinline
		return Noinline
	case "go:nocheckptr":
		return ir.NoCheckPtr
		return NoCheckPtr
	case "go:systemstack":
		return ir.Systemstack
		return Systemstack
	case "go:nowritebarrier":
		return ir.Nowritebarrier
		return Nowritebarrier
	case "go:nowritebarrierrec":
		return ir.Nowritebarrierrec | ir.Nowritebarrier // implies Nowritebarrier
		return Nowritebarrierrec | Nowritebarrier // implies Nowritebarrier
	case "go:yeswritebarrierrec":
		return ir.Yeswritebarrierrec
		return Yeswritebarrierrec
	case "go:cgo_unsafe_args":
		return ir.CgoUnsafeArgs | ir.NoCheckPtr // implies NoCheckPtr (see #34968)
		return CgoUnsafeArgs | NoCheckPtr // implies NoCheckPtr (see #34968)
	case "go:uintptrescapes":
		// For the next function declared in the file
		// any uintptr arguments may be pointer values
@@ -79,11 +112,9 @@ func pragmaFlag(verb string) ir.PragmaFlag {
		// call. The conversion to uintptr must appear
		// in the argument list.
		// Used in syscall/dll_windows.go.
		return ir.UintptrEscapes
	case "go:registerparams": // TODO remove after register abi is working
		return ir.RegisterParams
		return UintptrEscapes
	case "go:notinheap":
		return ir.NotInHeap
		return NotInHeap
	}
	return 0
}
@@ -2,14 +2,13 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package noder
package gc

import (
	"cmd/compile/internal/syntax"
	"reflect"
	"runtime"
	"testing"

	"cmd/compile/internal/syntax"
)

func eq(a, b []string) bool {
@@ -1,4 +1,4 @@
package test
package gc

import "testing"

File diff suppressed because it is too large
@@ -4,7 +4,7 @@

// +build darwin dragonfly freebsd linux netbsd openbsd

package typecheck
package gc

import (
	"os"
@@ -4,7 +4,7 @@

// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd

package typecheck
package gc

import (
	"io"
@@ -33,12 +33,9 @@ func main() {
	var b bytes.Buffer
	fmt.Fprintln(&b, "// Code generated by mkbuiltin.go. DO NOT EDIT.")
	fmt.Fprintln(&b)
	fmt.Fprintln(&b, "package typecheck")
	fmt.Fprintln(&b, "package gc")
	fmt.Fprintln(&b)
	fmt.Fprintln(&b, `import (`)
	fmt.Fprintln(&b, `	"cmd/compile/internal/types"`)
	fmt.Fprintln(&b, `	"cmd/internal/src"`)
	fmt.Fprintln(&b, `)`)
	fmt.Fprintln(&b, `import "cmd/compile/internal/types"`)

	mkbuiltin(&b, "runtime")

@@ -143,16 +140,16 @@ func (i *typeInterner) mktype(t ast.Expr) string {
	case *ast.Ident:
		switch t.Name {
		case "byte":
			return "types.ByteType"
			return "types.Bytetype"
		case "rune":
			return "types.RuneType"
			return "types.Runetype"
		}
		return fmt.Sprintf("types.Types[types.T%s]", strings.ToUpper(t.Name))
		return fmt.Sprintf("types.Types[T%s]", strings.ToUpper(t.Name))
	case *ast.SelectorExpr:
		if t.X.(*ast.Ident).Name != "unsafe" || t.Sel.Name != "Pointer" {
			log.Fatalf("unhandled type: %#v", t)
		}
		return "types.Types[types.TUNSAFEPTR]"
		return "types.Types[TUNSAFEPTR]"

	case *ast.ArrayType:
		if t.Len == nil {
@@ -169,18 +166,18 @@ func (i *typeInterner) mktype(t ast.Expr) string {
		}
		return fmt.Sprintf("types.NewChan(%s, %s)", i.subtype(t.Value), dir)
	case *ast.FuncType:
		return fmt.Sprintf("types.NewSignature(types.NoPkg, nil, %s, %s)", i.fields(t.Params, false), i.fields(t.Results, false))
		return fmt.Sprintf("functype(nil, %s, %s)", i.fields(t.Params, false), i.fields(t.Results, false))
	case *ast.InterfaceType:
		if len(t.Methods.List) != 0 {
			log.Fatal("non-empty interfaces unsupported")
		}
		return "types.Types[types.TINTER]"
		return "types.Types[TINTER]"
	case *ast.MapType:
		return fmt.Sprintf("types.NewMap(%s, %s)", i.subtype(t.Key), i.subtype(t.Value))
	case *ast.StarExpr:
		return fmt.Sprintf("types.NewPtr(%s)", i.subtype(t.X))
	case *ast.StructType:
		return fmt.Sprintf("types.NewStruct(types.NoPkg, %s)", i.fields(t.Fields, true))
		return fmt.Sprintf("tostruct(%s)", i.fields(t.Fields, true))

	default:
		log.Fatalf("unhandled type: %#v", t)
@@ -196,18 +193,18 @@ func (i *typeInterner) fields(fl *ast.FieldList, keepNames bool) string {
	for _, f := range fl.List {
		typ := i.subtype(f.Type)
		if len(f.Names) == 0 {
			res = append(res, fmt.Sprintf("types.NewField(src.NoXPos, nil, %s)", typ))
			res = append(res, fmt.Sprintf("anonfield(%s)", typ))
		} else {
			for _, name := range f.Names {
				if keepNames {
					res = append(res, fmt.Sprintf("types.NewField(src.NoXPos, Lookup(%q), %s)", name.Name, typ))
					res = append(res, fmt.Sprintf("namedfield(%q, %s)", name.Name, typ))
				} else {
					res = append(res, fmt.Sprintf("types.NewField(src.NoXPos, nil, %s)", typ))
					res = append(res, fmt.Sprintf("anonfield(%s)", typ))
				}
			}
		}
	}
	return fmt.Sprintf("[]*types.Field{%s}", strings.Join(res, ", "))
	return fmt.Sprintf("[]*Node{%s}", strings.Join(res, ", "))
}

func intconst(e ast.Expr) int64 {
357 src/cmd/compile/internal/gc/mpfloat.go Normal file
@@ -0,0 +1,357 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"fmt"
	"math"
	"math/big"
)

// implements float arithmetic

const (
	// Maximum size in bits for Mpints before signalling
	// overflow and also mantissa precision for Mpflts.
	Mpprec = 512
	// Turn on for constant arithmetic debugging output.
	Mpdebug = false
)

// Mpflt represents a floating-point constant.
type Mpflt struct {
	Val big.Float
}

// Mpcplx represents a complex constant.
type Mpcplx struct {
	Real Mpflt
	Imag Mpflt
}

// Use newMpflt (not new(Mpflt)!) to get the correct default precision.
func newMpflt() *Mpflt {
	var a Mpflt
	a.Val.SetPrec(Mpprec)
	return &a
}

// Use newMpcmplx (not new(Mpcplx)!) to get the correct default precision.
func newMpcmplx() *Mpcplx {
	var a Mpcplx
	a.Real = *newMpflt()
	a.Imag = *newMpflt()
	return &a
}
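
Mpflt is a thin wrapper over math/big's Float pinned to Mpprec bits of mantissa. A standalone sketch of the underlying big.Float usage, assuming only the standard library:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	const prec = 512 // the same role Mpprec plays above
	a := new(big.Float).SetPrec(prec).SetFloat64(1)
	b := new(big.Float).SetPrec(prec).SetFloat64(3)
	q := new(big.Float).SetPrec(prec).Quo(a, b)
	fmt.Println(q.Text('g', 10)) // 0.3333333333
	fmt.Println(q.IsInf())       // false: overflow would show up here
}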

func (a *Mpflt) SetInt(b *Mpint) {
	if b.checkOverflow(0) {
		// sign doesn't really matter but copy anyway
		a.Val.SetInf(b.Val.Sign() < 0)
		return
	}
	a.Val.SetInt(&b.Val)
}

func (a *Mpflt) Set(b *Mpflt) {
	a.Val.Set(&b.Val)
}

func (a *Mpflt) Add(b *Mpflt) {
	if Mpdebug {
		fmt.Printf("\n%v + %v", a, b)
	}

	a.Val.Add(&a.Val, &b.Val)

	if Mpdebug {
		fmt.Printf(" = %v\n\n", a)
	}
}

func (a *Mpflt) AddFloat64(c float64) {
	var b Mpflt

	b.SetFloat64(c)
	a.Add(&b)
}

func (a *Mpflt) Sub(b *Mpflt) {
	if Mpdebug {
		fmt.Printf("\n%v - %v", a, b)
	}

	a.Val.Sub(&a.Val, &b.Val)

	if Mpdebug {
		fmt.Printf(" = %v\n\n", a)
	}
}

func (a *Mpflt) Mul(b *Mpflt) {
	if Mpdebug {
		fmt.Printf("%v\n * %v\n", a, b)
	}

	a.Val.Mul(&a.Val, &b.Val)

	if Mpdebug {
		fmt.Printf(" = %v\n\n", a)
	}
}

func (a *Mpflt) MulFloat64(c float64) {
	var b Mpflt

	b.SetFloat64(c)
	a.Mul(&b)
}

func (a *Mpflt) Quo(b *Mpflt) {
	if Mpdebug {
		fmt.Printf("%v\n / %v\n", a, b)
	}

	a.Val.Quo(&a.Val, &b.Val)

	if Mpdebug {
		fmt.Printf(" = %v\n\n", a)
	}
}

func (a *Mpflt) Cmp(b *Mpflt) int {
	return a.Val.Cmp(&b.Val)
}

func (a *Mpflt) CmpFloat64(c float64) int {
	if c == 0 {
		return a.Val.Sign() // common case shortcut
	}
	return a.Val.Cmp(big.NewFloat(c))
}

func (a *Mpflt) Float64() float64 {
	x, _ := a.Val.Float64()

	// check for overflow
	if math.IsInf(x, 0) && nsavederrors+nerrors == 0 {
		Fatalf("ovf in Mpflt Float64")
	}

	return x + 0 // avoid -0 (should not be needed, but be conservative)
}

func (a *Mpflt) Float32() float64 {
	x32, _ := a.Val.Float32()
	x := float64(x32)

	// check for overflow
	if math.IsInf(x, 0) && nsavederrors+nerrors == 0 {
		Fatalf("ovf in Mpflt Float32")
	}

	return x + 0 // avoid -0 (should not be needed, but be conservative)
}

func (a *Mpflt) SetFloat64(c float64) {
	if Mpdebug {
		fmt.Printf("\nconst %g", c)
	}

	// convert -0 to 0
	if c == 0 {
		c = 0
	}
	a.Val.SetFloat64(c)

	if Mpdebug {
		fmt.Printf(" = %v\n", a)
	}
}

func (a *Mpflt) Neg() {
	// avoid -0
	if a.Val.Sign() != 0 {
		a.Val.Neg(&a.Val)
	}
}

func (a *Mpflt) SetString(as string) {
	f, _, err := a.Val.Parse(as, 0)
	if err != nil {
		yyerror("malformed constant: %s (%v)", as, err)
		a.Val.SetFloat64(0)
		return
	}

	if f.IsInf() {
		yyerror("constant too large: %s", as)
		a.Val.SetFloat64(0)
		return
	}

	// -0 becomes 0
	if f.Sign() == 0 && f.Signbit() {
		a.Val.SetFloat64(0)
	}
}

func (f *Mpflt) String() string {
	return f.Val.Text('b', 0)
}

func (fvp *Mpflt) GoString() string {
	// determine sign
	sign := ""
	f := &fvp.Val
	if f.Sign() < 0 {
		sign = "-"
		f = new(big.Float).Abs(f)
	}

	// Don't try to convert infinities (will not terminate).
	if f.IsInf() {
		return sign + "Inf"
	}

	// Use exact fmt formatting if in float64 range (common case):
	// proceed if f doesn't underflow to 0 or overflow to inf.
	if x, _ := f.Float64(); f.Sign() == 0 == (x == 0) && !math.IsInf(x, 0) {
		return fmt.Sprintf("%s%.6g", sign, x)
	}

	// Out of float64 range. Do approximate manual to decimal
	// conversion to avoid precise but possibly slow Float
	// formatting.
	// f = mant * 2**exp
	var mant big.Float
	exp := f.MantExp(&mant) // 0.5 <= mant < 1.0

	// approximate float64 mantissa m and decimal exponent d
	// f ~ m * 10**d
	m, _ := mant.Float64()                     // 0.5 <= m < 1.0
	d := float64(exp) * (math.Ln2 / math.Ln10) // log_10(2)

	// adjust m for truncated (integer) decimal exponent e
	e := int64(d)
	m *= math.Pow(10, d-float64(e))

	// ensure 1 <= m < 10
	switch {
	case m < 1-0.5e-6:
		// The %.6g format below rounds m to 5 digits after the
		// decimal point. Make sure that m*10 < 10 even after
		// rounding up: m*10 + 0.5e-5 < 10 => m < 1 - 0.5e-6.
		m *= 10
		e--
	case m >= 10:
		m /= 10
		e++
	}

	return fmt.Sprintf("%s%.6ge%+d", sign, m, e)
}
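
The out-of-range branch above rewrites f = mant * 2**exp as m * 10**e by hand. A self-contained sketch of just that arithmetic, useful for checking the steps (the function name is hypothetical):

package main

import (
	"fmt"
	"math"
)

// approxDecimal converts mant * 2**exp (0.5 <= mant < 1) to m * 10**e
// with 1 <= m < 10, mirroring the conversion in GoString above.
func approxDecimal(mant float64, exp int) (float64, int64) {
	d := float64(exp) * (math.Ln2 / math.Ln10) // decimal exponent of 2**exp
	e := int64(d)
	m := mant * math.Pow(10, d-float64(e)) // fold fractional exponent into m
	switch {
	case m < 1:
		m *= 10
		e--
	case m >= 10:
		m /= 10
		e++
	}
	return m, e
}

func main() {
	// 0.75 * 2**1100 is far outside float64 range.
	m, e := approxDecimal(0.75, 1100)
	fmt.Printf("%.6ge%+d\n", m, e) // about 1.01885e+331
}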

// complex multiply v *= rv
// (a, b) * (c, d) = (a*c - b*d, b*c + a*d)
func (v *Mpcplx) Mul(rv *Mpcplx) {
	var ac, ad, bc, bd Mpflt

	ac.Set(&v.Real)
	ac.Mul(&rv.Real) // ac

	bd.Set(&v.Imag)
	bd.Mul(&rv.Imag) // bd

	bc.Set(&v.Imag)
	bc.Mul(&rv.Real) // bc

	ad.Set(&v.Real)
	ad.Mul(&rv.Imag) // ad

	v.Real.Set(&ac)
	v.Real.Sub(&bd) // ac-bd

	v.Imag.Set(&bc)
	v.Imag.Add(&ad) // bc+ad
}

// complex divide v /= rv
// (a, b) / (c, d) = ((a*c + b*d), (b*c - a*d))/(c*c + d*d)
func (v *Mpcplx) Div(rv *Mpcplx) bool {
	if rv.Real.CmpFloat64(0) == 0 && rv.Imag.CmpFloat64(0) == 0 {
		return false
	}

	var ac, ad, bc, bd, cc_plus_dd Mpflt

	cc_plus_dd.Set(&rv.Real)
	cc_plus_dd.Mul(&rv.Real) // cc

	ac.Set(&rv.Imag)
	ac.Mul(&rv.Imag)    // dd
	cc_plus_dd.Add(&ac) // cc+dd

	// We already checked that c and d are not both zero, but we can't
	// assume that c²+d² != 0 follows, because for tiny values of c
	// and/or d c²+d² can underflow to zero. Check that c²+d² is
	// nonzero, return if it's not.
	if cc_plus_dd.CmpFloat64(0) == 0 {
		return false
	}

	ac.Set(&v.Real)
	ac.Mul(&rv.Real) // ac

	bd.Set(&v.Imag)
	bd.Mul(&rv.Imag) // bd

	bc.Set(&v.Imag)
	bc.Mul(&rv.Real) // bc

	ad.Set(&v.Real)
	ad.Mul(&rv.Imag) // ad

	v.Real.Set(&ac)
	v.Real.Add(&bd)         // ac+bd
	v.Real.Quo(&cc_plus_dd) // (ac+bd)/(cc+dd)

	v.Imag.Set(&bc)
	v.Imag.Sub(&ad)         // bc-ad
	v.Imag.Quo(&cc_plus_dd) // (bc-ad)/(cc+dd)

	return true
}
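
A quick cross-check of the division identity implemented above, using complex128 in place of Mpflt (illustrative only):

package main

import "fmt"

func main() {
	a, b := 3.0, 4.0 // v = a + bi
	c, d := 1.0, 2.0 // rv = c + di
	den := c*c + d*d
	re := (a*c + b*d) / den // (ac+bd)/(cc+dd)
	im := (b*c - a*d) / den // (bc-ad)/(cc+dd)
	fmt.Println(complex(re, im))               // (2.2-0.4i)
	fmt.Println(complex(a, b) / complex(c, d)) // same result
}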

func (v *Mpcplx) String() string {
	return fmt.Sprintf("(%s+%si)", v.Real.String(), v.Imag.String())
}

func (v *Mpcplx) GoString() string {
	var re string
	sre := v.Real.CmpFloat64(0)
	if sre != 0 {
		re = v.Real.GoString()
	}

	var im string
	sim := v.Imag.CmpFloat64(0)
	if sim != 0 {
		im = v.Imag.GoString()
	}

	switch {
	case sre == 0 && sim == 0:
		return "0"
	case sre == 0:
		return im + "i"
	case sim == 0:
		return re
	case sim < 0:
		return fmt.Sprintf("(%s%si)", re, im)
	default:
		return fmt.Sprintf("(%s+%si)", re, im)
	}
}
304 src/cmd/compile/internal/gc/mpint.go Normal file
@@ -0,0 +1,304 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"fmt"
	"math/big"
)

// implements integer arithmetic

// Mpint represents an integer constant.
type Mpint struct {
	Val  big.Int
	Ovf  bool // set if Val overflowed compiler limit (sticky)
	Rune bool // set if syntax indicates default type rune
}

func (a *Mpint) SetOverflow() {
	a.Val.SetUint64(1) // avoid spurious div-zero errors
	a.Ovf = true
}

func (a *Mpint) checkOverflow(extra int) bool {
	// We don't need to be precise here, any reasonable upper limit would do.
	// For now, use existing limit so we pass all the tests unchanged.
	if a.Val.BitLen()+extra > Mpprec {
		a.SetOverflow()
	}
	return a.Ovf
}

func (a *Mpint) Set(b *Mpint) {
	a.Val.Set(&b.Val)
}

func (a *Mpint) SetFloat(b *Mpflt) bool {
	// avoid converting huge floating-point numbers to integers
	// (2*Mpprec is large enough to permit all tests to pass)
	if b.Val.MantExp(nil) > 2*Mpprec {
		a.SetOverflow()
		return false
	}

	if _, acc := b.Val.Int(&a.Val); acc == big.Exact {
		return true
	}

	const delta = 16 // a reasonably small number of bits > 0
	var t big.Float
	t.SetPrec(Mpprec - delta)

	// try rounding down a little
	t.SetMode(big.ToZero)
	t.Set(&b.Val)
	if _, acc := t.Int(&a.Val); acc == big.Exact {
		return true
	}

	// try rounding up a little
	t.SetMode(big.AwayFromZero)
	t.Set(&b.Val)
	if _, acc := t.Int(&a.Val); acc == big.Exact {
		return true
	}

	a.Ovf = false
	return false
}
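
SetFloat's fallback probes two rounding modes at slightly reduced precision before giving up. A small demonstration of that big.Float rounding-mode API, with values chosen purely for illustration:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	f := new(big.Float).SetPrec(200)
	f.SetString("3.0000000000000000001") // not exactly an integer
	var i big.Int
	if _, acc := f.Int(&i); acc == big.Exact {
		fmt.Println("exact:", i.String())
		return
	}
	// Retry at reduced precision, truncating toward zero, the same
	// move SetFloat makes with big.ToZero above.
	var t big.Float
	t.SetPrec(48)
	t.SetMode(big.ToZero)
	t.Set(f)
	if _, acc := t.Int(&i); acc == big.Exact {
		fmt.Println("rounded to:", i.String()) // 3
	}
}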

func (a *Mpint) Add(b *Mpint) {
	if a.Ovf || b.Ovf {
		if nsavederrors+nerrors == 0 {
			Fatalf("ovf in Mpint Add")
		}
		a.SetOverflow()
		return
	}

	a.Val.Add(&a.Val, &b.Val)

	if a.checkOverflow(0) {
		yyerror("constant addition overflow")
	}
}

func (a *Mpint) Sub(b *Mpint) {
	if a.Ovf || b.Ovf {
		if nsavederrors+nerrors == 0 {
			Fatalf("ovf in Mpint Sub")
		}
		a.SetOverflow()
		return
	}

	a.Val.Sub(&a.Val, &b.Val)

	if a.checkOverflow(0) {
		yyerror("constant subtraction overflow")
	}
}

func (a *Mpint) Mul(b *Mpint) {
	if a.Ovf || b.Ovf {
		if nsavederrors+nerrors == 0 {
			Fatalf("ovf in Mpint Mul")
		}
		a.SetOverflow()
		return
	}

	a.Val.Mul(&a.Val, &b.Val)

	if a.checkOverflow(0) {
		yyerror("constant multiplication overflow")
	}
}

func (a *Mpint) Quo(b *Mpint) {
	if a.Ovf || b.Ovf {
		if nsavederrors+nerrors == 0 {
			Fatalf("ovf in Mpint Quo")
		}
		a.SetOverflow()
		return
	}

	a.Val.Quo(&a.Val, &b.Val)

	if a.checkOverflow(0) {
		// can only happen for div-0 which should be checked elsewhere
		yyerror("constant division overflow")
	}
}

func (a *Mpint) Rem(b *Mpint) {
	if a.Ovf || b.Ovf {
		if nsavederrors+nerrors == 0 {
			Fatalf("ovf in Mpint Rem")
		}
		a.SetOverflow()
		return
	}

	a.Val.Rem(&a.Val, &b.Val)

	if a.checkOverflow(0) {
		// should never happen
		yyerror("constant modulo overflow")
	}
}

func (a *Mpint) Or(b *Mpint) {
	if a.Ovf || b.Ovf {
		if nsavederrors+nerrors == 0 {
			Fatalf("ovf in Mpint Or")
		}
		a.SetOverflow()
		return
	}

	a.Val.Or(&a.Val, &b.Val)
}

func (a *Mpint) And(b *Mpint) {
	if a.Ovf || b.Ovf {
		if nsavederrors+nerrors == 0 {
			Fatalf("ovf in Mpint And")
		}
		a.SetOverflow()
		return
	}

	a.Val.And(&a.Val, &b.Val)
}

func (a *Mpint) AndNot(b *Mpint) {
	if a.Ovf || b.Ovf {
		if nsavederrors+nerrors == 0 {
			Fatalf("ovf in Mpint AndNot")
		}
		a.SetOverflow()
		return
	}

	a.Val.AndNot(&a.Val, &b.Val)
}

func (a *Mpint) Xor(b *Mpint) {
	if a.Ovf || b.Ovf {
		if nsavederrors+nerrors == 0 {
			Fatalf("ovf in Mpint Xor")
		}
		a.SetOverflow()
		return
	}

	a.Val.Xor(&a.Val, &b.Val)
}

func (a *Mpint) Lsh(b *Mpint) {
	if a.Ovf || b.Ovf {
		if nsavederrors+nerrors == 0 {
			Fatalf("ovf in Mpint Lsh")
		}
		a.SetOverflow()
		return
	}

	s := b.Int64()
	if s < 0 || s >= Mpprec {
		msg := "shift count too large"
		if s < 0 {
			msg = "invalid negative shift count"
		}
		yyerror("%s: %d", msg, s)
		a.SetInt64(0)
		return
	}

	if a.checkOverflow(int(s)) {
		yyerror("constant shift overflow")
		return
	}
	a.Val.Lsh(&a.Val, uint(s))
}

func (a *Mpint) Rsh(b *Mpint) {
	if a.Ovf || b.Ovf {
		if nsavederrors+nerrors == 0 {
			Fatalf("ovf in Mpint Rsh")
		}
		a.SetOverflow()
		return
	}

	s := b.Int64()
	if s < 0 {
		yyerror("invalid negative shift count: %d", s)
		if a.Val.Sign() < 0 {
			a.SetInt64(-1)
		} else {
			a.SetInt64(0)
		}
		return
	}

	a.Val.Rsh(&a.Val, uint(s))
}

func (a *Mpint) Cmp(b *Mpint) int {
	return a.Val.Cmp(&b.Val)
}

func (a *Mpint) CmpInt64(c int64) int {
	if c == 0 {
		return a.Val.Sign() // common case shortcut
	}
	return a.Val.Cmp(big.NewInt(c))
}

func (a *Mpint) Neg() {
	a.Val.Neg(&a.Val)
}

func (a *Mpint) Int64() int64 {
	if a.Ovf {
		if nsavederrors+nerrors == 0 {
			Fatalf("constant overflow")
		}
		return 0
	}

	return a.Val.Int64()
}

func (a *Mpint) SetInt64(c int64) {
	a.Val.SetInt64(c)
}

func (a *Mpint) SetString(as string) {
	_, ok := a.Val.SetString(as, 0)
	if !ok {
		// The lexer checks for correct syntax of the literal
		// and reports detailed errors. Thus SetString should
		// never fail (in theory it might run out of memory,
		// but that wouldn't be reported as an error here).
		Fatalf("malformed integer constant: %s", as)
		return
	}
	if a.checkOverflow(0) {
		yyerror("constant too large: %s", as)
	}
}

func (a *Mpint) GoString() string {
	return a.Val.String()
}

func (a *Mpint) String() string {
	return fmt.Sprintf("%#x", &a.Val)
}
1756 src/cmd/compile/internal/gc/noder.go Normal file
File diff suppressed because it is too large
@@ -5,21 +5,28 @@
package gc

import (
	"cmd/compile/internal/base"
	"cmd/compile/internal/ir"
	"cmd/compile/internal/objw"
	"cmd/compile/internal/reflectdata"
	"cmd/compile/internal/staticdata"
	"cmd/compile/internal/typecheck"
	"cmd/compile/internal/types"
	"cmd/internal/archive"
	"cmd/internal/bio"
	"cmd/internal/obj"
	"cmd/internal/objabi"
	"cmd/internal/src"
	"crypto/sha256"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"sort"
	"strconv"
)

// architecture-independent object file output
const ArhdrSize = 60

func formathdr(arhdr []byte, name string, size int64) {
	copy(arhdr[:], fmt.Sprintf("%-16s%-12d%-6d%-6d%-8o%-10d`\n", name, 0, 0, 0, 0644, size))
}
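
formathdr writes a classic Unix ar(5) member header: 16-byte name, 12-byte mtime, 6-byte uid and gid, 8-byte octal mode, and 10-byte size, terminated by a backquote and newline. Those widths sum to 60 bytes, which is where ArhdrSize comes from; a quick standalone check:

package main

import "fmt"

func main() {
	hdr := fmt.Sprintf("%-16s%-12d%-6d%-6d%-8o%-10d`\n", "_go_.o", 0, 0, 0, 0644, 1024)
	fmt.Println(len(hdr)) // 60, matching ArhdrSize
}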

// These modes say which kind of object file to generate.
// The default use of the toolchain is to set both bits,
// generating a combined compiler+linker object, one that
@@ -39,20 +46,20 @@ const (
)

func dumpobj() {
	if base.Flag.LinkObj == "" {
		dumpobj1(base.Flag.LowerO, modeCompilerObj|modeLinkerObj)
	if linkobj == "" {
		dumpobj1(outfile, modeCompilerObj|modeLinkerObj)
		return
	}
	dumpobj1(base.Flag.LowerO, modeCompilerObj)
	dumpobj1(base.Flag.LinkObj, modeLinkerObj)
	dumpobj1(outfile, modeCompilerObj)
	dumpobj1(linkobj, modeLinkerObj)
}

func dumpobj1(outfile string, mode int) {
	bout, err := bio.Create(outfile)
	if err != nil {
		base.FlushErrors()
		flusherrors()
		fmt.Printf("can't create %s: %v\n", outfile, err)
		base.ErrorExit()
		errorexit()
	}
	defer bout.Close()
	bout.WriteString("!<arch>\n")
@@ -71,17 +78,17 @@ func dumpobj1(outfile string, mode int) {

func printObjHeader(bout *bio.Writer) {
	fmt.Fprintf(bout, "go object %s %s %s %s\n", objabi.GOOS, objabi.GOARCH, objabi.Version, objabi.Expstring())
	if base.Flag.BuildID != "" {
		fmt.Fprintf(bout, "build id %q\n", base.Flag.BuildID)
	if buildid != "" {
		fmt.Fprintf(bout, "build id %q\n", buildid)
	}
	if types.LocalPkg.Name == "main" {
	if localpkg.Name == "main" {
		fmt.Fprintf(bout, "main\n")
	}
	fmt.Fprintf(bout, "\n") // header ends with blank line
}

func startArchiveEntry(bout *bio.Writer) int64 {
	var arhdr [archive.HeaderSize]byte
	var arhdr [ArhdrSize]byte
	bout.Write(arhdr[:])
	return bout.Offset()
}
@@ -92,10 +99,10 @@ func finishArchiveEntry(bout *bio.Writer, start int64, name string) {
	if size&1 != 0 {
		bout.WriteByte(0)
	}
	bout.MustSeek(start-archive.HeaderSize, 0)
	bout.MustSeek(start-ArhdrSize, 0)

	var arhdr [archive.HeaderSize]byte
	archive.FormatHeader(arhdr[:], name, size)
	var arhdr [ArhdrSize]byte
	formathdr(arhdr[:], name, size)
	bout.Write(arhdr[:])
	bout.Flush()
	bout.MustSeek(start+size+(size&1), 0)
@@ -107,21 +114,22 @@ func dumpCompilerObj(bout *bio.Writer) {
}

func dumpdata() {
	numExterns := len(typecheck.Target.Externs)
	numDecls := len(typecheck.Target.Decls)
	externs := len(externdcl)
	xtops := len(xtop)

	dumpglobls(typecheck.Target.Externs)
	reflectdata.CollectPTabs()
	numExports := len(typecheck.Target.Exports)
	addsignats(typecheck.Target.Externs)
	reflectdata.WriteRuntimeTypes()
	reflectdata.WriteTabs()
	numPTabs, numITabs := reflectdata.CountTabs()
	reflectdata.WriteImportStrings()
	reflectdata.WriteBasicTypes()
	dumpglobls()
	addptabs()
	exportlistLen := len(exportlist)
	addsignats(externdcl)
	dumpsignats()
	dumptabs()
	ptabsLen := len(ptabs)
	itabsLen := len(itabs)
	dumpimportstrings()
	dumpbasictypes()
	dumpembeds()

	// Calls to WriteRuntimeTypes can generate functions,
	// Calls to dumpsignats can generate functions,
	// like method wrappers and hash and equality routines.
	// Compile any generated functions, process any new resulting types, repeat.
	// This can't loop forever, because there is no way to generate an infinite
@@ -129,108 +137,169 @@ func dumpdata() {
	// In the typical case, we loop 0 or 1 times.
	// It was not until issue 24761 that we found any code that required a loop at all.
	for {
		for i := numDecls; i < len(typecheck.Target.Decls); i++ {
			if n, ok := typecheck.Target.Decls[i].(*ir.Func); ok {
				enqueueFunc(n)
		for i := xtops; i < len(xtop); i++ {
			n := xtop[i]
			if n.Op == ODCLFUNC {
				funccompile(n)
			}
		}
		numDecls = len(typecheck.Target.Decls)
		xtops = len(xtop)
		compileFunctions()
		reflectdata.WriteRuntimeTypes()
		if numDecls == len(typecheck.Target.Decls) {
		dumpsignats()
		if xtops == len(xtop) {
			break
		}
	}

	// Dump extra globals.
	dumpglobls(typecheck.Target.Externs[numExterns:])
	tmp := externdcl

	if reflectdata.ZeroSize > 0 {
		zero := base.PkgLinksym("go.map", "zero", obj.ABI0)
		objw.Global(zero, int32(reflectdata.ZeroSize), obj.DUPOK|obj.RODATA)
	if externdcl != nil {
		externdcl = externdcl[externs:]
	}
	dumpglobls()
	externdcl = tmp

	if zerosize > 0 {
		zero := mappkg.Lookup("zero")
		ggloblsym(zero.Linksym(), int32(zerosize), obj.DUPOK|obj.RODATA)
	}

	staticdata.WriteFuncSyms()
	addGCLocals()

	if numExports != len(typecheck.Target.Exports) {
		base.Fatalf("Target.Exports changed after compile functions loop")
	if exportlistLen != len(exportlist) {
		Fatalf("exportlist changed after compile functions loop")
	}
	newNumPTabs, newNumITabs := reflectdata.CountTabs()
	if newNumPTabs != numPTabs {
		base.Fatalf("ptabs changed after compile functions loop")
	if ptabsLen != len(ptabs) {
		Fatalf("ptabs changed after compile functions loop")
	}
	if newNumITabs != numITabs {
		base.Fatalf("itabs changed after compile functions loop")
	if itabsLen != len(itabs) {
		Fatalf("itabs changed after compile functions loop")
	}
}

func dumpLinkerObj(bout *bio.Writer) {
	printObjHeader(bout)

	if len(typecheck.Target.CgoPragmas) != 0 {
	if len(pragcgobuf) != 0 {
		// write empty export section; must be before cgo section
		fmt.Fprintf(bout, "\n$$\n\n$$\n\n")
		fmt.Fprintf(bout, "\n$$ // cgo\n")
		if err := json.NewEncoder(bout).Encode(typecheck.Target.CgoPragmas); err != nil {
			base.Fatalf("serializing pragcgobuf: %v", err)
		if err := json.NewEncoder(bout).Encode(pragcgobuf); err != nil {
			Fatalf("serializing pragcgobuf: %v", err)
		}
		fmt.Fprintf(bout, "\n$$\n\n")
	}

	fmt.Fprintf(bout, "\n!\n")

	obj.WriteObjFile(base.Ctxt, bout)
	obj.WriteObjFile(Ctxt, bout)
}

func dumpGlobal(n *ir.Name) {
	if n.Type() == nil {
		base.Fatalf("external %v nil type\n", n)
	}
	if n.Class == ir.PFUNC {
func addptabs() {
	if !Ctxt.Flag_dynlink || localpkg.Name != "main" {
		return
	}
	if n.Sym().Pkg != types.LocalPkg {
	for _, exportn := range exportlist {
		s := exportn.Sym
		n := asNode(s.Def)
		if n == nil {
			continue
		}
		if n.Op != ONAME {
			continue
		}
		if !types.IsExported(s.Name) {
			continue
		}
		if s.Pkg.Name != "main" {
			continue
		}
		if n.Type.Etype == TFUNC && n.Class() == PFUNC {
			// function
			ptabs = append(ptabs, ptabEntry{s: s, t: asNode(s.Def).Type})
		} else {
			// variable
			ptabs = append(ptabs, ptabEntry{s: s, t: types.NewPtr(asNode(s.Def).Type)})
		}
	}
}

func dumpGlobal(n *Node) {
	if n.Type == nil {
		Fatalf("external %v nil type\n", n)
	}
	if n.Class() == PFUNC {
		return
	}
	types.CalcSize(n.Type())
	if n.Sym.Pkg != localpkg {
		return
	}
	dowidth(n.Type)
	ggloblnod(n)
}

func dumpGlobalConst(n ir.Node) {
func dumpGlobalConst(n *Node) {
	// only export typed constants
	t := n.Type()
	t := n.Type
	if t == nil {
		return
	}
	if n.Sym().Pkg != types.LocalPkg {
	if n.Sym.Pkg != localpkg {
		return
	}
	// only export integer constants for now
	if !t.IsInteger() {
		return
	}
	v := n.Val()
	if t.IsUntyped() {
		// Export untyped integers as int (if they fit).
		t = types.Types[types.TINT]
		if ir.ConstOverflow(v, t) {
	switch t.Etype {
	case TINT8:
	case TINT16:
	case TINT32:
	case TINT64:
	case TINT:
	case TUINT8:
	case TUINT16:
	case TUINT32:
	case TUINT64:
	case TUINT:
	case TUINTPTR:
		// ok
	case TIDEAL:
		if !Isconst(n, CTINT) {
			return
		}
		x := n.Val().U.(*Mpint)
		if x.Cmp(minintval[TINT]) < 0 || x.Cmp(maxintval[TINT]) > 0 {
			return
		}
		// Ideal integers we export as int (if they fit).
		t = types.Types[TINT]
	default:
		return
	}
	base.Ctxt.DwarfIntConst(base.Ctxt.Pkgpath, n.Sym().Name, types.TypeSymName(t), ir.IntVal(t, v))
	Ctxt.DwarfIntConst(myimportpath, n.Sym.Name, typesymname(t), n.Int64Val())
}

func dumpglobls(externs []ir.Node) {
func dumpglobls() {
	// add globals
	for _, n := range externs {
		switch n.Op() {
		case ir.ONAME:
			dumpGlobal(n.(*ir.Name))
		case ir.OLITERAL:
	for _, n := range externdcl {
		switch n.Op {
		case ONAME:
			dumpGlobal(n)
		case OLITERAL:
			dumpGlobalConst(n)
		}
	}

	sort.Slice(funcsyms, func(i, j int) bool {
		return funcsyms[i].LinksymName() < funcsyms[j].LinksymName()
	})
	for _, s := range funcsyms {
		sf := s.Pkg.Lookup(funcsymname(s)).Linksym()
		dsymptr(sf, 0, s.Linksym(), 0)
		ggloblsym(sf, int32(Widthptr), obj.DUPOK|obj.RODATA)
	}

	// Do not reprocess funcsyms on next dumpglobls call.
	funcsyms = nil
}

// addGCLocals adds gcargs, gclocals, gcregs, and stack object symbols to Ctxt.Data.
@@ -238,60 +307,332 @@ func dumpglobls(externs []ir.Node) {
// This is done during the sequential phase after compilation, since
// global symbols can't be declared during parallel compilation.
func addGCLocals() {
	for _, s := range base.Ctxt.Text {
	for _, s := range Ctxt.Text {
		fn := s.Func()
		if fn == nil {
			continue
		}
		for _, gcsym := range []*obj.LSym{fn.GCArgs, fn.GCLocals} {
			if gcsym != nil && !gcsym.OnList() {
				objw.Global(gcsym, int32(len(gcsym.P)), obj.RODATA|obj.DUPOK)
				ggloblsym(gcsym, int32(len(gcsym.P)), obj.RODATA|obj.DUPOK)
			}
		}
		if x := fn.StackObjects; x != nil {
			attr := int16(obj.RODATA)
			objw.Global(x, int32(len(x.P)), attr)
			ggloblsym(x, int32(len(x.P)), attr)
			x.Set(obj.AttrStatic, true)
		}
		if x := fn.OpenCodedDeferInfo; x != nil {
			objw.Global(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
			ggloblsym(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
		}
	}
}

func ggloblnod(nam *ir.Name) {
	s := nam.Linksym()
	s.Gotype = reflectdata.TypeLinksym(nam.Type())
	flags := 0
	if nam.Readonly() {
		flags = obj.RODATA
	}
	if nam.Type() != nil && !nam.Type().HasPointers() {
		flags |= obj.NOPTR
	}
	base.Ctxt.Globl(s, nam.Type().Width, flags)
	if nam.LibfuzzerExtraCounter() {
		s.Type = objabi.SLIBFUZZER_EXTRA_COUNTER
	}
	if nam.Sym().Linkname != "" {
		// Make sure linkname'd symbol is non-package. When a symbol is
		// both imported and linkname'd, s.Pkg may not be set to "_" in
		// types.Sym.Linksym because LSym already exists. Set it here.
		s.Pkg = "_"
func duintxx(s *obj.LSym, off int, v uint64, wid int) int {
	if off&(wid-1) != 0 {
		Fatalf("duintxxLSym: misaligned: v=%d wid=%d off=%d", v, wid, off)
	}
	s.WriteInt(Ctxt, int64(off), wid, int64(v))
	return off + wid
}
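
The misalignment check in duintxx uses the usual power-of-two trick: off&(wid-1) == 0 exactly when off is a multiple of wid, provided wid is a power of two. A tiny illustrative check:

package main

import "fmt"

func aligned(off, wid int) bool { return off&(wid-1) == 0 }

func main() {
	fmt.Println(aligned(16, 8)) // true: 16 is a multiple of 8
	fmt.Println(aligned(12, 8)) // false: 12 % 8 != 0
}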

func dumpembeds() {
	for _, v := range typecheck.Target.Embeds {
		staticdata.WriteEmbed(v)
	}
func duint8(s *obj.LSym, off int, v uint8) int {
	return duintxx(s, off, uint64(v), 1)
}

func addsignats(dcls []ir.Node) {
	// copy types from dcl list to signatset
	for _, n := range dcls {
		if n.Op() == ir.OTYPE {
			reflectdata.NeedRuntimeType(n.Type())
func duint16(s *obj.LSym, off int, v uint16) int {
	return duintxx(s, off, uint64(v), 2)
}

func duint32(s *obj.LSym, off int, v uint32) int {
	return duintxx(s, off, uint64(v), 4)
}

func duintptr(s *obj.LSym, off int, v uint64) int {
	return duintxx(s, off, v, Widthptr)
}

func dbvec(s *obj.LSym, off int, bv bvec) int {
	// Runtime reads the bitmaps as byte arrays. Oblige.
	for j := 0; int32(j) < bv.n; j += 8 {
		word := bv.b[j/32]
		off = duint8(s, off, uint8(word>>(uint(j)%32)))
	}
	return off
}

const (
	stringSymPrefix  = "go.string."
	stringSymPattern = ".gostring.%d.%x"
)

// stringsym returns a symbol containing the string s.
// The symbol contains the string data, not a string header.
func stringsym(pos src.XPos, s string) (data *obj.LSym) {
	var symname string
	if len(s) > 100 {
		// Huge strings are hashed to avoid long names in object files.
		// Indulge in some paranoia by writing the length of s, too,
		// as protection against length extension attacks.
		// Same pattern is known to fileStringSym below.
		h := sha256.New()
		io.WriteString(h, s)
		symname = fmt.Sprintf(stringSymPattern, len(s), h.Sum(nil))
	} else {
		// Small strings get named directly by their contents.
		symname = strconv.Quote(s)
	}

	symdata := Ctxt.Lookup(stringSymPrefix + symname)
	if !symdata.OnList() {
		off := dstringdata(symdata, 0, s, pos, "string")
		ggloblsym(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL)
		symdata.Set(obj.AttrContentAddressable, true)
	}

	return symdata
}
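
Outside the compiler, the same naming scheme looks like this: short strings are named by their quoted contents, long ones by length plus SHA-256 digest via stringSymPattern. A standalone sketch (the helper name is hypothetical):

package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"strconv"
)

func symName(s string) string {
	if len(s) > 100 {
		h := sha256.New()
		io.WriteString(h, s)
		return fmt.Sprintf(".gostring.%d.%x", len(s), h.Sum(nil))
	}
	return strconv.Quote(s)
}

func main() {
	fmt.Println(symName("hi")) // "hi" (quoted contents)
	long := make([]byte, 200)
	fmt.Println(symName(string(long))) // .gostring.200.<64 hex digits>
}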

// fileStringSym returns a symbol for the contents and the size of file.
// If readonly is true, the symbol shares storage with any literal string
// or other file with the same content and is placed in a read-only section.
// If readonly is false, the symbol is a read-write copy separate from any other,
// for use as the backing store of a []byte.
// The content hash of file is copied into hash. (If hash is nil, nothing is copied.)
// The returned symbol contains the data itself, not a string header.
func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.LSym, int64, error) {
	f, err := os.Open(file)
	if err != nil {
		return nil, 0, err
	}
	defer f.Close()
	info, err := f.Stat()
	if err != nil {
		return nil, 0, err
	}
	if !info.Mode().IsRegular() {
		return nil, 0, fmt.Errorf("not a regular file")
	}
	size := info.Size()
	if size <= 1*1024 {
		data, err := ioutil.ReadAll(f)
		if err != nil {
			return nil, 0, err
		}
		if int64(len(data)) != size {
			return nil, 0, fmt.Errorf("file changed between reads")
		}
		var sym *obj.LSym
		if readonly {
			sym = stringsym(pos, string(data))
		} else {
			sym = slicedata(pos, string(data)).Sym.Linksym()
		}
		if len(hash) > 0 {
			sum := sha256.Sum256(data)
			copy(hash, sum[:])
		}
		return sym, size, nil
	}
	if size > 2e9 {
		// ggloblsym takes an int32,
		// and probably the rest of the toolchain
		// can't handle such big symbols either.
		// See golang.org/issue/9862.
		return nil, 0, fmt.Errorf("file too large")
	}

	// File is too big to read and keep in memory.
	// Compute hash if needed for read-only content hashing or if the caller wants it.
	var sum []byte
	if readonly || len(hash) > 0 {
		h := sha256.New()
		n, err := io.Copy(h, f)
		if err != nil {
			return nil, 0, err
		}
		if n != size {
			return nil, 0, fmt.Errorf("file changed between reads")
		}
		sum = h.Sum(nil)
		copy(hash, sum)
	}

	var symdata *obj.LSym
	if readonly {
		symname := fmt.Sprintf(stringSymPattern, size, sum)
		symdata = Ctxt.Lookup(stringSymPrefix + symname)
		if !symdata.OnList() {
			info := symdata.NewFileInfo()
			info.Name = file
			info.Size = size
			ggloblsym(symdata, int32(size), obj.DUPOK|obj.RODATA|obj.LOCAL)
			// Note: AttrContentAddressable cannot be set here,
			// because the content-addressable-handling code
			// does not know about file symbols.
		}
	} else {
		// Emit a zero-length data symbol
		// and then fix up length and content to use file.
		symdata = slicedata(pos, "").Sym.Linksym()
		symdata.Size = size
		symdata.Type = objabi.SNOPTRDATA
		info := symdata.NewFileInfo()
		info.Name = file
		info.Size = size
	}

	return symdata, size, nil
}

var slicedataGen int

func slicedata(pos src.XPos, s string) *Node {
	slicedataGen++
	symname := fmt.Sprintf(".gobytes.%d", slicedataGen)
	sym := localpkg.Lookup(symname)
	symnode := newname(sym)
	sym.Def = asTypesNode(symnode)

	lsym := sym.Linksym()
	off := dstringdata(lsym, 0, s, pos, "slice")
	ggloblsym(lsym, int32(off), obj.NOPTR|obj.LOCAL)

	return symnode
}

func slicebytes(nam *Node, s string) {
	if nam.Op != ONAME {
		Fatalf("slicebytes %v", nam)
	}
	slicesym(nam, slicedata(nam.Pos, s), int64(len(s)))
}

func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int {
	// Objects that are too large will cause the data section to overflow right away,
	// causing a cryptic error message by the linker. Check for oversize objects here
	// and provide a useful error message instead.
	if int64(len(t)) > 2e9 {
		yyerrorl(pos, "%v with length %v is too big", what, len(t))
		return 0
	}

	s.WriteString(Ctxt, int64(off), len(t), t)
	return off + len(t)
}

func dsymptr(s *obj.LSym, off int, x *obj.LSym, xoff int) int {
	off = int(Rnd(int64(off), int64(Widthptr)))
	s.WriteAddr(Ctxt, int64(off), Widthptr, x, int64(xoff))
	off += Widthptr
	return off
}

func dsymptrOff(s *obj.LSym, off int, x *obj.LSym) int {
	s.WriteOff(Ctxt, int64(off), x, 0)
	off += 4
	return off
}

func dsymptrWeakOff(s *obj.LSym, off int, x *obj.LSym) int {
	s.WriteWeakOff(Ctxt, int64(off), x, 0)
	off += 4
	return off
}

// slicesym writes a static slice symbol {&arr, lencap, lencap} to n.
// arr must be an ONAME. slicesym does not modify n.
func slicesym(n, arr *Node, lencap int64) {
	s := n.Sym.Linksym()
	base := n.Xoffset
	if arr.Op != ONAME {
		Fatalf("slicesym non-name arr %v", arr)
	}
	s.WriteAddr(Ctxt, base, Widthptr, arr.Sym.Linksym(), arr.Xoffset)
	s.WriteInt(Ctxt, base+sliceLenOffset, Widthptr, lencap)
	s.WriteInt(Ctxt, base+sliceCapOffset, Widthptr, lencap)
}
|
||||
|
||||
// addrsym writes the static address of a to n. a must be an ONAME.
|
||||
// Neither n nor a is modified.
|
||||
func addrsym(n, a *Node) {
|
||||
if n.Op != ONAME {
|
||||
Fatalf("addrsym n op %v", n.Op)
|
||||
}
|
||||
if n.Sym == nil {
|
||||
Fatalf("addrsym nil n sym")
|
||||
}
|
||||
if a.Op != ONAME {
|
||||
Fatalf("addrsym a op %v", a.Op)
|
||||
}
|
||||
s := n.Sym.Linksym()
|
||||
s.WriteAddr(Ctxt, n.Xoffset, Widthptr, a.Sym.Linksym(), a.Xoffset)
|
||||
}
|
||||
|
||||
// pfuncsym writes the static address of f to n. f must be a global function.
|
||||
// Neither n nor f is modified.
|
||||
func pfuncsym(n, f *Node) {
|
||||
if n.Op != ONAME {
|
||||
Fatalf("pfuncsym n op %v", n.Op)
|
||||
}
|
||||
if n.Sym == nil {
|
||||
Fatalf("pfuncsym nil n sym")
|
||||
}
|
||||
if f.Class() != PFUNC {
|
||||
Fatalf("pfuncsym class not PFUNC %d", f.Class())
|
||||
}
|
||||
s := n.Sym.Linksym()
|
||||
s.WriteAddr(Ctxt, n.Xoffset, Widthptr, funcsym(f.Sym).Linksym(), f.Xoffset)
|
||||
}
|
||||
|
||||
// litsym writes the static literal c to n.
|
||||
// Neither n nor c is modified.
|
||||
func litsym(n, c *Node, wid int) {
|
||||
if n.Op != ONAME {
|
||||
Fatalf("litsym n op %v", n.Op)
|
||||
}
|
||||
if c.Op != OLITERAL {
|
||||
Fatalf("litsym c op %v", c.Op)
|
||||
}
|
||||
if n.Sym == nil {
|
||||
Fatalf("litsym nil n sym")
|
||||
}
|
||||
s := n.Sym.Linksym()
|
||||
switch u := c.Val().U.(type) {
|
||||
case bool:
|
||||
i := int64(obj.Bool2int(u))
|
||||
s.WriteInt(Ctxt, n.Xoffset, wid, i)
|
||||
|
||||
case *Mpint:
|
||||
s.WriteInt(Ctxt, n.Xoffset, wid, u.Int64())
|
||||
|
||||
case *Mpflt:
|
||||
f := u.Float64()
|
||||
switch n.Type.Etype {
|
||||
case TFLOAT32:
|
||||
s.WriteFloat32(Ctxt, n.Xoffset, float32(f))
|
||||
case TFLOAT64:
|
||||
s.WriteFloat64(Ctxt, n.Xoffset, f)
|
||||
}
|
||||
|
||||
case *Mpcplx:
|
||||
r := u.Real.Float64()
|
||||
i := u.Imag.Float64()
|
||||
switch n.Type.Etype {
|
||||
case TCOMPLEX64:
|
||||
s.WriteFloat32(Ctxt, n.Xoffset, float32(r))
|
||||
s.WriteFloat32(Ctxt, n.Xoffset+4, float32(i))
|
||||
case TCOMPLEX128:
|
||||
s.WriteFloat64(Ctxt, n.Xoffset, r)
|
||||
s.WriteFloat64(Ctxt, n.Xoffset+8, i)
|
||||
}
|
||||
|
||||
case string:
|
||||
symdata := stringsym(n.Pos, u)
|
||||
s.WriteAddr(Ctxt, n.Xoffset, Widthptr, symdata, 0)
|
||||
s.WriteInt(Ctxt, n.Xoffset+int64(Widthptr), Widthptr, int64(len(u)))
|
||||
|
||||
default:
|
||||
Fatalf("litsym unhandled OLITERAL %v", c)
|
||||
}
|
||||
}
|
||||
|
||||
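Editor's aside: litsym's complex cases make the memory layout explicit: a complex64 is two float32 words at offsets 0 and 4, a complex128 two float64 words at offsets 0 and 8. A hedged illustration of the same layout using encoding/binary; the buffer here is illustrative, not compiler code:

package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

func main() {
	c := complex(3.0, 4.0) // complex128: real part at offset 0, imaginary at offset 8
	var buf [16]byte
	binary.LittleEndian.PutUint64(buf[0:8], math.Float64bits(real(c)))
	binary.LittleEndian.PutUint64(buf[8:16], math.Float64bits(imag(c)))

	// Reading the halves back recovers the original value.
	r := math.Float64frombits(binary.LittleEndian.Uint64(buf[0:8]))
	i := math.Float64frombits(binary.LittleEndian.Uint64(buf[8:16]))
	fmt.Println(complex(r, i) == c) // true
}
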
175
src/cmd/compile/internal/gc/op_string.go
Normal file
@@ -0,0 +1,175 @@
// Code generated by "stringer -type=Op -trimprefix=O"; DO NOT EDIT.

package gc

import "strconv"

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[OXXX-0]
	_ = x[ONAME-1]
	_ = x[ONONAME-2]
	_ = x[OTYPE-3]
	_ = x[OPACK-4]
	_ = x[OLITERAL-5]
	_ = x[OADD-6]
	_ = x[OSUB-7]
	_ = x[OOR-8]
	_ = x[OXOR-9]
	_ = x[OADDSTR-10]
	_ = x[OADDR-11]
	_ = x[OANDAND-12]
	_ = x[OAPPEND-13]
	_ = x[OBYTES2STR-14]
	_ = x[OBYTES2STRTMP-15]
	_ = x[ORUNES2STR-16]
	_ = x[OSTR2BYTES-17]
	_ = x[OSTR2BYTESTMP-18]
	_ = x[OSTR2RUNES-19]
	_ = x[OAS-20]
	_ = x[OAS2-21]
	_ = x[OAS2DOTTYPE-22]
	_ = x[OAS2FUNC-23]
	_ = x[OAS2MAPR-24]
	_ = x[OAS2RECV-25]
	_ = x[OASOP-26]
	_ = x[OCALL-27]
	_ = x[OCALLFUNC-28]
	_ = x[OCALLMETH-29]
	_ = x[OCALLINTER-30]
	_ = x[OCALLPART-31]
	_ = x[OCAP-32]
	_ = x[OCLOSE-33]
	_ = x[OCLOSURE-34]
	_ = x[OCOMPLIT-35]
	_ = x[OMAPLIT-36]
	_ = x[OSTRUCTLIT-37]
	_ = x[OARRAYLIT-38]
	_ = x[OSLICELIT-39]
	_ = x[OPTRLIT-40]
	_ = x[OCONV-41]
	_ = x[OCONVIFACE-42]
	_ = x[OCONVNOP-43]
	_ = x[OCOPY-44]
	_ = x[ODCL-45]
	_ = x[ODCLFUNC-46]
	_ = x[ODCLFIELD-47]
	_ = x[ODCLCONST-48]
	_ = x[ODCLTYPE-49]
	_ = x[ODELETE-50]
	_ = x[ODOT-51]
	_ = x[ODOTPTR-52]
	_ = x[ODOTMETH-53]
	_ = x[ODOTINTER-54]
	_ = x[OXDOT-55]
	_ = x[ODOTTYPE-56]
	_ = x[ODOTTYPE2-57]
	_ = x[OEQ-58]
	_ = x[ONE-59]
	_ = x[OLT-60]
	_ = x[OLE-61]
	_ = x[OGE-62]
	_ = x[OGT-63]
	_ = x[ODEREF-64]
	_ = x[OINDEX-65]
	_ = x[OINDEXMAP-66]
	_ = x[OKEY-67]
	_ = x[OSTRUCTKEY-68]
	_ = x[OLEN-69]
	_ = x[OMAKE-70]
	_ = x[OMAKECHAN-71]
	_ = x[OMAKEMAP-72]
	_ = x[OMAKESLICE-73]
	_ = x[OMAKESLICECOPY-74]
	_ = x[OMUL-75]
	_ = x[ODIV-76]
	_ = x[OMOD-77]
	_ = x[OLSH-78]
	_ = x[ORSH-79]
	_ = x[OAND-80]
	_ = x[OANDNOT-81]
	_ = x[ONEW-82]
	_ = x[ONEWOBJ-83]
	_ = x[ONOT-84]
	_ = x[OBITNOT-85]
	_ = x[OPLUS-86]
	_ = x[ONEG-87]
	_ = x[OOROR-88]
	_ = x[OPANIC-89]
	_ = x[OPRINT-90]
	_ = x[OPRINTN-91]
	_ = x[OPAREN-92]
	_ = x[OSEND-93]
	_ = x[OSLICE-94]
	_ = x[OSLICEARR-95]
	_ = x[OSLICESTR-96]
	_ = x[OSLICE3-97]
	_ = x[OSLICE3ARR-98]
	_ = x[OSLICEHEADER-99]
	_ = x[ORECOVER-100]
	_ = x[ORECV-101]
	_ = x[ORUNESTR-102]
	_ = x[OSELRECV-103]
	_ = x[OSELRECV2-104]
	_ = x[OIOTA-105]
	_ = x[OREAL-106]
	_ = x[OIMAG-107]
	_ = x[OCOMPLEX-108]
	_ = x[OALIGNOF-109]
	_ = x[OOFFSETOF-110]
	_ = x[OSIZEOF-111]
	_ = x[OBLOCK-112]
	_ = x[OBREAK-113]
	_ = x[OCASE-114]
	_ = x[OCONTINUE-115]
	_ = x[ODEFER-116]
	_ = x[OEMPTY-117]
	_ = x[OFALL-118]
	_ = x[OFOR-119]
	_ = x[OFORUNTIL-120]
	_ = x[OGOTO-121]
	_ = x[OIF-122]
	_ = x[OLABEL-123]
	_ = x[OGO-124]
	_ = x[ORANGE-125]
	_ = x[ORETURN-126]
	_ = x[OSELECT-127]
	_ = x[OSWITCH-128]
	_ = x[OTYPESW-129]
	_ = x[OTCHAN-130]
	_ = x[OTMAP-131]
	_ = x[OTSTRUCT-132]
	_ = x[OTINTER-133]
	_ = x[OTFUNC-134]
	_ = x[OTARRAY-135]
	_ = x[ODDD-136]
	_ = x[OINLCALL-137]
	_ = x[OEFACE-138]
	_ = x[OITAB-139]
	_ = x[OIDATA-140]
	_ = x[OSPTR-141]
	_ = x[OCLOSUREVAR-142]
	_ = x[OCFUNC-143]
	_ = x[OCHECKNIL-144]
	_ = x[OVARDEF-145]
	_ = x[OVARKILL-146]
	_ = x[OVARLIVE-147]
	_ = x[ORESULT-148]
	_ = x[OINLMARK-149]
	_ = x[ORETJMP-150]
	_ = x[OGETG-151]
	_ = x[OEND-152]
}

const _Op_name = "XXXNAMENONAMETYPEPACKLITERALADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLFIELDDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFBLOCKBREAKCASECONTINUEDEFEREMPTYFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYDDDINLCALLEFACEITABIDATASPTRCLOSUREVARCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKRETJMPGETGEND"

var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 36, 39, 45, 49, 55, 61, 70, 82, 91, 100, 112, 121, 123, 126, 136, 143, 150, 157, 161, 165, 173, 181, 190, 198, 201, 206, 213, 220, 226, 235, 243, 251, 257, 261, 270, 277, 281, 284, 291, 299, 307, 314, 320, 323, 329, 336, 344, 348, 355, 363, 365, 367, 369, 371, 373, 375, 380, 385, 393, 396, 405, 408, 412, 420, 427, 436, 449, 452, 455, 458, 461, 464, 467, 473, 476, 482, 485, 491, 495, 498, 502, 507, 512, 518, 523, 527, 532, 540, 548, 554, 563, 574, 581, 585, 592, 599, 607, 611, 615, 619, 626, 633, 641, 647, 652, 657, 661, 669, 674, 679, 683, 686, 694, 698, 700, 705, 707, 712, 718, 724, 730, 736, 741, 745, 752, 758, 763, 769, 772, 779, 784, 788, 793, 797, 807, 812, 820, 826, 833, 840, 846, 853, 859, 863, 866}

func (i Op) String() string {
	if i >= Op(len(_Op_index)-1) {
		return "Op(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _Op_name[_Op_index[i]:_Op_index[i+1]]
}

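Editor's aside: the generated String method avoids one string allocation per name by packing every Op name into a single backing string and slicing it with an index table: name i spans _Op_name[_Op_index[i]:_Op_index[i+1]]. A minimal sketch of the same technique for a toy three-value enum:

package main

import "fmt"

type Color int

const (
	Red Color = iota
	Green
	Blue
)

// One backing string plus offsets, in the style of stringer output.
const colorName = "RedGreenBlue"

var colorIndex = [...]uint8{0, 3, 8, 12}

func (c Color) String() string {
	if c < 0 || int(c) >= len(colorIndex)-1 {
		return fmt.Sprintf("Color(%d)", int(c))
	}
	return colorName[colorIndex[c]:colorIndex[c+1]]
}

func main() {
	fmt.Println(Green, Color(7)) // Green Color(7)
}
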
1441
src/cmd/compile/internal/gc/order.go
Normal file
File diff suppressed because it is too large
798
src/cmd/compile/internal/gc/pgen.go
Normal file
@@ -0,0 +1,798 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/compile/internal/ssa"
	"cmd/compile/internal/types"
	"cmd/internal/dwarf"
	"cmd/internal/obj"
	"cmd/internal/objabi"
	"cmd/internal/src"
	"cmd/internal/sys"
	"internal/race"
	"math/rand"
	"sort"
	"sync"
	"time"
)

// "Portable" code generation.

var (
	nBackendWorkers int     // number of concurrent backend workers, set by a compiler flag
	compilequeue    []*Node // functions waiting to be compiled
)

func emitptrargsmap(fn *Node) {
	if fn.funcname() == "_" || fn.Func.Nname.Sym.Linkname != "" {
		return
	}
	lsym := Ctxt.Lookup(fn.Func.lsym.Name + ".args_stackmap")

	nptr := int(fn.Type.ArgWidth() / int64(Widthptr))
	bv := bvalloc(int32(nptr) * 2)
	nbitmap := 1
	if fn.Type.NumResults() > 0 {
		nbitmap = 2
	}
	off := duint32(lsym, 0, uint32(nbitmap))
	off = duint32(lsym, off, uint32(bv.n))

	if fn.IsMethod() {
		onebitwalktype1(fn.Type.Recvs(), 0, bv)
	}
	if fn.Type.NumParams() > 0 {
		onebitwalktype1(fn.Type.Params(), 0, bv)
	}
	off = dbvec(lsym, off, bv)

	if fn.Type.NumResults() > 0 {
		onebitwalktype1(fn.Type.Results(), 0, bv)
		off = dbvec(lsym, off, bv)
	}

	ggloblsym(lsym, int32(off), obj.RODATA|obj.LOCAL)
}

// cmpstackvarlt reports whether the stack variable a sorts before b.
//
// Sort the list of stack variables. Autos after anything else,
// within autos, unused after used, within used, things with
// pointers first, zeroed things first, and then decreasing size.
// Because autos are laid out in decreasing addresses
// on the stack, pointers first, zeroed things first and decreasing size
// really means, in memory, things with pointers needing zeroing at
// the top of the stack and increasing in size.
// Non-autos sort on offset.
func cmpstackvarlt(a, b *Node) bool {
	if (a.Class() == PAUTO) != (b.Class() == PAUTO) {
		return b.Class() == PAUTO
	}

	if a.Class() != PAUTO {
		return a.Xoffset < b.Xoffset
	}

	if a.Name.Used() != b.Name.Used() {
		return a.Name.Used()
	}

	ap := a.Type.HasPointers()
	bp := b.Type.HasPointers()
	if ap != bp {
		return ap
	}

	ap = a.Name.Needzero()
	bp = b.Name.Needzero()
	if ap != bp {
		return ap
	}

	if a.Type.Width != b.Type.Width {
		return a.Type.Width > b.Type.Width
	}

	return a.Sym.Name < b.Sym.Name
}

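Editor's aside: cmpstackvarlt is a classic tiered comparator — each paragraph decides one key and falls through to the next only on a tie, ending with a name comparison that makes the order total and deterministic. The same shape on a toy struct with sort.Slice; the struct and field names here are illustrative:

package main

import (
	"fmt"
	"sort"
)

type v struct {
	used bool
	size int64
	name string
}

func main() {
	vars := []v{{false, 8, "b"}, {true, 4, "c"}, {true, 4, "a"}, {true, 16, "d"}}
	sort.Slice(vars, func(i, j int) bool {
		a, b := vars[i], vars[j]
		if a.used != b.used {
			return a.used // used before unused
		}
		if a.size != b.size {
			return a.size > b.size // larger first
		}
		return a.name < b.name // tie-break for determinism
	})
	fmt.Println(vars) // [{true 16 d} {true 4 a} {true 4 c} {false 8 b}]
}
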
// byStackVar implements sort.Interface for []*Node using cmpstackvarlt.
type byStackVar []*Node

func (s byStackVar) Len() int           { return len(s) }
func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) }
func (s byStackVar) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

func (s *ssafn) AllocFrame(f *ssa.Func) {
	s.stksize = 0
	s.stkptrsize = 0
	fn := s.curfn.Func

	// Mark the PAUTO's unused.
	for _, ln := range fn.Dcl {
		if ln.Class() == PAUTO {
			ln.Name.SetUsed(false)
		}
	}

	for _, l := range f.RegAlloc {
		if ls, ok := l.(ssa.LocalSlot); ok {
			ls.N.(*Node).Name.SetUsed(true)
		}
	}

	scratchUsed := false
	for _, b := range f.Blocks {
		for _, v := range b.Values {
			if n, ok := v.Aux.(*Node); ok {
				switch n.Class() {
				case PPARAM, PPARAMOUT:
					// Don't modify nodfp; it is a global.
					if n != nodfp {
						n.Name.SetUsed(true)
					}
				case PAUTO:
					n.Name.SetUsed(true)
				}
			}
			if !scratchUsed {
				scratchUsed = v.Op.UsesScratch()
			}
		}
	}

	if f.Config.NeedsFpScratch && scratchUsed {
		s.scratchFpMem = tempAt(src.NoXPos, s.curfn, types.Types[TUINT64])
	}

	sort.Sort(byStackVar(fn.Dcl))

	// Reassign stack offsets of the locals that are used.
	lastHasPtr := false
	for i, n := range fn.Dcl {
		if n.Op != ONAME || n.Class() != PAUTO {
			continue
		}
		if !n.Name.Used() {
			fn.Dcl = fn.Dcl[:i]
			break
		}

		dowidth(n.Type)
		w := n.Type.Width
		if w >= thearch.MAXWIDTH || w < 0 {
			Fatalf("bad width")
		}
		if w == 0 && lastHasPtr {
			// Pad between a pointer-containing object and a zero-sized object.
			// This prevents a pointer to the zero-sized object from being interpreted
			// as a pointer to the pointer-containing object (and causing it
			// to be scanned when it shouldn't be). See issue 24993.
			w = 1
		}
		s.stksize += w
		s.stksize = Rnd(s.stksize, int64(n.Type.Align))
		if n.Type.HasPointers() {
			s.stkptrsize = s.stksize
			lastHasPtr = true
		} else {
			lastHasPtr = false
		}
		if thearch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) {
			s.stksize = Rnd(s.stksize, int64(Widthptr))
		}
		n.Xoffset = -s.stksize
	}

	s.stksize = Rnd(s.stksize, int64(Widthreg))
	s.stkptrsize = Rnd(s.stkptrsize, int64(Widthreg))
}

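Editor's aside: AllocFrame grows the frame by adding each variable's width and rounding the running size up to that variable's alignment (and finally to Widthreg). For power-of-two alignments the rounding is the usual mask trick, sketched below under that assumption:

package main

import "fmt"

// rnd rounds x up to a multiple of align, which must be a power of two.
// This mirrors what Rnd does for the frame-size bookkeeping above.
func rnd(x, align int64) int64 {
	return (x + align - 1) &^ (align - 1)
}

func main() {
	size := int64(0)
	for _, v := range []struct{ width, align int64 }{{1, 1}, {8, 8}, {4, 4}} {
		size += v.width
		size = rnd(size, v.align)
	}
	fmt.Println(size, rnd(size, 8)) // 20 24
}
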
func funccompile(fn *Node) {
	if Curfn != nil {
		Fatalf("funccompile %v inside %v", fn.Func.Nname.Sym, Curfn.Func.Nname.Sym)
	}

	if fn.Type == nil {
		if nerrors == 0 {
			Fatalf("funccompile missing type")
		}
		return
	}

	// assign parameter offsets
	dowidth(fn.Type)

	if fn.Nbody.Len() == 0 {
		// Initialize ABI wrappers if necessary.
		fn.Func.initLSym(false)
		emitptrargsmap(fn)
		return
	}

	dclcontext = PAUTO
	Curfn = fn

	compile(fn)

	Curfn = nil
	dclcontext = PEXTERN
}

func compile(fn *Node) {
	saveerrors()

	order(fn)
	if nerrors != 0 {
		return
	}

	// Set up the function's LSym early to avoid data races with the assemblers.
	// Do this before walk, as walk needs the LSym to set attributes/relocations
	// (e.g. in markTypeUsedInInterface).
	fn.Func.initLSym(true)

	walk(fn)
	if nerrors != 0 {
		return
	}
	if instrumenting {
		instrument(fn)
	}

	// From this point, there should be no uses of Curfn. Enforce that.
	Curfn = nil

	if fn.funcname() == "_" {
		// We don't need to generate code for this function, just report errors in its body.
		// At this point we've generated any errors needed.
		// (Beyond here we generate only non-spec errors, like "stack frame too large".)
		// See issue 29870.
		return
	}

	// Make sure type syms are declared for all types that might
	// be types of stack objects. We need to do this here
	// because symbols must be allocated before the parallel
	// phase of the compiler.
	for _, n := range fn.Func.Dcl {
		switch n.Class() {
		case PPARAM, PPARAMOUT, PAUTO:
			if livenessShouldTrack(n) && n.Name.Addrtaken() {
				dtypesym(n.Type)
				// Also make sure we allocate a linker symbol
				// for the stack object data, for the same reason.
				if fn.Func.lsym.Func().StackObjects == nil {
					fn.Func.lsym.Func().StackObjects = Ctxt.Lookup(fn.Func.lsym.Name + ".stkobj")
				}
			}
		}
	}

	if compilenow(fn) {
		compileSSA(fn, 0)
	} else {
		compilequeue = append(compilequeue, fn)
	}
}

// compilenow reports whether to compile immediately.
// If functions are not compiled immediately,
// they are enqueued in compilequeue,
// which is drained by compileFunctions.
func compilenow(fn *Node) bool {
	// Issue 38068: if this function is a method AND an inline
	// candidate AND was not inlined (yet), put it onto the compile
	// queue instead of compiling it immediately. This is in case we
	// wind up inlining it into a method wrapper that is generated by
	// compiling a function later on in the xtop list.
	if fn.IsMethod() && isInlinableButNotInlined(fn) {
		return false
	}
	return nBackendWorkers == 1 && Debug_compilelater == 0
}

// isInlinableButNotInlined returns true if 'fn' was marked as an
// inline candidate but then never inlined (presumably because we
// found no call sites).
func isInlinableButNotInlined(fn *Node) bool {
	if fn.Func.Nname.Func.Inl == nil {
		return false
	}
	if fn.Sym == nil {
		return true
	}
	return !fn.Sym.Linksym().WasInlined()
}

const maxStackSize = 1 << 30

// compileSSA builds an SSA backend function,
// uses it to generate a plist,
// and flushes that plist to machine code.
// worker indicates which of the backend workers is doing the processing.
func compileSSA(fn *Node, worker int) {
	f := buildssa(fn, worker)
	// Note: check arg size to fix issue 25507.
	if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type.ArgWidth() >= maxStackSize {
		largeStackFramesMu.Lock()
		largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: fn.Type.ArgWidth(), pos: fn.Pos})
		largeStackFramesMu.Unlock()
		return
	}
	pp := newProgs(fn, worker)
	defer pp.Free()
	genssa(f, pp)
	// Check frame size again.
	// The check above included only the space needed for local variables.
	// After genssa, the space needed includes local variables and the callee arg region.
	// We must do this check prior to calling pp.Flush.
	// If there are any oversized stack frames,
	// the assembler may emit inscrutable complaints about invalid instructions.
	if pp.Text.To.Offset >= maxStackSize {
		largeStackFramesMu.Lock()
		locals := f.Frontend().(*ssafn).stksize
		largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: fn.Type.ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos})
		largeStackFramesMu.Unlock()
		return
	}

	pp.Flush() // assemble, fill in boilerplate, etc.
	// fieldtrack must be called after pp.Flush. See issue 20014.
	fieldtrack(pp.Text.From.Sym, fn.Func.FieldTrack)
}

func init() {
	if race.Enabled {
		rand.Seed(time.Now().UnixNano())
	}
}

// compileFunctions compiles all functions in compilequeue.
// It fans out nBackendWorkers to do the work
// and waits for them to complete.
func compileFunctions() {
	if len(compilequeue) != 0 {
		sizeCalculationDisabled = true // not safe to calculate sizes concurrently
		if race.Enabled {
			// Randomize compilation order to try to shake out races.
			tmp := make([]*Node, len(compilequeue))
			perm := rand.Perm(len(compilequeue))
			for i, v := range perm {
				tmp[v] = compilequeue[i]
			}
			copy(compilequeue, tmp)
		} else {
			// Compile the longest functions first,
			// since they're most likely to be the slowest.
			// This helps avoid stragglers.
			sort.Slice(compilequeue, func(i, j int) bool {
				return compilequeue[i].Nbody.Len() > compilequeue[j].Nbody.Len()
			})
		}
		var wg sync.WaitGroup
		Ctxt.InParallel = true
		c := make(chan *Node, nBackendWorkers)
		for i := 0; i < nBackendWorkers; i++ {
			wg.Add(1)
			go func(worker int) {
				for fn := range c {
					compileSSA(fn, worker)
				}
				wg.Done()
			}(i)
		}
		for _, fn := range compilequeue {
			c <- fn
		}
		close(c)
		compilequeue = nil
		wg.Wait()
		Ctxt.InParallel = false
		sizeCalculationDisabled = false
	}
}

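Editor's aside: compileFunctions is a standard fan-out — a buffered channel of work items, N goroutines draining it, and a WaitGroup to join them; closing the channel is what lets the workers exit. The same skeleton in miniature:

package main

import (
	"fmt"
	"sync"
)

func main() {
	const workers = 4
	work := make(chan int, workers)
	var wg sync.WaitGroup

	results := make([]int, 10)
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for n := range work { // drains until close(work)
				results[n] = n * n // each index is written by exactly one goroutine
			}
		}()
	}
	for n := range results {
		work <- n
	}
	close(work)
	wg.Wait()
	fmt.Println(results) // [0 1 4 9 16 25 36 49 64 81]
}
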
func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) {
	fn := curfn.(*Node)
	if fn.Func.Nname != nil {
		if expect := fn.Func.Nname.Sym.Linksym(); fnsym != expect {
			Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
		}
	}

	var apdecls []*Node
	// Populate decls for fn.
	for _, n := range fn.Func.Dcl {
		if n.Op != ONAME { // might be OTYPE or OLITERAL
			continue
		}
		switch n.Class() {
		case PAUTO:
			if !n.Name.Used() {
				// Text == nil -> generating abstract function
				if fnsym.Func().Text != nil {
					Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)")
				}
				continue
			}
		case PPARAM, PPARAMOUT:
		default:
			continue
		}
		apdecls = append(apdecls, n)
		fnsym.Func().RecordAutoType(ngotype(n).Linksym())
	}

	decls, dwarfVars := createDwarfVars(fnsym, fn.Func, apdecls)

	// For each type referenced by the function's auto vars but not
	// already referenced by a dwarf var, attach a dummy relocation to
	// the function symbol to ensure that the type is included in DWARF
	// processing during linking.
	typesyms := []*obj.LSym{}
	for t := range fnsym.Func().Autot {
		typesyms = append(typesyms, t)
	}
	sort.Sort(obj.BySymName(typesyms))
	for _, sym := range typesyms {
		r := obj.Addrel(infosym)
		r.Sym = sym
		r.Type = objabi.R_USETYPE
	}
	fnsym.Func().Autot = nil

	var varScopes []ScopeID
	for _, decl := range decls {
		pos := declPos(decl)
		varScopes = append(varScopes, findScope(fn.Func.Marks, pos))
	}

	scopes := assembleScopes(fnsym, fn, dwarfVars, varScopes)
	var inlcalls dwarf.InlCalls
	if genDwarfInline > 0 {
		inlcalls = assembleInlines(fnsym, dwarfVars)
	}
	return scopes, inlcalls
}

func declPos(decl *Node) src.XPos {
	if decl.Name.Defn != nil && (decl.Name.Captured() || decl.Name.Byval()) {
		// It's not clear which position is correct for captured variables here:
		// * decl.Pos is the wrong position for captured variables, in the inner
		//   function, but it is the right position in the outer function.
		// * decl.Name.Defn is nil for captured variables that were arguments
		//   on the outer function, however the decl.Pos for those seems to be
		//   correct.
		// * decl.Name.Defn is the "wrong" thing for variables declared in the
		//   header of a type switch, it's their position in the header, rather
		//   than the position of the case statement. In principle this is the
		//   right thing, but here we prefer the latter because it makes each
		//   instance of the header variable local to the lexical block of its
		//   case statement.
		// This code is probably wrong for type switch variables that are also
		// captured.
		return decl.Name.Defn.Pos
	}
	return decl.Pos
}

// createSimpleVars creates a DWARF entry for every variable declared in the
// function, claiming that they are permanently on the stack.
func createSimpleVars(fnsym *obj.LSym, apDecls []*Node) ([]*Node, []*dwarf.Var, map[*Node]bool) {
	var vars []*dwarf.Var
	var decls []*Node
	selected := make(map[*Node]bool)
	for _, n := range apDecls {
		if n.IsAutoTmp() {
			continue
		}

		decls = append(decls, n)
		vars = append(vars, createSimpleVar(fnsym, n))
		selected[n] = true
	}
	return decls, vars, selected
}

func createSimpleVar(fnsym *obj.LSym, n *Node) *dwarf.Var {
	var abbrev int
	offs := n.Xoffset

	switch n.Class() {
	case PAUTO:
		abbrev = dwarf.DW_ABRV_AUTO
		if Ctxt.FixedFrameSize() == 0 {
			offs -= int64(Widthptr)
		}
		if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
			// There is a word space for FP on ARM64 even if the frame pointer is disabled
			offs -= int64(Widthptr)
		}

	case PPARAM, PPARAMOUT:
		abbrev = dwarf.DW_ABRV_PARAM
		offs += Ctxt.FixedFrameSize()
	default:
		Fatalf("createSimpleVar unexpected class %v for node %v", n.Class(), n)
	}

	typename := dwarf.InfoPrefix + typesymname(n.Type)
	delete(fnsym.Func().Autot, ngotype(n).Linksym())
	inlIndex := 0
	if genDwarfInline > 1 {
		if n.Name.InlFormal() || n.Name.InlLocal() {
			inlIndex = posInlIndex(n.Pos) + 1
			if n.Name.InlFormal() {
				abbrev = dwarf.DW_ABRV_PARAM
			}
		}
	}
	declpos := Ctxt.InnermostPos(declPos(n))
	return &dwarf.Var{
		Name:          n.Sym.Name,
		IsReturnValue: n.Class() == PPARAMOUT,
		IsInlFormal:   n.Name.InlFormal(),
		Abbrev:        abbrev,
		StackOffset:   int32(offs),
		Type:          Ctxt.Lookup(typename),
		DeclFile:      declpos.RelFilename(),
		DeclLine:      declpos.RelLine(),
		DeclCol:       declpos.Col(),
		InlIndex:      int32(inlIndex),
		ChildIndex:    -1,
	}
}

// createComplexVars creates recomposed DWARF vars with location lists,
// suitable for describing optimized code.
func createComplexVars(fnsym *obj.LSym, fn *Func) ([]*Node, []*dwarf.Var, map[*Node]bool) {
	debugInfo := fn.DebugInfo

	// Produce a DWARF variable entry for each user variable.
	var decls []*Node
	var vars []*dwarf.Var
	ssaVars := make(map[*Node]bool)

	for varID, dvar := range debugInfo.Vars {
		n := dvar.(*Node)
		ssaVars[n] = true
		for _, slot := range debugInfo.VarSlots[varID] {
			ssaVars[debugInfo.Slots[slot].N.(*Node)] = true
		}

		if dvar := createComplexVar(fnsym, fn, ssa.VarID(varID)); dvar != nil {
			decls = append(decls, n)
			vars = append(vars, dvar)
		}
	}

	return decls, vars, ssaVars
}

// createDwarfVars processes fn, returning a list of DWARF variables and the
// Nodes they represent.
func createDwarfVars(fnsym *obj.LSym, fn *Func, apDecls []*Node) ([]*Node, []*dwarf.Var) {
	// Collect a raw list of DWARF vars.
	var vars []*dwarf.Var
	var decls []*Node
	var selected map[*Node]bool
	if Ctxt.Flag_locationlists && Ctxt.Flag_optimize && fn.DebugInfo != nil {
		decls, vars, selected = createComplexVars(fnsym, fn)
	} else {
		decls, vars, selected = createSimpleVars(fnsym, apDecls)
	}

	dcl := apDecls
	if fnsym.WasInlined() {
		dcl = preInliningDcls(fnsym)
	}

	// If optimization is enabled, the list above will typically be
	// missing some of the original pre-optimization variables in the
	// function (they may have been promoted to registers, folded into
	// constants, dead-coded away, etc). Input arguments not eligible
	// for SSA optimization are also missing. Here we add back in entries
	// for selected missing vars. Note that the recipe below creates a
	// conservative location. The idea here is that we want to
	// communicate to the user that "yes, there is a variable named X
	// in this function, but no, I don't have enough information to
	// reliably report its contents."
	// For non-SSA-able arguments, however, the correct information
	// is known -- they have a single home on the stack.
	for _, n := range dcl {
		if _, found := selected[n]; found {
			continue
		}
		c := n.Sym.Name[0]
		if c == '.' || n.Type.IsUntyped() {
			continue
		}
		if n.Class() == PPARAM && !canSSAType(n.Type) {
			// SSA-able args get location lists, and may move in and
			// out of registers, so those are handled elsewhere.
			// Autos and named output params seem to get handled
			// with VARDEF, which creates location lists.
			// Args not of SSA-able type are treated here; they
			// are homed on the stack in a single place for the
			// entire call.
			vars = append(vars, createSimpleVar(fnsym, n))
			decls = append(decls, n)
			continue
		}
		typename := dwarf.InfoPrefix + typesymname(n.Type)
		decls = append(decls, n)
		abbrev := dwarf.DW_ABRV_AUTO_LOCLIST
		isReturnValue := (n.Class() == PPARAMOUT)
		if n.Class() == PPARAM || n.Class() == PPARAMOUT {
			abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
		} else if n.Class() == PAUTOHEAP {
			// If dcl in question has been promoted to heap, do a bit
			// of extra work to recover original class (auto or param);
			// see issue 30908. This ensures that we get the proper
			// signature in the abstract function DIE, but leaves a
			// misleading location for the param (we want pointer-to-heap
			// and not stack).
			// TODO(thanm): generate a better location expression
			stackcopy := n.Name.Param.Stackcopy
			if stackcopy != nil && (stackcopy.Class() == PPARAM || stackcopy.Class() == PPARAMOUT) {
				abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
				isReturnValue = (stackcopy.Class() == PPARAMOUT)
			}
		}
		inlIndex := 0
		if genDwarfInline > 1 {
			if n.Name.InlFormal() || n.Name.InlLocal() {
				inlIndex = posInlIndex(n.Pos) + 1
				if n.Name.InlFormal() {
					abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
				}
			}
		}
		declpos := Ctxt.InnermostPos(n.Pos)
		vars = append(vars, &dwarf.Var{
			Name:          n.Sym.Name,
			IsReturnValue: isReturnValue,
			Abbrev:        abbrev,
			StackOffset:   int32(n.Xoffset),
			Type:          Ctxt.Lookup(typename),
			DeclFile:      declpos.RelFilename(),
			DeclLine:      declpos.RelLine(),
			DeclCol:       declpos.Col(),
			InlIndex:      int32(inlIndex),
			ChildIndex:    -1,
		})
		// Record the go type to ensure that it gets emitted by the linker.
		fnsym.Func().RecordAutoType(ngotype(n).Linksym())
	}

	return decls, vars
}

// Given a function that was inlined at some point during the
// compilation, return a sorted list of nodes corresponding to the
// autos/locals in that function prior to inlining. If this is a
// function that is not local to the package being compiled, then the
// names of the variables may have been "versioned" to avoid conflicts
// with local vars; disregard this versioning when sorting.
func preInliningDcls(fnsym *obj.LSym) []*Node {
	fn := Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*Node)
	var rdcl []*Node
	for _, n := range fn.Func.Inl.Dcl {
		c := n.Sym.Name[0]
		// Avoid reporting "_" parameters, since if there are more than
		// one, it can result in a collision later on, as in #23179.
		if unversion(n.Sym.Name) == "_" || c == '.' || n.Type.IsUntyped() {
			continue
		}
		rdcl = append(rdcl, n)
	}
	return rdcl
}

// stackOffset returns the stack location of a LocalSlot relative to the
// stack pointer, suitable for use in a DWARF location entry. This has nothing
// to do with its offset in the user variable.
func stackOffset(slot ssa.LocalSlot) int32 {
	n := slot.N.(*Node)
	var base int64
	switch n.Class() {
	case PAUTO:
		if Ctxt.FixedFrameSize() == 0 {
			base -= int64(Widthptr)
		}
		if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
			// There is a word space for FP on ARM64 even if the frame pointer is disabled
			base -= int64(Widthptr)
		}
	case PPARAM, PPARAMOUT:
		base += Ctxt.FixedFrameSize()
	}
	return int32(base + n.Xoffset + slot.Off)
}

// createComplexVar builds a single DWARF variable entry and location list.
func createComplexVar(fnsym *obj.LSym, fn *Func, varID ssa.VarID) *dwarf.Var {
	debug := fn.DebugInfo
	n := debug.Vars[varID].(*Node)

	var abbrev int
	switch n.Class() {
	case PAUTO:
		abbrev = dwarf.DW_ABRV_AUTO_LOCLIST
	case PPARAM, PPARAMOUT:
		abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
	default:
		return nil
	}

	gotype := ngotype(n).Linksym()
	delete(fnsym.Func().Autot, gotype)
	typename := dwarf.InfoPrefix + gotype.Name[len("type."):]
	inlIndex := 0
	if genDwarfInline > 1 {
		if n.Name.InlFormal() || n.Name.InlLocal() {
			inlIndex = posInlIndex(n.Pos) + 1
			if n.Name.InlFormal() {
				abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
			}
		}
	}
	declpos := Ctxt.InnermostPos(n.Pos)
	dvar := &dwarf.Var{
		Name:          n.Sym.Name,
		IsReturnValue: n.Class() == PPARAMOUT,
		IsInlFormal:   n.Name.InlFormal(),
		Abbrev:        abbrev,
		Type:          Ctxt.Lookup(typename),
		// The stack offset is used as a sorting key, so for decomposed
		// variables just give it the first one. It's not used otherwise.
		// This won't work well if the first slot hasn't been assigned a stack
		// location, but it's not obvious how to do better.
		StackOffset: stackOffset(debug.Slots[debug.VarSlots[varID][0]]),
		DeclFile:    declpos.RelFilename(),
		DeclLine:    declpos.RelLine(),
		DeclCol:     declpos.Col(),
		InlIndex:    int32(inlIndex),
		ChildIndex:  -1,
	}
	list := debug.LocationLists[varID]
	if len(list) != 0 {
		dvar.PutLocationList = func(listSym, startPC dwarf.Sym) {
			debug.PutLocationList(list, Ctxt, listSym.(*obj.LSym), startPC.(*obj.LSym))
		}
	}
	return dvar
}

// fieldtrack adds R_USEFIELD relocations to fnsym to record any
// struct fields that it used.
func fieldtrack(fnsym *obj.LSym, tracked map[*types.Sym]struct{}) {
	if fnsym == nil {
		return
	}
	if objabi.Fieldtrack_enabled == 0 || len(tracked) == 0 {
		return
	}

	trackSyms := make([]*types.Sym, 0, len(tracked))
	for sym := range tracked {
		trackSyms = append(trackSyms, sym)
	}
	sort.Sort(symByName(trackSyms))
	for _, sym := range trackSyms {
		r := obj.Addrel(fnsym)
		r.Sym = sym.Linksym()
		r.Type = objabi.R_USEFIELD
	}
}

type symByName []*types.Sym

func (a symByName) Len() int           { return len(a) }
func (a symByName) Less(i, j int) bool { return a[i].Name < a[j].Name }
func (a symByName) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

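Editor's aside: both fieldtrack and the debuginfo type-relocation loop use the same recipe for reproducible output from a Go map — copy the keys into a slice, sort, then iterate. Map iteration order is randomized, so the emitted relocations would otherwise differ between runs. The recipe in isolation:

package main

import (
	"fmt"
	"sort"
)

func main() {
	tracked := map[string]struct{}{"b.f": {}, "a.g": {}, "c.h": {}}

	// Collect and sort the keys so the output is deterministic.
	keys := make([]string, 0, len(tracked))
	for k := range tracked {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		fmt.Println(k) // a.g, b.f, c.h in every run
	}
}
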
196
src/cmd/compile/internal/gc/pgen_test.go
Normal file
@@ -0,0 +1,196 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/compile/internal/types"
	"reflect"
	"sort"
	"testing"
)

func typeWithoutPointers() *types.Type {
	t := types.New(TSTRUCT)
	f := &types.Field{Type: types.New(TINT)}
	t.SetFields([]*types.Field{f})
	return t
}

func typeWithPointers() *types.Type {
	t := types.New(TSTRUCT)
	f := &types.Field{Type: types.NewPtr(types.New(TINT))}
	t.SetFields([]*types.Field{f})
	return t
}

func markUsed(n *Node) *Node {
	n.Name.SetUsed(true)
	return n
}

func markNeedZero(n *Node) *Node {
	n.Name.SetNeedzero(true)
	return n
}

func nodeWithClass(n Node, c Class) *Node {
	n.SetClass(c)
	n.Name = new(Name)
	return &n
}

// Test all code paths for cmpstackvarlt.
func TestCmpstackvar(t *testing.T) {
	testdata := []struct {
		a, b *Node
		lt   bool
	}{
		{
			nodeWithClass(Node{}, PAUTO),
			nodeWithClass(Node{}, PFUNC),
			false,
		},
		{
			nodeWithClass(Node{}, PFUNC),
			nodeWithClass(Node{}, PAUTO),
			true,
		},
		{
			nodeWithClass(Node{Xoffset: 0}, PFUNC),
			nodeWithClass(Node{Xoffset: 10}, PFUNC),
			true,
		},
		{
			nodeWithClass(Node{Xoffset: 20}, PFUNC),
			nodeWithClass(Node{Xoffset: 10}, PFUNC),
			false,
		},
		{
			nodeWithClass(Node{Xoffset: 10}, PFUNC),
			nodeWithClass(Node{Xoffset: 10}, PFUNC),
			false,
		},
		{
			nodeWithClass(Node{Xoffset: 10}, PPARAM),
			nodeWithClass(Node{Xoffset: 20}, PPARAMOUT),
			true,
		},
		{
			nodeWithClass(Node{Xoffset: 10}, PPARAMOUT),
			nodeWithClass(Node{Xoffset: 20}, PPARAM),
			true,
		},
		{
			markUsed(nodeWithClass(Node{}, PAUTO)),
			nodeWithClass(Node{}, PAUTO),
			true,
		},
		{
			nodeWithClass(Node{}, PAUTO),
			markUsed(nodeWithClass(Node{}, PAUTO)),
			false,
		},
		{
			nodeWithClass(Node{Type: typeWithoutPointers()}, PAUTO),
			nodeWithClass(Node{Type: typeWithPointers()}, PAUTO),
			false,
		},
		{
			nodeWithClass(Node{Type: typeWithPointers()}, PAUTO),
			nodeWithClass(Node{Type: typeWithoutPointers()}, PAUTO),
			true,
		},
		{
			markNeedZero(nodeWithClass(Node{Type: &types.Type{}}, PAUTO)),
			nodeWithClass(Node{Type: &types.Type{}, Name: &Name{}}, PAUTO),
			true,
		},
		{
			nodeWithClass(Node{Type: &types.Type{}, Name: &Name{}}, PAUTO),
			markNeedZero(nodeWithClass(Node{Type: &types.Type{}}, PAUTO)),
			false,
		},
		{
			nodeWithClass(Node{Type: &types.Type{Width: 1}, Name: &Name{}}, PAUTO),
			nodeWithClass(Node{Type: &types.Type{Width: 2}, Name: &Name{}}, PAUTO),
			false,
		},
		{
			nodeWithClass(Node{Type: &types.Type{Width: 2}, Name: &Name{}}, PAUTO),
			nodeWithClass(Node{Type: &types.Type{Width: 1}, Name: &Name{}}, PAUTO),
			true,
		},
		{
			nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
			nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO),
			true,
		},
		{
			nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
			nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
			false,
		},
		{
			nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO),
			nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
			false,
		},
	}
	for _, d := range testdata {
		got := cmpstackvarlt(d.a, d.b)
		if got != d.lt {
			t.Errorf("want %#v < %#v", d.a, d.b)
		}
		// If we expect a < b to be true, check that b < a is false.
		if d.lt && cmpstackvarlt(d.b, d.a) {
			t.Errorf("unexpected %#v < %#v", d.b, d.a)
		}
	}
}

func TestStackvarSort(t *testing.T) {
	inp := []*Node{
		nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
		nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO),
		nodeWithClass(Node{Xoffset: 0, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
		nodeWithClass(Node{Xoffset: 10, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
		nodeWithClass(Node{Xoffset: 20, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
		markUsed(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)),
		nodeWithClass(Node{Type: typeWithoutPointers(), Sym: &types.Sym{}}, PAUTO),
		nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO),
		markNeedZero(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)),
		nodeWithClass(Node{Type: &types.Type{Width: 1}, Sym: &types.Sym{}}, PAUTO),
		nodeWithClass(Node{Type: &types.Type{Width: 2}, Sym: &types.Sym{}}, PAUTO),
		nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
		nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO),
	}
	want := []*Node{
		nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
		nodeWithClass(Node{Xoffset: 0, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
		nodeWithClass(Node{Xoffset: 10, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
		nodeWithClass(Node{Xoffset: 20, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
		markUsed(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)),
		markNeedZero(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)),
		nodeWithClass(Node{Type: &types.Type{Width: 2}, Sym: &types.Sym{}}, PAUTO),
		nodeWithClass(Node{Type: &types.Type{Width: 1}, Sym: &types.Sym{}}, PAUTO),
		nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO),
		nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO),
		nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
		nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO),
		nodeWithClass(Node{Type: typeWithoutPointers(), Sym: &types.Sym{}}, PAUTO),
	}
	sort.Sort(byStackVar(inp))
	if !reflect.DeepEqual(want, inp) {
		t.Error("sort failed")
		for i := range inp {
			g := inp[i]
			w := want[i]
			eq := reflect.DeepEqual(w, g)
			if !eq {
				t.Log(i, w, g)
			}
		}
	}
}
@@ -2,16 +2,14 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ssagen
package gc

import (
	"container/heap"
	"fmt"

	"cmd/compile/internal/ir"
	"cmd/compile/internal/ssa"
	"cmd/compile/internal/types"
	"cmd/internal/src"
	"container/heap"
	"fmt"
)

// This file contains the algorithm to place phi nodes in a function.
@@ -24,14 +22,6 @@ const smallBlocks = 500

const debugPhi = false

// fwdRefAux wraps an arbitrary ir.Node as an ssa.Aux for use with OpFwdref.
type fwdRefAux struct {
	_ [0]func() // ensure ir.Node isn't compared for equality
	N ir.Node
}

func (fwdRefAux) CanBeAnSSAAux() {}

// insertPhis finds all the places in the function where a phi is
// necessary and inserts them.
// Uses FwdRef ops to find all uses of variables, and s.defvars to find
@@ -50,11 +40,11 @@ func (s *state) insertPhis() {
}

type phiState struct {
	s       *state                   // SSA state
	f       *ssa.Func                // function to work on
	defvars []map[ir.Node]*ssa.Value // defined variables at end of each block
	s       *state                 // SSA state
	f       *ssa.Func              // function to work on
	defvars []map[*Node]*ssa.Value // defined variables at end of each block

	varnum map[ir.Node]int32 // variable numbering
	varnum map[*Node]int32 // variable numbering

	// properties of the dominator tree
	idom []*ssa.Block // dominator parents
@@ -69,7 +59,7 @@ type phiState struct {
	hasDef *sparseSet // has a write of the variable we're processing

	// miscellaneous
	placeholder *ssa.Value // value to use as a "not set yet" placeholder.
	placeholder *ssa.Value // dummy value to use as a "not set yet" placeholder.
}

func (s *phiState) insertPhis() {
@@ -80,15 +70,15 @@ func (s *phiState) insertPhis() {
	// Find all the variables for which we need to match up reads & writes.
	// This step prunes any basic-block-only variables from consideration.
	// Generate a numbering for these variables.
	s.varnum = map[ir.Node]int32{}
	var vars []ir.Node
	s.varnum = map[*Node]int32{}
	var vars []*Node
	var vartypes []*types.Type
	for _, b := range s.f.Blocks {
		for _, v := range b.Values {
			if v.Op != ssa.OpFwdRef {
				continue
			}
			var_ := v.Aux.(fwdRefAux).N
			var_ := v.Aux.(*Node)

			// Optimization: look back 1 block for the definition.
			if len(b.Preds) == 1 {
@@ -189,16 +179,11 @@ levels:
		if v.Op == ssa.OpPhi {
			v.AuxInt = 0
		}
		// Any remaining FwdRefs are dead code.
		if v.Op == ssa.OpFwdRef {
			v.Op = ssa.OpUnknown
			v.Aux = nil
		}
	}
}

func (s *phiState) insertVarPhis(n int, var_ ir.Node, defs []*ssa.Block, typ *types.Type) {
func (s *phiState) insertVarPhis(n int, var_ *Node, defs []*ssa.Block, typ *types.Type) {
	priq := &s.priq
	q := s.q
	queued := s.queued
@@ -255,9 +240,7 @@ func (s *phiState) insertVarPhis(n int, var_ ir.Node, defs []*ssa.Block, typ *ty
			hasPhi.add(c.ID)
			v := c.NewValue0I(currentRoot.Pos, ssa.OpPhi, typ, int64(n)) // TODO: line number right?
			// Note: we store the variable number in the phi's AuxInt field. Used temporarily by phi building.
			if var_.Op() == ir.ONAME {
				s.s.addNamedValue(var_.(*ir.Name), v)
			}
			s.s.addNamedValue(var_, v)
			for range c.Preds {
				v.AddArg(s.placeholder) // Actual args will be filled in by resolveFwdRefs.
			}
@@ -335,7 +318,7 @@ func (s *phiState) resolveFwdRefs() {
			if v.Op != ssa.OpFwdRef {
				continue
			}
			n := s.varnum[v.Aux.(fwdRefAux).N]
			n := s.varnum[v.Aux.(*Node)]
			v.Op = ssa.OpCopy
			v.Aux = nil
			v.AddArg(values[n])
@@ -449,11 +432,11 @@ func (s *sparseSet) clear() {

// Variant to use for small functions.
type simplePhiState struct {
	s         *state                   // SSA state
	f         *ssa.Func                // function to work on
	fwdrefs   []*ssa.Value             // list of FwdRefs to be processed
	defvars   []map[ir.Node]*ssa.Value // defined variables at end of each block
	reachable []bool                   // which blocks are reachable
	s         *state                 // SSA state
	f         *ssa.Func              // function to work on
	fwdrefs   []*ssa.Value           // list of FwdRefs to be processed
	defvars   []map[*Node]*ssa.Value // defined variables at end of each block
	reachable []bool                 // which blocks are reachable
}

func (s *simplePhiState) insertPhis() {
@@ -466,7 +449,7 @@ func (s *simplePhiState) insertPhis() {
			continue
		}
		s.fwdrefs = append(s.fwdrefs, v)
		var_ := v.Aux.(fwdRefAux).N
		var_ := v.Aux.(*Node)
		if _, ok := s.defvars[b.ID][var_]; !ok {
			s.defvars[b.ID][var_] = v // treat FwdDefs as definitions.
		}
@@ -480,7 +463,7 @@ loop:
	v := s.fwdrefs[len(s.fwdrefs)-1]
	s.fwdrefs = s.fwdrefs[:len(s.fwdrefs)-1]
	b := v.Block
	var_ := v.Aux.(fwdRefAux).N
	var_ := v.Aux.(*Node)
	if b == s.f.Entry {
		// No variable should be live at entry.
		s.s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, var_, v)
@@ -528,7 +511,7 @@ loop:
}

// lookupVarOutgoing finds the variable's value at the end of block b.
func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t *types.Type, var_ ir.Node, line src.XPos) *ssa.Value {
func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t *types.Type, var_ *Node, line src.XPos) *ssa.Value {
	for {
		if v := s.defvars[b.ID][var_]; v != nil {
			return v
@@ -547,11 +530,9 @@ func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t *types.Type, var_ ir.
		}
	}
	// Generate a FwdRef for the variable and return that.
	v := b.NewValue0A(line, ssa.OpFwdRef, t, fwdRefAux{N: var_})
	v := b.NewValue0A(line, ssa.OpFwdRef, t, var_)
	s.defvars[b.ID][var_] = v
	if var_.Op() == ir.ONAME {
		s.s.addNamedValue(var_.(*ir.Name), v)
	}
	s.s.addNamedValue(var_, v)
	s.fwdrefs = append(s.fwdrefs, v)
	return v
}
File diff suppressed because it is too large
@@ -1,43 +1,15 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package base
package gc

import (
	"os"
	"cmd/compile/internal/types"
	"cmd/internal/src"
	"cmd/internal/sys"
)

var atExitFuncs []func()

func AtExit(f func()) {
	atExitFuncs = append(atExitFuncs, f)
}

func Exit(code int) {
	for i := len(atExitFuncs) - 1; i >= 0; i-- {
		f := atExitFuncs[i]
		atExitFuncs = atExitFuncs[:i]
		f()
	}
	os.Exit(code)
}

// To enable tracing support (-t flag), set EnableTrace to true.
const EnableTrace = false

func Compiling(pkgs []string) bool {
	if Ctxt.Pkgpath != "" {
		for _, p := range pkgs {
			if Ctxt.Pkgpath == p {
				return true
			}
		}
	}

	return false
}

// The racewalk pass is currently handled in three parts.
//
// First, for flag_race, it inserts calls to racefuncenter and
@@ -60,7 +32,7 @@ func Compiling(pkgs []string) bool {

// Do not instrument the following packages at all,
// at best instrumentation would cause infinite recursion.
var NoInstrumentPkgs = []string{
var omit_pkgs = []string{
	"runtime/internal/atomic",
	"runtime/internal/sys",
	"runtime/internal/math",
@@ -72,4 +44,50 @@ var NoInstrumentPkgs = []string{

// Don't insert racefuncenterfp/racefuncexit into the following packages.
// Memory accesses in the packages are either uninteresting or will cause false positives.
var NoRacePkgs = []string{"sync", "sync/atomic"}
var norace_inst_pkgs = []string{"sync", "sync/atomic"}

func ispkgin(pkgs []string) bool {
	if myimportpath != "" {
		for _, p := range pkgs {
			if myimportpath == p {
				return true
			}
		}
	}

	return false
}

func instrument(fn *Node) {
	if fn.Func.Pragma&Norace != 0 {
		return
	}

	if !flag_race || !ispkgin(norace_inst_pkgs) {
		fn.Func.SetInstrumentBody(true)
	}

	if flag_race {
		lno := lineno
		lineno = src.NoXPos

		if thearch.LinkArch.Arch.Family != sys.AMD64 {
			fn.Func.Enter.Prepend(mkcall("racefuncenterfp", nil, nil))
			fn.Func.Exit.Append(mkcall("racefuncexit", nil, nil))
		} else {

			// nodpc is the PC of the caller as extracted by
			// getcallerpc. We use -widthptr(FP) for x86.
			// This only works for amd64. This will not
			// work on arm or others that might support
			// race in the future.
			nodpc := nodfp.copy()
			nodpc.Type = types.Types[TUINTPTR]
			nodpc.Xoffset = int64(-Widthptr)
			fn.Func.Dcl = append(fn.Func.Dcl, nodpc)
			fn.Func.Enter.Prepend(mkcall("racefuncenter", nil, nil, nodpc))
			fn.Func.Exit.Append(mkcall("racefuncexit", nil, nil))
		}
		lineno = lno
	}
}
628
src/cmd/compile/internal/gc/range.go
Normal file
@@ -0,0 +1,628 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/compile/internal/types"
	"cmd/internal/sys"
	"unicode/utf8"
)

// range
func typecheckrange(n *Node) {
	// Typechecking order is important here:
	// 0. first typecheck range expression (slice/map/chan),
	//    it is evaluated only once and so logically it is not part of the loop.
	// 1. typecheck produced values,
	//    this part can declare new vars and so it must be typechecked before body,
	//    because body can contain a closure that captures the vars.
	// 2. decldepth++ to denote loop body.
	// 3. typecheck body.
	// 4. decldepth--.
	typecheckrangeExpr(n)

	// second half of dance, the first half being typecheckrangeExpr
	n.SetTypecheck(1)
	ls := n.List.Slice()
	for i1, n1 := range ls {
		if n1.Typecheck() == 0 {
			ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
		}
	}

	decldepth++
	typecheckslice(n.Nbody.Slice(), ctxStmt)
	decldepth--
}

func typecheckrangeExpr(n *Node) {
|
||||
n.Right = typecheck(n.Right, ctxExpr)
|
||||
|
||||
t := n.Right.Type
|
||||
if t == nil {
|
||||
return
|
||||
}
|
||||
// delicate little dance. see typecheckas2
|
||||
ls := n.List.Slice()
|
||||
for i1, n1 := range ls {
|
||||
if n1.Name == nil || n1.Name.Defn != n {
|
||||
ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
|
||||
}
|
||||
}
|
||||
|
||||
if t.IsPtr() && t.Elem().IsArray() {
|
||||
t = t.Elem()
|
||||
}
|
||||
n.Type = t
|
||||
|
||||
var t1, t2 *types.Type
|
||||
toomany := false
|
||||
switch t.Etype {
|
||||
default:
|
||||
yyerrorl(n.Pos, "cannot range over %L", n.Right)
|
||||
return
|
||||
|
||||
case TARRAY, TSLICE:
|
||||
t1 = types.Types[TINT]
|
||||
t2 = t.Elem()
|
||||
|
||||
case TMAP:
|
||||
t1 = t.Key()
|
||||
t2 = t.Elem()
|
||||
|
||||
case TCHAN:
|
||||
if !t.ChanDir().CanRecv() {
|
||||
yyerrorl(n.Pos, "invalid operation: range %v (receive from send-only type %v)", n.Right, n.Right.Type)
|
||||
return
|
||||
}
|
||||
|
||||
t1 = t.Elem()
|
||||
t2 = nil
|
||||
if n.List.Len() == 2 {
|
||||
toomany = true
|
||||
}
|
||||
|
||||
case TSTRING:
|
||||
t1 = types.Types[TINT]
|
||||
t2 = types.Runetype
|
||||
}
|
||||
|
||||
if n.List.Len() > 2 || toomany {
|
||||
yyerrorl(n.Pos, "too many variables in range")
|
||||
}
|
||||
|
||||
var v1, v2 *Node
|
||||
if n.List.Len() != 0 {
|
||||
v1 = n.List.First()
|
||||
}
|
||||
if n.List.Len() > 1 {
|
||||
v2 = n.List.Second()
|
||||
}
|
||||
|
||||
// this is not only an optimization but also a requirement in the spec.
|
||||
// "if the second iteration variable is the blank identifier, the range
|
||||
// clause is equivalent to the same clause with only the first variable
|
||||
// present."
|
||||
if v2.isBlank() {
|
||||
if v1 != nil {
|
||||
n.List.Set1(v1)
|
||||
}
|
||||
v2 = nil
|
||||
}
|
||||
|
||||
if v1 != nil {
|
||||
if v1.Name != nil && v1.Name.Defn == n {
|
||||
v1.Type = t1
|
||||
} else if v1.Type != nil {
|
||||
if op, why := assignop(t1, v1.Type); op == OXXX {
|
||||
yyerrorl(n.Pos, "cannot assign type %v to %L in range%s", t1, v1, why)
|
||||
}
|
||||
}
|
||||
checkassign(n, v1)
|
||||
}
|
||||
|
||||
if v2 != nil {
|
||||
if v2.Name != nil && v2.Name.Defn == n {
|
||||
v2.Type = t2
|
||||
} else if v2.Type != nil {
|
||||
if op, why := assignop(t2, v2.Type); op == OXXX {
|
||||
yyerrorl(n.Pos, "cannot assign type %v to %L in range%s", t2, v2, why)
|
||||
}
|
||||
}
|
||||
checkassign(n, v2)
|
||||
}
|
||||
}
|
||||
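The rules typecheckrange enforces are visible at the language level: the range expression is typechecked (and evaluated) once before the loop, and a blank second variable makes the clause equivalent to the single-variable form, per the spec quote above. A small illustrative program (not part of this diff):

package main

import "fmt"

func main() {
	s := []int{1, 2, 3}

	// The range expression is evaluated only once, so reassigning s
	// inside the body does not change the iteration: this still
	// runs for i = 0, 1, 2.
	for i := range s {
		if i == 0 {
			s = s[:1]
		}
	}

	// "for i, _ := range s" is equivalent to "for i := range s":
	// the element is never loaded when the second variable is blank.
	for i, _ := range s {
		fmt.Println(i)
	}
}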
func cheapComputableIndex(width int64) bool {
	switch thearch.LinkArch.Family {
	// MIPS does not have R+R addressing
	// Arm64 may lack ability to generate this code in our assembler,
	// but the architecture supports it.
	case sys.PPC64, sys.S390X:
		return width == 1
	case sys.AMD64, sys.I386, sys.ARM64, sys.ARM:
		switch width {
		case 1, 2, 4, 8:
			return true
		}
	}
	return false
}
// walkrange transforms various forms of ORANGE into
// simpler forms. The result must be assigned back to n.
// Node n may also be modified in place, and may also be
// the returned node.
func walkrange(n *Node) *Node {
	if isMapClear(n) {
		m := n.Right
		lno := setlineno(m)
		n = mapClear(m)
		lineno = lno
		return n
	}

	// variable name conventions:
	//	ohv1, hv1, hv2: hidden (old) val 1, 2
	//	ha, hit: hidden aggregate, iterator
	//	hn, hp: hidden len, pointer
	//	hb: hidden bool
	//	a, v1, v2: not hidden aggregate, val 1, 2

	t := n.Type

	a := n.Right
	lno := setlineno(a)
	n.Right = nil

	var v1, v2 *Node
	l := n.List.Len()
	if l > 0 {
		v1 = n.List.First()
	}

	if l > 1 {
		v2 = n.List.Second()
	}

	if v2.isBlank() {
		v2 = nil
	}

	if v1.isBlank() && v2 == nil {
		v1 = nil
	}

	if v1 == nil && v2 != nil {
		Fatalf("walkrange: v2 != nil while v1 == nil")
	}

	// n.List has no meaning anymore, clear it
	// to avoid erroneous processing by racewalk.
	n.List.Set(nil)

	var ifGuard *Node

	translatedLoopOp := OFOR

	var body []*Node
	var init []*Node
	switch t.Etype {
	default:
		Fatalf("walkrange")

	case TARRAY, TSLICE:
		if arrayClear(n, v1, v2, a) {
			lineno = lno
			return n
		}

		// order.stmt arranged for a copy of the array/slice variable if needed.
		ha := a

		hv1 := temp(types.Types[TINT])
		hn := temp(types.Types[TINT])

		init = append(init, nod(OAS, hv1, nil))
		init = append(init, nod(OAS, hn, nod(OLEN, ha, nil)))

		n.Left = nod(OLT, hv1, hn)
		n.Right = nod(OAS, hv1, nod(OADD, hv1, nodintconst(1)))

		// for range ha { body }
		if v1 == nil {
			break
		}

		// for v1 := range ha { body }
		if v2 == nil {
			body = []*Node{nod(OAS, v1, hv1)}
			break
		}

		// for v1, v2 := range ha { body }
		if cheapComputableIndex(n.Type.Elem().Width) {
			// v1, v2 = hv1, ha[hv1]
			tmp := nod(OINDEX, ha, hv1)
			tmp.SetBounded(true)
			// Use OAS2 to correctly handle assignments
			// of the form "v1, a[v1] := range".
			a := nod(OAS2, nil, nil)
			a.List.Set2(v1, v2)
			a.Rlist.Set2(hv1, tmp)
			body = []*Node{a}
			break
		}

		// TODO(austin): OFORUNTIL is a strange beast, but is
		// necessary for expressing the control flow we need
		// while also making "break" and "continue" work. It
		// would be nice to just lower ORANGE during SSA, but
		// racewalk needs to see many of the operations
		// involved in ORANGE's implementation. If racewalk
		// moves into SSA, consider moving ORANGE into SSA and
		// eliminating OFORUNTIL.

		// TODO(austin): OFORUNTIL inhibits bounds-check
		// elimination on the index variable (see #20711).
		// Enhance the prove pass to understand this.
		ifGuard = nod(OIF, nil, nil)
		ifGuard.Left = nod(OLT, hv1, hn)
		translatedLoopOp = OFORUNTIL

		hp := temp(types.NewPtr(n.Type.Elem()))
		tmp := nod(OINDEX, ha, nodintconst(0))
		tmp.SetBounded(true)
		init = append(init, nod(OAS, hp, nod(OADDR, tmp, nil)))

		// Use OAS2 to correctly handle assignments
		// of the form "v1, a[v1] := range".
		a := nod(OAS2, nil, nil)
		a.List.Set2(v1, v2)
		a.Rlist.Set2(hv1, nod(ODEREF, hp, nil))
		body = append(body, a)

		// Advance pointer as part of the late increment.
		//
		// This runs *after* the condition check, so we know
		// advancing the pointer is safe and won't go past the
		// end of the allocation.
		a = nod(OAS, hp, addptr(hp, t.Elem().Width))
		a = typecheck(a, ctxStmt)
		n.List.Set1(a)

	case TMAP:
		// order.stmt allocated the iterator for us.
		// we only use a once, so no copy needed.
		ha := a

		hit := prealloc[n]
		th := hit.Type
		n.Left = nil
		keysym := th.Field(0).Sym  // depends on layout of iterator struct. See reflect.go:hiter
		elemsym := th.Field(1).Sym // ditto

		fn := syslook("mapiterinit")

		fn = substArgTypes(fn, t.Key(), t.Elem(), th)
		init = append(init, mkcall1(fn, nil, nil, typename(t), ha, nod(OADDR, hit, nil)))
		n.Left = nod(ONE, nodSym(ODOT, hit, keysym), nodnil())

		fn = syslook("mapiternext")
		fn = substArgTypes(fn, th)
		n.Right = mkcall1(fn, nil, nil, nod(OADDR, hit, nil))

		key := nodSym(ODOT, hit, keysym)
		key = nod(ODEREF, key, nil)
		if v1 == nil {
			body = nil
		} else if v2 == nil {
			body = []*Node{nod(OAS, v1, key)}
		} else {
			elem := nodSym(ODOT, hit, elemsym)
			elem = nod(ODEREF, elem, nil)
			a := nod(OAS2, nil, nil)
			a.List.Set2(v1, v2)
			a.Rlist.Set2(key, elem)
			body = []*Node{a}
		}

	case TCHAN:
		// order.stmt arranged for a copy of the channel variable.
		ha := a

		n.Left = nil

		hv1 := temp(t.Elem())
		hv1.SetTypecheck(1)
		if t.Elem().HasPointers() {
			init = append(init, nod(OAS, hv1, nil))
		}
		hb := temp(types.Types[TBOOL])

		n.Left = nod(ONE, hb, nodbool(false))
		a := nod(OAS2RECV, nil, nil)
		a.SetTypecheck(1)
		a.List.Set2(hv1, hb)
		a.Right = nod(ORECV, ha, nil)
		n.Left.Ninit.Set1(a)
		if v1 == nil {
			body = nil
		} else {
			body = []*Node{nod(OAS, v1, hv1)}
		}
		// Zero hv1. This prevents hv1 from being the sole, inaccessible
		// reference to an otherwise GC-able value during the next channel receive.
		// See issue 15281.
		body = append(body, nod(OAS, hv1, nil))

	case TSTRING:
		// Transform string range statements like "for v1, v2 = range a" into
		//
		// ha := a
		// for hv1 := 0; hv1 < len(ha); {
		//	hv1t := hv1
		//	hv2 := rune(ha[hv1])
		//	if hv2 < utf8.RuneSelf {
		//		hv1++
		//	} else {
		//		hv2, hv1 = decoderune(ha, hv1)
		//	}
		//	v1, v2 = hv1t, hv2
		//	// original body
		// }

		// order.stmt arranged for a copy of the string variable.
		ha := a

		hv1 := temp(types.Types[TINT])
		hv1t := temp(types.Types[TINT])
		hv2 := temp(types.Runetype)

		// hv1 := 0
		init = append(init, nod(OAS, hv1, nil))

		// hv1 < len(ha)
		n.Left = nod(OLT, hv1, nod(OLEN, ha, nil))

		if v1 != nil {
			// hv1t = hv1
			body = append(body, nod(OAS, hv1t, hv1))
		}

		// hv2 := rune(ha[hv1])
		nind := nod(OINDEX, ha, hv1)
		nind.SetBounded(true)
		body = append(body, nod(OAS, hv2, conv(nind, types.Runetype)))

		// if hv2 < utf8.RuneSelf
		nif := nod(OIF, nil, nil)
		nif.Left = nod(OLT, hv2, nodintconst(utf8.RuneSelf))

		// hv1++
		nif.Nbody.Set1(nod(OAS, hv1, nod(OADD, hv1, nodintconst(1))))

		// } else {
		eif := nod(OAS2, nil, nil)
		nif.Rlist.Set1(eif)

		// hv2, hv1 = decoderune(ha, hv1)
		eif.List.Set2(hv2, hv1)
		fn := syslook("decoderune")
		eif.Rlist.Set1(mkcall1(fn, fn.Type.Results(), nil, ha, hv1))

		body = append(body, nif)

		if v1 != nil {
			if v2 != nil {
				// v1, v2 = hv1t, hv2
				a := nod(OAS2, nil, nil)
				a.List.Set2(v1, v2)
				a.Rlist.Set2(hv1t, hv2)
				body = append(body, a)
			} else {
				// v1 = hv1t
				body = append(body, nod(OAS, v1, hv1t))
			}
		}
	}

	n.Op = translatedLoopOp
	typecheckslice(init, ctxStmt)

	if ifGuard != nil {
		ifGuard.Ninit.Append(init...)
		ifGuard = typecheck(ifGuard, ctxStmt)
	} else {
		n.Ninit.Append(init...)
	}

	typecheckslice(n.Left.Ninit.Slice(), ctxStmt)

	n.Left = typecheck(n.Left, ctxExpr)
	n.Left = defaultlit(n.Left, nil)
	n.Right = typecheck(n.Right, ctxStmt)
	typecheckslice(body, ctxStmt)
	n.Nbody.Prepend(body...)

	if ifGuard != nil {
		ifGuard.Nbody.Set1(n)
		n = ifGuard
	}

	n = walkstmt(n)

	lineno = lno
	return n
}
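At the source level, the TARRAY/TSLICE lowering above (the index form, taken when cheapComputableIndex reports the element width is cheap) corresponds to a rewrite like this hand-written sketch; names follow the hidden-variable conventions in the comment at the top of walkrange (illustrative only, not compiler output):

package main

import "fmt"

// rangeByIndex is the hand-written analogue of the index-based lowering
// of "for i, v := range s": one hidden copy of the slice, the length
// read once, and a bounded index access.
func rangeByIndex(s []int32, use func(int, int32)) {
	ha := s       // hidden aggregate: the range expression, copied once
	hn := len(ha) // hidden len, evaluated once
	for hv1 := 0; hv1 < hn; hv1++ {
		use(hv1, ha[hv1]) // hv1 < hn, so the bounds check can be elided
	}
}

func main() {
	rangeByIndex([]int32{7, 8, 9}, func(i int, v int32) {
		fmt.Println(i, v)
	})
}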
// isMapClear checks if n is of the form:
//
// for k := range m {
//	delete(m, k)
// }
//
// where == for keys of map m is reflexive.
func isMapClear(n *Node) bool {
	if Debug.N != 0 || instrumenting {
		return false
	}

	if n.Op != ORANGE || n.Type.Etype != TMAP || n.List.Len() != 1 {
		return false
	}

	k := n.List.First()
	if k == nil || k.isBlank() {
		return false
	}

	// Require k to be a new variable name.
	if k.Name == nil || k.Name.Defn != n {
		return false
	}

	if n.Nbody.Len() != 1 {
		return false
	}

	stmt := n.Nbody.First() // only stmt in body
	if stmt == nil || stmt.Op != ODELETE {
		return false
	}

	m := n.Right
	if !samesafeexpr(stmt.List.First(), m) || !samesafeexpr(stmt.List.Second(), k) {
		return false
	}

	// Keys where equality is not reflexive can not be deleted from maps.
	if !isreflexive(m.Type.Key()) {
		return false
	}

	return true
}
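The shape isMapClear matches is an ordinary Go idiom; when it matches, walkrange replaces the whole loop with the single runtime call built by mapClear below. An illustrative example of a qualifying loop (not part of this diff):

package main

import "fmt"

func main() {
	m := map[string]int{"a": 1, "b": 2}

	// Recognized map-clear idiom: the compiler can lower this loop
	// to a single runtime mapclear call instead of iterating.
	for k := range m {
		delete(m, k)
	}

	fmt.Println(len(m)) // 0
}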
// mapClear constructs a call to runtime.mapclear for the map m.
func mapClear(m *Node) *Node {
	t := m.Type

	// instantiate mapclear(typ *type, hmap map[any]any)
	fn := syslook("mapclear")
	fn = substArgTypes(fn, t.Key(), t.Elem())
	n := mkcall1(fn, nil, nil, typename(t), m)

	n = typecheck(n, ctxStmt)
	n = walkstmt(n)

	return n
}
// Lower n into runtime·memclr if possible, for
// fast zeroing of slices and arrays (issue 5373).
// Look for instances of
//
// for i := range a {
//	a[i] = zero
// }
//
// in which the evaluation of a is side-effect-free.
//
// Parameters are as in walkrange: "for v1, v2 = range a".
func arrayClear(n, v1, v2, a *Node) bool {
	if Debug.N != 0 || instrumenting {
		return false
	}

	if v1 == nil || v2 != nil {
		return false
	}

	if n.Nbody.Len() != 1 || n.Nbody.First() == nil {
		return false
	}

	stmt := n.Nbody.First() // only stmt in body
	if stmt.Op != OAS || stmt.Left.Op != OINDEX {
		return false
	}

	if !samesafeexpr(stmt.Left.Left, a) || !samesafeexpr(stmt.Left.Right, v1) {
		return false
	}

	elemsize := n.Type.Elem().Width
	if elemsize <= 0 || !isZero(stmt.Right) {
		return false
	}

	// Convert to
	// if len(a) != 0 {
	//	hp = &a[0]
	//	hn = len(a)*sizeof(elem(a))
	//	memclr{NoHeap,Has}Pointers(hp, hn)
	//	i = len(a) - 1
	// }
	n.Op = OIF

	n.Nbody.Set(nil)
	n.Left = nod(ONE, nod(OLEN, a, nil), nodintconst(0))

	// hp = &a[0]
	hp := temp(types.Types[TUNSAFEPTR])

	tmp := nod(OINDEX, a, nodintconst(0))
	tmp.SetBounded(true)
	tmp = nod(OADDR, tmp, nil)
	tmp = convnop(tmp, types.Types[TUNSAFEPTR])
	n.Nbody.Append(nod(OAS, hp, tmp))

	// hn = len(a) * sizeof(elem(a))
	hn := temp(types.Types[TUINTPTR])

	tmp = nod(OLEN, a, nil)
	tmp = nod(OMUL, tmp, nodintconst(elemsize))
	tmp = conv(tmp, types.Types[TUINTPTR])
	n.Nbody.Append(nod(OAS, hn, tmp))

	var fn *Node
	if a.Type.Elem().HasPointers() {
		// memclrHasPointers(hp, hn)
		Curfn.Func.setWBPos(stmt.Pos)
		fn = mkcall("memclrHasPointers", nil, nil, hp, hn)
	} else {
		// memclrNoHeapPointers(hp, hn)
		fn = mkcall("memclrNoHeapPointers", nil, nil, hp, hn)
	}

	n.Nbody.Append(fn)

	// i = len(a) - 1
	v1 = nod(OAS, v1, nod(OSUB, nod(OLEN, a, nil), nodintconst(1)))

	n.Nbody.Append(v1)

	n.Left = typecheck(n.Left, ctxExpr)
	n.Left = defaultlit(n.Left, nil)
	typecheckslice(n.Nbody.Slice(), ctxStmt)
	n = walkstmt(n)
	return true
}
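Likewise, arrayClear fires on a plain source idiom: a range loop whose only statement stores the zero value at index i. A sketch contrasting a recognized loop with one that is not (illustrative; memclrNoHeapPointers is the runtime helper named above):

package clearexample

// Recognized: single-statement body assigning the zero value at index i.
func clear(a []int) {
	for i := range a {
		a[i] = 0 // lowered to one memclrNoHeapPointers call over len(a) elements
	}
}

// Not recognized (non-zero value): compiled as an ordinary loop.
func fill(a []int) {
	for i := range a {
		a[i] = 1
	}
}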
// addptr returns (*T)(uintptr(p) + n).
func addptr(p *Node, n int64) *Node {
	t := p.Type

	p = nod(OCONVNOP, p, nil)
	p.Type = types.Types[TUINTPTR]

	p = nod(OADD, p, nodintconst(n))

	p = nod(OCONVNOP, p, nil)
	p.Type = t

	return p
}
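addptr builds the compiler-internal form of (*T)(uintptr(p) + n). Written as ordinary user code against the unsafe package, the same computation looks like the sketch below; it is only valid because, as the walkrange comment notes, the pointer is advanced while it still points into the slice's allocation (illustrative, not part of this diff):

package main

import (
	"fmt"
	"unsafe"
)

// addPtr returns (*int32)(uintptr(p) + n), mirroring what the
// compiler's addptr constructs for the pointer-based range loop.
// The uintptr round trip happens in a single expression, per the
// unsafe.Pointer conversion rules.
func addPtr(p *int32, n int64) *int32 {
	return (*int32)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + uintptr(n)))
}

func main() {
	s := []int32{10, 20, 30}
	p := &s[0]
	p = addPtr(p, int64(unsafe.Sizeof(s[0]))) // step to &s[1]
	fmt.Println(*p)                           // 20
}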
File diff suppressed because it is too large
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package test
package gc_test

import (
	"bytes"
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ir
package gc

// Strongly connected components.
//
@@ -30,13 +30,13 @@ package ir
// when analyzing a set of mutually recursive functions.

type bottomUpVisitor struct {
	analyze  func([]*Func, bool)
	analyze  func([]*Node, bool)
	visitgen uint32
	nodeID   map[*Func]uint32
	stack    []*Func
	nodeID   map[*Node]uint32
	stack    []*Node
}

// VisitFuncsBottomUp invokes analyze on the ODCLFUNC nodes listed in list.
// visitBottomUp invokes analyze on the ODCLFUNC nodes listed in list.
// It calls analyze with successive groups of functions, working from
// the bottom of the call graph upward. Each time analyze is called with
// a list of functions, every function on that list only calls other functions
@@ -49,21 +49,18 @@ type bottomUpVisitor struct {
// If recursive is false, the list consists of only a single function and its closures.
// If recursive is true, the list may still contain only a single function,
// if that function is itself recursive.
func VisitFuncsBottomUp(list []Node, analyze func(list []*Func, recursive bool)) {
func visitBottomUp(list []*Node, analyze func(list []*Node, recursive bool)) {
	var v bottomUpVisitor
	v.analyze = analyze
	v.nodeID = make(map[*Func]uint32)
	v.nodeID = make(map[*Node]uint32)
	for _, n := range list {
		if n.Op() == ODCLFUNC {
			n := n.(*Func)
			if !n.IsHiddenClosure() {
				v.visit(n)
			}
		if n.Op == ODCLFUNC && !n.Func.IsHiddenClosure() {
			v.visit(n)
		}
	}
}

func (v *bottomUpVisitor) visit(n *Func) uint32 {
func (v *bottomUpVisitor) visit(n *Node) uint32 {
	if id := v.nodeID[n]; id > 0 {
		// already visited
		return id
@@ -76,31 +73,42 @@ func (v *bottomUpVisitor) visit(n *Func) uint32 {
	min := v.visitgen
	v.stack = append(v.stack, n)

	do := func(defn Node) {
		if defn != nil {
			if m := v.visit(defn.(*Func)); m < min {
	inspectList(n.Nbody, func(n *Node) bool {
		switch n.Op {
		case ONAME:
			if n.Class() == PFUNC {
				if n.isMethodExpression() {
					n = asNode(n.Type.Nname())
				}
				if n != nil && n.Name.Defn != nil {
					if m := v.visit(n.Name.Defn); m < min {
						min = m
					}
				}
			}
		case ODOTMETH:
			fn := asNode(n.Type.Nname())
			if fn != nil && fn.Op == ONAME && fn.Class() == PFUNC && fn.Name.Defn != nil {
				if m := v.visit(fn.Name.Defn); m < min {
					min = m
				}
			}
		case OCALLPART:
			fn := asNode(callpartMethod(n).Type.Nname())
			if fn != nil && fn.Op == ONAME && fn.Class() == PFUNC && fn.Name.Defn != nil {
				if m := v.visit(fn.Name.Defn); m < min {
					min = m
				}
			}
		case OCLOSURE:
			if m := v.visit(n.Func.Closure); m < min {
				min = m
			}
		}
	}

	Visit(n, func(n Node) {
		switch n.Op() {
		case ONAME:
			if n := n.(*Name); n.Class == PFUNC {
				do(n.Defn)
			}
		case ODOTMETH, OCALLPART, OMETHEXPR:
			if fn := MethodExprName(n); fn != nil {
				do(fn.Defn)
			}
		case OCLOSURE:
			n := n.(*ClosureExpr)
			do(n.Func)
		}
		return true
	})

	if (min == id || min == id+1) && !n.IsHiddenClosure() {
	if (min == id || min == id+1) && !n.Func.IsHiddenClosure() {
		// This node is the root of a strongly connected component.

		// The original min passed to visitcodelist was v.nodeID[n]+1.
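The visitor above is Tarjan's strongly-connected-components algorithm specialized to the call graph: each root hands its component to analyze, working bottom-up. A self-contained sketch of the same scheme over a plain string graph (illustrative; the compiler's version walks IR nodes instead of an edge map):

package main

import "fmt"

// sccVisitor mirrors the compiler's bottomUpVisitor on a string graph.
type sccVisitor struct {
	graph    map[string][]string // edges: caller -> callees
	visitgen uint32
	nodeID   map[string]uint32
	stack    []string
	analyze  func(group []string, recursive bool)
}

func (v *sccVisitor) visit(n string) uint32 {
	if id := v.nodeID[n]; id > 0 {
		return id // already visited (or done, with a huge id)
	}

	v.visitgen++
	id := v.visitgen
	v.nodeID[n] = id
	v.visitgen++
	min := v.visitgen
	v.stack = append(v.stack, n)

	for _, callee := range v.graph[n] {
		if m := v.visit(callee); m < min {
			min = m
		}
	}

	if min == id || min == id+1 {
		// n is the root of a strongly connected component.
		recursive := min == id // a back edge reached n itself

		// Pop the component off the stack, marking members done with a
		// huge id so later visits don't lower a caller's min.
		i := len(v.stack) - 1
		for v.stack[i] != n {
			v.nodeID[v.stack[i]] = ^uint32(0)
			i--
		}
		v.nodeID[n] = ^uint32(0)
		group := v.stack[i:]
		v.stack = v.stack[:i]
		v.analyze(group, recursive)
	}
	return min
}

func main() {
	v := &sccVisitor{
		graph:  map[string][]string{"a": {"b"}, "b": {"c"}, "c": {"b"}},
		nodeID: map[string]uint32{},
		analyze: func(group []string, recursive bool) {
			fmt.Println(group, recursive)
		},
	}
	v.visit("a") // prints [b c] true, then [a] false
}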
Some files were not shown because too many files have changed in this diff