Mirror of https://github.com/golang/go.git (synced 2026-01-29 15:12:08 +03:00)

Compare commits: dev.boring...go1.14.2 (50 commits)
Commit SHAs:

- 96745b980c
- bec8e9c9bd
- 2e44aa30f0
- c8d1e4cf83
- ea3a94c92e
- edea4a79e8
- 8980ff45cf
- cdd55a324b
- e6f15fab0a
- 9d7dad18db
- 612ef03a23
- b43b463d8f
- 21f453b848
- 74870669fc
- ca153f4db7
- f75a45c4d7
- ab9d037401
- 564c76a268
- b620f6fde5
- e577ba98d8
- 229247d33b
- c5125098b2
- adba22a9ae
- fd85ff5ee0
- 8e804f19b6
- 6717d27be2
- 9c41c1d8dc
- 2e08d80732
- 76a6adcf3a
- 0e9f7ac7ca
- e6036e7da5
- c54e36905b
- 329317472f
- 99f8de7339
- 3dcb516d42
- 20a838ab94
- c49910abc3
- 6a7f08952e
- 8ced42e78b
- f63e55b541
- 17acbdb357
- babeec29aa
- b4dca6416f
- f5293d77a9
- 51534757da
- d898c7b544
- 29ccdfc853
- 3f0cdedfdd
- a068054af1
- 331b8661a0
CONTRIBUTORS (22 changed lines)

@@ -26,6 +26,7 @@
Aamir Khan <syst3m.w0rm@gmail.com>
Aaron Beitch <aaronb@arista.com>
Aaron Bieber <deftly@gmail.com>
Aaron Cannon <cannona@fireantproductions.com>
Aaron France <aaron.l.france@gmail.com>
Aaron Jacobs <jacobsa@google.com>

@@ -48,6 +49,7 @@ Adam Shannon <adamkshannon@gmail.com>
Adam Shelton <aashelt90@gmail.com>
Adam Sindelar <adamsh@google.com>
Adam Thomason <athomason@gmail.com>
Adam Williams <pwnfactory@gmail.com>
Adam Woodbeck <adam@woodbeck.net>
Adarsh Ravichandran <adarshravichandran91@gmail.com>
Aditya Harindar <aditya.harindar@gmail.com>

@@ -276,6 +278,7 @@ Awn Umar <awn@cryptolosophy.io>
Axel Wagner <axel.wagner.hh@googlemail.com>
Ayan George <ayan@ayan.net>
Ayanamist Yang <ayanamist@gmail.com>
Ayke van Laethem <aykevanlaethem@gmail.com>
Aymerick Jéhanne <aymerick@jehanne.org>
Azat Kaumov <kaumov.a.r@gmail.com>
Baiju Muthukadan <baiju.m.mail@gmail.com>

@@ -338,6 +341,7 @@ Brad Jones <rbjones@google.com>
Brad Morgan <brad@morgabra.com>
Brad Whitaker <bwhitaker@fastly.com>
Braden Bassingthwaite <bbassingthwaite@vendasta.com>
Bradford Lamson-Scribner <brad.lamson@gmail.com>
Bradley Falzon <brad@teambrad.net>
Brady Catherman <brady@gmail.com>
Brady Sullivan <brady@bsull.com>

@@ -351,6 +355,7 @@ Brett Cannon <bcannon@gmail.com>
Brett Merrill <brett.j.merrill94@gmail.com>
Brian Dellisanti <briandellisanti@gmail.com>
Brian Downs <brian.downs@gmail.com>
Brian Falk <falk@logicparty.org>
Brian G. Merrell <bgmerrell@gmail.com>
Brian Gitonga Marete <marete@toshnix.com> <bgmarete@gmail.com> <bgm@google.com>
Brian Kennedy <btkennedy@gmail.com>

@@ -404,6 +409,7 @@ Charles L. Dorian <cldorian@gmail.com>
Charles Lee <zombie.fml@gmail.com>
Charles Weill <weill@google.com>
Chauncy Cullitan <chauncyc@google.com>
Chen Zhihan <energiehund@gmail.com>
Cherry Zhang <cherryyz@google.com>
Chew Choon Keat <choonkeat@gmail.com>
Cholerae Hu <choleraehyq@gmail.com>

@@ -442,6 +448,7 @@ Christopher Cahoon <chris.cahoon@gmail.com>
Christopher Guiney <chris@guiney.net>
Christopher Henderson <chris@chenderson.org>
Christopher Koch <chrisko@google.com>
Christopher Loessl <cloessl+github@gmail.com>
Christopher Nelson <nadiasvertex@gmail.com>
Christopher Nielsen <m4dh4tt3r@gmail.com>
Christopher Redden <christopher.redden@gmail.com>

@@ -739,12 +746,14 @@ Frank Somers <fsomers@arista.com>
Frederic Guillot <frederic.guillot@gmail.com>
Frederick Kelly Mayle III <frederickmayle@gmail.com>
Frederik Ring <frederik.ring@gmail.com>
Frederik Zipp <fzipp@gmx.de>
Fredrik Enestad <fredrik.enestad@soundtrackyourbrand.com>
Fredrik Forsmo <fredrik.forsmo@gmail.com>
Fredrik Wallgren <fredrik.wallgren@gmail.com>
Frew Schmidt <github@frew.co>
Frithjof Schulze <schulze@math.uni-hannover.de> <sfrithjof@gmail.com>
Frits van Bommel <fvbommel@gmail.com>
Fujimoto Kyosuke <kyoro.f@gmail.com>
Fumitoshi Ukai <ukai@google.com>
G. Hussain Chinoy <ghchinoy@gmail.com>
Gaal Yahas <gaal@google.com>

@@ -803,6 +812,7 @@ GitHub User @frennkie (6499251) <mail@rhab.de>
GitHub User @hengwu0 (41297446) <41297446+hengwu0@users.noreply.github.com>
GitHub User @itchyny (375258) <itchyny@hatena.ne.jp>
GitHub User @jinmiaoluo (39730824) <jinmiaoluo@icloud.com>
GitHub User @jopbrown (6345470) <msshane2008@gmail.com>
GitHub User @kazyshr (30496953) <kazyshr0301@gmail.com>
GitHub User @kc1212 (1093806) <kc1212@users.noreply.github.com>
GitHub User @Kropekk (13366453) <kamilkropiewnicki@gmail.com>

@@ -828,6 +838,7 @@ GitHub User @uhei (2116845) <uhei@users.noreply.github.com>
GitHub User @uropek (39370426) <uropek@gmail.com>
GitHub User @utkarsh-extc (53217283) <utkarsh.extc@gmail.com>
GitHub User @witchard (4994659) <witchard@hotmail.co.uk>
GitHub User @yah01 (12216890) <kagaminehuan@gmail.com>
GitHub User @yuanhh (1298735) <yuan415030@gmail.com>
GitHub User @zikaeroh (48577114) <zikaeroh@gmail.com>
GitHub User @ZZMarquis (7624583) <zhonglingjian3821@163.com>

@@ -897,6 +908,7 @@ Heschi Kreinick <heschi@google.com>
Hidetatsu Yaginuma <ygnmhdtt@gmail.com>
Hilko Bengen <bengen@hilluzination.de>
Hiroaki Nakamura <hnakamur@gmail.com>
Hiromichi Ema <ema.hiro@gmail.com>
Hironao OTSUBO <motemen@gmail.com>
Hiroshi Ioka <hirochachacha@gmail.com>
Hitoshi Mitake <mitake.hitoshi@gmail.com>

@@ -973,6 +985,7 @@ Jakob Borg <jakob@nym.se>
Jakob Weisblat <jakobw@mit.edu>
Jakub Čajka <jcajka@redhat.com>
Jakub Ryszard Czarnowicz <j.czarnowicz@gmail.com>
Jamal Carvalho <jamal.a.carvalho@gmail.com>
James Aguilar <jaguilar@google.com>
James Bardin <j.bardin@gmail.com>
James Chacon <jchacon@google.com>

@@ -1020,6 +1033,7 @@ Jannis Andrija Schnitzer <jannis@schnitzer.im>
Jared Culp <jculp14@gmail.com>
Jaroslavas Počepko <jp@webmaster.ms>
Jason A. Donenfeld <Jason@zx2c4.com>
Jason Baker <jason-baker@users.noreply.github.com>
Jason Barnett <jason.w.barnett@gmail.com>
Jason Buberel <jbuberel@google.com>
Jason Chu <jasonchujc@gmail.com>

@@ -1213,6 +1227,7 @@ Kamil Chmielewski <kamil.chm@gmail.com>
Kamil Kisiel <kamil@kamilkisiel.net> <kamil.kisiel@gmail.com>
Kamil Rytarowski <krytarowski@users.noreply.github.com>
Kang Hu <hukangustc@gmail.com>
Kanta Ebihara <kantaebihara@gmail.com>
Karan Dhiman <karandhi@ca.ibm.com>
Karel Pazdera <pazderak@gmail.com>
Karoly Negyesi <chx1975@gmail.com>

@@ -1252,6 +1267,7 @@ Ketan Parmar <ketanbparmar@gmail.com>
Kevan Swanberg <kevswanberg@gmail.com>
Kevin Ballard <kevin@sb.org>
Kevin Burke <kev@inburke.com>
Kévin Dunglas <dunglas@gmail.com>
Kevin Gillette <extemporalgenome@gmail.com>
Kevin Kirsche <kev.kirsche@gmail.com>
Kevin Klues <klueska@gmail.com> <klueska@google.com>

@@ -1265,6 +1281,7 @@ Kim Yongbin <kybinz@gmail.com>
Kir Kolyshkin <kolyshkin@gmail.com>
Kirill Motkov <Motkov.Kirill@gmail.com>
Kirill Smelkov <kirr@nexedi.com>
Kirill Tatchihin <kirabsuir@gmail.com>
Kirk Han <kirk91.han@gmail.com>
Kirklin McDonald <kirklin.mcdonald@gmail.com>
Klaus Post <klauspost@gmail.com>

@@ -1378,6 +1395,7 @@ Marcelo E. Magallon <marcelo.magallon@gmail.com>
Marco Hennings <marco.hennings@freiheit.com>
Marcus Willock <crazcalm@gmail.com>
Marga Manterola <marga@google.com>
Mariano Cano <mariano@smallstep.com>
Marin Bašić <marin.basic02@gmail.com>
Mario Arranz <marioarranzr@gmail.com>
Marius A. Eriksen <marius@grailbio.com>

@@ -1949,6 +1967,7 @@ Sergey 'SnakE' Gromov <snake.scaly@gmail.com>
Sergey Arseev <sergey.arseev@intel.com>
Sergey Dobrodey <sergey.dobrodey@synesis.ru>
Sergey Frolov <sfrolov@google.com>
Sergey Ivanov <ser1325@gmail.com>
Sergey Lukjanov <me@slukjanov.name>
Sergey Mishin <sergeymishine@gmail.com>
Sergey Mudrik <sergey.mudrik@gmail.com>

@@ -2090,6 +2109,7 @@ Thomas Desrosiers <thomasdesr@gmail.com>
Thomas Habets <habets@google.com>
Thomas Kappler <tkappler@gmail.com>
Thomas Meson <zllak@hycik.org>
Thomas Symborski <thomas.symborski@gmail.com>
Thomas Wanielista <tomwans@gmail.com>
Thorben Krueger <thorben.krueger@gmail.com>
Thordur Bjornsson <thorduri@secnorth.net>

@@ -2130,6 +2150,7 @@ Tom Thorogood <me+google@tomthorogood.co.uk>
Tom Wilkie <tom@weave.works>
Tomas Dabasinskas <tomas@dabasinskas.net>
Tommy Schaefer <tommy.schaefer@teecom.com>
Tomohiro Kusumoto <zabio1192@gmail.com>
Tomoya Ishizaki <zaq1tomo@gmail.com>
Tonis Tiigi <tonistiigi@gmail.com>
Tony Reix <tony.reix@bull.net>

@@ -2240,6 +2261,7 @@ Xi Ruoyao <xry23333@gmail.com>
Xia Bin <snyh@snyh.org>
Xiangdong Ji <xiangdong.ji@arm.com>
Xing Xing <mikespook@gmail.com>
Xingqang Bai <bxq2011hust@qq.com>
Xu Fei <badgangkiller@gmail.com>
Xudong Zhang <felixmelon@gmail.com>
Xudong Zheng <7pkvm5aw@slicealias.com>
@@ -395,3 +395,14 @@ func (w *Watchdog) Start() {
The cost of race detection varies by program, but for a typical program, memory
usage may increase by 5-10x and execution time by 2-20x.
</p>

<p>
The race detector currently allocates an extra 8 bytes per <code>defer</code>
and <code>recover</code> statement. Those extra allocations <a
href="https://golang.org/issue/26813">are not recovered until the goroutine
exits</a>. This means that if you have a long-running goroutine that is
periodically issuing <code>defer</code> and <code>recover</code> calls,
the program memory usage may grow without bound. These memory allocations
will not show up in the output of <code>runtime.ReadMemStats</code> or
<code>runtime/pprof</code>.
</p>
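To make the warning concrete, here is a minimal Go sketch of the pattern being described; the names are illustrative and not taken from the release notes. Built with -race, a goroutine like this keeps accumulating the per-defer/recover bookkeeping until it exits:

```go
package main

import "time"

// worker runs for the lifetime of the program and calls processOne in a
// loop. Each iteration executes a defer and a recover; when the binary is
// built with -race, the detector's per-defer/recover allocations are only
// released when this goroutine exits, so memory use can grow over time.
func worker(jobs <-chan func()) {
	for job := range jobs {
		processOne(job)
	}
}

func processOne(job func()) {
	defer func() {
		if r := recover(); r != nil {
			// swallow panics from individual jobs
		}
	}()
	job()
}

func main() {
	jobs := make(chan func())
	go worker(jobs)
	for {
		jobs <- func() {}
		time.Sleep(10 * time.Millisecond)
	}
}
```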
@@ -34,6 +34,7 @@ We encourage all Go users to subscribe to
<p>A <a href="/doc/devel/release.html">summary</a> of the changes between Go releases. Notes for the major releases:</p>

<ul>
<li><a href="/doc/go1.14">Go 1.14</a> <small>(February 2020)</small></li>
<li><a href="/doc/go1.13">Go 1.13</a> <small>(September 2019)</small></li>
<li><a href="/doc/go1.12">Go 1.12</a> <small>(February 2019)</small></li>
<li><a href="/doc/go1.11">Go 1.11</a> <small>(August 2018)</small></li>
@@ -14,24 +14,24 @@ Do not send CLs removing the interior tags from such phrases.
main ul li { margin: 0.5em 0; }
</style>

<h2 id="introduction">DRAFT RELEASE NOTES — Introduction to Go 1.14</h2>
<h2 id="introduction">Introduction to Go 1.14</h2>

<p>
<strong>
Go 1.14 is not yet released. These are work-in-progress
release notes. Go 1.14 is expected to be released in February 2020.
</strong>
The latest Go release, version 1.14, arrives six months after <a href="go1.13">Go 1.13</a>.
Most of its changes are in the implementation of the toolchain, runtime, and libraries.
As always, the release maintains the Go 1 <a href="/doc/go1compat.html">promise of compatibility</a>.
We expect almost all Go programs to continue to compile and run as before.
</p>

<p>
Module support in the <code>go</code> command is now ready for production use,
and we encourage all users to migrate to Go modules for dependency management.
If you are unable to migrate due to a problem in the Go toolchain,
please ensure that the problem has an
<a href="https://golang.org/issue?q=is%3Aissue+is%3Aopen+label%3Amodules">open issue</a>
filed. (If the issue is not on the <code>Go1.15</code> milestone, please let us
know why it prevents you from migrating so that we can prioritize it
appropriately.)
Module support in the <code>go</code> command is now ready for production use,
and we encourage all users to <a href="https://blog.golang.org/migrating-to-go-modules">migrate to Go
modules for dependency management</a>. If you are unable to migrate due to a problem in the Go
toolchain, please ensure that the problem has an
<a href="https://golang.org/issue?q=is%3Aissue+is%3Aopen+label%3Amodules">open issue</a>
filed. (If the issue is not on the <code>Go1.15</code> milestone, please let us
know why it prevents you from migrating so that we can prioritize it
appropriately.)
</p>

<h2 id="language">Changes to the language</h2>

@@ -77,6 +77,18 @@ appropriately.)
(Data Execution Prevention)</a> enabled.
</p>

<p><!-- CL 202439 -->
On Windows, creating a file
via <a href="/pkg/os#CreateFile"><code>os.OpenFile</code></a> with
the <a href="/pkg/os/#O_CREATE"><code>os.O_CREATE</code></a> flag, or
via <a href="/pkg/syscall#Open"><code>syscall.Open</code></a> with
the <a href="/pkg/syscall#O_CREAT"><code>syscall.O_CREAT</code></a>
flag, will now create the file as read-only if the
bit <code>0o200</code> (owner write permission) is not set in the
permission argument. This makes the behavior on Windows more like
that on Unix systems.
</p>
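A small, hedged illustration of the Windows change above; the file name is made up, and the exact error text differs by platform:

```go
package main

import (
	"fmt"
	"log"
	"os"
)

func main() {
	// 0o400 omits the owner-write bit (0o200), so on Go 1.14+ the file is
	// created read-only on Windows as well, matching Unix behavior.
	f, err := os.OpenFile("readonly.txt", os.O_CREATE|os.O_WRONLY, 0o400)
	if err != nil {
		log.Fatal(err)
	}
	f.Close()

	// Reopening the file for writing should now fail with a permission error.
	_, err = os.OpenFile("readonly.txt", os.O_WRONLY, 0)
	fmt.Println("reopen for write:", err)
}
```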
<h3 id="wasm">WebAssembly</h3>

<p><!-- CL 203600 -->
@@ -108,7 +120,7 @@ appropriately.)
<h3 id="freebsd">FreeBSD</h3>

<p><!-- CL 199919 -->
Go now supports the 64-bit ARM architecture on FreeBSD (the
Go now supports the 64-bit ARM architecture on FreeBSD 12.0 or later (the
<code>freebsd/arm64</code> port).
</p>

@@ -393,7 +405,7 @@ appropriately.)
<p><!-- CL 202117 -->
This release includes experimental support for compiler-inserted
coverage instrumentation for fuzzing.
See <a href="https://golang.org/issue/14565">the issue</a> for more
See <a href="https://golang.org/issue/14565">issue 14565</a> for more
details.
This API may change in future releases.
</p>
@@ -582,6 +594,13 @@ appropriately.)
was never a documented feature. For proper escaping, see <a
href="/pkg/encoding/json/#HTMLEscape"><code>HTMLEscape</code></a>.
</p>

<p><!-- CL 195045 -->
<a href="/pkg/encoding/json/#Number"><code>Number</code></a> no longer
accepts invalid numbers, to follow the documented behavior more closely.
If a program needs to accept invalid numbers like the empty string,
consider wrapping the type with <a href="/pkg/encoding/json/#Unmarshaler"><code>Unmarshaler</code></a>.
</p>
</dd>
</dl><!-- encoding/json -->
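A sketch of the suggested workaround, wrapping the value in a type with a custom Unmarshaler; the LenientNumber type below is invented for illustration and simply maps the empty string to zero:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// LenientNumber decodes like json.Number but tolerates the empty string,
// which json.Number itself rejects as of Go 1.14.
type LenientNumber json.Number

func (n *LenientNumber) UnmarshalJSON(data []byte) error {
	if string(data) == `""` {
		*n = "0"
		return nil
	}
	var num json.Number
	if err := json.Unmarshal(data, &num); err != nil {
		return err
	}
	*n = LenientNumber(num)
	return nil
}

func main() {
	var v struct {
		Count LenientNumber `json:"count"`
	}
	if err := json.Unmarshal([]byte(`{"count": ""}`), &v); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("count =", v.Count) // count = 0
}
```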
@@ -752,6 +771,19 @@ appropriately.)
</dd>
</dl><!-- net/textproto -->

<dl id="net/url"><dt><a href="/pkg/net/url/">net/url</a></dt>
<dd>
<p><!-- CL 185117 -->
When parsing of a URL fails
(for example by <a href="/pkg/net/url/#Parse"><code>Parse</code></a>
or <a href="/pkg/net/url/#ParseRequestURI"><code>ParseRequestURI</code></a>),
the resulting <a href="/pkg/net/url/#Error.Error"><code>Error</code></a> message
will now quote the unparsable URL.
This provides clearer structure and consistency with other parsing errors.
</p>
</dd>
</dl><!-- net/url -->
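For example, a failing parse now includes the offending URL in quotes (the output comment below is approximate, not copied from the release notes):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	_, err := url.Parse("%") // "%" is an invalid URL escape
	// With Go 1.14 the failing URL is quoted in the message, roughly:
	//   parse "%": invalid URL escape "%"
	fmt.Println(err)
}
```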
<dl id="os/signal"><dt><a href="/pkg/os/signal/">os/signal</a></dt>
<dd>
<p><!-- CL 187739 -->
@@ -17,7 +17,7 @@
<p>
<a href="/dl/" target="_blank">Official binary
distributions</a> are available for the FreeBSD (release 10-STABLE and above),
Linux, macOS (10.10 and above), and Windows operating systems and
Linux, macOS (10.11 and above), and Windows operating systems and
the 32-bit (<code>386</code>) and 64-bit (<code>amd64</code>) x86 processor
architectures.
</p>
@@ -49,7 +49,7 @@ If your OS or architecture is not on the list, you may be able to
<tr><td colspan="3"><hr></td></tr>
<tr><td>FreeBSD 10.3 or later</td> <td>amd64, 386</td> <td>Debian GNU/kFreeBSD not supported</td></tr>
<tr valign='top'><td>Linux 2.6.23 or later with glibc</td> <td>amd64, 386, arm, arm64,<br>s390x, ppc64le</td> <td>CentOS/RHEL 5.x not supported.<br>Install from source for other libc.</td></tr>
<tr><td>macOS 10.10 or later</td> <td>amd64</td> <td>use the clang or gcc<sup>†</sup> that comes with Xcode<sup>‡</sup> for <code>cgo</code> support</td></tr>
<tr><td>macOS 10.11 or later</td> <td>amd64</td> <td>use the clang or gcc<sup>†</sup> that comes with Xcode<sup>‡</sup> for <code>cgo</code> support</td></tr>
<tr valign='top'><td>Windows 7, Server 2008R2 or later</td> <td>amd64, 386</td> <td>use MinGW (<code>386</code>) or MinGW-W64 (<code>amd64</code>) gcc<sup>†</sup>.<br>No need for cygwin or msys.</td></tr>
</table>
misc/cgo/testgodefs/testdata/issue37479.go (vendored, new file, 33 lines)

@@ -0,0 +1,33 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// +build ignore

package main

/*
typedef struct A A;

typedef struct {
	struct A *next;
	struct A **prev;
} N;

struct A
{
	N n;
};

typedef struct B
{
	A* a;
} B;
*/
import "C"

type N C.N

type A C.A

type B C.B
misc/cgo/testgodefs/testdata/issue37621.go (vendored, new file, 23 lines)

@@ -0,0 +1,23 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// +build ignore

package main

/*
struct tt {
	long long a;
	long long b;
};

struct s {
	struct tt ts[3];
};
*/
import "C"

type TT C.struct_tt

type S C.struct_s
misc/cgo/testgodefs/testdata/main.go (vendored, 8 changed lines)

@@ -11,5 +11,13 @@ var v2 = v1.L
// Test that P, Q, and R all point to byte.
var v3 = Issue8478{P: (*byte)(nil), Q: (**byte)(nil), R: (***byte)(nil)}

// Test that N, A and B are fully defined
var v4 = N{}
var v5 = A{}
var v6 = B{}

// Test that S is fully defined
var v7 = S{}

func main() {
}
@@ -21,6 +21,8 @@ var filePrefixes = []string{
	"anonunion",
	"issue8478",
	"fieldtypedef",
	"issue37479",
	"issue37621",
}

func TestGoDefs(t *testing.T) {
@@ -1,17 +1,41 @@
This directory contains helper file for trace viewer (`go tool trace`).
## Resources for Go's trace viewer

`trace_viewer_full.html` was generated by following
[instructions](https://github.com/catapult-project/catapult/blob/master/tracing/docs/embedding-trace-viewer.md)
on revision `dc970d3e1f7b3da5a2849de70ff253acdb70148f`
of [catapult](https://github.com/catapult-project/catapult) using:
Go execution trace UI (`go tool trace`) embeds
Chrome's trace viewer (Catapult) following the
[instructions](
https://chromium.googlesource.com/catapult/+/refs/heads/master/tracing/docs/embedding-trace-viewer.md). This directory contains
the helper files to embed Chrome's trace viewer.

The current resources were generated/copied from
[`Catapult@9508452e18f130c98499cb4c4f1e1efaedee8962`](
https://chromium.googlesource.com/catapult/+/9508452e18f130c98499cb4c4f1e1efaedee8962).

### Updating `trace_viewer_full.html`

The file was generated by catapult's `vulcanize_trace_viewer` command.
```
catapult$ ./tracing/bin/vulcanize_trace_viewer --config=full
catapult$ cp tracing/bin/trace_viewer_full.html $GOROOT/misc/trace/trace_viewer_lean.html
$ git clone https://chromium.googlesource.com/catapult
$ cd catapult
$ ./tracing/bin/vulcanize_trace_viewer --config=full
$ cp tracing/bin/trace_viewer_full.html $GOROOT/misc/trace/trace_viewer_full.html
```

We are supposed to use --config=lean (produces smaller html),
but it is broken at the moment:
https://github.com/catapult-project/catapult/issues/2247

### Updating `webcomponents.min.js`

`webcomponents.min.js` is necessary to let the trace viewer page
to import the `trace_viewer_full.html`.
This is copied from the catapult repo.

```
$ cp third_party/polymer/components/webcomponentsjs/webcomponents.min.js $GOROOT/misc/trace/webcomponents.min.js
```

## Licenses

The license for trace-viewer is as follows:
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
//
@@ -40,3 +64,42 @@ The license for trace-viewer is as follows:
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

The license for webcomponents.min.js is as follows:

/**
 * @license
 * Copyright (c) 2014 The Polymer Project Authors. All rights reserved.
 * This code may only be used under the BSD style license found at http://polymer.github.io/LICENSE.txt
 * The complete set of authors may be found at http://polymer.github.io/AUTHORS.txt
 * The complete set of contributors may be found at http://polymer.github.io/CONTRIBUTORS.txt
 * Code distributed by Google as part of the polymer project is also
 * subject to an additional IP rights grant found at http://polymer.github.io/PATENTS.txt
 */
// Copyright (c) 2014 The Polymer Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
File diff suppressed because one or more lines are too long

misc/trace/webcomponents.min.js (vendored, new file, 14 lines)
File diff suppressed because one or more lines are too long
@@ -2243,7 +2243,7 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ
		// Translate to zero-length array instead.
		count = 0
	}
	sub := c.loadType(dt.Type, pos, key)
	sub := c.Type(dt.Type, pos)
	t.Align = sub.Align
	t.Go = &ast.ArrayType{
		Len: c.intExpr(count),
@@ -2388,7 +2388,7 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ
	c.ptrs[key] = append(c.ptrs[key], t)

case *dwarf.QualType:
	t1 := c.loadType(dt.Type, pos, key)
	t1 := c.Type(dt.Type, pos)
	t.Size = t1.Size
	t.Align = t1.Align
	t.Go = t1.Go
@@ -2472,7 +2472,13 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ
	}
	name := c.Ident("_Ctype_" + dt.Name)
	goIdent[name.Name] = name
	sub := c.loadType(dt.Type, pos, key)
	akey := ""
	if c.anonymousStructTypedef(dt) {
		// only load type recursively for typedefs of anonymous
		// structs, see issues 37479 and 37621.
		akey = key
	}
	sub := c.loadType(dt.Type, pos, akey)
	if c.badPointerTypedef(dt) {
		// Treat this typedef as a uintptr.
		s := *sub
@@ -2993,6 +2999,13 @@ func fieldPrefix(fld []*ast.Field) string {
	return prefix
}

// anonymousStructTypedef reports whether dt is a C typedef for an anonymous
// struct.
func (c *typeConv) anonymousStructTypedef(dt *dwarf.TypedefType) bool {
	st, ok := dt.Type.(*dwarf.StructType)
	return ok && st.StructName == ""
}

// badPointerTypedef reports whether t is a C typedef that should not be considered a pointer in Go.
// A typedef is bad if C code sometimes stores non-pointers in this type.
// TODO: Currently our best solution is to find these manually and list them as
@@ -186,6 +186,7 @@ func algtype1(t *types.Type) (AlgKind, *types.Type) {

// genhash returns a symbol which is the closure used to compute
// the hash of a value of type t.
// Note: the generated function must match runtime.typehash exactly.
func genhash(t *types.Type) *obj.LSym {
	switch algtype(t) {
	default:
@@ -2506,13 +2506,21 @@
// The "go get" command remains permitted to update go.mod even with -mod=readonly,
// and the "go mod" commands do not take the -mod flag (or any other build flags).
//
// If invoked with -mod=vendor, the go command assumes that the vendor
// directory holds the correct copies of dependencies and ignores
// the dependency descriptions in go.mod.
// If invoked with -mod=vendor, the go command loads packages from the main
// module's vendor directory instead of downloading modules to and loading packages
// from the module cache. The go command assumes the vendor directory holds
// correct copies of dependencies, and it does not compute the set of required
// module versions from go.mod files. However, the go command does check that
// vendor/modules.txt (generated by 'go mod vendor') contains metadata consistent
// with go.mod.
//
// If invoked with -mod=mod, the go command loads modules from the module cache
// even if there is a vendor directory present.
//
// If the go command is not invoked with a -mod flag and the vendor directory
// is present and the "go" version in go.mod is 1.14 or higher, the go command
// will act as if it were invoked with -mod=vendor.
//
// Pseudo-versions
//
// The go.mod file and the go command more generally use semantic versions as
@@ -2710,22 +2718,28 @@
//
// Modules and vendoring
//
// When using modules, the go command completely ignores vendor directories.
// When using modules, the go command typically satisfies dependencies by
// downloading modules from their sources and using those downloaded copies
// (after verification, as described in the previous section). Vendoring may
// be used to allow interoperation with older versions of Go, or to ensure
// that all files used for a build are stored together in a single file tree.
//
// By default, the go command satisfies dependencies by downloading modules
// from their sources and using those downloaded copies (after verification,
// as described in the previous section). To allow interoperation with older
// versions of Go, or to ensure that all files used for a build are stored
// together in a single file tree, 'go mod vendor' creates a directory named
// vendor in the root directory of the main module and stores there all the
// packages from dependency modules that are needed to support builds and
// tests of packages in the main module.
// The command 'go mod vendor' constructs a directory named vendor in the main
// module's root directory that contains copies of all packages needed to support
// builds and tests of packages in the main module. 'go mod vendor' also
// creates the file vendor/modules.txt that contains metadata about vendored
// packages and module versions. This file should be kept consistent with go.mod:
// when vendoring is used, 'go mod vendor' should be run after go.mod is updated.
//
// To build using the main module's top-level vendor directory to satisfy
// dependencies (disabling use of the usual network sources and local
// caches), use 'go build -mod=vendor'. Note that only the main module's
// top-level vendor directory is used; vendor directories in other locations
// are still ignored.
// If the vendor directory is present in the main module's root directory, it will
// be used automatically if the "go" version in the main module's go.mod file is
// 1.14 or higher. Build commands like 'go build' and 'go test' will load packages
// from the vendor directory instead of accessing the network or the local module
// cache. To explicitly enable vendoring, invoke the go command with the flag
// -mod=vendor. To disable vendoring, use the flag -mod=mod.
//
// Unlike vendoring in GOPATH, the go command ignores vendor directories in
// locations other than the main module's root directory.
//
//
// Module authentication using go.sum
@@ -22,6 +22,7 @@ import (
	"cmd/go/internal/cfg"
	"cmd/go/internal/load"
	"cmd/go/internal/modload"
	"cmd/go/internal/str"
	"cmd/go/internal/work"
)

@@ -438,7 +439,7 @@ func (g *Generator) exec(words []string) {
	cmd.Stderr = os.Stderr
	// Run the command in the package directory.
	cmd.Dir = g.dir
	cmd.Env = append(cfg.OrigEnv, g.env...)
	cmd.Env = str.StringList(cfg.OrigEnv, g.env)
	err := cmd.Run()
	if err != nil {
		g.errorf("running %q: %s", words[0], err)
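The switch from append(cfg.OrigEnv, ...) to str.StringList(cfg.OrigEnv, ...) above (and the related change in the test runner further down) avoids appending into a shared slice's backing array. A standalone sketch of the hazard, with made-up variable names:

```go
package main

import "fmt"

func main() {
	// base plays the role of a shared slice such as cfg.OrigEnv.
	base := make([]string, 0, 4)
	base = append(base, "A=1", "B=2")

	// Two callers each "extend" the shared slice for their own command.
	env1 := append(base, "C=from-first")
	env2 := append(base, "C=from-second") // reuses the same backing array!

	fmt.Println(env1[2]) // prints "C=from-second": the first value was clobbered
	fmt.Println(env2[2])

	// Copying into a fresh slice (what str.StringList does) avoids the aliasing.
	safe := append(append([]string(nil), base...), "C=safe")
	fmt.Println(safe[2])
}
```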
@@ -6,6 +6,7 @@ package modcmd

import (
	"bytes"
	"errors"
	"fmt"
	"io/ioutil"
	"os"
@@ -67,12 +68,10 @@ func verifyMod(mod module.Version) bool {
		_, zipErr = os.Stat(zip)
	}
	dir, dirErr := modfetch.DownloadDir(mod)
	if dirErr == nil {
		_, dirErr = os.Stat(dir)
	}
	data, err := ioutil.ReadFile(zip + "hash")
	if err != nil {
		if zipErr != nil && os.IsNotExist(zipErr) && dirErr != nil && os.IsNotExist(dirErr) {
		if zipErr != nil && errors.Is(zipErr, os.ErrNotExist) &&
			dirErr != nil && errors.Is(dirErr, os.ErrNotExist) {
			// Nothing downloaded yet. Nothing to verify.
			return true
		}
@@ -81,7 +80,7 @@ func verifyMod(mod module.Version) bool {
	}
	h := string(bytes.TrimSpace(data))

	if zipErr != nil && os.IsNotExist(zipErr) {
	if zipErr != nil && errors.Is(zipErr, os.ErrNotExist) {
		// ok
	} else {
		hZ, err := dirhash.HashZip(zip, dirhash.DefaultHash)
@@ -93,7 +92,7 @@ func verifyMod(mod module.Version) bool {
			ok = false
		}
	}
	if dirErr != nil && os.IsNotExist(dirErr) {
	if dirErr != nil && errors.Is(dirErr, os.ErrNotExist) {
		// ok
	} else {
		hD, err := dirhash.HashDir(dir, mod.Path+"@"+mod.Version, dirhash.DefaultHash)
@@ -7,6 +7,7 @@ package modfetch

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
@@ -56,8 +57,11 @@ func CachePath(m module.Version, suffix string) (string, error) {
	return filepath.Join(dir, encVer+"."+suffix), nil
}

// DownloadDir returns the directory to which m should be downloaded.
// Note that the directory may not yet exist.
// DownloadDir returns the directory to which m should have been downloaded.
// An error will be returned if the module path or version cannot be escaped.
// An error satisfying errors.Is(err, os.ErrNotExist) will be returned
// along with the directory if the directory does not exist or if the directory
// is not completely populated.
func DownloadDir(m module.Version) (string, error) {
	if PkgMod == "" {
		return "", fmt.Errorf("internal error: modfetch.PkgMod not set")
@@ -76,9 +80,39 @@ func DownloadDir(m module.Version) (string, error) {
	if err != nil {
		return "", err
	}
	return filepath.Join(PkgMod, enc+"@"+encVer), nil

	dir := filepath.Join(PkgMod, enc+"@"+encVer)
	if fi, err := os.Stat(dir); os.IsNotExist(err) {
		return dir, err
	} else if err != nil {
		return dir, &DownloadDirPartialError{dir, err}
	} else if !fi.IsDir() {
		return dir, &DownloadDirPartialError{dir, errors.New("not a directory")}
	}
	partialPath, err := CachePath(m, "partial")
	if err != nil {
		return dir, err
	}
	if _, err := os.Stat(partialPath); err == nil {
		return dir, &DownloadDirPartialError{dir, errors.New("not completely extracted")}
	} else if !os.IsNotExist(err) {
		return dir, err
	}
	return dir, nil
}

// DownloadDirPartialError is returned by DownloadDir if a module directory
// exists but was not completely populated.
//
// DownloadDirPartialError is equivalent to os.ErrNotExist.
type DownloadDirPartialError struct {
	Dir string
	Err error
}

func (e *DownloadDirPartialError) Error() string { return fmt.Sprintf("%s: %v", e.Dir, e.Err) }
func (e *DownloadDirPartialError) Is(err error) bool { return err == os.ErrNotExist }

// lockVersion locks a file within the module cache that guards the downloading
// and extraction of the zipfile for the given module version.
func lockVersion(mod module.Version) (unlock func(), err error) {
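The Is method above is what makes errors.Is(err, os.ErrNotExist) report true for a partially extracted module directory, while the older os.IsNotExist check would not. A self-contained sketch of the pattern, using a stand-in type rather than the real modfetch one:

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// partialError mimics DownloadDirPartialError: a directory exists but is
// incomplete, and callers should treat it as "does not exist".
type partialError struct {
	Dir string
	Err error
}

func (e *partialError) Error() string        { return fmt.Sprintf("%s: %v", e.Dir, e.Err) }
func (e *partialError) Is(target error) bool { return target == os.ErrNotExist }

func main() {
	var err error = &partialError{Dir: "/tmp/mod/foo@v1.0.0", Err: errors.New("not completely extracted")}

	// errors.Is consults the custom Is method, so this prints true even though
	// err is not literally os.ErrNotExist and wraps nothing.
	fmt.Println(errors.Is(err, os.ErrNotExist))

	// os.IsNotExist does not know about Is methods, so it prints false,
	// which is why the surrounding code switched to errors.Is.
	fmt.Println(os.IsNotExist(err))
}
```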
@@ -22,6 +22,7 @@ import (
	"cmd/go/internal/lockedfile"
	"cmd/go/internal/par"
	"cmd/go/internal/renameio"
	"cmd/go/internal/robustio"

	"golang.org/x/mod/module"
	"golang.org/x/mod/sumdb/dirhash"
@@ -45,24 +46,27 @@ func Download(mod module.Version) (dir string, err error) {
		err error
	}
	c := downloadCache.Do(mod, func() interface{} {
		dir, err := DownloadDir(mod)
		dir, err := download(mod)
		if err != nil {
			return cached{"", err}
		}
		if err := download(mod, dir); err != nil {
			return cached{"", err}
		}
		checkMod(mod)
		return cached{dir, nil}
	}).(cached)
	return c.dir, c.err
}

func download(mod module.Version, dir string) (err error) {
	// If the directory exists, the module has already been extracted.
	fi, err := os.Stat(dir)
	if err == nil && fi.IsDir() {
		return nil
func download(mod module.Version) (dir string, err error) {
	// If the directory exists, and no .partial file exists,
	// the module has already been completely extracted.
	// .partial files may be created when future versions of cmd/go
	// extract module zip directories in place instead of extracting
	// to a random temporary directory and renaming.
	dir, err = DownloadDir(mod)
	if err == nil {
		return dir, nil
	} else if dir == "" || !errors.Is(err, os.ErrNotExist) {
		return "", err
	}

	// To avoid cluttering the cache with extraneous files,
@@ -70,22 +74,24 @@ func download(mod module.Version, dir string) (err error) {
	// Invoke DownloadZip before locking the file.
	zipfile, err := DownloadZip(mod)
	if err != nil {
		return err
		return "", err
	}

	unlock, err := lockVersion(mod)
	if err != nil {
		return err
		return "", err
	}
	defer unlock()

	// Check whether the directory was populated while we were waiting on the lock.
	fi, err = os.Stat(dir)
	if err == nil && fi.IsDir() {
		return nil
	_, dirErr := DownloadDir(mod)
	if dirErr == nil {
		return dir, nil
	}
	_, dirExists := dirErr.(*DownloadDirPartialError)

	// Clean up any remaining temporary directories from previous runs.
	// Clean up any remaining temporary directories from previous runs, as well
	// as partially extracted diectories created by future versions of cmd/go.
	// This is only safe to do because the lock file ensures that their writers
	// are no longer active.
	parentDir := filepath.Dir(dir)
@@ -95,6 +101,19 @@ func download(mod module.Version, dir string) (err error) {
			RemoveAll(path) // best effort
		}
	}
	if dirExists {
		if err := RemoveAll(dir); err != nil {
			return "", err
		}
	}

	partialPath, err := CachePath(mod, "partial")
	if err != nil {
		return "", err
	}
	if err := os.Remove(partialPath); err != nil && !os.IsNotExist(err) {
		return "", err
	}

	// Extract the zip file to a temporary directory, then rename it to the
	// final path. That way, we can use the existence of the source directory to
@@ -102,11 +121,11 @@ func download(mod module.Version, dir string) (err error) {
	// the entire directory (e.g. as an attempt to prune out file corruption)
	// the module cache will still be left in a recoverable state.
	if err := os.MkdirAll(parentDir, 0777); err != nil {
		return err
		return "", err
	}
	tmpDir, err := ioutil.TempDir(parentDir, tmpPrefix)
	if err != nil {
		return err
		return "", err
	}
	defer func() {
		if err != nil {
@@ -116,11 +135,11 @@ func download(mod module.Version, dir string) (err error) {

	if err := modzip.Unzip(tmpDir, mod, zipfile); err != nil {
		fmt.Fprintf(os.Stderr, "-> %s\n", err)
		return err
		return "", err
	}

	if err := os.Rename(tmpDir, dir); err != nil {
		return err
	if err := robustio.Rename(tmpDir, dir); err != nil {
		return "", err
	}

	if !cfg.ModCacheRW {
@@ -128,7 +147,7 @@ func download(mod module.Version, dir string) (err error) {
		// os.Rename was observed to fail for read-only directories on macOS.
		makeDirsReadOnly(dir)
	}
	return nil
	return dir, nil
}

var downloadZipCache par.Cache
@@ -148,9 +148,7 @@ func moduleInfo(m module.Version, fromBuildList bool) *modinfo.ModulePublic {
		}
		dir, err := modfetch.DownloadDir(mod)
		if err == nil {
			if info, err := os.Stat(dir); err == nil && info.IsDir() {
				m.Dir = dir
			}
			m.Dir = dir
		}
	}
}
@@ -176,13 +176,21 @@ not need updates, such as in a continuous integration and testing system.
The "go get" command remains permitted to update go.mod even with -mod=readonly,
and the "go mod" commands do not take the -mod flag (or any other build flags).

If invoked with -mod=vendor, the go command assumes that the vendor
directory holds the correct copies of dependencies and ignores
the dependency descriptions in go.mod.
If invoked with -mod=vendor, the go command loads packages from the main
module's vendor directory instead of downloading modules to and loading packages
from the module cache. The go command assumes the vendor directory holds
correct copies of dependencies, and it does not compute the set of required
module versions from go.mod files. However, the go command does check that
vendor/modules.txt (generated by 'go mod vendor') contains metadata consistent
with go.mod.

If invoked with -mod=mod, the go command loads modules from the module cache
even if there is a vendor directory present.

If the go command is not invoked with a -mod flag and the vendor directory
is present and the "go" version in go.mod is 1.14 or higher, the go command
will act as if it were invoked with -mod=vendor.

Pseudo-versions

The go.mod file and the go command more generally use semantic versions as
@@ -380,22 +388,28 @@ the format of the cached downloaded packages.

Modules and vendoring

When using modules, the go command completely ignores vendor directories.
When using modules, the go command typically satisfies dependencies by
downloading modules from their sources and using those downloaded copies
(after verification, as described in the previous section). Vendoring may
be used to allow interoperation with older versions of Go, or to ensure
that all files used for a build are stored together in a single file tree.

By default, the go command satisfies dependencies by downloading modules
from their sources and using those downloaded copies (after verification,
as described in the previous section). To allow interoperation with older
versions of Go, or to ensure that all files used for a build are stored
together in a single file tree, 'go mod vendor' creates a directory named
vendor in the root directory of the main module and stores there all the
packages from dependency modules that are needed to support builds and
tests of packages in the main module.
The command 'go mod vendor' constructs a directory named vendor in the main
module's root directory that contains copies of all packages needed to support
builds and tests of packages in the main module. 'go mod vendor' also
creates the file vendor/modules.txt that contains metadata about vendored
packages and module versions. This file should be kept consistent with go.mod:
when vendoring is used, 'go mod vendor' should be run after go.mod is updated.

To build using the main module's top-level vendor directory to satisfy
dependencies (disabling use of the usual network sources and local
caches), use 'go build -mod=vendor'. Note that only the main module's
top-level vendor directory is used; vendor directories in other locations
are still ignored.
If the vendor directory is present in the main module's root directory, it will
be used automatically if the "go" version in the main module's go.mod file is
1.14 or higher. Build commands like 'go build' and 'go test' will load packages
from the vendor directory instead of accessing the network or the local module
cache. To explicitly enable vendoring, invoke the go command with the flag
-mod=vendor. To disable vendoring, use the flag -mod=mod.

Unlike vendoring in GOPATH, the go command ignores vendor directories in
locations other than the main module's root directory.
`,
}
@@ -15,7 +15,7 @@ import (
	"time"
)

const arbitraryTimeout = 500 * time.Millisecond
const arbitraryTimeout = 2000 * time.Millisecond

// retry retries ephemeral errors from f up to an arbitrary timeout
// to work around filesystem flakiness on Windows and Darwin.
@@ -1142,7 +1142,7 @@ func (c *runCache) builderRunTest(b *work.Builder, a *work.Action) error {

	cmd := exec.Command(args[0], args[1:]...)
	cmd.Dir = a.Package.Dir
	cmd.Env = base.EnvForDir(cmd.Dir, cfg.OrigEnv)
	cmd.Env = base.EnvForDir(cmd.Dir, cfg.OrigEnv[:len(cfg.OrigEnv):len(cfg.OrigEnv)])
	cmd.Stdout = stdout
	cmd.Stderr = stdout
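The three-index slice expression above caps the slice's capacity at its length, so any append performed inside EnvForDir has to allocate a fresh array instead of writing into cfg.OrigEnv. A small illustration of the idiom:

```go
package main

import "fmt"

func main() {
	orig := make([]string, 2, 8) // spare capacity, like a shared environment slice
	orig[0], orig[1] = "A=1", "B=2"

	// Plain slicing keeps the extra capacity, so append reuses orig's array.
	unsafeView := orig[:len(orig)]
	_ = append(unsafeView, "C=3") // writes into orig's backing array

	// The full slice expression sets cap == len, so append must copy.
	safeView := orig[:len(orig):len(orig)]
	extended := append(safeView, "C=3")

	fmt.Println(cap(unsafeView), cap(safeView)) // 8 2
	fmt.Println(len(orig), len(extended))       // 2 3
}
```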
@@ -1224,6 +1224,14 @@ func (c *runCache) builderRunTest(b *work.Builder, a *work.Action) error {
		if len(out) == 0 {
			fmt.Fprintf(cmd.Stdout, "%s\n", err)
		}
		// NOTE(golang.org/issue/37555): test2json reports that a test passes
		// unless "FAIL" is printed at the beginning of a line. The test may not
		// actually print that if it panics, exits, or terminates abnormally,
		// so we print it here. We can't always check whether it was printed
		// because some tests need stdout to be a terminal (golang.org/issue/34791),
		// not a pipe.
		// TODO(golang.org/issue/29062): tests that exit with status 0 without
		// printing a final result should fail.
		fmt.Fprintf(cmd.Stdout, "FAIL\t%s\t%s\n", a.Package.ImportPath, t)
	}
@@ -213,6 +213,9 @@ func (b *Builder) buildActionID(a *Action) cache.ActionID {
	} else if cfg.BuildTrimpath && p.Module != nil {
		fmt.Fprintf(h, "module %s@%s\n", p.Module.Path, p.Module.Version)
	}
	if p.Module != nil {
		fmt.Fprintf(h, "go %s\n", p.Module.GoVersion)
	}
	fmt.Fprintf(h, "goos %s goarch %s\n", cfg.Goos, cfg.Goarch)
	fmt.Fprintf(h, "import %q\n", p.ImportPath)
	fmt.Fprintf(h, "omitdebug %v standard %v local %v prefix %q\n", p.Internal.OmitDebug, p.Standard, p.Internal.Local, p.Internal.LocalPrefix)
src/cmd/go/testdata/script/mod_download_partial.txt (vendored, new file, 54 lines)

@@ -0,0 +1,54 @@
# Download a module
go mod download -modcacherw rsc.io/quote
exists $GOPATH/pkg/mod/rsc.io/quote@v1.5.2/go.mod

# 'go mod verify' should fail if we delete a file.
go mod verify
rm $GOPATH/pkg/mod/rsc.io/quote@v1.5.2/go.mod
! go mod verify

# Create a .partial file to simulate an failure extracting the zip file.
cp empty $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.partial

# 'go mod verify' should not fail, since the module hasn't been completely
# ingested into the cache.
go mod verify

# 'go list' should not load packages from the directory.
# NOTE: the message "directory $dir outside available modules" is reported
# for directories not in the main module, active modules in the module cache,
# or local replacements. In this case, the directory is in the right place,
# but it's incomplete, so 'go list' acts as if it's not an active module.
! go list $GOPATH/pkg/mod/rsc.io/quote@v1.5.2
stderr 'outside available modules'

# 'go list -m' should not print the directory.
go list -m -f '{{.Dir}}' rsc.io/quote
! stdout .

# 'go mod download' should re-extract the module and remove the .partial file.
go mod download -modcacherw rsc.io/quote
! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.partial
exists $GOPATH/pkg/mod/rsc.io/quote@v1.5.2/go.mod

# 'go list' should succeed.
go list $GOPATH/pkg/mod/rsc.io/quote@v1.5.2
stdout '^rsc.io/quote$'

# 'go list -m' should print the directory.
go list -m -f '{{.Dir}}' rsc.io/quote
stdout 'pkg[/\\]mod[/\\]rsc.io[/\\]quote@v1.5.2'

# go mod verify should fail if we delete a file.
go mod verify
rm $GOPATH/pkg/mod/rsc.io/quote@v1.5.2/go.mod
! go mod verify

-- go.mod --
module m

go 1.14

require rsc.io/quote v1.5.2

-- empty --
src/cmd/go/testdata/script/mod_edit_go.txt (vendored, 7 changed lines)

@@ -7,6 +7,13 @@ go mod edit -go=1.9
grep 'go 1.9' go.mod
go build

# Reverting the version should force a rebuild and error instead of using
# the cached 1.9 build. (https://golang.org/issue/37804)
go mod edit -go=1.8
! go build
stderr 'type aliases only supported as of'

-- go.mod --
module m
go 1.8
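For context, the script depends on a source file (not shown in this excerpt) that uses a type alias, which the compiler rejects once go.mod says go 1.8. A hypothetical example of such a file:

```go
// alias.go: illustrative only; the actual file used by the test script is
// not shown in this diff excerpt.
package main

// Type aliases were added in Go 1.9, so with `go 1.8` in go.mod the compiler
// reports an error like "type aliases only supported as of ...".
type MyString = string

func main() {
	var s MyString = "hello"
	_ = s
}
```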
src/cmd/go/testdata/script/test_json_panic_exit.txt (vendored, new file, 69 lines)

@@ -0,0 +1,69 @@
# Verifies golang.org/issue/37555.

[short] skip

# 'go test -json' should say a test passes if it says it passes.
go test -json ./pass
stdout '"Action":"pass".*\n\z'
! stdout '"Test":.*\n\z'

# 'go test -json' should say a test passes if it exits 0 and prints nothing.
# TODO(golang.org/issue/29062): this should fail in the future.
go test -json ./exit0main
stdout '"Action":"pass".*\n\z'
! stdout '"Test":.*\n\z'

# 'go test -json' should say a test fails if it exits 1 and prints nothing.
! go test -json ./exit1main
stdout '"Action":"fail".*\n\z'
! stdout '"Test":.*\n\z'

# 'go test -json' should say a test fails if it panics.
! go test -json ./panic
stdout '"Action":"fail".*\n\z'
! stdout '"Test":.*\n\z'

-- go.mod --
module example.com/test

go 1.14

-- pass/pass_test.go --
package pass_test

import "testing"

func TestPass(t *testing.T) {}

-- exit0main/exit0main_test.go --
package exit0_test

import (
	"os"
	"testing"
)

func TestMain(m *testing.M) {
	os.Exit(0)
}

-- exit1main/exit1main_test.go --
package exit1_test

import (
	"os"
	"testing"
)

func TestMain(m *testing.M) {
	os.Exit(1)
}

-- panic/panic_test.go --
package panic_test

import "testing"

func TestPanic(t *testing.T) {
	panic("oh no")
}
src/cmd/go/testdata/script/test_main_panic.txt (vendored, new file, 30 lines)

@@ -0,0 +1,30 @@
[short] skip
[!race] skip

! go test -v -race main_panic/testmain_parallel_sub_panic_test.go
! stdout 'DATA RACE'
-- main_panic/testmain_parallel_sub_panic_test.go --
package testmain_parallel_sub_panic_test

import "testing"

func setup()    { println("setup()") }
func teardown() { println("teardown()") }
func TestA(t *testing.T) {
	t.Run("1", func(t *testing.T) {
		t.Run("1", func(t *testing.T) {
			t.Parallel()
			panic("A/1/1 panics")
		})
		t.Run("2", func(t *testing.T) {
			t.Parallel()
			println("A/1/2 is ok")
		})
	})
}

func TestMain(m *testing.M) {
	setup()
	defer teardown()
	m.Run()
}
@@ -128,9 +128,16 @@ func (c *converter) Write(b []byte) (int, error) {
}

var (
	// printed by test on successful run.
	bigPass = []byte("PASS\n")

	// printed by test after a normal test failure.
	bigFail = []byte("FAIL\n")

	// printed by 'go test' along with an error if the test binary terminates
	// with an error.
	bigFailErrorPrefix = []byte("FAIL\t")

	updates = [][]byte{
		[]byte("=== RUN "),
		[]byte("=== PAUSE "),
@@ -155,7 +162,7 @@
// before or after emitting other events.
func (c *converter) handleInputLine(line []byte) {
	// Final PASS or FAIL.
	if bytes.Equal(line, bigPass) || bytes.Equal(line, bigFail) {
	if bytes.Equal(line, bigPass) || bytes.Equal(line, bigFail) || bytes.HasPrefix(line, bigFailErrorPrefix) {
		c.flushReport(0)
		c.output.write(line)
		if bytes.Equal(line, bigPass) {
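A tiny sketch of what the added prefix check covers: a final line such as "FAIL\tcommand-line-arguments\t0.042s", which 'go test' prints when the test binary dies abnormally, now also counts as a terminating result. The helper below is illustrative, not the real converter code:

```go
package main

import (
	"bytes"
	"fmt"
)

var (
	bigPass            = []byte("PASS\n")
	bigFail            = []byte("FAIL\n")
	bigFailErrorPrefix = []byte("FAIL\t")
)

// isFinalResult mirrors the updated condition in handleInputLine.
func isFinalResult(line []byte) bool {
	return bytes.Equal(line, bigPass) ||
		bytes.Equal(line, bigFail) ||
		bytes.HasPrefix(line, bigFailErrorPrefix)
}

func main() {
	fmt.Println(isFinalResult([]byte("PASS\n")))                                  // true
	fmt.Println(isFinalResult([]byte("FAIL\n")))                                  // true
	fmt.Println(isFinalResult([]byte("FAIL\tcommand-line-arguments\t0.042s\n"))) // true, matched by the new prefix check
	fmt.Println(isFinalResult([]byte("--- FAIL: TestPanic (0.00s)\n")))           // false
}
```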
@@ -13,7 +13,7 @@
{"Action":"output","Test":"TestPanic","Output":"\tgo/src/testing/testing.go:909 +0xc9\n"}
{"Action":"output","Test":"TestPanic","Output":"created by testing.(*T).Run\n"}
{"Action":"output","Test":"TestPanic","Output":"\tgo/src/testing/testing.go:960 +0x350\n"}
{"Action":"output","Test":"TestPanic","Output":"FAIL\tcommand-line-arguments\t0.042s\n"}
{"Action":"fail","Test":"TestPanic"}
{"Action":"output","Output":"FAIL\tcommand-line-arguments\t0.042s\n"}
{"Action":"output","Output":"FAIL\n"}
{"Action":"fail"}
@@ -25,6 +25,7 @@ func init() {
	http.HandleFunc("/trace", httpTrace)
	http.HandleFunc("/jsontrace", httpJsonTrace)
	http.HandleFunc("/trace_viewer_html", httpTraceViewerHTML)
	http.HandleFunc("/webcomponents.min.js", webcomponentsJS)
}

// httpTrace serves either whole trace (goid==0) or trace for goid goroutine.
@@ -43,14 +44,26 @@ func httpTrace(w http.ResponseWriter, r *http.Request) {

}

// See https://github.com/catapult-project/catapult/blob/master/tracing/docs/embedding-trace-viewer.md
// This is almost verbatim copy of:
// https://github.com/catapult-project/catapult/blob/master/tracing/bin/index.html
// on revision 5f9e4c3eaa555bdef18218a89f38c768303b7b6e.
// https://chromium.googlesource.com/catapult/+/9508452e18f130c98499cb4c4f1e1efaedee8962/tracing/docs/embedding-trace-viewer.md
// This is almost verbatim copy of https://chromium-review.googlesource.com/c/catapult/+/2062938/2/tracing/bin/index.html
var templTrace = `
<html>
<head>
<link href="/trace_viewer_html" rel="import">
<script src="/webcomponents.min.js"></script>
<script>
'use strict';

function onTraceViewerImportFail() {
  document.addEventListener('DOMContentLoaded', function() {
    document.body.textContent =
    '/trace_viewer_full.html is missing. File a bug in https://golang.org/issue';
  });
}
</script>

<link rel="import" href="/trace_viewer_html"
  onerror="onTraceViewerImportFail(event)">

<style type="text/css">
  html, body {
    box-sizing: border-box;
@@ -77,10 +90,10 @@ var templTrace = `

  function load() {
    var req = new XMLHttpRequest();
    var is_binary = /[.]gz$/.test(url) || /[.]zip$/.test(url);
    var isBinary = /[.]gz$/.test(url) || /[.]zip$/.test(url);
    req.overrideMimeType('text/plain; charset=x-user-defined');
    req.open('GET', url, true);
    if (is_binary)
    if (isBinary)
      req.responseType = 'arraybuffer';

    req.onreadystatechange = function(event) {
@@ -89,7 +102,7 @@ var templTrace = `

      window.setTimeout(function() {
        if (req.status === 200)
          onResult(is_binary ? req.response : req.responseText);
          onResult(isBinary ? req.response : req.responseText);
        else
          onResultFail(req.status);
      }, 0);
@@ -136,17 +149,17 @@ var templTrace = `
    overlay.visible = true;
  }

  document.addEventListener('DOMContentLoaded', function() {
  document.addEventListener('WebComponentsReady', function() {
    var container = document.createElement('track-view-container');
    container.id = 'track_view_container';

    viewer = document.createElement('tr-ui-timeline-view');
    viewer.track_view_container = container;
    viewer.appendChild(container);
    Polymer.dom(viewer).appendChild(container);

    viewer.id = 'trace-viewer';
    viewer.globalMode = true;
    document.body.appendChild(viewer);
    Polymer.dom(document.body).appendChild(viewer);

    url = '/jsontrace?{{PARAMS}}';
    load();
@@ -165,6 +178,10 @@ func httpTraceViewerHTML(w http.ResponseWriter, r *http.Request) {
	http.ServeFile(w, r, filepath.Join(runtime.GOROOT(), "misc", "trace", "trace_viewer_full.html"))
}

func webcomponentsJS(w http.ResponseWriter, r *http.Request) {
	http.ServeFile(w, r, filepath.Join(runtime.GOROOT(), "misc", "trace", "webcomponents.min.js"))
}

// httpJsonTrace serves json trace, requested from within templTrace HTML.
func httpJsonTrace(w http.ResponseWriter, r *http.Request) {
	defer debug.FreeOSMemory()
@@ -86,7 +86,8 @@ func NewGCM(cipher Block) (AEAD, error) {
}

// NewGCMWithNonceSize returns the given 128-bit, block cipher wrapped in Galois
// Counter Mode, which accepts nonces of the given length.
// Counter Mode, which accepts nonces of the given length. The length must not
// be zero.
//
// Only use this function if you require compatibility with an existing
// cryptosystem that uses non-standard nonce lengths. All other users should use
@@ -112,6 +113,10 @@ func newGCMWithNonceAndTagSize(cipher Block, nonceSize, tagSize int) (AEAD, erro
return nil, errors.New("cipher: incorrect tag size given to GCM")
}

if nonceSize <= 0 {
return nil, errors.New("cipher: the nonce can't have zero length, or the security of the key will be immediately compromised")
}

if cipher, ok := cipher.(gcmAble); ok {
return cipher.NewGCM(nonceSize, tagSize)
}
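For context on the hunk above: newGCMWithNonceAndTagSize now rejects a zero nonce length, so cipher.NewGCMWithNonceSize(block, 0) returns an error instead of constructing an insecure AEAD. A minimal, self-contained sketch of the new behavior (the all-zero key and the sizes are illustrative only, not taken from this change):

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"fmt"
)

func main() {
	block, err := aes.NewCipher(make([]byte, 16)) // illustrative all-zero AES-128 key
	if err != nil {
		panic(err)
	}

	// Zero-length nonces are now rejected outright.
	if _, err := cipher.NewGCMWithNonceSize(block, 0); err != nil {
		fmt.Println("zero nonce size:", err)
	}

	// Non-standard but non-zero nonce sizes are still accepted, as documented.
	if _, err := cipher.NewGCMWithNonceSize(block, 16); err != nil {
		panic(err)
	}
}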
@@ -217,6 +217,13 @@ var aesGCMTests = []struct {
"2b9680b886b3efb7c6354b38c63b5373",
"e2b7e5ed5ff27fc8664148f5a628a46dcbf2015184fffb82f2651c36",
},
{
"11754cd72aec309bf52f7687212e8957",
"",
"",
"",
"250327c674aaf477aef2675748cf6971",
},
}

func TestAESGCM(t *testing.T) {
@@ -234,14 +241,22 @@ func TestAESGCM(t *testing.T) {

var aesgcm cipher.AEAD
switch {
// Handle non-standard nonce sizes
// Handle non-standard tag sizes
case tagSize != 16:
aesgcm, err = cipher.NewGCMWithTagSize(aes, tagSize)
if err != nil {
t.Fatal(err)
}

// Handle non-standard tag sizes
// Handle 0 nonce size (expect error and continue)
case len(nonce) == 0:
aesgcm, err = cipher.NewGCMWithNonceSize(aes, 0)
if err == nil {
t.Fatal("expected error for zero nonce size")
}
continue

// Handle non-standard nonce sizes
case len(nonce) != 12:
aesgcm, err = cipher.NewGCMWithNonceSize(aes, len(nonce))
if err != nil {
@@ -153,6 +153,7 @@ var pkgDeps = map[string][]string{
|
||||
"internal/syscall/unix": {"L0", "syscall"},
|
||||
"internal/syscall/windows": {"L0", "syscall", "internal/syscall/windows/sysdll", "unicode/utf16"},
|
||||
"internal/syscall/windows/registry": {"L0", "syscall", "internal/syscall/windows/sysdll", "unicode/utf16"},
|
||||
"internal/syscall/execenv": {"L0", "syscall", "internal/syscall/windows", "unicode/utf16"},
|
||||
"time": {
|
||||
// "L0" without the "io" package:
|
||||
"errors",
|
||||
@@ -170,10 +171,10 @@ var pkgDeps = map[string][]string{
|
||||
"internal/cfg": {"L0"},
|
||||
"internal/poll": {"L0", "internal/oserror", "internal/race", "syscall", "time", "unicode/utf16", "unicode/utf8", "internal/syscall/windows", "internal/syscall/unix"},
|
||||
"internal/testlog": {"L0"},
|
||||
"os": {"L1", "os", "syscall", "time", "internal/oserror", "internal/poll", "internal/syscall/windows", "internal/syscall/unix", "internal/testlog"},
|
||||
"os": {"L1", "os", "syscall", "time", "internal/oserror", "internal/poll", "internal/syscall/windows", "internal/syscall/unix", "internal/syscall/execenv", "internal/testlog"},
|
||||
"path/filepath": {"L2", "os", "syscall", "internal/syscall/windows"},
|
||||
"io/ioutil": {"L2", "os", "path/filepath", "time"},
|
||||
"os/exec": {"L2", "os", "context", "path/filepath", "syscall"},
|
||||
"os/exec": {"L2", "os", "context", "path/filepath", "syscall", "internal/syscall/execenv"},
|
||||
"os/signal": {"L2", "os", "syscall"},
|
||||
|
||||
// OS enables basic operating system functionality,
|
||||
|
||||
@@ -69,7 +69,7 @@ type Hash struct {
// which does call h.initSeed.)
func (h *Hash) initSeed() {
if h.seed.s == 0 {
h.SetSeed(MakeSeed())
h.setSeed(MakeSeed())
}
}

@@ -124,12 +124,17 @@ func (h *Hash) Seed() Seed {
// Two Hash objects with different seeds will very likely behave differently.
// Any bytes added to h before this call will be discarded.
func (h *Hash) SetSeed(seed Seed) {
h.setSeed(seed)
h.n = 0
}

// setSeed sets seed without discarding accumulated data.
func (h *Hash) setSeed(seed Seed) {
if seed.s == 0 {
panic("maphash: use of uninitialized Seed")
}
h.seed = seed
h.state = seed
h.n = 0
}

// Reset discards all bytes added to h.
@@ -83,6 +83,29 @@ func TestHashHighBytes(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestRepeat(t *testing.T) {
|
||||
h1 := new(Hash)
|
||||
h1.WriteString("testing")
|
||||
sum1 := h1.Sum64()
|
||||
|
||||
h1.Reset()
|
||||
h1.WriteString("testing")
|
||||
sum2 := h1.Sum64()
|
||||
|
||||
if sum1 != sum2 {
|
||||
t.Errorf("different sum after reseting: %#x != %#x", sum1, sum2)
|
||||
}
|
||||
|
||||
h2 := new(Hash)
|
||||
h2.SetSeed(h1.Seed())
|
||||
h2.WriteString("testing")
|
||||
sum3 := h2.Sum64()
|
||||
|
||||
if sum1 != sum3 {
|
||||
t.Errorf("different sum on the same seed: %#x != %#x", sum1, sum3)
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure a Hash implements the hash.Hash and hash.Hash64 interfaces.
|
||||
var _ hash.Hash = &Hash{}
|
||||
var _ hash.Hash64 = &Hash{}
|
||||
|
||||
19
src/internal/syscall/execenv/execenv_default.go
Normal file
@@ -0,0 +1,19 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !windows

package execenv

import "syscall"

// Default will return the default environment
// variables based on the process attributes
// provided.
//
// Defaults to syscall.Environ() on all platforms
// other than Windows.
func Default(sys *syscall.SysProcAttr) ([]string, error) {
return syscall.Environ(), nil
}
@@ -1,8 +1,10 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Copyright 2020 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package os
|
||||
// +build windows
|
||||
|
||||
package execenv
|
||||
|
||||
import (
|
||||
"internal/syscall/windows"
|
||||
@@ -11,9 +13,17 @@ import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func environForSysProcAttr(sys *syscall.SysProcAttr) (env []string, err error) {
|
||||
// Default will return the default environment
|
||||
// variables based on the process attributes
|
||||
// provided.
|
||||
//
|
||||
// If the process attributes contain a token, then
|
||||
// the environment variables will be sourced from
|
||||
// the defaults for that user token, otherwise they
|
||||
// will be sourced from syscall.Environ().
|
||||
func Default(sys *syscall.SysProcAttr) (env []string, err error) {
|
||||
if sys == nil || sys.Token == 0 {
|
||||
return Environ(), nil
|
||||
return syscall.Environ(), nil
|
||||
}
|
||||
var block *uint16
|
||||
err = windows.CreateEnvironmentBlock(&block, sys.Token, false)
|
||||
@@ -1,13 +0,0 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !windows
|
||||
|
||||
package os
|
||||
|
||||
import "syscall"
|
||||
|
||||
func environForSysProcAttr(sys *syscall.SysProcAttr) ([]string, error) {
|
||||
return Environ(), nil
|
||||
}
|
||||
@@ -24,6 +24,7 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"internal/syscall/execenv"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -222,11 +223,11 @@ func interfaceEqual(a, b interface{}) bool {
|
||||
return a == b
|
||||
}
|
||||
|
||||
func (c *Cmd) envv() []string {
|
||||
func (c *Cmd) envv() ([]string, error) {
|
||||
if c.Env != nil {
|
||||
return c.Env
|
||||
return c.Env, nil
|
||||
}
|
||||
return os.Environ()
|
||||
return execenv.Default(c.SysProcAttr)
|
||||
}
|
||||
|
||||
func (c *Cmd) argv() []string {
|
||||
@@ -413,11 +414,15 @@ func (c *Cmd) Start() error {
|
||||
}
|
||||
c.childFiles = append(c.childFiles, c.ExtraFiles...)
|
||||
|
||||
var err error
|
||||
envv, err := c.envv()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.Process, err = os.StartProcess(c.Path, c.argv(), &os.ProcAttr{
|
||||
Dir: c.Dir,
|
||||
Files: c.childFiles,
|
||||
Env: addCriticalEnv(dedupEnv(c.envv())),
|
||||
Env: addCriticalEnv(dedupEnv(envv)),
|
||||
Sys: c.SysProcAttr,
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
package os
|
||||
|
||||
import (
|
||||
"internal/syscall/execenv"
|
||||
"runtime"
|
||||
"syscall"
|
||||
)
|
||||
@@ -39,7 +40,7 @@ func startProcess(name string, argv []string, attr *ProcAttr) (p *Process, err e
|
||||
Sys: attr.Sys,
|
||||
}
|
||||
if sysattr.Env == nil {
|
||||
sysattr.Env, err = environForSysProcAttr(sysattr.Sys)
|
||||
sysattr.Env, err = execenv.Default(sysattr.Sys)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -158,9 +158,19 @@ func nilinterhash(p unsafe.Pointer, h uintptr) uintptr {
|
||||
// is slower but more general and is used for hashing interface types
|
||||
// (called from interhash or nilinterhash, above) or for hashing in
|
||||
// maps generated by reflect.MapOf (reflect_typehash, below).
|
||||
// Note: this function must match the compiler generated
|
||||
// functions exactly. See issue 37716.
|
||||
func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
|
||||
if t.tflag&tflagRegularMemory != 0 {
|
||||
return memhash(p, h, t.size)
|
||||
// Handle ptr sizes specially, see issue 37086.
|
||||
switch t.size {
|
||||
case 4:
|
||||
return memhash32(p, h)
|
||||
case 8:
|
||||
return memhash64(p, h)
|
||||
default:
|
||||
return memhash(p, h, t.size)
|
||||
}
|
||||
}
|
||||
switch t.kind & kindMask {
|
||||
case kindFloat32:
|
||||
@@ -187,12 +197,28 @@ func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
|
||||
return h
|
||||
case kindStruct:
|
||||
s := (*structtype)(unsafe.Pointer(t))
|
||||
memStart := uintptr(0)
|
||||
memEnd := uintptr(0)
|
||||
for _, f := range s.fields {
|
||||
// TODO: maybe we could hash several contiguous fields all at once.
|
||||
if memEnd > memStart && (f.name.isBlank() || f.offset() != memEnd || f.typ.tflag&tflagRegularMemory == 0) {
|
||||
// flush any pending regular memory hashing
|
||||
h = memhash(add(p, memStart), h, memEnd-memStart)
|
||||
memStart = memEnd
|
||||
}
|
||||
if f.name.isBlank() {
|
||||
continue
|
||||
}
|
||||
h = typehash(f.typ, add(p, f.offset()), h)
|
||||
if f.typ.tflag&tflagRegularMemory == 0 {
|
||||
h = typehash(f.typ, add(p, f.offset()), h)
|
||||
continue
|
||||
}
|
||||
if memStart == memEnd {
|
||||
memStart = f.offset()
|
||||
}
|
||||
memEnd = f.offset() + f.typ.size
|
||||
}
|
||||
if memEnd > memStart {
|
||||
h = memhash(add(p, memStart), h, memEnd-memStart)
|
||||
}
|
||||
return h
|
||||
default:
|
||||
|
||||
@@ -8,8 +8,10 @@ import "unsafe"
|
||||
|
||||
func checkptrAlignment(p unsafe.Pointer, elem *_type, n uintptr) {
|
||||
// Check that (*[n]elem)(p) is appropriately aligned.
|
||||
// Note that we allow unaligned pointers if the types they point to contain
|
||||
// no pointers themselves. See issue 37298.
|
||||
// TODO(mdempsky): What about fieldAlign?
|
||||
if uintptr(p)&(uintptr(elem.align)-1) != 0 {
|
||||
if elem.ptrdata != 0 && uintptr(p)&(uintptr(elem.align)-1) != 0 {
|
||||
throw("checkptr: unsafe pointer conversion")
|
||||
}
|
||||
|
||||
|
||||
@@ -24,7 +24,8 @@ func TestCheckPtr(t *testing.T) {
|
||||
cmd string
|
||||
want string
|
||||
}{
|
||||
{"CheckPtrAlignment", "fatal error: checkptr: unsafe pointer conversion\n"},
|
||||
{"CheckPtrAlignmentPtr", "fatal error: checkptr: unsafe pointer conversion\n"},
|
||||
{"CheckPtrAlignmentNoPtr", ""},
|
||||
{"CheckPtrArithmetic", "fatal error: checkptr: unsafe pointer arithmetic\n"},
|
||||
{"CheckPtrSize", "fatal error: checkptr: unsafe pointer conversion\n"},
|
||||
{"CheckPtrSmall", "fatal error: checkptr: unsafe pointer arithmetic\n"},
|
||||
@@ -38,6 +39,12 @@ func TestCheckPtr(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Log(err)
|
||||
}
|
||||
if tc.want == "" {
|
||||
if len(got) > 0 {
|
||||
t.Errorf("output:\n%s\nwant no output", got)
|
||||
}
|
||||
return
|
||||
}
|
||||
if !strings.HasPrefix(string(got), tc.want) {
|
||||
t.Errorf("output:\n%s\n\nwant output starting with: %s", got, tc.want)
|
||||
}
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
|
||||
// Offsets into internal/cpu records for use in assembly.
|
||||
const (
|
||||
offsetX86HasAVX = unsafe.Offsetof(cpu.X86.HasAVX)
|
||||
offsetX86HasAVX2 = unsafe.Offsetof(cpu.X86.HasAVX2)
|
||||
offsetX86HasERMS = unsafe.Offsetof(cpu.X86.HasERMS)
|
||||
offsetX86HasSSE2 = unsafe.Offsetof(cpu.X86.HasSSE2)
|
||||
|
||||
@@ -6,6 +6,7 @@ package runtime_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"testing"
|
||||
@@ -281,3 +282,122 @@ func TestDeferForFuncWithNoExit(t *testing.T) {
|
||||
for {
|
||||
}
|
||||
}
|
||||
|
||||
// Test case approximating issue #37664, where a recursive function (interpreter)
|
||||
// may do repeated recovers/re-panics until it reaches the frame where the panic
|
||||
// can actually be handled. The recurseFnPanicRec() function is testing that there
|
||||
// are no stale defer structs on the defer chain after the interpreter() sequence,
|
||||
// by writing a bunch of 0xffffffffs into several recursive stack frames, and then
|
||||
// doing a single panic-recover which would invoke any such stale defer structs.
|
||||
func TestDeferWithRepeatedRepanics(t *testing.T) {
|
||||
interpreter(0, 6, 2)
|
||||
recurseFnPanicRec(0, 10)
|
||||
interpreter(0, 5, 1)
|
||||
recurseFnPanicRec(0, 10)
|
||||
interpreter(0, 6, 3)
|
||||
recurseFnPanicRec(0, 10)
|
||||
}
|
||||
|
||||
func interpreter(level int, maxlevel int, rec int) {
|
||||
defer func() {
|
||||
e := recover()
|
||||
if e == nil {
|
||||
return
|
||||
}
|
||||
if level != e.(int) {
|
||||
//fmt.Fprintln(os.Stderr, "re-panicing, level", level)
|
||||
panic(e)
|
||||
}
|
||||
//fmt.Fprintln(os.Stderr, "Recovered, level", level)
|
||||
}()
|
||||
if level+1 < maxlevel {
|
||||
interpreter(level+1, maxlevel, rec)
|
||||
} else {
|
||||
//fmt.Fprintln(os.Stderr, "Initiating panic")
|
||||
panic(rec)
|
||||
}
|
||||
}
|
||||
|
||||
func recurseFnPanicRec(level int, maxlevel int) {
|
||||
defer func() {
|
||||
recover()
|
||||
}()
|
||||
recurseFn(level, maxlevel)
|
||||
}
|
||||
|
||||
func recurseFn(level int, maxlevel int) {
|
||||
a := [40]uint32{0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff}
|
||||
if level+1 < maxlevel {
|
||||
// Need this print statement to keep a around. '_ = a[4]' doesn't do it.
|
||||
fmt.Fprintln(os.Stderr, "recurseFn", level, a[4])
|
||||
recurseFn(level+1, maxlevel)
|
||||
} else {
|
||||
panic("recurseFn panic")
|
||||
}
|
||||
}
|
||||
|
||||
// Try to reproduce issue #37688, where a pointer to an open-coded defer struct is
|
||||
// mistakenly held, and that struct keeps a pointer to a stack-allocated defer
|
||||
// struct, and that stack-allocated struct gets overwritten or the stack gets
|
||||
// moved, so a memory error happens on GC.
|
||||
func TestIssue37688(t *testing.T) {
|
||||
for j := 0; j < 10; j++ {
|
||||
g2()
|
||||
g3()
|
||||
}
|
||||
}
|
||||
|
||||
type foo struct {
|
||||
}
|
||||
|
||||
func (f *foo) method1() {
|
||||
fmt.Fprintln(os.Stderr, "method1")
|
||||
}
|
||||
|
||||
func (f *foo) method2() {
|
||||
fmt.Fprintln(os.Stderr, "method2")
|
||||
}
|
||||
|
||||
func g2() {
|
||||
var a foo
|
||||
ap := &a
|
||||
// The loop forces this defer to be heap-allocated and the remaining two
|
||||
// to be stack-allocated.
|
||||
for i := 0; i < 1; i++ {
|
||||
defer ap.method1()
|
||||
}
|
||||
defer ap.method2()
|
||||
defer ap.method1()
|
||||
ff1(ap, 1, 2, 3, 4, 5, 6, 7, 8, 9)
|
||||
// Try to get the stack to be be moved by growing it too large, so
|
||||
// existing stack-allocated defer becomes invalid.
|
||||
rec1(2000)
|
||||
}
|
||||
|
||||
func g3() {
|
||||
// Mix up the stack layout by adding in an extra function frame
|
||||
g2()
|
||||
}
|
||||
|
||||
func ff1(ap *foo, a, b, c, d, e, f, g, h, i int) {
|
||||
defer ap.method1()
|
||||
|
||||
// Make a defer that has a very large set of args, hence big size for the
|
||||
// defer record for the open-coded frame (which means it won't use the
|
||||
// defer pool)
|
||||
defer func(ap *foo, a, b, c, d, e, f, g, h, i int) {
|
||||
if v := recover(); v != nil {
|
||||
fmt.Fprintln(os.Stderr, "did recover")
|
||||
}
|
||||
fmt.Fprintln(os.Stderr, "debug", ap, a, b, c, d, e, f, g, h)
|
||||
}(ap, a, b, c, d, e, f, g, h, i)
|
||||
panic("ff1 panic")
|
||||
}
|
||||
|
||||
func rec1(max int) {
|
||||
if max > 0 {
|
||||
rec1(max - 1)
|
||||
} else {
|
||||
fmt.Fprintln(os.Stderr, "finished recursion", max)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -950,3 +950,28 @@ func SemNwait(addr *uint32) uint32 {
|
||||
root := semroot(addr)
|
||||
return atomic.Load(&root.nwait)
|
||||
}
|
||||
|
||||
// MapHashCheck computes the hash of the key k for the map m, twice.
|
||||
// Method 1 uses the built-in hasher for the map.
|
||||
// Method 2 uses the typehash function (the one used by reflect).
|
||||
// Returns the two hash values, which should always be equal.
|
||||
func MapHashCheck(m interface{}, k interface{}) (uintptr, uintptr) {
|
||||
// Unpack m.
|
||||
mt := (*maptype)(unsafe.Pointer(efaceOf(&m)._type))
|
||||
mh := (*hmap)(efaceOf(&m).data)
|
||||
|
||||
// Unpack k.
|
||||
kt := efaceOf(&k)._type
|
||||
var p unsafe.Pointer
|
||||
if isDirectIface(kt) {
|
||||
q := efaceOf(&k).data
|
||||
p = unsafe.Pointer(&q)
|
||||
} else {
|
||||
p = efaceOf(&k).data
|
||||
}
|
||||
|
||||
// Compute the hash functions.
|
||||
x := mt.hasher(noescape(p), uintptr(mh.hash0))
|
||||
y := typehash(kt, noescape(p), uintptr(mh.hash0))
|
||||
return x, y
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"fmt"
|
||||
"math"
|
||||
"math/rand"
|
||||
"reflect"
|
||||
. "runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
@@ -48,6 +49,54 @@ func TestMemHash64Equality(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompilerVsRuntimeHash(t *testing.T) {
|
||||
// Test to make sure the compiler's hash function and the runtime's hash function agree.
|
||||
// See issue 37716.
|
||||
for _, m := range []interface{}{
|
||||
map[bool]int{},
|
||||
map[int8]int{},
|
||||
map[uint8]int{},
|
||||
map[int16]int{},
|
||||
map[uint16]int{},
|
||||
map[int32]int{},
|
||||
map[uint32]int{},
|
||||
map[int64]int{},
|
||||
map[uint64]int{},
|
||||
map[int]int{},
|
||||
map[uint]int{},
|
||||
map[uintptr]int{},
|
||||
map[*byte]int{},
|
||||
map[chan int]int{},
|
||||
map[unsafe.Pointer]int{},
|
||||
map[float32]int{},
|
||||
map[float64]int{},
|
||||
map[complex64]int{},
|
||||
map[complex128]int{},
|
||||
map[string]int{},
|
||||
//map[interface{}]int{},
|
||||
//map[interface{F()}]int{},
|
||||
map[[8]uint64]int{},
|
||||
map[[8]string]int{},
|
||||
map[struct{ a, b, c, d int32 }]int{}, // Note: tests AMEM128
|
||||
map[struct{ a, b, _, d int32 }]int{},
|
||||
map[struct {
|
||||
a, b int32
|
||||
c float32
|
||||
d, e [8]byte
|
||||
}]int{},
|
||||
map[struct {
|
||||
a int16
|
||||
b int64
|
||||
}]int{},
|
||||
} {
|
||||
k := reflect.New(reflect.TypeOf(m).Key()).Elem().Interface() // the zero key
|
||||
x, y := MapHashCheck(m, k)
|
||||
if x != y {
|
||||
t.Errorf("hashes did not match (%x vs %x) for map %T", x, y, m)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Smhasher is a torture test for hash functions.
|
||||
// https://code.google.com/p/smhasher/
|
||||
// This code is a port of some of the Smhasher tests to Go.
|
||||
|
||||
@@ -1921,7 +1921,11 @@ Run:
// The bitmask starts at s.startAddr.
// The result must be deallocated with dematerializeGCProg.
func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
s := mheap_.allocManual((ptrdata/(8*sys.PtrSize)+pageSize-1)/pageSize, &memstats.gc_sys)
// Each word of ptrdata needs one bit in the bitmap.
bitmapBytes := divRoundUp(ptrdata, 8*sys.PtrSize)
// Compute the number of pages needed for bitmapBytes.
pages := divRoundUp(bitmapBytes, pageSize)
s := mheap_.allocManual(pages, &memstats.gc_sys)
runGCProg(addb(prog, 4), nil, (*byte)(unsafe.Pointer(s.startAddr)), 1)
return s
}
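A quick arithmetic sketch of the two-step rounding above, using divRoundUp as defined later in this change (runtime/stubs.go); the concrete numbers below are illustrative, not taken from the runtime:

package main

import "fmt"

// divRoundUp returns ceil(n / a), mirroring the helper this change adds to runtime/stubs.go.
func divRoundUp(n, a uintptr) uintptr { return (n + a - 1) / a }

func main() {
	const ptrSize = 8          // bytes per word on a 64-bit platform
	const pageSize = 8192      // runtime page size
	ptrdata := uintptr(100000) // hypothetical pointer-bytes covered by the GC program

	bitmapBytes := divRoundUp(ptrdata, 8*ptrSize) // one bitmap bit per word: 1563 bytes
	pages := divRoundUp(bitmapBytes, pageSize)    // rounded up separately: 1 page
	fmt.Println(bitmapBytes, pages)
}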
@@ -769,32 +769,40 @@ func gcSetTriggerRatio(triggerRatio float64) {
|
||||
goal = memstats.heap_marked + memstats.heap_marked*uint64(gcpercent)/100
|
||||
}
|
||||
|
||||
// If we let triggerRatio go too low, then if the application
|
||||
// is allocating very rapidly we might end up in a situation
|
||||
// where we're allocating black during a nearly always-on GC.
|
||||
// The result of this is a growing heap and ultimately an
|
||||
// increase in RSS. By capping us at a point >0, we're essentially
|
||||
// saying that we're OK using more CPU during the GC to prevent
|
||||
// this growth in RSS.
|
||||
//
|
||||
// The current constant was chosen empirically: given a sufficiently
|
||||
// fast/scalable allocator with 48 Ps that could drive the trigger ratio
|
||||
// to <0.05, this constant causes applications to retain the same peak
|
||||
// RSS compared to not having this allocator.
|
||||
const minTriggerRatio = 0.6
|
||||
|
||||
// Set the trigger ratio, capped to reasonable bounds.
|
||||
if triggerRatio < minTriggerRatio {
|
||||
// This can happen if the mutator is allocating very
|
||||
// quickly or the GC is scanning very slowly.
|
||||
triggerRatio = minTriggerRatio
|
||||
} else if gcpercent >= 0 {
|
||||
if gcpercent >= 0 {
|
||||
scalingFactor := float64(gcpercent) / 100
|
||||
// Ensure there's always a little margin so that the
|
||||
// mutator assist ratio isn't infinity.
|
||||
maxTriggerRatio := 0.95 * float64(gcpercent) / 100
|
||||
maxTriggerRatio := 0.95 * scalingFactor
|
||||
if triggerRatio > maxTriggerRatio {
|
||||
triggerRatio = maxTriggerRatio
|
||||
}
|
||||
|
||||
// If we let triggerRatio go too low, then if the application
|
||||
// is allocating very rapidly we might end up in a situation
|
||||
// where we're allocating black during a nearly always-on GC.
|
||||
// The result of this is a growing heap and ultimately an
|
||||
// increase in RSS. By capping us at a point >0, we're essentially
|
||||
// saying that we're OK using more CPU during the GC to prevent
|
||||
// this growth in RSS.
|
||||
//
|
||||
// The current constant was chosen empirically: given a sufficiently
|
||||
// fast/scalable allocator with 48 Ps that could drive the trigger ratio
|
||||
// to <0.05, this constant causes applications to retain the same peak
|
||||
// RSS compared to not having this allocator.
|
||||
minTriggerRatio := 0.6 * scalingFactor
|
||||
if triggerRatio < minTriggerRatio {
|
||||
triggerRatio = minTriggerRatio
|
||||
}
|
||||
} else if triggerRatio < 0 {
|
||||
// gcpercent < 0, so just make sure we're not getting a negative
|
||||
// triggerRatio. This case isn't expected to happen in practice,
|
||||
// and doesn't really matter because if gcpercent < 0 then we won't
|
||||
// ever consume triggerRatio further on in this function, but let's
|
||||
// just be defensive here; the triggerRatio being negative is almost
|
||||
// certainly undesirable.
|
||||
triggerRatio = 0
|
||||
}
|
||||
memstats.triggerRatio = triggerRatio
|
||||
|
||||
|
||||
@@ -244,15 +244,6 @@ func genAMD64() {
|
||||
|
||||
// TODO: MXCSR register?
|
||||
|
||||
// Apparently, the signal handling code path in darwin kernel leaves
|
||||
// the upper bits of Y registers in a dirty state, which causes
|
||||
// many SSE operations (128-bit and narrower) become much slower.
|
||||
// Clear the upper bits to get to a clean state. See issue #37174.
|
||||
// It is safe here as Go code don't use the upper bits of Y registers.
|
||||
p("#ifdef GOOS_darwin")
|
||||
p("VZEROUPPER")
|
||||
p("#endif")
|
||||
|
||||
p("PUSHQ BP")
|
||||
p("MOVQ SP, BP")
|
||||
p("// Save flags before clobbering them")
|
||||
@@ -261,6 +252,18 @@ func genAMD64() {
|
||||
p("ADJSP $%d", l.stack)
|
||||
p("// But vet doesn't know ADJSP, so suppress vet stack checking")
|
||||
p("NOP SP")
|
||||
|
||||
// Apparently, the signal handling code path in darwin kernel leaves
|
||||
// the upper bits of Y registers in a dirty state, which causes
|
||||
// many SSE operations (128-bit and narrower) become much slower.
|
||||
// Clear the upper bits to get to a clean state. See issue #37174.
|
||||
// It is safe here as Go code don't use the upper bits of Y registers.
|
||||
p("#ifdef GOOS_darwin")
|
||||
p("CMPB internal∕cpu·X86+const_offsetX86HasAVX(SB), $0")
|
||||
p("JE 2(PC)")
|
||||
p("VZEROUPPER")
|
||||
p("#endif")
|
||||
|
||||
l.save()
|
||||
p("CALL ·asyncPreempt2(SB)")
|
||||
l.restore()
|
||||
@@ -379,6 +382,7 @@ func genMIPS(_64bit bool) {
|
||||
sub := "SUB"
|
||||
r28 := "R28"
|
||||
regsize := 4
|
||||
softfloat := "GOMIPS_softfloat"
|
||||
if _64bit {
|
||||
mov = "MOVV"
|
||||
movf = "MOVD"
|
||||
@@ -386,6 +390,7 @@ func genMIPS(_64bit bool) {
|
||||
sub = "SUBV"
|
||||
r28 = "RSB"
|
||||
regsize = 8
|
||||
softfloat = "GOMIPS64_softfloat"
|
||||
}
|
||||
|
||||
// Add integer registers R1-R22, R24-R25, R28
|
||||
@@ -408,28 +413,36 @@ func genMIPS(_64bit bool) {
|
||||
mov+" LO, R1\n"+mov+" R1, %d(R29)",
|
||||
mov+" %d(R29), R1\n"+mov+" R1, LO",
|
||||
regsize)
|
||||
|
||||
// Add floating point control/status register FCR31 (FCR0-FCR30 are irrelevant)
|
||||
l.addSpecial(
|
||||
var lfp = layout{sp: "R29", stack: l.stack}
|
||||
lfp.addSpecial(
|
||||
mov+" FCR31, R1\n"+mov+" R1, %d(R29)",
|
||||
mov+" %d(R29), R1\n"+mov+" R1, FCR31",
|
||||
regsize)
|
||||
// Add floating point registers F0-F31.
|
||||
for i := 0; i <= 31; i++ {
|
||||
reg := fmt.Sprintf("F%d", i)
|
||||
l.add(movf, reg, regsize)
|
||||
lfp.add(movf, reg, regsize)
|
||||
}
|
||||
|
||||
// allocate frame, save PC of interrupted instruction (in LR)
|
||||
p(mov+" R31, -%d(R29)", l.stack)
|
||||
p(sub+" $%d, R29", l.stack)
|
||||
p(mov+" R31, -%d(R29)", lfp.stack)
|
||||
p(sub+" $%d, R29", lfp.stack)
|
||||
|
||||
l.save()
|
||||
p("#ifndef %s", softfloat)
|
||||
lfp.save()
|
||||
p("#endif")
|
||||
p("CALL ·asyncPreempt2(SB)")
|
||||
p("#ifndef %s", softfloat)
|
||||
lfp.restore()
|
||||
p("#endif")
|
||||
l.restore()
|
||||
|
||||
p(mov+" %d(R29), R31", l.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it
|
||||
p(mov + " (R29), R23") // load PC to REGTMP
|
||||
p(add+" $%d, R29", l.stack+regsize) // pop frame (including the space pushed by sigctxt.pushCall)
|
||||
p(mov+" %d(R29), R31", lfp.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it
|
||||
p(mov + " (R29), R23") // load PC to REGTMP
|
||||
p(add+" $%d, R29", lfp.stack+regsize) // pop frame (including the space pushed by sigctxt.pushCall)
|
||||
p("JMP (R23)")
|
||||
}
|
||||
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"runtime/internal/atomic"
|
||||
"runtime/internal/sys"
|
||||
"unsafe"
|
||||
)
|
||||
@@ -479,7 +480,21 @@ func rt_sigaction(sig uintptr, new, old *sigactiont, size uintptr) int32
|
||||
func getpid() int
|
||||
func tgkill(tgid, tid, sig int)
|
||||
|
||||
// touchStackBeforeSignal stores an errno value. If non-zero, it means
|
||||
// that we should touch the signal stack before sending a signal.
|
||||
// This is used on systems that have a bug when the signal stack must
|
||||
// be faulted in. See #35777 and #37436.
|
||||
//
|
||||
// This is accessed atomically as it is set and read in different threads.
|
||||
//
|
||||
// TODO(austin): Remove this after Go 1.15 when we remove the
|
||||
// mlockGsignal workaround.
|
||||
var touchStackBeforeSignal uint32
|
||||
|
||||
// signalM sends a signal to mp.
|
||||
func signalM(mp *m, sig int) {
|
||||
if atomic.Load(&touchStackBeforeSignal) != 0 {
|
||||
atomic.Cas((*uint32)(unsafe.Pointer(mp.gsignal.stack.hi-4)), 0, 0)
|
||||
}
|
||||
tgkill(getpid(), int(mp.procid), sig)
|
||||
}
|
||||
|
||||
@@ -7,6 +7,8 @@
|
||||
|
||||
package runtime
|
||||
|
||||
import "runtime/internal/atomic"
|
||||
|
||||
//go:noescape
|
||||
func uname(utsname *new_utsname) int
|
||||
|
||||
@@ -58,17 +60,34 @@ func osArchInit() {
|
||||
if m0.gsignal != nil {
|
||||
throw("gsignal quirk too late")
|
||||
}
|
||||
throwReportQuirk = throwBadKernel
|
||||
}
|
||||
}
|
||||
|
||||
func mlockGsignal(gsignal *g) {
|
||||
if err := mlock(gsignal.stack.hi-physPageSize, physPageSize); err < 0 {
|
||||
printlock()
|
||||
println("runtime: mlock of signal stack failed:", -err)
|
||||
if err == -_ENOMEM {
|
||||
println("runtime: increase the mlock limit (ulimit -l) or")
|
||||
}
|
||||
println("runtime: update your kernel to 5.3.15+, 5.4.2+, or 5.5+")
|
||||
throw("mlock failed")
|
||||
if atomic.Load(&touchStackBeforeSignal) != 0 {
|
||||
// mlock has already failed, don't try again.
|
||||
return
|
||||
}
|
||||
|
||||
// This mlock call may fail, but we don't report the failure.
|
||||
// Instead, if something goes badly wrong, we rely on prepareSignalM
|
||||
// and throwBadKernel to do further mitigation and to report a problem
|
||||
// to the user if mitigation fails. This is because many
|
||||
// systems have a limit on the total mlock size, and many kernels
|
||||
// that appear to have bad versions are actually patched to avoid the
|
||||
// bug described above. We want Go 1.14 to run on those systems.
|
||||
// See #37436.
|
||||
if errno := mlock(gsignal.stack.hi-physPageSize, physPageSize); errno < 0 {
|
||||
atomic.Store(&touchStackBeforeSignal, uint32(-errno))
|
||||
}
|
||||
}
|
||||
|
||||
// throwBadKernel is called, via throwReportQuirk, by throw.
|
||||
func throwBadKernel() {
|
||||
if errno := atomic.Load(&touchStackBeforeSignal); errno != 0 {
|
||||
println("runtime: note: your Linux kernel may be buggy")
|
||||
println("runtime: note: see https://golang.org/wiki/LinuxKernelSignalVectorBug")
|
||||
println("runtime: note: mlock workaround for kernel bug failed with errno", errno)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -294,9 +294,7 @@ func loadOptionalSyscalls() {
|
||||
|
||||
func monitorSuspendResume() {
|
||||
const (
|
||||
_DEVICE_NOTIFY_CALLBACK = 2
|
||||
_ERROR_FILE_NOT_FOUND = 2
|
||||
_ERROR_INVALID_PARAMETERS = 87
|
||||
_DEVICE_NOTIFY_CALLBACK = 2
|
||||
)
|
||||
type _DEVICE_NOTIFY_SUBSCRIBE_PARAMETERS struct {
|
||||
callback uintptr
|
||||
@@ -323,25 +321,8 @@ func monitorSuspendResume() {
|
||||
callback: compileCallback(*efaceOf(&fn), true),
|
||||
}
|
||||
handle := uintptr(0)
|
||||
ret := stdcall3(powerRegisterSuspendResumeNotification, _DEVICE_NOTIFY_CALLBACK,
|
||||
stdcall3(powerRegisterSuspendResumeNotification, _DEVICE_NOTIFY_CALLBACK,
|
||||
uintptr(unsafe.Pointer(¶ms)), uintptr(unsafe.Pointer(&handle)))
|
||||
// This function doesn't use GetLastError(), so we use the return value directly.
|
||||
switch ret {
|
||||
case 0:
|
||||
return // Successful, nothing more to do.
|
||||
case _ERROR_FILE_NOT_FOUND:
|
||||
// Systems without access to the suspend/resume notifier
|
||||
// also have their clock on "program time", and therefore
|
||||
// don't want or need this anyway.
|
||||
return
|
||||
case _ERROR_INVALID_PARAMETERS:
|
||||
// This is seen when running in Windows Docker.
|
||||
// See issue 36557.
|
||||
return
|
||||
default:
|
||||
println("runtime: PowerRegisterSuspendResumeNotification failed with errno=", ret)
|
||||
throw("runtime: PowerRegisterSuspendResumeNotification failure")
|
||||
}
|
||||
}
|
||||
|
||||
//go:nosplit
|
||||
|
||||
@@ -216,7 +216,8 @@ func panicmem() {
|
||||
// The compiler turns a defer statement into a call to this.
|
||||
//go:nosplit
|
||||
func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
|
||||
if getg().m.curg != getg() {
|
||||
gp := getg()
|
||||
if gp.m.curg != gp {
|
||||
// go code on the system stack can't defer
|
||||
throw("defer on system stack")
|
||||
}
|
||||
@@ -234,6 +235,8 @@ func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
|
||||
if d._panic != nil {
|
||||
throw("deferproc: d.panic != nil after newdefer")
|
||||
}
|
||||
d.link = gp._defer
|
||||
gp._defer = d
|
||||
d.fn = fn
|
||||
d.pc = callerpc
|
||||
d.sp = sp
|
||||
@@ -374,7 +377,8 @@ func init() {
|
||||
}
|
||||
|
||||
// Allocate a Defer, usually using per-P pool.
|
||||
// Each defer must be released with freedefer.
|
||||
// Each defer must be released with freedefer. The defer is not
|
||||
// added to any defer chain yet.
|
||||
//
|
||||
// This must not grow the stack because there may be a frame without
|
||||
// stack map information when this is called.
|
||||
@@ -424,8 +428,6 @@ func newdefer(siz int32) *_defer {
|
||||
}
|
||||
d.siz = siz
|
||||
d.heap = true
|
||||
d.link = gp._defer
|
||||
gp._defer = d
|
||||
return d
|
||||
}
|
||||
|
||||
@@ -1003,11 +1005,12 @@ func gopanic(e interface{}) {
|
||||
atomic.Xadd(&runningPanicDefers, -1)
|
||||
|
||||
if done {
|
||||
// Remove any remaining non-started, open-coded defer
|
||||
// entry after a recover (there's at most one, if we just
|
||||
// ran a non-open-coded defer), since the entry will
|
||||
// become out-dated and the defer will be executed
|
||||
// normally.
|
||||
// Remove any remaining non-started, open-coded
|
||||
// defer entries after a recover, since the
|
||||
// corresponding defers will be executed normally
|
||||
// (inline). Any such entry will become stale once
|
||||
// we run the corresponding defers inline and exit
|
||||
// the associated stack frame.
|
||||
d := gp._defer
|
||||
var prev *_defer
|
||||
for d != nil {
|
||||
@@ -1025,8 +1028,9 @@ func gopanic(e interface{}) {
|
||||
} else {
|
||||
prev.link = d.link
|
||||
}
|
||||
newd := d.link
|
||||
freedefer(d)
|
||||
break
|
||||
d = newd
|
||||
} else {
|
||||
prev = d
|
||||
d = d.link
|
||||
@@ -1279,6 +1283,12 @@ func startpanic_m() bool {
|
||||
}
|
||||
}
|
||||
|
||||
// throwReportQuirk, if non-nil, is called by throw after dumping the stacks.
|
||||
//
|
||||
// TODO(austin): Remove this after Go 1.15 when we remove the
|
||||
// mlockGsignal workaround.
|
||||
var throwReportQuirk func()
|
||||
|
||||
var didothers bool
|
||||
var deadlock mutex
|
||||
|
||||
@@ -1325,6 +1335,10 @@ func dopanic_m(gp *g, pc, sp uintptr) bool {
|
||||
|
||||
printDebugLog()
|
||||
|
||||
if throwReportQuirk != nil {
|
||||
throwReportQuirk()
|
||||
}
|
||||
|
||||
return docrash
|
||||
}
|
||||
|
||||
|
||||
@@ -68,7 +68,8 @@ Search:
if len(m.freeStk) < len(stk) {
m.freeStk = make([]uintptr, 1024)
}
e.stk = m.freeStk[:len(stk)]
// Limit cap to prevent append from clobbering freeStk.
e.stk = m.freeStk[:len(stk):len(stk)]
m.freeStk = m.freeStk[len(stk):]

for j := range stk {
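The one-line fix above relies on Go's full (three-index) slice expression: capping the capacity means a later append cannot grow into the portion of freeStk that will be handed out next. A standalone sketch of the difference, independent of the pprof code:

package main

import "fmt"

func main() {
	shared := make([]int, 8)
	a := shared[:2] // len 2, cap 8: append writes into shared[2]
	a = append(a, 99)
	fmt.Println(shared[2]) // 99: the shared backing array was clobbered

	shared2 := make([]int, 8)
	b := shared2[:2:2] // len 2, cap 2: append must allocate a fresh array
	b = append(b, 99)
	fmt.Println(shared2[2]) // 0: shared2 is left untouched
}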
@@ -1172,16 +1172,37 @@ func TestTryAdd(t *testing.T) {
|
||||
{Value: []int64{20, 20 * period}, Location: []*profile.Location{{ID: 1}}},
|
||||
},
|
||||
}, {
|
||||
name: "recursive_inlined_funcs",
|
||||
name: "bug38096",
|
||||
input: []uint64{
|
||||
3, 0, 500, // hz = 500. Must match the period.
|
||||
// count (data[2]) == 0 && len(stk) == 1 is an overflow
|
||||
// entry. The "stk" entry is actually the count.
|
||||
4, 0, 0, 4242,
|
||||
},
|
||||
wantLocs: [][]string{{"runtime/pprof.lostProfileEvent"}},
|
||||
wantSamples: []*profile.Sample{
|
||||
{Value: []int64{4242, 4242 * period}, Location: []*profile.Location{{ID: 1}}},
|
||||
},
|
||||
}, {
|
||||
// If a function is called recursively then it must not be
|
||||
// inlined in the caller.
|
||||
//
|
||||
// N.B. We're generating an impossible profile here, with a
|
||||
// recursive inlineCallee call. This is simulating a non-Go
|
||||
// function that looks like an inlined Go function other than
|
||||
// its recursive property. See pcDeck.tryAdd.
|
||||
name: "recursive_func_is_not_inlined",
|
||||
input: []uint64{
|
||||
3, 0, 500, // hz = 500. Must match the period.
|
||||
5, 0, 30, inlinedCalleePtr, inlinedCalleePtr,
|
||||
4, 0, 40, inlinedCalleePtr,
|
||||
},
|
||||
wantLocs: [][]string{{"runtime/pprof.inlinedCallee"}},
|
||||
// inlinedCaller shows up here because
|
||||
// runtime_expandFinalInlineFrame adds it to the stack frame.
|
||||
wantLocs: [][]string{{"runtime/pprof.inlinedCallee"}, {"runtime/pprof.inlinedCaller"}},
|
||||
wantSamples: []*profile.Sample{
|
||||
{Value: []int64{30, 30 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}}},
|
||||
{Value: []int64{40, 40 * period}, Location: []*profile.Location{{ID: 1}}},
|
||||
{Value: []int64{30, 30 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}, {ID: 2}}},
|
||||
{Value: []int64{40, 40 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}}},
|
||||
},
|
||||
}, {
|
||||
name: "truncated_stack_trace_later",
|
||||
@@ -1202,12 +1223,36 @@ func TestTryAdd(t *testing.T) {
|
||||
4, 0, 70, inlinedCalleePtr,
|
||||
5, 0, 80, inlinedCalleePtr, inlinedCallerPtr,
|
||||
},
|
||||
wantLocs: [][]string{ // the inline info is screwed up, but better than a crash.
|
||||
{"runtime/pprof.inlinedCallee"},
|
||||
wantLocs: [][]string{{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}},
|
||||
wantSamples: []*profile.Sample{
|
||||
{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
|
||||
{Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 1}}},
|
||||
},
|
||||
}, {
|
||||
// We can recover the inlined caller from a truncated stack.
|
||||
name: "truncated_stack_trace_only",
|
||||
input: []uint64{
|
||||
3, 0, 500, // hz = 500. Must match the period.
|
||||
4, 0, 70, inlinedCalleePtr,
|
||||
},
|
||||
wantLocs: [][]string{{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}},
|
||||
wantSamples: []*profile.Sample{
|
||||
{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
|
||||
},
|
||||
}, {
|
||||
// The same location is used for duplicated stacks.
|
||||
name: "truncated_stack_trace_twice",
|
||||
input: []uint64{
|
||||
3, 0, 500, // hz = 500. Must match the period.
|
||||
4, 0, 70, inlinedCalleePtr,
|
||||
5, 0, 80, inlinedCallerPtr, inlinedCalleePtr,
|
||||
},
|
||||
wantLocs: [][]string{
|
||||
{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"},
|
||||
{"runtime/pprof.inlinedCaller"}},
|
||||
wantSamples: []*profile.Sample{
|
||||
{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
|
||||
{Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}}},
|
||||
{Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 2}, {ID: 1}}},
|
||||
},
|
||||
}}
|
||||
|
||||
|
||||
@@ -322,7 +322,10 @@ func (b *profileBuilder) addCPUData(data []uint64, tags []unsafe.Pointer) error
|
||||
// overflow record
|
||||
count = uint64(stk[0])
|
||||
stk = []uint64{
|
||||
uint64(funcPC(lostProfileEvent)),
|
||||
// gentraceback guarantees that PCs in the
|
||||
// stack can be unconditionally decremented and
|
||||
// still be valid, so we must do the same.
|
||||
uint64(funcPC(lostProfileEvent)+1),
|
||||
}
|
||||
}
|
||||
b.m.lookup(stk, tag).count += int64(count)
|
||||
@@ -384,6 +387,10 @@ func (b *profileBuilder) build() {
|
||||
// It may emit to b.pb, so there must be no message encoding in progress.
|
||||
func (b *profileBuilder) appendLocsForStack(locs []uint64, stk []uintptr) (newLocs []uint64) {
|
||||
b.deck.reset()
|
||||
|
||||
// The last frame might be truncated. Recover lost inline frames.
|
||||
stk = runtime_expandFinalInlineFrame(stk)
|
||||
|
||||
for len(stk) > 0 {
|
||||
addr := stk[0]
|
||||
if l, ok := b.locs[addr]; ok {
|
||||
@@ -395,22 +402,12 @@ func (b *profileBuilder) appendLocsForStack(locs []uint64, stk []uintptr) (newLo
|
||||
// then, record the cached location.
|
||||
locs = append(locs, l.id)
|
||||
|
||||
// The stk may be truncated due to the stack depth limit
|
||||
// (e.g. See maxStack and maxCPUProfStack in runtime) or
|
||||
// bugs in runtime. Avoid the crash in either case.
|
||||
// TODO(hyangah): The correct fix may require using the exact
|
||||
// pcs as the key for b.locs cache management instead of just
|
||||
// relying on the very first pc. We are late in the go1.14 dev
|
||||
// cycle, so this is a workaround with little code change.
|
||||
if len(l.pcs) > len(stk) {
|
||||
stk = nil
|
||||
// TODO(hyangah): would be nice if we can enable
|
||||
// debug print out on demand and report the problematic
|
||||
// cached location entry and stack traces. Do we already
|
||||
// have such facility to utilize (e.g. GODEBUG)?
|
||||
} else {
|
||||
stk = stk[len(l.pcs):] // skip the matching pcs.
|
||||
}
|
||||
// Skip the matching pcs.
|
||||
//
|
||||
// Even if stk was truncated due to the stack depth
|
||||
// limit, expandFinalInlineFrame above has already
|
||||
// fixed the truncation, ensuring it is long enough.
|
||||
stk = stk[len(l.pcs):]
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -427,9 +424,9 @@ func (b *profileBuilder) appendLocsForStack(locs []uint64, stk []uintptr) (newLo
|
||||
stk = stk[1:]
|
||||
continue
|
||||
}
|
||||
// add failed because this addr is not inlined with
|
||||
// the existing PCs in the deck. Flush the deck and retry to
|
||||
// handle this pc.
|
||||
// add failed because this addr is not inlined with the
|
||||
// existing PCs in the deck. Flush the deck and retry handling
|
||||
// this pc.
|
||||
if id := b.emitLocation(); id > 0 {
|
||||
locs = append(locs, id)
|
||||
}
|
||||
@@ -463,8 +460,8 @@ func (b *profileBuilder) appendLocsForStack(locs []uint64, stk []uintptr) (newLo
|
||||
// the fake pcs and restore the inlined and entry functions. Inlined functions
|
||||
// have the following properties:
|
||||
// Frame's Func is nil (note: also true for non-Go functions), and
|
||||
// Frame's Entry matches its entry function frame's Entry. (note: could also be true for recursive calls and non-Go functions),
|
||||
// Frame's Name does not match its entry function frame's name.
|
||||
// Frame's Entry matches its entry function frame's Entry (note: could also be true for recursive calls and non-Go functions), and
|
||||
// Frame's Name does not match its entry function frame's name (note: inlined functions cannot be recursive).
|
||||
//
|
||||
// As reading and processing the pcs in a stack trace one by one (from leaf to the root),
|
||||
// we use pcDeck to temporarily hold the observed pcs and their expanded frames
|
||||
@@ -486,8 +483,8 @@ func (d *pcDeck) reset() {
|
||||
// to the deck. If it fails the caller needs to flush the deck and retry.
|
||||
func (d *pcDeck) tryAdd(pc uintptr, frames []runtime.Frame, symbolizeResult symbolizeFlag) (success bool) {
|
||||
if existing := len(d.pcs); existing > 0 {
|
||||
// 'frames' are all expanded from one 'pc' and represent all inlined functions
|
||||
// so we check only the last one.
|
||||
// 'd.frames' are all expanded from one 'pc' and represent all
|
||||
// inlined functions so we check only the last one.
|
||||
newFrame := frames[0]
|
||||
last := d.frames[existing-1]
|
||||
if last.Func != nil { // the last frame can't be inlined. Flush.
|
||||
|
||||
@@ -422,3 +422,16 @@ func TestFakeMapping(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure the profiler can handle an empty stack trace.
|
||||
// See issue 37967.
|
||||
func TestEmptyStack(t *testing.T) {
|
||||
b := []uint64{
|
||||
3, 0, 500, // hz = 500
|
||||
3, 0, 10, // 10 samples with an empty stack trace
|
||||
}
|
||||
_, err := translateCPUProfile(b)
|
||||
if err != nil {
|
||||
t.Fatalf("translating profile: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,6 +9,9 @@ import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// runtime_expandFinalInlineFrame is defined in runtime/symtab.go.
|
||||
func runtime_expandFinalInlineFrame(stk []uintptr) []uintptr
|
||||
|
||||
// runtime_setProfLabel is defined in runtime/proflabel.go.
|
||||
func runtime_setProfLabel(labels unsafe.Pointer)
|
||||
|
||||
|
||||
@@ -4,9 +4,6 @@
|
||||
#include "textflag.h"
|
||||
|
||||
TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
|
||||
#ifdef GOOS_darwin
|
||||
VZEROUPPER
|
||||
#endif
|
||||
PUSHQ BP
|
||||
MOVQ SP, BP
|
||||
// Save flags before clobbering them
|
||||
@@ -15,6 +12,11 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
|
||||
ADJSP $368
|
||||
// But vet doesn't know ADJSP, so suppress vet stack checking
|
||||
NOP SP
|
||||
#ifdef GOOS_darwin
|
||||
CMPB internal∕cpu·X86+const_offsetX86HasAVX(SB), $0
|
||||
JE 2(PC)
|
||||
VZEROUPPER
|
||||
#endif
|
||||
MOVQ AX, 0(SP)
|
||||
MOVQ CX, 8(SP)
|
||||
MOVQ DX, 16(SP)
|
||||
|
||||
@@ -37,6 +37,7 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
|
||||
MOVV R1, 208(R29)
|
||||
MOVV LO, R1
|
||||
MOVV R1, 216(R29)
|
||||
#ifndef GOMIPS64_softfloat
|
||||
MOVV FCR31, R1
|
||||
MOVV R1, 224(R29)
|
||||
MOVD F0, 232(R29)
|
||||
@@ -71,7 +72,9 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
|
||||
MOVD F29, 464(R29)
|
||||
MOVD F30, 472(R29)
|
||||
MOVD F31, 480(R29)
|
||||
#endif
|
||||
CALL ·asyncPreempt2(SB)
|
||||
#ifndef GOMIPS64_softfloat
|
||||
MOVD 480(R29), F31
|
||||
MOVD 472(R29), F30
|
||||
MOVD 464(R29), F29
|
||||
@@ -106,6 +109,7 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
|
||||
MOVD 232(R29), F0
|
||||
MOVV 224(R29), R1
|
||||
MOVV R1, FCR31
|
||||
#endif
|
||||
MOVV 216(R29), R1
|
||||
MOVV R1, LO
|
||||
MOVV 208(R29), R1
|
||||
|
||||
@@ -37,6 +37,7 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
|
||||
MOVW R1, 104(R29)
|
||||
MOVW LO, R1
|
||||
MOVW R1, 108(R29)
|
||||
#ifndef GOMIPS_softfloat
|
||||
MOVW FCR31, R1
|
||||
MOVW R1, 112(R29)
|
||||
MOVF F0, 116(R29)
|
||||
@@ -71,7 +72,9 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
|
||||
MOVF F29, 232(R29)
|
||||
MOVF F30, 236(R29)
|
||||
MOVF F31, 240(R29)
|
||||
#endif
|
||||
CALL ·asyncPreempt2(SB)
|
||||
#ifndef GOMIPS_softfloat
|
||||
MOVF 240(R29), F31
|
||||
MOVF 236(R29), F30
|
||||
MOVF 232(R29), F29
|
||||
@@ -106,6 +109,7 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
|
||||
MOVF 116(R29), F0
|
||||
MOVW 112(R29), R1
|
||||
MOVW R1, FCR31
|
||||
#endif
|
||||
MOVW 108(R29), R1
|
||||
MOVW R1, LO
|
||||
MOVW 104(R29), R1
|
||||
|
||||
@@ -540,6 +540,10 @@ type m struct {
// requested, but fails. Accessed atomically.
preemptGen uint32

// Whether this is a pending preemption signal on this M.
// Accessed atomically.
signalPending uint32

dlogPerM

mOS

@@ -333,6 +333,7 @@ func doSigPreempt(gp *g, ctxt *sigctxt) {

// Acknowledge the preemption.
atomic.Xadd(&gp.m.preemptGen, 1)
atomic.Store(&gp.m.signalPending, 0)
}

const preemptMSupported = pushCallSupported
@@ -359,7 +360,14 @@ func preemptM(mp *m) {
// required).
return
}
signalM(mp, sigPreempt)
if atomic.Cas(&mp.signalPending, 0, 1) {
// If multiple threads are preempting the same M, it may send many
// signals to the same M such that it hardly make progress, causing
// live-lock problem. Apparently this could happen on darwin. See
// issue #37741.
// Only send a signal if there isn't already one pending.
signalM(mp, sigPreempt)
}
}

// sigFetchG fetches the value of G safely when running in a signal handler.
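The preemptM change above only sends a signal when it can flip a signalPending flag from 0 to 1, and doSigPreempt clears the flag once the preemption is acknowledged. A hedged, user-level sketch of the same coalescing pattern using sync/atomic (the notifier type and its names are illustrative, not runtime API):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// notifier coalesces concurrent wake-up requests: many goroutines may call
// Notify, but at most one notification is outstanding at a time.
type notifier struct {
	pending uint32
	sent    uint32 // counts notifications actually delivered, for the demo
}

// Notify delivers a notification only if none is already pending,
// mirroring the CAS guard in preemptM.
func (n *notifier) Notify() {
	if atomic.CompareAndSwapUint32(&n.pending, 0, 1) {
		atomic.AddUint32(&n.sent, 1)
	}
}

// Ack is what the receiver calls after handling the notification,
// mirroring doSigPreempt clearing signalPending.
func (n *notifier) Ack() {
	atomic.StoreUint32(&n.pending, 0)
}

func main() {
	var n notifier
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); n.Notify() }()
	}
	wg.Wait()
	fmt.Println("delivered:", atomic.LoadUint32(&n.sent)) // 1, not 100
}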
@@ -310,6 +310,13 @@ func alignDown(n, a uintptr) uintptr {
return n &^ (a - 1)
}

// divRoundUp returns ceil(n / a).
func divRoundUp(n, a uintptr) uintptr {
// a is generally a power of two. This will get inlined and
// the compiler will optimize the division.
return (n + a - 1) / a
}

// checkASM reports whether assembly runtime checks have passed.
func checkASM() bool
@@ -148,6 +148,62 @@ func (ci *Frames) Next() (frame Frame, more bool) {
|
||||
return
|
||||
}
|
||||
|
||||
// runtime_expandFinalInlineFrame expands the final pc in stk to include all
|
||||
// "callers" if pc is inline.
|
||||
//
|
||||
//go:linkname runtime_expandFinalInlineFrame runtime/pprof.runtime_expandFinalInlineFrame
|
||||
func runtime_expandFinalInlineFrame(stk []uintptr) []uintptr {
|
||||
if len(stk) == 0 {
|
||||
return stk
|
||||
}
|
||||
pc := stk[len(stk)-1]
|
||||
tracepc := pc - 1
|
||||
|
||||
f := findfunc(tracepc)
|
||||
if !f.valid() {
|
||||
// Not a Go function.
|
||||
return stk
|
||||
}
|
||||
|
||||
inldata := funcdata(f, _FUNCDATA_InlTree)
|
||||
if inldata == nil {
|
||||
// Nothing inline in f.
|
||||
return stk
|
||||
}
|
||||
|
||||
// Treat the previous func as normal. We haven't actually checked, but
|
||||
// since this pc was included in the stack, we know it shouldn't be
|
||||
// elided.
|
||||
lastFuncID := funcID_normal
|
||||
|
||||
// Remove pc from stk; we'll re-add it below.
|
||||
stk = stk[:len(stk)-1]
|
||||
|
||||
// See inline expansion in gentraceback.
|
||||
var cache pcvalueCache
|
||||
inltree := (*[1 << 20]inlinedCall)(inldata)
|
||||
for {
|
||||
ix := pcdatavalue(f, _PCDATA_InlTreeIndex, tracepc, &cache)
|
||||
if ix < 0 {
|
||||
break
|
||||
}
|
||||
if inltree[ix].funcID == funcID_wrapper && elideWrapperCalling(lastFuncID) {
|
||||
// ignore wrappers
|
||||
} else {
|
||||
stk = append(stk, pc)
|
||||
}
|
||||
lastFuncID = inltree[ix].funcID
|
||||
// Back up to an instruction in the "caller".
|
||||
tracepc = f.entry + uintptr(inltree[ix].parentPc)
|
||||
pc = tracepc + 1
|
||||
}
|
||||
|
||||
// N.B. we want to keep the last parentPC which is not inline.
|
||||
stk = append(stk, pc)
|
||||
|
||||
return stk
|
||||
}
|
||||
|
||||
// expandCgoFrames expands frame information for pc, known to be
|
||||
// a non-Go function, using the cgoSymbolizer hook. expandCgoFrames
|
||||
// returns nil if pc could not be expanded.
|
||||
|
||||
@@ -117,6 +117,8 @@ TEXT runtime·pipe(SB),NOSPLIT|NOFRAME,$0-12
|
||||
MOVV R0, R5
|
||||
MOVV $SYS_pipe2, R2
|
||||
SYSCALL
|
||||
BEQ R7, 2(PC)
|
||||
SUBVU R2, R0, R2 // caller expects negative errno
|
||||
MOVW R2, errno+8(FP)
|
||||
RET
|
||||
|
||||
@@ -126,6 +128,8 @@ TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20
|
||||
MOVW flags+0(FP), R5
|
||||
MOVV $SYS_pipe2, R2
|
||||
SYSCALL
|
||||
BEQ R7, 2(PC)
|
||||
SUBVU R2, R0, R2 // caller expects negative errno
|
||||
MOVW R2, errno+16(FP)
|
||||
RET
|
||||
|
||||
|
||||
@@ -118,6 +118,7 @@ TEXT runtime·pipe(SB),NOSPLIT,$0-12
|
||||
MOVW $-1, R1
|
||||
MOVW R1, r+0(FP)
|
||||
MOVW R1, w+4(FP)
|
||||
SUBU R2, R0, R2 // caller expects negative errno
|
||||
MOVW R2, errno+8(FP)
|
||||
RET
|
||||
pipeok:
|
||||
@@ -132,6 +133,8 @@ TEXT runtime·pipe2(SB),NOSPLIT,$0-16
|
||||
MOVW flags+0(FP), R5
|
||||
MOVW $SYS_pipe2, R2
|
||||
SYSCALL
|
||||
BEQ R7, 2(PC)
|
||||
SUBU R2, R0, R2 // caller expects negative errno
|
||||
MOVW R2, errno+12(FP)
|
||||
RET
|
||||
|
||||
|
||||
11
src/runtime/testdata/testprog/checkptr.go
vendored
@@ -7,18 +7,25 @@ package main
import "unsafe"

func init() {
register("CheckPtrAlignment", CheckPtrAlignment)
register("CheckPtrAlignmentNoPtr", CheckPtrAlignmentNoPtr)
register("CheckPtrAlignmentPtr", CheckPtrAlignmentPtr)
register("CheckPtrArithmetic", CheckPtrArithmetic)
register("CheckPtrSize", CheckPtrSize)
register("CheckPtrSmall", CheckPtrSmall)
}

func CheckPtrAlignment() {
func CheckPtrAlignmentNoPtr() {
var x [2]int64
p := unsafe.Pointer(&x[0])
sink2 = (*int64)(unsafe.Pointer(uintptr(p) + 1))
}

func CheckPtrAlignmentPtr() {
var x [2]int64
p := unsafe.Pointer(&x[0])
sink2 = (**int64)(unsafe.Pointer(uintptr(p) + 1))
}

func CheckPtrArithmetic() {
var x int
i := uintptr(unsafe.Pointer(&x))
@@ -74,36 +74,26 @@ type timer struct {
// timerNoStatus -> timerWaiting
// anything else -> panic: invalid value
// deltimer:
// timerWaiting -> timerDeleted
// timerWaiting -> timerModifying -> timerDeleted
// timerModifiedEarlier -> timerModifying -> timerDeleted
// timerModifiedLater -> timerDeleted
// timerModifiedLater -> timerModifying -> timerDeleted
// timerNoStatus -> do nothing
// timerDeleted -> do nothing
// timerRemoving -> do nothing
// timerRemoved -> do nothing
// timerRunning -> wait until status changes
// timerMoving -> wait until status changes
// timerModifying -> panic: concurrent deltimer/modtimer calls
// timerModifying -> wait until status changes
// modtimer:
// timerWaiting -> timerModifying -> timerModifiedXX
// timerModifiedXX -> timerModifying -> timerModifiedYY
// timerNoStatus -> timerWaiting
// timerRemoved -> timerWaiting
// timerNoStatus -> timerModifying -> timerWaiting
// timerRemoved -> timerModifying -> timerWaiting
// timerDeleted -> timerModifying -> timerModifiedXX
// timerRunning -> wait until status changes
// timerMoving -> wait until status changes
// timerRemoving -> wait until status changes
// timerDeleted -> panic: concurrent modtimer/deltimer calls
// timerModifying -> panic: concurrent modtimer calls
// resettimer:
// timerNoStatus -> timerWaiting
// timerRemoved -> timerWaiting
// timerDeleted -> timerModifying -> timerModifiedXX
// timerRemoving -> wait until status changes
// timerRunning -> wait until status changes
// timerWaiting -> panic: resettimer called on active timer
// timerMoving -> panic: resettimer called on active timer
// timerModifiedXX -> panic: resettimer called on active timer
// timerModifying -> panic: resettimer called on active timer
// timerModifying -> wait until status changes
// cleantimers (looks in P's timer heap):
// timerDeleted -> timerRemoving -> timerRemoved
// timerModifiedXX -> timerMoving -> timerWaiting
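The transition tables above are enforced with compare-and-swap loops on the timer's status word; a concurrent operation either wins the CAS or waits (osyield) and retries. A simplified, self-contained sketch of that pattern, using plain sync/atomic in place of the runtime's internal atomics and ignoring the acquirem/releasem preemption guard:

package main

import (
    "fmt"
    "runtime"
    "sync/atomic"
)

const (
    stateWaiting uint32 = iota
    stateModifying
    stateDeleted
)

// tryDelete moves a status word from Waiting to Deleted through a short
// Modifying window, retrying while another goroutine holds that window.
// This is only an illustration of the deltimer-style transition.
func tryDelete(status *uint32) bool {
    for {
        switch s := atomic.LoadUint32(status); s {
        case stateWaiting:
            if atomic.CompareAndSwapUint32(status, s, stateModifying) {
                // ...unlink the timer from its heap here...
                atomic.StoreUint32(status, stateDeleted)
                return true
            }
        case stateModifying:
            runtime.Gosched() // analogous to osyield: wait for the other call
        case stateDeleted:
            return false // already deleted
        default:
            return false
        }
    }
}

func main() {
    st := stateWaiting
    fmt.Println(tryDelete(&st)) // true
    fmt.Println(tryDelete(&st)) // false
}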
@@ -251,32 +241,24 @@ func addtimer(t *timer) {
t.when = maxWhen
}
if t.status != timerNoStatus {
badTimer()
throw("addtimer called with initialized timer")
}
t.status = timerWaiting

addInitializedTimer(t)
}

// addInitializedTimer adds an initialized timer to the current P.
func addInitializedTimer(t *timer) {
when := t.when

pp := getg().m.p.ptr()
lock(&pp.timersLock)
ok := cleantimers(pp) && doaddtimer(pp, t)
cleantimers(pp)
doaddtimer(pp, t)
unlock(&pp.timersLock)
if !ok {
badTimer()
}

wakeNetPoller(when)
}

// doaddtimer adds t to the current P's heap.
// It reports whether it saw no problems due to races.
// The caller must have locked the timers for pp.
func doaddtimer(pp *p, t *timer) bool {
func doaddtimer(pp *p, t *timer) {
// Timers rely on the network poller, so make sure the poller
// has started.
if netpollInited == 0 {
@@ -289,12 +271,11 @@ func doaddtimer(pp *p, t *timer) bool {
t.pp.set(pp)
i := len(pp.timers)
pp.timers = append(pp.timers, t)
ok := siftupTimer(pp.timers, i)
siftupTimer(pp.timers, i)
if t == pp.timers[0] {
atomic.Store64(&pp.timer0When, uint64(t.when))
}
atomic.Xadd(&pp.numTimers, 1)
return ok
}

// deltimer deletes the timer t. It may be on some other P, so we can't
@@ -305,22 +286,42 @@ func deltimer(t *timer) bool {
for {
switch s := atomic.Load(&t.status); s {
case timerWaiting, timerModifiedLater:
tpp := t.pp.ptr()
if atomic.Cas(&t.status, s, timerDeleted) {
// Prevent preemption while the timer is in timerModifying.
// This could lead to a self-deadlock. See #38070.
mp := acquirem()
if atomic.Cas(&t.status, s, timerModifying) {
// Must fetch t.pp before changing status,
// as cleantimers in another goroutine
// can clear t.pp of a timerDeleted timer.
tpp := t.pp.ptr()
if !atomic.Cas(&t.status, timerModifying, timerDeleted) {
badTimer()
}
releasem(mp)
atomic.Xadd(&tpp.deletedTimers, 1)
// Timer was not yet run.
return true
} else {
releasem(mp)
}
case timerModifiedEarlier:
tpp := t.pp.ptr()
// Prevent preemption while the timer is in timerModifying.
// This could lead to a self-deadlock. See #38070.
mp := acquirem()
if atomic.Cas(&t.status, s, timerModifying) {
// Must fetch t.pp before setting status
// to timerDeleted.
tpp := t.pp.ptr()
atomic.Xadd(&tpp.adjustTimers, -1)
if !atomic.Cas(&t.status, timerModifying, timerDeleted) {
badTimer()
}
releasem(mp)
atomic.Xadd(&tpp.deletedTimers, 1)
// Timer was not yet run.
return true
} else {
releasem(mp)
}
case timerDeleted, timerRemoving, timerRemoved:
// Timer was already run.
@@ -335,7 +336,8 @@ func deltimer(t *timer) bool {
return false
case timerModifying:
// Simultaneous calls to deltimer and modtimer.
badTimer()
// Wait for the other call to complete.
osyield()
default:
badTimer()
}
@@ -346,7 +348,7 @@ func deltimer(t *timer) bool {
// We are locked on the P when this is called.
// It reports whether it saw no problems due to races.
// The caller must have locked the timers for pp.
func dodeltimer(pp *p, i int) bool {
func dodeltimer(pp *p, i int) {
if t := pp.timers[i]; t.pp.ptr() != pp {
throw("dodeltimer: wrong P")
} else {
@@ -358,29 +360,23 @@ func dodeltimer(pp *p, i int) bool {
}
pp.timers[last] = nil
pp.timers = pp.timers[:last]
ok := true
if i != last {
// Moving to i may have moved the last timer to a new parent,
// so sift up to preserve the heap guarantee.
if !siftupTimer(pp.timers, i) {
ok = false
}
if !siftdownTimer(pp.timers, i) {
ok = false
}
siftupTimer(pp.timers, i)
siftdownTimer(pp.timers, i)
}
if i == 0 {
updateTimer0When(pp)
}
atomic.Xadd(&pp.numTimers, -1)
return ok
}

// dodeltimer0 removes timer 0 from the current P's heap.
// We are locked on the P when this is called.
// It reports whether it saw no problems due to races.
// The caller must have locked the timers for pp.
func dodeltimer0(pp *p) bool {
func dodeltimer0(pp *p) {
if t := pp.timers[0]; t.pp.ptr() != pp {
throw("dodeltimer0: wrong P")
} else {
@@ -392,13 +388,11 @@ func dodeltimer0(pp *p) bool {
}
pp.timers[last] = nil
pp.timers = pp.timers[:last]
ok := true
if last > 0 {
ok = siftdownTimer(pp.timers, 0)
siftdownTimer(pp.timers, 0)
}
updateTimer0When(pp)
atomic.Xadd(&pp.numTimers, -1)
return ok
}

// modtimer modifies an existing timer.
@@ -410,30 +404,47 @@ func modtimer(t *timer, when, period int64, f func(interface{}, uintptr), arg in

status := uint32(timerNoStatus)
wasRemoved := false
var mp *m
loop:
for {
switch status = atomic.Load(&t.status); status {
case timerWaiting, timerModifiedEarlier, timerModifiedLater:
// Prevent preemption while the timer is in timerModifying.
// This could lead to a self-deadlock. See #38070.
mp = acquirem()
if atomic.Cas(&t.status, status, timerModifying) {
break loop
}
releasem(mp)
case timerNoStatus, timerRemoved:
// Prevent preemption while the timer is in timerModifying.
// This could lead to a self-deadlock. See #38070.
mp = acquirem()

// Timer was already run and t is no longer in a heap.
// Act like addtimer.
if atomic.Cas(&t.status, status, timerWaiting) {
if atomic.Cas(&t.status, status, timerModifying) {
wasRemoved = true
break loop
}
releasem(mp)
case timerDeleted:
// Prevent preemption while the timer is in timerModifying.
// This could lead to a self-deadlock. See #38070.
mp = acquirem()
if atomic.Cas(&t.status, status, timerModifying) {
atomic.Xadd(&t.pp.ptr().deletedTimers, -1)
break loop
}
releasem(mp)
case timerRunning, timerRemoving, timerMoving:
// The timer is being run or moved, by a different P.
// Wait for it to complete.
osyield()
case timerDeleted:
// Simultaneous calls to modtimer and deltimer.
badTimer()
case timerModifying:
// Multiple simultaneous calls to modtimer.
badTimer()
// Wait for the other call to complete.
osyield()
default:
badTimer()
}
@@ -446,7 +457,15 @@ loop:

if wasRemoved {
t.when = when
addInitializedTimer(t)
pp := getg().m.p.ptr()
lock(&pp.timersLock)
doaddtimer(pp, t)
unlock(&pp.timersLock)
if !atomic.Cas(&t.status, timerModifying, timerWaiting) {
badTimer()
}
releasem(mp)
wakeNetPoller(when)
} else {
// The timer is in some other P's heap, so we can't change
// the when field. If we did, the other P's heap would
@@ -463,7 +482,6 @@ loop:
// Update the adjustTimers field. Subtract one if we
// are removing a timerModifiedEarlier, add one if we
// are adding a timerModifiedEarlier.
tpp := t.pp.ptr()
adjust := int32(0)
if status == timerModifiedEarlier {
adjust--
@@ -472,13 +490,14 @@ loop:
adjust++
}
if adjust != 0 {
atomic.Xadd(&tpp.adjustTimers, adjust)
atomic.Xadd(&t.pp.ptr().adjustTimers, adjust)
}

// Set the new status of the timer.
if !atomic.Cas(&t.status, timerModifying, newStatus) {
badTimer()
}
releasem(mp)

// If the new status is earlier, wake up the poller.
if newStatus == timerModifiedEarlier {
@@ -487,67 +506,22 @@ loop:
}
}

// resettimer resets an existing inactive timer to turn it into an active timer,
// with a new time for when the timer should fire.
// resettimer resets the time when a timer should fire.
// If used for an inactive timer, the timer will become active.
// This should be called instead of addtimer if the timer value has been,
// or may have been, used previously.
func resettimer(t *timer, when int64) {
if when < 0 {
when = maxWhen
}

for {
switch s := atomic.Load(&t.status); s {
case timerNoStatus, timerRemoved:
if atomic.Cas(&t.status, s, timerWaiting) {
t.when = when
addInitializedTimer(t)
return
}
case timerDeleted:
tpp := t.pp.ptr()
if atomic.Cas(&t.status, s, timerModifying) {
t.nextwhen = when
newStatus := uint32(timerModifiedLater)
if when < t.when {
newStatus = timerModifiedEarlier
atomic.Xadd(&t.pp.ptr().adjustTimers, 1)
}
if !atomic.Cas(&t.status, timerModifying, newStatus) {
badTimer()
}
atomic.Xadd(&tpp.deletedTimers, -1)
if newStatus == timerModifiedEarlier {
wakeNetPoller(when)
}
return
}
case timerRemoving:
// Wait for the removal to complete.
osyield()
case timerRunning:
// Even though the timer should not be active,
// we can see timerRunning if the timer function
// permits some other goroutine to call resettimer.
// Wait until the run is complete.
osyield()
case timerWaiting, timerModifying, timerModifiedEarlier, timerModifiedLater, timerMoving:
// Called resettimer on active timer.
badTimer()
default:
badTimer()
}
}
modtimer(t, when, t.period, t.f, t.arg, t.seq)
}

// cleantimers cleans up the head of the timer queue. This speeds up
// programs that create and delete timers; leaving them in the heap
// slows down addtimer. Reports whether no timer problems were found.
// The caller must have locked the timers for pp.
func cleantimers(pp *p) bool {
func cleantimers(pp *p) {
for {
if len(pp.timers) == 0 {
return true
return
}
t := pp.timers[0]
if t.pp.ptr() != pp {
@@ -558,11 +532,9 @@ func cleantimers(pp *p) bool {
if !atomic.Cas(&t.status, s, timerRemoving) {
continue
}
if !dodeltimer0(pp) {
return false
}
dodeltimer0(pp)
if !atomic.Cas(&t.status, timerRemoving, timerRemoved) {
return false
badTimer()
}
atomic.Xadd(&pp.deletedTimers, -1)
case timerModifiedEarlier, timerModifiedLater:
@@ -572,21 +544,17 @@ func cleantimers(pp *p) bool {
// Now we can change the when field.
t.when = t.nextwhen
// Move t to the right position.
if !dodeltimer0(pp) {
return false
}
if !doaddtimer(pp, t) {
return false
}
dodeltimer0(pp)
doaddtimer(pp, t)
if s == timerModifiedEarlier {
atomic.Xadd(&pp.adjustTimers, -1)
}
if !atomic.Cas(&t.status, timerMoving, timerWaiting) {
return false
badTimer()
}
default:
// Head of timers does not need adjustment.
return true
return
}
}
}
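cleantimers implements lazy deletion: deltimer only marks a timer as deleted, and the real heap removal happens later, once the marked timer reaches the head of the P's timer heap. A generic, standalone sketch of that idea with container/heap (the item and timerHeap types are illustrative, not the runtime's):

package main

import (
    "container/heap"
    "fmt"
)

type item struct {
    when    int64
    deleted bool
}

type timerHeap []*item

func (h timerHeap) Len() int            { return len(h) }
func (h timerHeap) Less(i, j int) bool  { return h[i].when < h[j].when }
func (h timerHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *timerHeap) Push(x interface{}) { *h = append(*h, x.(*item)) }
func (h *timerHeap) Pop() interface{} {
    old := *h
    it := old[len(old)-1]
    *h = old[:len(old)-1]
    return it
}

// cleanHead discards lazily-deleted entries sitting at the top of the
// heap, which is the same idea cleantimers applies to a P's timer heap.
func cleanHead(h *timerHeap) {
    for h.Len() > 0 && (*h)[0].deleted {
        heap.Pop(h)
    }
}

func main() {
    h := &timerHeap{{when: 1, deleted: true}, {when: 2}, {when: 3}}
    heap.Init(h)
    cleanHead(h)
    fmt.Println((*h)[0].when) // 2
}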
@@ -602,9 +570,7 @@ func moveTimers(pp *p, timers []*timer) {
switch s := atomic.Load(&t.status); s {
case timerWaiting:
t.pp = 0
if !doaddtimer(pp, t) {
badTimer()
}
doaddtimer(pp, t)
break loop
case timerModifiedEarlier, timerModifiedLater:
if !atomic.Cas(&t.status, s, timerMoving) {
@@ -612,9 +578,7 @@ func moveTimers(pp *p, timers []*timer) {
}
t.when = t.nextwhen
t.pp = 0
if !doaddtimer(pp, t) {
badTimer()
}
doaddtimer(pp, t)
if !atomic.Cas(&t.status, timerMoving, timerWaiting) {
badTimer()
}
@@ -668,9 +632,7 @@ loop:
switch s := atomic.Load(&t.status); s {
case timerDeleted:
if atomic.Cas(&t.status, s, timerRemoving) {
if !dodeltimer(pp, i) {
badTimer()
}
dodeltimer(pp, i)
if !atomic.Cas(&t.status, timerRemoving, timerRemoved) {
badTimer()
}
@@ -686,9 +648,7 @@ loop:
// We don't add it back yet because the
// heap manipulation could cause our
// loop to skip some other timer.
if !dodeltimer(pp, i) {
badTimer()
}
dodeltimer(pp, i)
moved = append(moved, t)
if s == timerModifiedEarlier {
if n := atomic.Xadd(&pp.adjustTimers, -1); int32(n) <= 0 {
@@ -724,9 +684,7 @@ loop:
// back to the timer heap.
func addAdjustedTimers(pp *p, moved []*timer) {
for _, t := range moved {
if !doaddtimer(pp, t) {
badTimer()
}
doaddtimer(pp, t)
if !atomic.Cas(&t.status, timerMoving, timerWaiting) {
badTimer()
}
@@ -780,9 +738,7 @@ func runtimer(pp *p, now int64) int64 {
if !atomic.Cas(&t.status, s, timerRemoving) {
continue
}
if !dodeltimer0(pp) {
badTimer()
}
dodeltimer0(pp)
if !atomic.Cas(&t.status, timerRemoving, timerRemoved) {
badTimer()
}
@@ -796,12 +752,8 @@ func runtimer(pp *p, now int64) int64 {
continue
}
t.when = t.nextwhen
if !dodeltimer0(pp) {
badTimer()
}
if !doaddtimer(pp, t) {
badTimer()
}
dodeltimer0(pp)
doaddtimer(pp, t)
if s == timerModifiedEarlier {
atomic.Xadd(&pp.adjustTimers, -1)
}
@@ -847,18 +799,14 @@ func runOneTimer(pp *p, t *timer, now int64) {
// Leave in heap but adjust next time to fire.
delta := t.when - now
t.when += t.period * (1 + -delta/t.period)
if !siftdownTimer(pp.timers, 0) {
badTimer()
}
siftdownTimer(pp.timers, 0)
if !atomic.Cas(&t.status, timerRunning, timerWaiting) {
badTimer()
}
updateTimer0When(pp)
} else {
// Remove from heap.
if !dodeltimer0(pp) {
badTimer()
}
dodeltimer0(pp)
if !atomic.Cas(&t.status, timerRunning, timerNoStatus) {
badTimer()
}
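The rescheduling line t.when += t.period * (1 + -delta/t.period) advances a periodic timer past every period that has already elapsed, so a timer that runs late fires once rather than repeatedly catching up. A tiny arithmetic check of that expression, with values chosen only for illustration:

package main

import "fmt"

// nextWhen applies the same integer arithmetic as runOneTimer's
// periodic rescheduling step.
func nextWhen(when, period, now int64) int64 {
    delta := when - now // negative once the deadline has passed
    return when + period*(1+-delta/period)
}

func main() {
    // A timer due at t=100 with a 100-unit period, run late at t=350,
    // is pushed to t=400: the three missed deadlines are skipped.
    fmt.Println(nextWhen(100, 100, 350)) // 400
}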
@@ -1076,9 +1024,9 @@ func timeSleepUntil() (int64, *p) {
// "panic holding locks" message. Instead, we panic while not
// holding a lock.

func siftupTimer(t []*timer, i int) bool {
func siftupTimer(t []*timer, i int) {
if i >= len(t) {
return false
badTimer()
}
when := t[i].when
tmp := t[i]
@@ -1093,13 +1041,12 @@ func siftupTimer(t []*timer, i int) bool {
if tmp != t[i] {
t[i] = tmp
}
return true
}

func siftdownTimer(t []*timer, i int) bool {
func siftdownTimer(t []*timer, i int) {
n := len(t)
if i >= n {
return false
badTimer()
}
when := t[i].when
tmp := t[i]
@@ -1134,7 +1081,6 @@ func siftdownTimer(t []*timer, i int) bool {
if tmp != t[i] {
t[i] = tmp
}
return true
}

// badTimer is called if the timer data structures have been corrupted,
@@ -1142,5 +1088,5 @@ func siftdownTimer(t []*timer, i int) bool {
// panicing due to invalid slice access while holding locks.
// See issue #25686.
func badTimer() {
panic(errorString("racy use of timers"))
throw("timer data corruption")
}
@@ -927,16 +927,15 @@ func tRunner(t *T, fn func(t *T)) {
t.Logf("cleanup panicked with %v", r)
}
// Flush the output log up to the root before dying.
t.mu.Lock()
root := &t.common
for ; root.parent != nil; root = root.parent {
for root := &t.common; root.parent != nil; root = root.parent {
root.mu.Lock()
root.duration += time.Since(root.start)
fmt.Fprintf(root.parent.w, "--- FAIL: %s (%s)\n", root.name, fmtDuration(root.duration))
d := root.duration
root.mu.Unlock()
root.flushToParent("--- FAIL: %s (%s)\n", root.name, fmtDuration(d))
if r := root.parent.runCleanup(recoverAndReturnPanic); r != nil {
fmt.Fprintf(root.parent.w, "cleanup panicked with %v", r)
}
root.parent.mu.Lock()
io.Copy(root.parent.w, bytes.NewReader(root.output))
}
panic(err)
}
@@ -9,7 +9,6 @@ import (
"encoding/gob"
"encoding/json"
"fmt"
"internal/race"
"math/big"
"math/rand"
"os"
@@ -1393,23 +1392,11 @@ func TestReadFileLimit(t *testing.T) {
}

// Issue 25686: hard crash on concurrent timer access.
// Issue 37400: panic with "racy use of timers"
// This test deliberately invokes a race condition.
// We are testing that we don't crash with "fatal error: panic holding locks".
// We are testing that we don't crash with "fatal error: panic holding locks",
// and that we also don't panic.
func TestConcurrentTimerReset(t *testing.T) {
if race.Enabled {
t.Skip("skipping test under race detector")
}

// We expect this code to panic rather than crash.
// Don't worry if it doesn't panic.
catch := func(i int) {
if e := recover(); e != nil {
t.Logf("panic in goroutine %d, as expected, with %q", i, e)
} else {
t.Logf("no panic in goroutine %d", i)
}
}

const goroutines = 8
const tries = 1000
var wg sync.WaitGroup
@@ -1418,7 +1405,6 @@ func TestConcurrentTimerReset(t *testing.T) {
for i := 0; i < goroutines; i++ {
go func(i int) {
defer wg.Done()
defer catch(i)
for j := 0; j < tries; j++ {
timer.Reset(Hour + Duration(i*j))
}
@@ -1426,3 +1412,25 @@ func TestConcurrentTimerReset(t *testing.T) {
}
wg.Wait()
}

// Issue 37400: panic with "racy use of timers".
func TestConcurrentTimerResetStop(t *testing.T) {
const goroutines = 8
const tries = 1000
var wg sync.WaitGroup
wg.Add(goroutines * 2)
timer := NewTimer(Hour)
for i := 0; i < goroutines; i++ {
go func(i int) {
defer wg.Done()
for j := 0; j < tries; j++ {
timer.Reset(Hour + Duration(i*j))
}
}(i)
go func(i int) {
defer wg.Done()
timer.Stop()
}(i)
}
wg.Wait()
}
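The two tests above deliberately race Reset and Stop from many goroutines to verify that the runtime neither panics nor hard-crashes. Ordinary code should still serialize access to a timer; the usual pattern for reusing one, sketched here under the assumption that no other goroutine is receiving from the timer's channel, is to stop and drain it before resetting:

package main

import (
    "fmt"
    "time"
)

func main() {
    t := time.NewTimer(time.Hour)

    // Stop the timer; if it already fired, drain the channel so the
    // subsequent Reset starts from a clean state.
    if !t.Stop() {
        select {
        case <-t.C:
        default:
        }
    }
    t.Reset(10 * time.Millisecond)

    <-t.C
    fmt.Println("timer fired")
}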
32  test/fixedbugs/issue37716.go  Normal file
@@ -0,0 +1,32 @@
// run

// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import "reflect"

// complicated enough to require a compile-generated hash function
type K struct {
a, b int32 // these get merged by the compiler into a single field, something typehash doesn't do
c float64
}

func main() {
k := K{a: 1, b: 2, c: 3}

// Make a reflect map.
m := reflect.MakeMap(reflect.MapOf(reflect.TypeOf(K{}), reflect.TypeOf(true)))
m.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(true))

// The binary must not contain the type map[K]bool anywhere, or reflect.MapOf
// will use that type instead of making a new one. So use an equivalent named type.
type M map[K]bool
var x M
reflect.ValueOf(&x).Elem().Set(m)
if !x[k] {
panic("key not found")
}
}