mirror of https://github.com/golang/go.git
runtime: use 1-byte load for address checking in racecallatomic
In racecallatomic, we do a load before calling into TSAN, so if the address is invalid we fault on the Go stack. We currently use an 8-byte load instruction, regardless of the data size that the atomic operation is performed on. So if, say, we are doing a LoadUint32 at an address that is the last 4 bytes of a memory mapping, we may fault unexpectedly. Do a 1-byte load instead. (Ideally we should do a load with the right size, so we fault correctly if we're given an unaligned address for a wide load across a page boundary. Leave that for another CL.)

Fix AMD64, ARM64, and PPC64. The code already uses a 1-byte load on S390X.

Should fix #60825.

Change-Id: I3dee93eb08ba180c85e86a9d2e71b5b520e8dcf0
Reviewed-on: https://go-review.googlesource.com/c/go/+/503937
Run-TryBot: Cherry Mui <cherryyz@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: Than McIntosh <thanm@google.com>
Reviewed-by: David Chase <drchase@google.com>
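The failure mode described above can be reproduced with a small standalone program that places a 4-byte atomic at the very end of an accessible page, immediately before a PROT_NONE page, and runs under the race detector. This is only an illustrative sketch mirroring the regression test added in the diff below; the file name, the go run -race invocation, and the assumption of a Unix-like platform where syscall.Mmap and syscall.Mprotect are available are not part of this change.

// repro.go (hypothetical file name) — build and run with the race detector:
//   go run -race repro.go
// Before this change, the 8-byte probe load in racecallatomic could read past
// the end of the mapping and fault; with the 1-byte probe it does not.
package main

import (
	"fmt"
	"sync/atomic"
	"syscall"
	"unsafe"
)

func main() {
	// Map two pages and make the second one inaccessible, so the last
	// 4 bytes of the first page sit directly before unmapped memory.
	pagesize := syscall.Getpagesize()
	b, err := syscall.Mmap(0, 0, 2*pagesize,
		syscall.PROT_READ|syscall.PROT_WRITE,
		syscall.MAP_ANON|syscall.MAP_PRIVATE)
	if err != nil {
		panic(err)
	}
	defer syscall.Munmap(b)
	if err := syscall.Mprotect(b[pagesize:], syscall.PROT_NONE); err != nil {
		panic(err)
	}

	// A 4-byte atomic at the very end of the accessible page
	// (the access pattern from issue 60825).
	a := (*uint32)(unsafe.Pointer(&b[pagesize-4]))
	atomic.StoreUint32(a, 1)
	fmt.Println(atomic.LoadUint32(a)) // expected to print 1 rather than fault
}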
@@ -35,3 +35,31 @@ func TestAtomicMmap(t *testing.T) {
 		t.Fatalf("bad atomic value: %v, want 2", *a)
 	}
 }
+
+func TestAtomicPageBoundary(t *testing.T) {
+	// Test that atomic access near (but not cross) a page boundary
+	// doesn't fault. See issue 60825.
+
+	// Mmap two pages of memory, and make the second page inaccessible,
+	// so we have an address at the end of a page.
+	pagesize := syscall.Getpagesize()
+	b, err := syscall.Mmap(0, 0, 2*pagesize, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_ANON|syscall.MAP_PRIVATE)
+	if err != nil {
+		t.Fatalf("mmap failed %s", err)
+	}
+	defer syscall.Munmap(b)
+	err = syscall.Mprotect(b[pagesize:], syscall.PROT_NONE)
+	if err != nil {
+		t.Fatalf("mprotect high failed %s\n", err)
+	}
+
+	// This should not fault.
+	a := (*uint32)(unsafe.Pointer(&b[pagesize-4]))
+	atomic.StoreUint32(a, 1)
+	if x := atomic.LoadUint32(a); x != 1 {
+		t.Fatalf("bad atomic value: %v, want 1", x)
+	}
+	if x := atomic.AddUint32(a, 1); x != 2 {
+		t.Fatalf("bad atomic value: %v, want 2", x)
+	}
+}

@@ -333,7 +333,7 @@ TEXT sync∕atomic·CompareAndSwapUintptr(SB), NOSPLIT, $0-25
 TEXT	racecallatomic<>(SB), NOSPLIT|NOFRAME, $0-0
 	// Trigger SIGSEGV early.
 	MOVQ	16(SP), R12
-	MOVL	(R12), R13
+	MOVBLZX	(R12), R13
 	// Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend).
 	CMPQ	R12, runtime·racearenastart(SB)
 	JB	racecallatomic_data

@@ -348,7 +348,7 @@ TEXT racecallatomic<>(SB), NOSPLIT, $0
 
 	// Trigger SIGSEGV early.
 	MOVD	40(RSP), R3	// 1st arg is addr. after two times BL, get it at 40(RSP)
-	MOVD	(R3), R13	// segv here if addr is bad
+	MOVB	(R3), R13	// segv here if addr is bad
 	// Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend).
 	MOVD	runtime·racearenastart(SB), R10
 	CMP	R10, R3

@@ -363,7 +363,7 @@ TEXT sync∕atomic·CompareAndSwapUintptr(SB), NOSPLIT, $0-25
 TEXT	racecallatomic<>(SB), NOSPLIT, $0-0
 	// Trigger SIGSEGV early if address passed to atomic function is bad.
 	MOVD	(R6), R7	// 1st arg is addr
-	MOVD	(R7), R9	// segv here if addr is bad
+	MOVB	(R7), R9	// segv here if addr is bad
 	// Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend).
 	MOVD	runtime·racearenastart(SB), R9
 	CMP	R7, R9