@@ -220,7 +220,7 @@ func windowsFindfunc(lib uintptr, name []byte) stdFunction {
 	if name[len(name)-1] != 0 {
 		throw("usage")
 	}
-	f := stdcall2(_GetProcAddress, lib, uintptr(unsafe.Pointer(&name[0])))
+	f := stdcall(_GetProcAddress, lib, uintptr(unsafe.Pointer(&name[0])))
 	return stdFunction(unsafe.Pointer(f))
 }
@@ -229,7 +229,7 @@ var sysDirectory [_MAX_PATH + 1]byte
 var sysDirectoryLen uintptr
 
 func initSysDirectory() {
-	l := stdcall2(_GetSystemDirectoryA, uintptr(unsafe.Pointer(&sysDirectory[0])), uintptr(len(sysDirectory)-1))
+	l := stdcall(_GetSystemDirectoryA, uintptr(unsafe.Pointer(&sysDirectory[0])), uintptr(len(sysDirectory)-1))
 	if l == 0 || l > uintptr(len(sysDirectory)-1) {
 		throw("Unable to determine system directory")
 	}
@@ -244,20 +244,20 @@ func windows_GetSystemDirectory() string {
 func windowsLoadSystemLib(name []uint16) uintptr {
 	const _LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x00000800
-	return stdcall3(_LoadLibraryExW, uintptr(unsafe.Pointer(&name[0])), 0, _LOAD_LIBRARY_SEARCH_SYSTEM32)
+	return stdcall(_LoadLibraryExW, uintptr(unsafe.Pointer(&name[0])), 0, _LOAD_LIBRARY_SEARCH_SYSTEM32)
 }
 
 //go:linkname windows_QueryPerformanceCounter internal/syscall/windows.QueryPerformanceCounter
 func windows_QueryPerformanceCounter() int64 {
 	var counter int64
-	stdcall1(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&counter)))
+	stdcall(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&counter)))
 	return counter
 }
 
 //go:linkname windows_QueryPerformanceFrequency internal/syscall/windows.QueryPerformanceFrequency
 func windows_QueryPerformanceFrequency() int64 {
 	var frequency int64
-	stdcall1(_QueryPerformanceFrequency, uintptr(unsafe.Pointer(&frequency)))
+	stdcall(_QueryPerformanceFrequency, uintptr(unsafe.Pointer(&frequency)))
 	return frequency
 }
@@ -308,7 +308,7 @@ func monitorSuspendResume() {
 	var fn any = func(context uintptr, changeType uint32, setting uintptr) uintptr {
 		for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
 			if mp.resumesema != 0 {
-				stdcall1(_SetEvent, mp.resumesema)
+				stdcall(_SetEvent, mp.resumesema)
 			}
 		}
 		return 0
@@ -317,13 +317,13 @@ func monitorSuspendResume() {
 		callback: compileCallback(*efaceOf(&fn), true),
 	}
 	handle := uintptr(0)
-	stdcall3(powerRegisterSuspendResumeNotification, _DEVICE_NOTIFY_CALLBACK,
+	stdcall(powerRegisterSuspendResumeNotification, _DEVICE_NOTIFY_CALLBACK,
 		uintptr(unsafe.Pointer(&params)), uintptr(unsafe.Pointer(&handle)))
 }
 
 func getCPUCount() int32 {
 	var mask, sysmask uintptr
-	ret := stdcall3(_GetProcessAffinityMask, currentProcess, uintptr(unsafe.Pointer(&mask)), uintptr(unsafe.Pointer(&sysmask)))
+	ret := stdcall(_GetProcessAffinityMask, currentProcess, uintptr(unsafe.Pointer(&mask)), uintptr(unsafe.Pointer(&sysmask)))
 	if ret != 0 {
 		n := 0
 		maskbits := int(unsafe.Sizeof(mask) * 8)
@@ -338,13 +338,13 @@ func getCPUCount() int32 {
 	}
 	// use GetSystemInfo if GetProcessAffinityMask fails
 	var info systeminfo
-	stdcall1(_GetSystemInfo, uintptr(unsafe.Pointer(&info)))
+	stdcall(_GetSystemInfo, uintptr(unsafe.Pointer(&info)))
 	return int32(info.dwnumberofprocessors)
 }
 
 func getPageSize() uintptr {
 	var info systeminfo
-	stdcall1(_GetSystemInfo, uintptr(unsafe.Pointer(&info)))
+	stdcall(_GetSystemInfo, uintptr(unsafe.Pointer(&info)))
 	return uintptr(info.dwpagesize)
 }
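
Aside (standalone illustration, not part of this change): when GetProcessAffinityMask succeeds, getCPUCount derives the CPU count from the set bits of the returned mask (the maskbits loop is elided by the diff context). A minimal sketch of that counting, using math/bits instead of the runtime's hand-rolled bit loop; the function name here is illustrative:

package main

import (
	"fmt"
	"math/bits"
)

// cpusFromAffinityMask returns one logical CPU per set bit in a process
// affinity mask, the same quantity getCPUCount computes by walking
// maskbits bit positions when GetProcessAffinityMask succeeds.
func cpusFromAffinityMask(mask uint64) int32 {
	return int32(bits.OnesCount64(mask))
}

func main() {
	fmt.Println(cpusFromAffinityMask(0b1111))      // 4 logical CPUs
	fmt.Println(cpusFromAffinityMask(0b1010_0001)) // 3 logical CPUs
}
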
@@ -383,9 +383,9 @@ func osRelax(relax bool) uint32 {
 	}
 
 	if relax {
-		return uint32(stdcall1(_timeEndPeriod, 1))
+		return uint32(stdcall(_timeEndPeriod, 1))
 	} else {
-		return uint32(stdcall1(_timeBeginPeriod, 1))
+		return uint32(stdcall(_timeBeginPeriod, 1))
 	}
 }
@@ -414,7 +414,7 @@ func createHighResTimer() uintptr {
 		_TIMER_QUERY_STATE = 0x0001
 		_TIMER_MODIFY_STATE = 0x0002
 	)
-	return stdcall4(_CreateWaitableTimerExW, 0, 0,
+	return stdcall(_CreateWaitableTimerExW, 0, 0,
 		_CREATE_WAITABLE_TIMER_HIGH_RESOLUTION,
 		_SYNCHRONIZE|_TIMER_QUERY_STATE|_TIMER_MODIFY_STATE)
 }
@@ -424,7 +424,7 @@ func initHighResTimer() {
 	if h != 0 {
 		haveHighResTimer = true
 		haveHighResSleep = _NtCreateWaitCompletionPacket != nil
-		stdcall1(_CloseHandle, h)
+		stdcall(_CloseHandle, h)
 	} else {
 		// Only load winmm.dll if we need it.
 		// This avoids a dependency on winmm.dll for Go programs
@@ -456,7 +456,7 @@ func initLongPathSupport() {
 	// Check that we're ≥ 10.0.15063.
 	info := _OSVERSIONINFOW{}
 	info.osVersionInfoSize = uint32(unsafe.Sizeof(info))
-	stdcall1(_RtlGetVersion, uintptr(unsafe.Pointer(&info)))
+	stdcall(_RtlGetVersion, uintptr(unsafe.Pointer(&info)))
 	if info.majorVersion < 10 || (info.majorVersion == 10 && info.minorVersion == 0 && info.buildNumber < 15063) {
 		return
 	}
@@ -464,7 +464,7 @@ func initLongPathSupport() {
 	// Set the IsLongPathAwareProcess flag of the PEB's bit field.
 	// This flag is not documented, but it's known to be used
 	// by Windows to enable long path support.
-	bitField := (*byte)(unsafe.Pointer(stdcall0(_RtlGetCurrentPeb) + PebBitFieldOffset))
+	bitField := (*byte)(unsafe.Pointer(stdcall(_RtlGetCurrentPeb) + PebBitFieldOffset))
 	*bitField |= IsLongPathAwareProcess
 
 	canUseLongPaths = true
@@ -493,13 +493,13 @@ func osinit() {
 	// of dedicated threads -- GUI, IO, computational, etc. Go processes use
 	// equivalent threads that all do a mix of GUI, IO, computations, etc.
 	// In such context dynamic priority boosting does nothing but harm, so we turn it off.
-	stdcall2(_SetProcessPriorityBoost, currentProcess, 1)
+	stdcall(_SetProcessPriorityBoost, currentProcess, 1)
 }
 
 //go:nosplit
 func readRandom(r []byte) int {
 	n := 0
-	if stdcall2(_ProcessPrng, uintptr(unsafe.Pointer(&r[0])), uintptr(len(r)))&0xff != 0 {
+	if stdcall(_ProcessPrng, uintptr(unsafe.Pointer(&r[0])), uintptr(len(r)))&0xff != 0 {
 		n = len(r)
 	}
 	return n
@@ -509,7 +509,7 @@ func goenvs() {
 	// strings is a pointer to environment variable pairs in the form:
 	// "envA=valA\x00envB=valB\x00\x00" (in UTF-16)
 	// Two consecutive zero bytes end the list.
-	strings := unsafe.Pointer(stdcall0(_GetEnvironmentStringsW))
+	strings := unsafe.Pointer(stdcall(_GetEnvironmentStringsW))
 	p := (*[1 << 24]uint16)(strings)[:]
 
 	n := 0
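
Aside (standalone illustration, not part of this change): the comments above describe the block returned by GetEnvironmentStringsW that goenvs walks, UTF-16 "name=value" entries separated by NULs and terminated by two consecutive NULs. A minimal sketch of splitting such a block, using unicode/utf16 for decoding (the runtime parses it in place with its own helpers; splitEnvBlock is an illustrative name):

package main

import (
	"fmt"
	"unicode/utf16"
)

// splitEnvBlock splits a GetEnvironmentStringsW-style block: UTF-16
// "name=value" entries separated by a NUL word, with two consecutive
// NUL words marking the end of the list.
func splitEnvBlock(block []uint16) []string {
	var envs []string
	start := 0
	for i, c := range block {
		if c != 0 {
			continue
		}
		if i == start { // two consecutive zero words: end of list
			break
		}
		envs = append(envs, string(utf16.Decode(block[start:i])))
		start = i + 1
	}
	return envs
}

func main() {
	block := utf16.Encode([]rune("envA=valA\x00envB=valB\x00\x00"))
	fmt.Println(splitEnvBlock(block)) // [envA=valA envB=valB]
}
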
@@ -533,13 +533,13 @@ func goenvs() {
 		p = p[1:] // skip nil byte
 	}
 
-	stdcall1(_FreeEnvironmentStringsW, uintptr(strings))
+	stdcall(_FreeEnvironmentStringsW, uintptr(strings))
 
 	// We call these all the way here, late in init, so that malloc works
 	// for the callback functions these generate.
 	var fn any = ctrlHandler
 	ctrlHandlerPC := compileCallback(*efaceOf(&fn), true)
-	stdcall2(_SetConsoleCtrlHandler, ctrlHandlerPC, 1)
+	stdcall(_SetConsoleCtrlHandler, ctrlHandlerPC, 1)
 
 	monitorSuspendResume()
 }
@@ -555,7 +555,7 @@ func exit(code int32) {
 	// kills the suspending thread, and then this thread suspends.
 	lock(&suspendLock)
 	atomic.Store(&exiting, 1)
-	stdcall1(_ExitProcess, uintptr(code))
+	stdcall(_ExitProcess, uintptr(code))
 }
 
 // write1 must be nosplit because it's used as a last resort in
@@ -571,9 +571,9 @@ func write1(fd uintptr, buf unsafe.Pointer, n int32) int32 {
 	var handle uintptr
 	switch fd {
 	case 1:
-		handle = stdcall1(_GetStdHandle, _STD_OUTPUT_HANDLE)
+		handle = stdcall(_GetStdHandle, _STD_OUTPUT_HANDLE)
 	case 2:
-		handle = stdcall1(_GetStdHandle, _STD_ERROR_HANDLE)
+		handle = stdcall(_GetStdHandle, _STD_ERROR_HANDLE)
 	default:
 		// assume fd is real windows handle.
 		handle = fd
@@ -589,7 +589,7 @@ func write1(fd uintptr, buf unsafe.Pointer, n int32) int32 {
 
 	if !isASCII {
 		var m uint32
-		isConsole := stdcall2(_GetConsoleMode, handle, uintptr(unsafe.Pointer(&m))) != 0
+		isConsole := stdcall(_GetConsoleMode, handle, uintptr(unsafe.Pointer(&m))) != 0
 		// If this is a console output, various non-unicode code pages can be in use.
 		// Use the dedicated WriteConsole call to ensure unicode is printed correctly.
 		if isConsole {
@@ -597,7 +597,7 @@ func write1(fd uintptr, buf unsafe.Pointer, n int32) int32 {
 		}
 	}
 	var written uint32
-	stdcall5(_WriteFile, handle, uintptr(buf), uintptr(n), uintptr(unsafe.Pointer(&written)), 0)
+	stdcall(_WriteFile, handle, uintptr(buf), uintptr(n), uintptr(unsafe.Pointer(&written)), 0)
 	return int32(written)
 }
@@ -650,7 +650,7 @@ func writeConsoleUTF16(handle uintptr, b []uint16) {
 		return
 	}
 	var written uint32
-	stdcall5(_WriteConsoleW,
+	stdcall(_WriteConsoleW,
 		handle,
 		uintptr(unsafe.Pointer(&b[0])),
 		uintptr(l),
@@ -671,7 +671,7 @@ func semasleep(ns int64) int32 {
 
 	var result uintptr
 	if ns < 0 {
-		result = stdcall2(_WaitForSingleObject, getg().m.waitsema, uintptr(_INFINITE))
+		result = stdcall(_WaitForSingleObject, getg().m.waitsema, uintptr(_INFINITE))
 	} else {
 		start := nanotime()
 		elapsed := int64(0)
@@ -680,7 +680,7 @@ func semasleep(ns int64) int32 {
 			if ms == 0 {
 				ms = 1
 			}
-			result = stdcall4(_WaitForMultipleObjects, 2,
+			result = stdcall(_WaitForMultipleObjects, 2,
 				uintptr(unsafe.Pointer(&[2]uintptr{getg().m.waitsema, getg().m.resumesema})),
 				0, uintptr(ms))
 			if result != _WAIT_OBJECT_0+1 {
@@ -723,7 +723,7 @@ func semasleep(ns int64) int32 {
 
 //go:nosplit
 func semawakeup(mp *m) {
-	if stdcall1(_SetEvent, mp.waitsema) == 0 {
+	if stdcall(_SetEvent, mp.waitsema) == 0 {
 		systemstack(func() {
 			print("runtime: setevent failed; errno=", getlasterror(), "\n")
 			throw("runtime.semawakeup")
@@ -736,20 +736,20 @@ func semacreate(mp *m) {
 	if mp.waitsema != 0 {
 		return
 	}
-	mp.waitsema = stdcall4(_CreateEventA, 0, 0, 0, 0)
+	mp.waitsema = stdcall(_CreateEventA, 0, 0, 0, 0)
 	if mp.waitsema == 0 {
 		systemstack(func() {
 			print("runtime: createevent failed; errno=", getlasterror(), "\n")
 			throw("runtime.semacreate")
 		})
 	}
-	mp.resumesema = stdcall4(_CreateEventA, 0, 0, 0, 0)
+	mp.resumesema = stdcall(_CreateEventA, 0, 0, 0, 0)
 	if mp.resumesema == 0 {
 		systemstack(func() {
 			print("runtime: createevent failed; errno=", getlasterror(), "\n")
 			throw("runtime.semacreate")
 		})
-		stdcall1(_CloseHandle, mp.waitsema)
+		stdcall(_CloseHandle, mp.waitsema)
 		mp.waitsema = 0
 	}
 }
@@ -762,7 +762,7 @@ func semacreate(mp *m) {
 //go:nosplit
 func newosproc(mp *m) {
 	// We pass 0 for the stack size to use the default for this binary.
-	thandle := stdcall6(_CreateThread, 0, 0,
+	thandle := stdcall(_CreateThread, 0, 0,
 		abi.FuncPCABI0(tstart_stdcall), uintptr(unsafe.Pointer(mp)),
 		0, 0)
 
@@ -780,7 +780,7 @@ func newosproc(mp *m) {
 	}
 
 	// Close thandle to avoid leaking the thread object if it exits.
-	stdcall1(_CloseHandle, thandle)
+	stdcall(_CloseHandle, thandle)
 }
 
 // Used by the C library build mode. On Linux this function would allocate a
@@ -828,7 +828,7 @@ func sigblock(exiting bool) {
 // Called on the new thread, cannot allocate Go memory.
 func minit() {
 	var thandle uintptr
-	if stdcall7(_DuplicateHandle, currentProcess, currentThread, currentProcess, uintptr(unsafe.Pointer(&thandle)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 {
+	if stdcall(_DuplicateHandle, currentProcess, currentThread, currentProcess, uintptr(unsafe.Pointer(&thandle)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 {
 		print("runtime.minit: duplicatehandle failed; errno=", getlasterror(), "\n")
 		throw("runtime.minit: duplicatehandle failed")
 	}
@@ -836,7 +836,7 @@ func minit() {
 	mp := getg().m
 	lock(&mp.threadLock)
 	mp.thread = thandle
-	mp.procid = uint64(stdcall0(_GetCurrentThreadId))
+	mp.procid = uint64(stdcall(_GetCurrentThreadId))
 
 	// Configure usleep timer, if possible.
 	if mp.highResTimer == 0 && haveHighResTimer {
@@ -853,7 +853,7 @@ func minit() {
 			throw("CreateWaitableTimerEx when creating timer failed")
 		}
 		const GENERIC_ALL = 0x10000000
-		errno := stdcall3(_NtCreateWaitCompletionPacket, uintptr(unsafe.Pointer(&mp.waitIocpHandle)), GENERIC_ALL, 0)
+		errno := stdcall(_NtCreateWaitCompletionPacket, uintptr(unsafe.Pointer(&mp.waitIocpHandle)), GENERIC_ALL, 0)
 		if mp.waitIocpHandle == 0 {
 			print("runtime: NtCreateWaitCompletionPacket failed; errno=", errno, "\n")
 			throw("NtCreateWaitCompletionPacket failed")
@@ -864,7 +864,7 @@ func minit() {
 	// Query the true stack base from the OS. Currently we're
 	// running on a small assumed stack.
 	var mbi memoryBasicInformation
-	res := stdcall3(_VirtualQuery, uintptr(unsafe.Pointer(&mbi)), uintptr(unsafe.Pointer(&mbi)), unsafe.Sizeof(mbi))
+	res := stdcall(_VirtualQuery, uintptr(unsafe.Pointer(&mbi)), uintptr(unsafe.Pointer(&mbi)), unsafe.Sizeof(mbi))
 	if res == 0 {
 		print("runtime: VirtualQuery failed; errno=", getlasterror(), "\n")
 		throw("VirtualQuery for stack base failed")
@@ -896,7 +896,7 @@ func unminit() {
 	mp := getg().m
 	lock(&mp.threadLock)
 	if mp.thread != 0 {
-		stdcall1(_CloseHandle, mp.thread)
+		stdcall(_CloseHandle, mp.thread)
 		mp.thread = 0
 	}
 	unlock(&mp.threadLock)
@@ -913,49 +913,59 @@ func unminit() {
 //go:nosplit
 func mdestroy(mp *m) {
 	if mp.highResTimer != 0 {
-		stdcall1(_CloseHandle, mp.highResTimer)
+		stdcall(_CloseHandle, mp.highResTimer)
 		mp.highResTimer = 0
 	}
 	if mp.waitIocpTimer != 0 {
-		stdcall1(_CloseHandle, mp.waitIocpTimer)
+		stdcall(_CloseHandle, mp.waitIocpTimer)
 		mp.waitIocpTimer = 0
 	}
 	if mp.waitIocpHandle != 0 {
-		stdcall1(_CloseHandle, mp.waitIocpHandle)
+		stdcall(_CloseHandle, mp.waitIocpHandle)
 		mp.waitIocpHandle = 0
 	}
 	if mp.waitsema != 0 {
-		stdcall1(_CloseHandle, mp.waitsema)
+		stdcall(_CloseHandle, mp.waitsema)
 		mp.waitsema = 0
 	}
 	if mp.resumesema != 0 {
-		stdcall1(_CloseHandle, mp.resumesema)
+		stdcall(_CloseHandle, mp.resumesema)
 		mp.resumesema = 0
 	}
 }
 
-// stdcall_no_g calls asmstdcall on os stack without using g.
+// stdcall_no_g is like [stdcall] but can be called without a G.
 //
 //go:nowritebarrier
 //go:nosplit
-func stdcall_no_g(fn stdFunction, n int, args uintptr) uintptr {
+//go:uintptrkeepalive
+func stdcall_no_g(fn stdFunction, args ...uintptr) uintptr {
 	call := windows.StdCallInfo{
-		Fn:   uintptr(unsafe.Pointer(fn)),
-		N:    uintptr(n),
-		Args: args,
+		Fn: uintptr(unsafe.Pointer(fn)),
+		N:  uintptr(len(args)),
 	}
+	if len(args) > 0 {
+		call.Args = uintptr(abi.NoEscape(unsafe.Pointer(&args[0])))
+	}
 	windows.StdCall(&call)
 	return call.R1
 }
 
-// Calling stdcall on os stack.
+// stdcall calls fn with the given arguments using the stdcall calling convention.
+// Must be called from the system stack.
 // May run during STW, so write barriers are not allowed.
 //
 //go:nowritebarrier
//go:nosplit
-func stdcall(fn stdFunction) uintptr {
+//go:uintptrkeepalive
+func stdcall(fn stdFunction, args ...uintptr) uintptr {
 	gp := getg()
 	mp := gp.m
 	mp.stdCallInfo.Fn = uintptr(unsafe.Pointer(fn))
+	mp.stdCallInfo.N = uintptr(len(args))
+	if len(args) > 0 {
+		mp.stdCallInfo.Args = uintptr(abi.NoEscape(unsafe.Pointer(&args[0])))
+	}
 	resetLibcall := false
 	if mp.profilehz != 0 && mp.libcallsp == 0 {
 		// leave pc/sp for cpu profiler
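
Aside (standalone illustration, not part of this change): the new variadic stdcall and stdcall_no_g above pack their arguments as a count plus the address of the first element of the variadic slice, which windows.StdCall / asmstdcall then consume; the real code pins those arguments with //go:uintptrkeepalive and keeps the slice off the heap with abi.NoEscape, as the hunk shows. The sketch below mirrors that packing shape with hypothetical names (callInfo, fakeStdcall, invoke) so it runs outside the runtime:

package main

import (
	"fmt"
	"unsafe"
)

// callInfo is a stand-in for windows.StdCallInfo: a function "pointer",
// an argument count, and the address of the first packed argument. (The
// real struct stores Args as a uintptr because assembly consumes it.)
type callInfo struct {
	fn   uintptr
	n    int
	args *uintptr
}

// fakeStdcall mimics the shape of the new variadic stdcall: one wrapper
// serves every arity, which is why stdcall0..stdcall8 can be deleted.
func fakeStdcall(fn uintptr, args ...uintptr) uintptr {
	ci := callInfo{fn: fn, n: len(args)}
	if len(args) > 0 {
		ci.args = &args[0] // variadic args already form a contiguous slice
	}
	return invoke(&ci)
}

// invoke stands in for asmstdcall: it walks n arguments starting at args.
// Here it just sums them so the example runs anywhere.
func invoke(ci *callInfo) uintptr {
	if ci.n == 0 {
		return 0
	}
	var r uintptr
	for _, a := range unsafe.Slice(ci.args, ci.n) {
		r += a
	}
	return r
}

func main() {
	fmt.Println(fakeStdcall(0))          // no arguments: 0
	fmt.Println(fakeStdcall(0, 1, 2, 3)) // 1+2+3 = 6
}
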
@@ -973,105 +983,24 @@ func stdcall(fn stdFunction) uintptr {
 	return mp.stdCallInfo.R1
 }
 
-//go:nosplit
-func stdcall0(fn stdFunction) uintptr {
-	mp := getg().m
-	mp.stdCallInfo.N = 0
-	mp.stdCallInfo.Args = 0
-	return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall1(fn stdFunction, a0 uintptr) uintptr {
-	mp := getg().m
-	mp.stdCallInfo.N = 1
-	mp.stdCallInfo.Args = uintptr(noescape(unsafe.Pointer(&a0)))
-	return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall2(fn stdFunction, a0, a1 uintptr) uintptr {
-	mp := getg().m
-	mp.stdCallInfo.N = 2
-	mp.stdCallInfo.Args = uintptr(noescape(unsafe.Pointer(&a0)))
-	return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall3(fn stdFunction, a0, a1, a2 uintptr) uintptr {
-	mp := getg().m
-	mp.stdCallInfo.N = 3
-	mp.stdCallInfo.Args = uintptr(noescape(unsafe.Pointer(&a0)))
-	return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall4(fn stdFunction, a0, a1, a2, a3 uintptr) uintptr {
-	mp := getg().m
-	mp.stdCallInfo.N = 4
-	mp.stdCallInfo.Args = uintptr(noescape(unsafe.Pointer(&a0)))
-	return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall5(fn stdFunction, a0, a1, a2, a3, a4 uintptr) uintptr {
-	mp := getg().m
-	mp.stdCallInfo.N = 5
-	mp.stdCallInfo.Args = uintptr(noescape(unsafe.Pointer(&a0)))
-	return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall6(fn stdFunction, a0, a1, a2, a3, a4, a5 uintptr) uintptr {
-	mp := getg().m
-	mp.stdCallInfo.N = 6
-	mp.stdCallInfo.Args = uintptr(noescape(unsafe.Pointer(&a0)))
-	return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall7(fn stdFunction, a0, a1, a2, a3, a4, a5, a6 uintptr) uintptr {
-	mp := getg().m
-	mp.stdCallInfo.N = 7
-	mp.stdCallInfo.Args = uintptr(noescape(unsafe.Pointer(&a0)))
-	return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall8(fn stdFunction, a0, a1, a2, a3, a4, a5, a6, a7 uintptr) uintptr {
-	mp := getg().m
-	mp.stdCallInfo.N = 8
-	mp.stdCallInfo.Args = uintptr(noescape(unsafe.Pointer(&a0)))
-	return stdcall(fn)
-}
-
 // These must run on the system stack only.
 
 //go:nosplit
 func osyield_no_g() {
-	stdcall_no_g(_SwitchToThread, 0, 0)
+	stdcall_no_g(_SwitchToThread)
 }
 
 //go:nosplit
 func osyield() {
 	systemstack(func() {
-		stdcall0(_SwitchToThread)
+		stdcall(_SwitchToThread)
 	})
 }
 
 //go:nosplit
 func usleep_no_g(us uint32) {
 	timeout := uintptr(us) / 1000 // ms units
-	args := [...]uintptr{_INVALID_HANDLE_VALUE, timeout}
-	stdcall_no_g(_WaitForSingleObject, len(args), uintptr(noescape(unsafe.Pointer(&args[0]))))
+	stdcall_no_g(_WaitForSingleObject, _INVALID_HANDLE_VALUE, timeout)
 }
 
 //go:nosplit
@@ -1083,13 +1012,13 @@ func usleep(us uint32) {
 		if haveHighResTimer && getg().m.highResTimer != 0 {
 			h = getg().m.highResTimer
 			dt := -10 * int64(us) // relative sleep (negative), 100ns units
-			stdcall6(_SetWaitableTimer, h, uintptr(unsafe.Pointer(&dt)), 0, 0, 0, 0)
+			stdcall(_SetWaitableTimer, h, uintptr(unsafe.Pointer(&dt)), 0, 0, 0, 0)
 			timeout = _INFINITE
 		} else {
 			h = _INVALID_HANDLE_VALUE
 			timeout = uintptr(us) / 1000 // ms units
 		}
-		stdcall2(_WaitForSingleObject, h, timeout)
+		stdcall(_WaitForSingleObject, h, timeout)
 	})
 }
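
Aside (standalone illustration, not part of this change): SetWaitableTimer takes its due time in 100-nanosecond units, and a negative value means "relative to now", which is where dt := -10 * int64(us) above and due = int64(ms) * -10000 in setThreadCPUProfiler further down come from. A small sketch of that conversion (relativeDueTime is an illustrative name):

package main

import (
	"fmt"
	"time"
)

// relativeDueTime converts a Go duration into the form the waitable-timer
// calls above use: negative for "relative to now", in 100ns units.
func relativeDueTime(d time.Duration) int64 {
	return -int64(d / 100) // time.Duration counts nanoseconds
}

func main() {
	fmt.Println(relativeDueTime(50 * time.Microsecond)) // -500, same as dt := -10 * int64(50)
	fmt.Println(relativeDueTime(10 * time.Millisecond)) // -100000, same as int64(10) * -10000
}
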
@@ -1130,7 +1059,7 @@ func profilem(mp *m, thread uintptr) {
 	c = (*context)(unsafe.Pointer((uintptr(unsafe.Pointer(&cbuf[15]))) &^ 15))
 
 	c.contextflags = _CONTEXT_CONTROL
-	stdcall2(_GetThreadContext, thread, uintptr(unsafe.Pointer(c)))
+	stdcall(_GetThreadContext, thread, uintptr(unsafe.Pointer(c)))
 
 	gp := gFromSP(mp, c.sp())
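
Aside (standalone illustration, not part of this change): the &^ 15 expression above takes the address of cbuf's element 15 (base + 15) and clears the low four bits, rounding the scratch buffer's address up to a 16-byte boundary, presumably so the context record is suitably aligned. A tiny sketch of that align-up idiom (alignUp16 is an illustrative name):

package main

import "fmt"

// alignUp16 rounds p up to the next multiple of 16 using the same
// (p + 15) &^ 15 pattern profilem applies via &cbuf[15]: add
// alignment-1, then clear the low bits.
func alignUp16(p uintptr) uintptr {
	return (p + 15) &^ 15
}

func main() {
	fmt.Println(alignUp16(32)) // 32, already aligned
	fmt.Println(alignUp16(33)) // 48
}
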
@@ -1151,10 +1080,10 @@ func gFromSP(mp *m, sp uintptr) *g {
 }
 
 func profileLoop() {
-	stdcall2(_SetThreadPriority, currentThread, _THREAD_PRIORITY_HIGHEST)
+	stdcall(_SetThreadPriority, currentThread, _THREAD_PRIORITY_HIGHEST)
 
 	for {
-		stdcall2(_WaitForSingleObject, profiletimer, _INFINITE)
+		stdcall(_WaitForSingleObject, profiletimer, _INFINITE)
 		first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
 		for mp := first; mp != nil; mp = mp.alllink {
 			if mp == getg().m {
@@ -1172,7 +1101,7 @@ func profileLoop() {
 			}
 			// Acquire our own handle to the thread.
 			var thread uintptr
-			if stdcall7(_DuplicateHandle, currentProcess, mp.thread, currentProcess, uintptr(unsafe.Pointer(&thread)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 {
+			if stdcall(_DuplicateHandle, currentProcess, mp.thread, currentProcess, uintptr(unsafe.Pointer(&thread)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 {
 				print("runtime: duplicatehandle failed; errno=", getlasterror(), "\n")
 				throw("duplicatehandle failed")
 			}
@@ -1182,9 +1111,9 @@ func profileLoop() {
 			// above and the SuspendThread. The handle
 			// will remain valid, but SuspendThread may
 			// fail.
-			if int32(stdcall1(_SuspendThread, thread)) == -1 {
+			if int32(stdcall(_SuspendThread, thread)) == -1 {
 				// The thread no longer exists.
-				stdcall1(_CloseHandle, thread)
+				stdcall(_CloseHandle, thread)
 				continue
 			}
 			if mp.profilehz != 0 && !mp.blocked {
@@ -1192,8 +1121,8 @@ func profileLoop() {
 				// was in the process of shutting down.
 				profilem(mp, thread)
 			}
-			stdcall1(_ResumeThread, thread)
-			stdcall1(_CloseHandle, thread)
+			stdcall(_ResumeThread, thread)
+			stdcall(_CloseHandle, thread)
 		}
 	}
 }
@@ -1204,7 +1133,7 @@ func setProcessCPUProfiler(hz int32) {
 		if haveHighResTimer {
 			timer = createHighResTimer()
 		} else {
-			timer = stdcall3(_CreateWaitableTimerA, 0, 0, 0)
+			timer = stdcall(_CreateWaitableTimerA, 0, 0, 0)
 		}
 		atomic.Storeuintptr(&profiletimer, timer)
 		newm(profileLoop, nil, -1)
@@ -1221,7 +1150,7 @@ func setThreadCPUProfiler(hz int32) {
 		}
 		due = int64(ms) * -10000
 	}
-	stdcall6(_SetWaitableTimer, profiletimer, uintptr(unsafe.Pointer(&due)), uintptr(ms), 0, 0, 0)
+	stdcall(_SetWaitableTimer, profiletimer, uintptr(unsafe.Pointer(&due)), uintptr(ms), 0, 0, 0)
 	atomic.Store((*uint32)(unsafe.Pointer(&getg().m.profilehz)), uint32(hz))
 }
@@ -1254,7 +1183,7 @@ func preemptM(mp *m) {
 		return
 	}
 	var thread uintptr
-	if stdcall7(_DuplicateHandle, currentProcess, mp.thread, currentProcess, uintptr(unsafe.Pointer(&thread)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 {
+	if stdcall(_DuplicateHandle, currentProcess, mp.thread, currentProcess, uintptr(unsafe.Pointer(&thread)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 {
 		print("runtime.preemptM: duplicatehandle failed; errno=", getlasterror(), "\n")
 		throw("runtime.preemptM: duplicatehandle failed")
 	}
@@ -1274,9 +1203,9 @@ func preemptM(mp *m) {
 	lock(&suspendLock)
 
 	// Suspend the thread.
-	if int32(stdcall1(_SuspendThread, thread)) == -1 {
+	if int32(stdcall(_SuspendThread, thread)) == -1 {
 		unlock(&suspendLock)
-		stdcall1(_CloseHandle, thread)
+		stdcall(_CloseHandle, thread)
 		atomic.Store(&mp.preemptExtLock, 0)
 		// The thread no longer exists. This shouldn't be
 		// possible, but just acknowledge the request.
@@ -1293,7 +1222,7 @@ func preemptM(mp *m) {
 	// We have to get the thread context before inspecting the M
 	// because SuspendThread only requests a suspend.
 	// GetThreadContext actually blocks until it's suspended.
-	stdcall2(_GetThreadContext, thread, uintptr(unsafe.Pointer(c)))
+	stdcall(_GetThreadContext, thread, uintptr(unsafe.Pointer(c)))
 
 	unlock(&suspendLock)
 
@@ -1304,7 +1233,7 @@ func preemptM(mp *m) {
 		// Inject call to asyncPreempt
 		targetPC := abi.FuncPCABI0(asyncPreempt)
 		c.pushCall(targetPC, resumePC)
-		stdcall2(_SetThreadContext, thread, uintptr(unsafe.Pointer(c)))
+		stdcall(_SetThreadContext, thread, uintptr(unsafe.Pointer(c)))
 	}
 }
@@ -1313,8 +1242,8 @@ func preemptM(mp *m) {
 	// Acknowledge the preemption.
 	mp.preemptGen.Add(1)
 
-	stdcall1(_ResumeThread, thread)
-	stdcall1(_CloseHandle, thread)
+	stdcall(_ResumeThread, thread)
+	stdcall(_CloseHandle, thread)
 }
 
 // osPreemptExtEnter is called before entering external code that may