Compare commits


1 commit

Author: Chris Broadfoot
SHA1: 3de6e96e4b
Date: 2017-01-10 19:35:03 +00:00

[release-branch.go1.8] go1.8rc1

Change-Id: I68a99a4d750357dd59eb48f7c05b4dc08c64c92d
Reviewed-on: https://go-review.googlesource.com/35097
Run-TryBot: Chris Broadfoot <cbro@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
14 changed files with 68 additions and 202 deletions

VERSION (new file, +1)

@@ -0,0 +1 @@
+go1.8rc1

src/runtime/malloc.go

@@ -111,7 +111,7 @@ const (
 	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
 	_TinySize      = 16
-	_TinySizeClass = int8(2)
+	_TinySizeClass = 2
 	_FixAllocChunk = 16 << 10               // Chunk size for FixAlloc
 	_MaxMHeapList  = 1 << (20 - _PageShift) // Maximum page length for fixed-size list in MHeap.
@@ -512,8 +512,8 @@ func nextFreeFast(s *mspan) gclinkptr {
 // weight allocation. If it is a heavy weight allocation the caller must
 // determine whether a new GC cycle needs to be started or if the GC is active
 // whether this goroutine needs to assist the GC.
-func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bool) {
-	s = c.alloc[spc]
+func (c *mcache) nextFree(sizeclass uint8) (v gclinkptr, s *mspan, shouldhelpgc bool) {
+	s = c.alloc[sizeclass]
 	shouldhelpgc = false
 	freeIndex := s.nextFreeIndex()
 	if freeIndex == s.nelems {
@@ -523,10 +523,10 @@ func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bo
 			throw("s.allocCount != s.nelems && freeIndex == s.nelems")
 		}
 		systemstack(func() {
-			c.refill(spc)
+			c.refill(int32(sizeclass))
 		})
 		shouldhelpgc = true
-		s = c.alloc[spc]
+		s = c.alloc[sizeclass]
 		freeIndex = s.nextFreeIndex()
 	}
@@ -650,10 +650,10 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 				return x
 			}
 			// Allocate a new maxTinySize block.
-			span := c.alloc[tinySpanClass]
+			span := c.alloc[tinySizeClass]
 			v := nextFreeFast(span)
 			if v == 0 {
-				v, _, shouldhelpgc = c.nextFree(tinySpanClass)
+				v, _, shouldhelpgc = c.nextFree(tinySizeClass)
 			}
 			x = unsafe.Pointer(v)
 			(*[2]uint64)(x)[0] = 0
@@ -673,11 +673,10 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 				sizeclass = size_to_class128[(size-smallSizeMax+largeSizeDiv-1)/largeSizeDiv]
 			}
 			size = uintptr(class_to_size[sizeclass])
-			spc := makeSpanClass(sizeclass, noscan)
-			span := c.alloc[spc]
+			span := c.alloc[sizeclass]
 			v := nextFreeFast(span)
 			if v == 0 {
-				v, span, shouldhelpgc = c.nextFree(spc)
+				v, span, shouldhelpgc = c.nextFree(sizeclass)
 			}
 			x = unsafe.Pointer(v)
 			if needzero && span.needzero != 0 {
@@ -688,7 +687,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 		var s *mspan
 		shouldhelpgc = true
 		systemstack(func() {
-			s = largeAlloc(size, needzero, noscan)
+			s = largeAlloc(size, needzero)
 		})
 		s.freeindex = 1
 		s.allocCount = 1
@@ -697,7 +696,9 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	}

 	var scanSize uintptr
-	if !noscan {
+	if noscan {
+		heapBitsSetTypeNoScan(uintptr(x))
+	} else {
 		// If allocating a defer+arg block, now that we've picked a malloc size
 		// large enough to hold everything, cut the "asked for" size down to
 		// just the defer header, so that the GC bitmap will record the arg block
@@ -775,7 +776,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	return x
 }

-func largeAlloc(size uintptr, needzero bool, noscan bool) *mspan {
+func largeAlloc(size uintptr, needzero bool) *mspan {
 	// print("largeAlloc size=", size, "\n")

 	if size+_PageSize < size {
@@ -791,7 +792,7 @@ func largeAlloc(size uintptr, needzero bool, noscan bool) *mspan {
 	// pays the debt down to npage pages.
 	deductSweepCredit(npages*_PageSize, npages)

-	s := mheap_.alloc(npages, makeSpanClass(0, noscan), true, needzero)
+	s := mheap_.alloc(npages, 0, true, needzero)
 	if s == nil {
 		throw("out of memory")
 	}
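
Note: the hunks above re-index the allocator by plain size class. As a rough, self-contained illustration of the size-to-size-class mapping that mallocgc performs before indexing c.alloc, here is a toy sketch; the table values are invented for illustration, not the runtime's generated class_to_size data:

package main

import "fmt"

// Toy size-class table. The real runtime generates class_to_size
// in its size-class tables; these values are only illustrative.
var classToSize = []uintptr{0, 8, 16, 32, 48, 64}

// sizeToClass returns the smallest class whose objects fit size,
// mirroring the size -> sizeclass lookup step in mallocgc above.
func sizeToClass(size uintptr) uint8 {
	for c := 1; c < len(classToSize); c++ {
		if size <= classToSize[c] {
			return uint8(c)
		}
	}
	return 0 // class 0 is reserved for large objects
}

func main() {
	for _, sz := range []uintptr{1, 9, 33, 100} {
		c := sizeToClass(sz)
		fmt.Printf("size %3d -> class %d (rounded up to %d bytes)\n",
			sz, c, classToSize[c])
	}
}

Rounding every request up to a class size is what lets the mcache keep one span per class and hand out objects without per-allocation locking.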

src/runtime/mbitmap.go

@@ -45,11 +45,6 @@
 // not checkmarked, and is the dead encoding.
 // These properties must be preserved when modifying the encoding.
 //
-// The bitmap for noscan spans is not maintained. Code must ensure
-// that an object is scannable before consulting its bitmap by
-// checking either the noscan bit in the span or by consulting its
-// type's information.
-//
 // Checkmarks
 //
 // In a concurrent garbage collector, one worries about failing to mark
@@ -189,90 +184,6 @@ type markBits struct {
 	index uintptr
 }

-//go:nosplit
-func inBss(p uintptr) bool {
-	for datap := &firstmoduledata; datap != nil; datap = datap.next {
-		if p >= datap.bss && p < datap.ebss {
-			return true
-		}
-	}
-	return false
-}
-
-//go:nosplit
-func inData(p uintptr) bool {
-	for datap := &firstmoduledata; datap != nil; datap = datap.next {
-		if p >= datap.data && p < datap.edata {
-			return true
-		}
-	}
-	return false
-}
-
-// isPublic checks whether the object has been published.
-// ptr may not point to the start of the object.
-// This is conservative in the sense that it will return true
-// for any object that hasn't been allocated by this
-// goroutine since the last roc checkpoint was performed.
-// Must run on the system stack to prevent stack growth and
-// moving of goroutine stack.
-//go:systemstack
-func isPublic(ptr uintptr) bool {
-	if debug.gcroc == 0 {
-		// Unexpected call to ROC specific routine while not running ROC.
-		// blowup without suppressing inlining.
-		_ = *(*int)(nil)
-	}
-	if inStack(ptr, getg().stack) {
-		return false
-	}
-	if getg().m != nil && getg().m.curg != nil && inStack(ptr, getg().m.curg.stack) {
-		return false
-	}
-	if inBss(ptr) {
-		return true
-	}
-	if inData(ptr) {
-		return true
-	}
-	if !inheap(ptr) {
-		// Note: Objects created using persistentalloc are not in the heap
-		// so any pointers from such object to local objects needs to be dealt
-		// with specially. nil is also considered not in the heap.
-		return true
-	}
-	// At this point we know the object is in the heap.
-	s := spanOf(ptr)
-	oldSweepgen := atomic.Load(&s.sweepgen)
-	sg := mheap_.sweepgen
-	if oldSweepgen != sg {
-		// We have an unswept span which means that the pointer points to a public object since it will
-		// be found to be marked once it is swept.
-		return true
-	}
-	abits := s.allocBitsForAddr(ptr)
-	if abits.isMarked() {
-		return true
-	} else if s.freeindex <= abits.index {
-		// Unmarked and beyond freeindex yet reachable object encountered.
-		// blowup without suppressing inlining.
-		_ = *(*int)(nil)
-	}
-	// The object is not marked. If it is part of the current
-	// ROC epoch then it is not public.
-	if s.startindex*s.elemsize <= ptr-s.base() {
-		// Object allocated in this ROC epoch and since it is
-		// not marked it has not been published.
-		return false
-	}
-	// Object allocated since last GC but in a previous ROC epoch so it is public.
-	return true
-}
-
 //go:nosplit
 func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
 	whichByte := allocBitIndex / 8
@@ -281,16 +192,6 @@ func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
 	return markBits{bytePtr, uint8(1 << whichBit), allocBitIndex}
 }

-//go:nosplit
-func (s *mspan) allocBitsForAddr(p uintptr) markBits {
-	byteOffset := p - s.base()
-	allocBitIndex := byteOffset / s.elemsize
-	whichByte := allocBitIndex / 8
-	whichBit := allocBitIndex % 8
-	bytePtr := addb(s.allocBits, whichByte)
-	return markBits{bytePtr, uint8(1 << whichBit), allocBitIndex}
-}
-
 // refillAllocCache takes 8 bytes s.allocBits starting at whichByte
 // and negates them so that ctz (count trailing zeros) instructions
 // can be used. It then places these 8 bytes into the cached 64 bit
@@ -608,6 +509,16 @@ func (h heapBits) isPointer() bool {
 	return h.bits()&bitPointer != 0
 }

+// hasPointers reports whether the given object has any pointers.
+// It must be told how large the object at h is for efficiency.
+// h must describe the initial word of the object.
+func (h heapBits) hasPointers(size uintptr) bool {
+	if size == sys.PtrSize { // 1-word objects are always pointers
+		return true
+	}
+	return (*h.bitp>>h.shift)&bitScan != 0
+}
+
 // isCheckmarked reports whether the heap bits have the checkmarked bit set.
 // It must be told how large the object at h is, because the encoding of the
 // checkmark bit varies by size.
@@ -1452,6 +1363,13 @@ Phase4:
 	}
 }

+// heapBitsSetTypeNoScan marks x as noscan by setting the first word
+// of x in the heap bitmap to scalar/dead.
+func heapBitsSetTypeNoScan(x uintptr) {
+	h := heapBitsForAddr(uintptr(x))
+	*h.bitp &^= (bitPointer | bitScan) << h.shift
+}
+
 var debugPtrmask struct {
 	lock mutex
 	data *byte
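
Note: heapBitsSetTypeNoScan, restored above, clears an object's pointer and scan bits with Go's &^ (AND NOT) operator. A minimal standalone sketch of that bit-clearing follows; the bit positions here are invented for the demo, not the runtime's real four-words-per-byte bitmap layout:

package main

import "fmt"

// Illustrative bit layout only; the real heap bitmap defines
// bitPointer and bitScan in mbitmap.go with a packed encoding.
const (
	bitPointer = 1 << 0
	bitScan    = 1 << 4
)

func main() {
	var bitp uint8 = 0xFF // pretend every bit is set
	shift := uint(1)      // bit position for some word

	// &^= (AND NOT) clears exactly the selected bits, which is how
	// heapBitsSetTypeNoScan marks a word as scalar/dead.
	bitp &^= (bitPointer | bitScan) << shift

	fmt.Printf("%08b\n", bitp) // 11011101: pointer and scan bits at shift 1 cleared
}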

src/runtime/mcache.go

@@ -33,8 +33,7 @@ type mcache struct {
 	local_tinyallocs uintptr // number of tiny allocs not counted in other stats

 	// The rest is not accessed on every malloc.
-	alloc [numSpanClasses]*mspan // spans to allocate from, indexed by spanClass
+	alloc [_NumSizeClasses]*mspan // spans to allocate from

 	stackcache [_NumStackOrders]stackfreelist
@@ -78,7 +77,7 @@ func allocmcache() *mcache {
 	lock(&mheap_.lock)
 	c := (*mcache)(mheap_.cachealloc.alloc())
 	unlock(&mheap_.lock)
-	for i := range c.alloc {
+	for i := 0; i < _NumSizeClasses; i++ {
 		c.alloc[i] = &emptymspan
 	}
 	c.next_sample = nextSample()
@@ -104,12 +103,12 @@ func freemcache(c *mcache) {

 // Gets a span that has a free object in it and assigns it
 // to be the cached span for the given sizeclass. Returns this span.
-func (c *mcache) refill(spc spanClass) *mspan {
+func (c *mcache) refill(sizeclass int32) *mspan {
 	_g_ := getg()
 	_g_.m.locks++

 	// Return the current cached span to the central lists.
-	s := c.alloc[spc]
+	s := c.alloc[sizeclass]

 	if uintptr(s.allocCount) != s.nelems {
 		throw("refill of span with free space remaining")
@@ -120,7 +119,7 @@ func (c *mcache) refill(spc spanClass) *mspan {
 	}

 	// Get a new cached span from the central lists.
-	s = mheap_.central[spc].mcentral.cacheSpan()
+	s = mheap_.central[sizeclass].mcentral.cacheSpan()
 	if s == nil {
 		throw("out of memory")
 	}
@@ -129,13 +128,13 @@ func (c *mcache) refill(spc spanClass) *mspan {
 		throw("span has no free space")
 	}

-	c.alloc[spc] = s
+	c.alloc[sizeclass] = s
 	_g_.m.locks--
 	return s
 }

 func (c *mcache) releaseAll() {
-	for i := range c.alloc {
+	for i := 0; i < _NumSizeClasses; i++ {
 		s := c.alloc[i]
 		if s != &emptymspan {
 			mheap_.central[i].mcentral.uncacheSpan(s)

src/runtime/mcentral.go

@@ -19,14 +19,14 @@ import "runtime/internal/atomic"
 //go:notinheap
 type mcentral struct {
 	lock      mutex
-	spanclass spanClass
+	sizeclass int32
 	nonempty  mSpanList // list of spans with a free object, ie a nonempty free list
 	empty     mSpanList // list of spans with no free objects (or cached in an mcache)
 }

 // Initialize a single central free list.
-func (c *mcentral) init(spc spanClass) {
-	c.spanclass = spc
+func (c *mcentral) init(sizeclass int32) {
+	c.sizeclass = sizeclass
 	c.nonempty.init()
 	c.empty.init()
 }
@@ -34,7 +34,7 @@ func (c *mcentral) init(spc spanClass) {
 // Allocate a span to use in an MCache.
 func (c *mcentral) cacheSpan() *mspan {
 	// Deduct credit for this span allocation and sweep if necessary.
-	spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize
+	spanBytes := uintptr(class_to_allocnpages[c.sizeclass]) * _PageSize
 	deductSweepCredit(spanBytes, 0)

 	lock(&c.lock)
@@ -205,11 +205,11 @@ func (c *mcentral) freeSpan(s *mspan, preserve bool, wasempty bool) bool {

 // grow allocates a new empty span from the heap and initializes it for c's size class.
 func (c *mcentral) grow() *mspan {
-	npages := uintptr(class_to_allocnpages[c.spanclass.sizeclass()])
-	size := uintptr(class_to_size[c.spanclass.sizeclass()])
+	npages := uintptr(class_to_allocnpages[c.sizeclass])
+	size := uintptr(class_to_size[c.sizeclass])
 	n := (npages << _PageShift) / size

-	s := mheap_.alloc(npages, c.spanclass, false, true)
+	s := mheap_.alloc(npages, c.sizeclass, false, true)
 	if s == nil {
 		return nil
 	}
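
Note: grow, shown above, derives the object count of a new span from the class tables. The same arithmetic can be rerun standalone with hypothetical values (8 KB pages as in the Go runtime, and a made-up 48-byte, one-page class):

package main

import "fmt"

func main() {
	// Hypothetical numbers; the real class_to_allocnpages and
	// class_to_size values come from the runtime's generated tables.
	const pageShift = 13 // 8 KB pages, as in the Go runtime
	npages := uintptr(1)
	size := uintptr(48)

	// Same computation as mcentral.grow above: span bytes / object size.
	n := (npages << pageShift) / size
	fmt.Println(n, "objects per span") // 170 objects per span
}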

src/runtime/mfinal.go

@@ -441,7 +441,7 @@ func findObject(v unsafe.Pointer) (s *mspan, x unsafe.Pointer, n uintptr) {
 	}

 	n = s.elemsize
-	if s.spanclass.sizeclass() != 0 {
+	if s.sizeclass != 0 {
 		x = add(x, (uintptr(v)-uintptr(x))/n*n)
 	}
 	return

src/runtime/mgc.go

@@ -244,7 +244,6 @@ var writeBarrier struct {
 	pad     [3]byte // compiler uses 32-bit load for "enabled" field
 	needed  bool    // whether we need a write barrier for current GC phase
 	cgo     bool    // whether we need a write barrier for a cgo check
-	roc     bool    // whether we need a write barrier for the ROC algorithm
 	alignme uint64  // guarantee alignment so that compiler can use a 32 or 64-bit load
 }

src/runtime/mgcmark.go

@@ -1268,7 +1268,7 @@ func scanobject(b uintptr, gcw *gcWork) {
 		// paths), in which case we must *not* enqueue
 		// oblets since their bitmaps will be
 		// uninitialized.
-		if s.spanclass.noscan() {
+		if !hbits.hasPointers(n) {
 			// Bypass the whole scan.
 			gcw.bytesMarked += uint64(n)
 			return
@@ -1396,7 +1396,7 @@ func greyobject(obj, base, off uintptr, hbits heapBits, span *mspan, gcw *gcWork
 		atomic.Or8(mbits.bytep, mbits.mask)
 		// If this is a noscan object, fast-track it to black
 		// instead of greying it.
-		if span.spanclass.noscan() {
+		if !hbits.hasPointers(span.elemsize) {
 			gcw.bytesMarked += uint64(span.elemsize)
 			return
 		}
@@ -1429,7 +1429,7 @@ func gcDumpObject(label string, obj, off uintptr) {
 		print(" s=nil\n")
 		return
 	}
-	print(" s.base()=", hex(s.base()), " s.limit=", hex(s.limit), " s.spanclass=", s.spanclass, " s.elemsize=", s.elemsize, " s.state=")
+	print(" s.base()=", hex(s.base()), " s.limit=", hex(s.limit), " s.sizeclass=", s.sizeclass, " s.elemsize=", s.elemsize, " s.state=")
 	if 0 <= s.state && int(s.state) < len(mSpanStateNames) {
 		print(mSpanStateNames[s.state], "\n")
 	} else {

src/runtime/mgcsweep.go

@@ -183,7 +183,7 @@ func (s *mspan) sweep(preserve bool) bool {
 	atomic.Xadd64(&mheap_.pagesSwept, int64(s.npages))

-	spc := s.spanclass
+	cl := s.sizeclass
 	size := s.elemsize
 	res := false
 	nfree := 0
@@ -277,7 +277,7 @@ func (s *mspan) sweep(preserve bool) bool {
 	// Count the number of free objects in this span.
 	nfree = s.countFree()
-	if spc.sizeclass() == 0 && nfree != 0 {
+	if cl == 0 && nfree != 0 {
 		s.needzero = 1
 		freeToHeap = true
 	}
@@ -318,9 +318,9 @@ func (s *mspan) sweep(preserve bool) bool {
 		atomic.Store(&s.sweepgen, sweepgen)
 	}

-	if nfreed > 0 && spc.sizeclass() != 0 {
-		c.local_nsmallfree[spc.sizeclass()] += uintptr(nfreed)
-		res = mheap_.central[spc].mcentral.freeSpan(s, preserve, wasempty)
+	if nfreed > 0 && cl != 0 {
+		c.local_nsmallfree[cl] += uintptr(nfreed)
+		res = mheap_.central[cl].mcentral.freeSpan(s, preserve, wasempty)
 		// MCentral_FreeSpan updates sweepgen
 	} else if freeToHeap {
 		// Free large span to heap

src/runtime/mheap.go

@@ -98,8 +98,7 @@ type mheap struct {
 	// the padding makes sure that the MCentrals are
 	// spaced CacheLineSize bytes apart, so that each MCentral.lock
 	// gets its own cache line.
-	// central is indexed by spanClass.
-	central [numSpanClasses]struct {
+	central [_NumSizeClasses]struct {
 		mcentral mcentral
 		pad      [sys.CacheLineSize]byte
 	}
@@ -195,13 +194,6 @@ type mspan struct {
 	// helps performance.
 	nelems uintptr // number of object in the span.

-	// startindex is the object index where the owner G started allocating in this span.
-	//
-	// This is used in conjunction with nextUsedSpan to implement ROC checkpoints and recycles.
-	startindex uintptr
-	// nextUsedSpan links together all spans that have the same span class and owner G.
-	nextUsedSpan *mspan
-
 	// Cache of the allocBits at freeindex. allocCache is shifted
 	// such that the lowest bit corresponds to the bit freeindex.
 	// allocCache holds the complement of allocBits, thus allowing
@@ -245,7 +237,7 @@ type mspan struct {
 	divMul      uint16     // for divide by elemsize - divMagic.mul
 	baseMask    uint16     // if non-0, elemsize is a power of 2, & this will get object allocation base
 	allocCount  uint16     // capacity - number of objects in freelist
-	spanclass   spanClass  // size class and noscan (uint8)
+	sizeclass   uint8      // size class
 	incache     bool       // being used by an mcache
 	state       mSpanState // mspaninuse etc
 	needzero    uint8      // needs to be zeroed before allocation
@@ -300,31 +292,6 @@ func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
 	h.allspans = append(h.allspans, s)
 }

-// A spanClass represents the size class and noscan-ness of a span.
-//
-// Each size class has a noscan spanClass and a scan spanClass. The
-// noscan spanClass contains only noscan objects, which do not contain
-// pointers and thus do not need to be scanned by the garbage
-// collector.
-type spanClass uint8
-
-const (
-	numSpanClasses = _NumSizeClasses << 1
-	tinySpanClass  = spanClass(tinySizeClass<<1 | 1)
-)
-
-func makeSpanClass(sizeclass uint8, noscan bool) spanClass {
-	return spanClass(sizeclass<<1) | spanClass(bool2int(noscan))
-}
-
-func (sc spanClass) sizeclass() int8 {
-	return int8(sc >> 1)
-}
-
-func (sc spanClass) noscan() bool {
-	return sc&1 != 0
-}
-
 // inheap reports whether b is a pointer into a (potentially dead) heap object.
 // It returns false for pointers into stack spans.
 // Non-preemptible because it is used by write barriers.
@@ -409,7 +376,7 @@ func mlookup(v uintptr, base *uintptr, size *uintptr, sp **mspan) int32 {
 	}

 	p := s.base()
-	if s.spanclass.sizeclass() == 0 {
+	if s.sizeclass == 0 {
 		// Large object.
 		if base != nil {
 			*base = p
@@ -457,7 +424,7 @@ func (h *mheap) init(spansStart, spansBytes uintptr) {
 	h.freelarge.init()
 	h.busylarge.init()
 	for i := range h.central {
-		h.central[i].mcentral.init(spanClass(i))
+		h.central[i].mcentral.init(int32(i))
 	}

 	sp := (*slice)(unsafe.Pointer(&h.spans))
@@ -566,7 +533,7 @@ func (h *mheap) reclaim(npage uintptr) {

 // Allocate a new span of npage pages from the heap for GC'd memory
 // and record its size class in the HeapMap and HeapMapCache.
-func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan {
+func (h *mheap) alloc_m(npage uintptr, sizeclass int32, large bool) *mspan {
 	_g_ := getg()
 	if _g_ != _g_.m.g0 {
 		throw("_mheap_alloc not on g0 stack")
@@ -600,8 +567,8 @@ func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan {
 		h.sweepSpans[h.sweepgen/2%2].push(s) // Add to swept in-use list.
 		s.state = _MSpanInUse
 		s.allocCount = 0
-		s.spanclass = spanclass
-		if sizeclass := spanclass.sizeclass(); sizeclass == 0 {
+		s.sizeclass = uint8(sizeclass)
+		if sizeclass == 0 {
 			s.elemsize = s.npages << _PageShift
 			s.divShift = 0
 			s.divMul = 0
@@ -651,13 +618,13 @@ func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan {
 	return s
 }

-func (h *mheap) alloc(npage uintptr, spanclass spanClass, large bool, needzero bool) *mspan {
+func (h *mheap) alloc(npage uintptr, sizeclass int32, large bool, needzero bool) *mspan {
 	// Don't do any operations that lock the heap on the G stack.
 	// It might trigger stack growth, and the stack growth code needs
 	// to be able to allocate heap.
 	var s *mspan
 	systemstack(func() {
-		s = h.alloc_m(npage, spanclass, large)
+		s = h.alloc_m(npage, sizeclass, large)
 	})

 	if s != nil {
@@ -1053,7 +1020,7 @@ func (span *mspan) init(base uintptr, npages uintptr) {
 	span.startAddr = base
 	span.npages = npages
 	span.allocCount = 0
-	span.spanclass = 0
+	span.sizeclass = 0
 	span.incache = false
 	span.elemsize = 0
 	span.state = _MSpanDead
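
Note: the spanClass type deleted above packs a size class and a noscan bit into one byte, with the low bit carrying noscan-ness. Its round trip can be checked outside the runtime with this standalone copy of the deleted helpers (bool2int replaced by a plain branch, so it compiles as ordinary user code):

package main

import "fmt"

// spanClass mirrors the encoding deleted above: size class in the
// upper bits, noscan flag in the low bit.
type spanClass uint8

func makeSpanClass(sizeclass uint8, noscan bool) spanClass {
	sc := spanClass(sizeclass << 1)
	if noscan {
		sc |= 1
	}
	return sc
}

func (sc spanClass) sizeclass() int8 { return int8(sc >> 1) }
func (sc spanClass) noscan() bool    { return sc&1 != 0 }

func main() {
	sc := makeSpanClass(5, true)
	fmt.Println(sc.sizeclass(), sc.noscan()) // 5 true
}

Doubling the index space this way gives every size class a scan and a noscan variant, which is what the [numSpanClasses] arrays in mcache and mheap were sized for.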

src/runtime/mstats.go

@@ -553,12 +553,12 @@ func updatememstats(stats *gcstats) {
 		if s.state != mSpanInUse {
 			continue
 		}
-		if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
+		if s.sizeclass == 0 {
 			memstats.nmalloc++
 			memstats.alloc += uint64(s.elemsize)
 		} else {
 			memstats.nmalloc += uint64(s.allocCount)
-			memstats.by_size[sizeclass].nmalloc += uint64(s.allocCount)
+			memstats.by_size[s.sizeclass].nmalloc += uint64(s.allocCount)
 			memstats.alloc += uint64(s.allocCount) * uint64(s.elemsize)
 		}
 	}

src/runtime/runtime1.go

@@ -324,7 +324,6 @@ var debug struct {
 	gcrescanstacks int32
 	gcstoptheworld int32
 	gctrace        int32
-	gcroc          int32
 	invalidptr     int32
 	sbrk           int32
 	scavenge       int32
@@ -345,7 +344,6 @@ var dbgvars = []dbgVar{
 	{"gcrescanstacks", &debug.gcrescanstacks},
 	{"gcstoptheworld", &debug.gcstoptheworld},
 	{"gctrace", &debug.gctrace},
-	{"gcroc", &debug.gcroc},
 	{"invalidptr", &debug.invalidptr},
 	{"sbrk", &debug.sbrk},
 	{"scavenge", &debug.scavenge},
@@ -411,11 +409,6 @@ func parsedebugvars() {
 		writeBarrier.cgo = true
 		writeBarrier.enabled = true
 	}
-
-	// For the roc algorithm we turn on the write barrier at all times
-	if debug.gcroc >= 1 {
-		writeBarrier.roc = true
-		writeBarrier.enabled = true
-	}
 }

 //go:linkname setTraceback runtime/debug.SetTraceback

src/runtime/stack.go

@@ -1225,8 +1225,3 @@ func morestackc() {
 		throw("attempt to execute C code on Go stack")
 	})
 }
-
-//go:nosplit
-func inStack(p uintptr, s stack) bool {
-	return s.lo <= p && p < s.hi
-}

src/runtime/stubs.go

@@ -295,10 +295,3 @@ func checkASM() bool

 func memequal_varlen(a, b unsafe.Pointer) bool
 func eqstring(s1, s2 string) bool
-
-// bool2int returns 0 if x is false or 1 if x is true.
-func bool2int(x bool) int {
-	// Avoid branches. In the SSA compiler, this compiles to
-	// exactly what you would want it to.
-	return int(uint8(*(*uint8)(unsafe.Pointer(&x))))
-}
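
Note: bool2int, deleted above, exploits the fact that a Go bool is stored as a single byte holding 0 or 1, so reinterpreting it avoids a branch. A user-level sketch of the same trick; it depends on that internal representation, which is one reason the helper lived inside the runtime rather than in public API:

package main

import (
	"fmt"
	"unsafe"
)

// bool2int reinterprets the one-byte bool as 0 or 1 without a branch,
// matching the deleted runtime helper.
func bool2int(x bool) int {
	return int(*(*uint8)(unsafe.Pointer(&x)))
}

func main() {
	fmt.Println(bool2int(false), bool2int(true)) // 0 1
}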