Skip to content

Commit dac220b

Browse files
committed
runtime: remove in-use page count loop from STW
In order to compute the sweep ratio, the runtime needs to know how many pages belong to spans in state _MSpanInUse. Currently it finds this out by looping over all spans during mark termination. However, this takes ~1ms/heap GB, so multi-gigabyte heaps can quickly push our STW time past 10ms. Replace the loop with an actively maintained count of in-use pages. For multi-gigabyte heaps, this reduces max mark termination pause time by 75%–90% relative to tip and by 85%–95% relative to Go 1.5.1. This shifts the longest pause time for large heaps to the sweep termination phase, so it only slightly decreases max pause time, though it roughly halves mean pause time. Here are the results for the garbage benchmark: ---- max mark termination pause ---- Heap Procs after change before change 1.5.1 24GB 12 1.9ms 18ms 37ms 24GB 4 3.7ms 18ms 37ms 4GB 4 920µs 3.8ms 6.9ms Fixes #11484. Change-Id: Ia2d28bb8a1e4f1c3b8ebf79fb203f12b9bf114ac Reviewed-on: https://go-review.googlesource.com/15070 Reviewed-by: Rick Hudson <rlh@golang.org> Run-TryBot: Austin Clements <austin@google.com> TryBot-Result: Gobot Gobot <gobot@golang.org>
1 parent 608c1b0 commit dac220b

File tree

2 files changed

+12
-9
lines changed

2 files changed

+12
-9
lines changed

src/runtime/mgc.go

Lines changed: 4 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1565,14 +1565,9 @@ func gcSweep(mode gcMode) {
15651565
return
15661566
}
15671567

1568-
// Account how much sweeping needs to be done before the next
1569-
// GC cycle and set up proportional sweep statistics.
1570-
var pagesToSweep uintptr
1571-
for _, s := range work.spans {
1572-
if s.state == mSpanInUse {
1573-
pagesToSweep += s.npages
1574-
}
1575-
}
1568+
// Concurrent sweep needs to sweep all of the in-use pages by
1569+
// the time the allocated heap reaches the GC trigger. Compute
1570+
// the ratio of in-use pages to sweep per byte allocated.
15761571
heapDistance := int64(memstats.next_gc) - int64(memstats.heap_live)
15771572
// Add a little margin so rounding errors and concurrent
15781573
// sweep are less likely to leave pages unswept when GC starts.
@@ -1582,7 +1577,7 @@ func gcSweep(mode gcMode) {
15821577
heapDistance = _PageSize
15831578
}
15841579
lock(&mheap_.lock)
1585-
mheap_.sweepPagesPerByte = float64(pagesToSweep) / float64(heapDistance)
1580+
mheap_.sweepPagesPerByte = float64(mheap_.pagesInUse) / float64(heapDistance)
15861581
mheap_.pagesSwept = 0
15871582
mheap_.spanBytesAlloc = 0
15881583
unlock(&mheap_.lock)

src/runtime/mheap.go

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,9 +29,12 @@ type mheap struct {
2929
spans_mapped uintptr
3030

3131
// Proportional sweep
32+
pagesInUse uint64 // pages of spans in state _MSpanInUse; R/W with mheap.lock
3233
spanBytesAlloc uint64 // bytes of spans allocated this cycle; updated atomically
3334
pagesSwept uint64 // pages swept this cycle; updated atomically
3435
sweepPagesPerByte float64 // proportional sweep ratio; written with lock, read without
36+
// TODO(austin): pagesInUse should be a uintptr, but the 386
37+
// compiler can't 8-byte align fields.
3538

3639
// Malloc stats.
3740
largefree uint64 // bytes freed for large objects (>maxsmallsize)
@@ -447,6 +450,7 @@ func mHeap_Alloc_m(h *mheap, npage uintptr, sizeclass int32, large bool) *mspan
447450
}
448451

449452
// update stats, sweep lists
453+
h.pagesInUse += uint64(npage)
450454
if large {
451455
memstats.heap_objects++
452456
memstats.heap_live += uint64(npage << _PageShift)
@@ -614,6 +618,8 @@ func bestFit(list *mspan, npage uintptr, best *mspan) *mspan {
614618

615619
// Try to add at least npage pages of memory to the heap,
616620
// returning whether it worked.
621+
//
622+
// h must be locked.
617623
func mHeap_Grow(h *mheap, npage uintptr) bool {
618624
// Ask for a big chunk, to reduce the number of mappings
619625
// the operating system needs to track; also amortizes
@@ -648,6 +654,7 @@ func mHeap_Grow(h *mheap, npage uintptr) bool {
648654
}
649655
atomicstore(&s.sweepgen, h.sweepgen)
650656
s.state = _MSpanInUse
657+
h.pagesInUse += uint64(npage)
651658
mHeap_FreeSpanLocked(h, s, false, true, 0)
652659
return true
653660
}
@@ -728,6 +735,7 @@ func mHeap_FreeSpanLocked(h *mheap, s *mspan, acctinuse, acctidle bool, unusedsi
728735
print("MHeap_FreeSpanLocked - span ", s, " ptr ", hex(s.start<<_PageShift), " ref ", s.ref, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
729736
throw("MHeap_FreeSpanLocked - invalid free")
730737
}
738+
h.pagesInUse -= uint64(s.npages)
731739
default:
732740
throw("MHeap_FreeSpanLocked - invalid span state")
733741
}

0 commit comments

Comments
 (0)