Commit ae4534e
runtime: ensure heap memstats are updated atomically
For the most part, heap memstats are already updated atomically when
passed down to OS-level memory functions (e.g. sysMap). Elsewhere,
however, they're updated with the heap lock.

In order to facilitate holding the heap lock for less time during
allocation paths, this change more consistently makes the update of
these statistics atomic by calling mSysStat{Inc,Dec} appropriately
instead of simply adding or subtracting. It also ensures these values
are loaded atomically.

Furthermore, an undocumented but safe update condition for these
memstats is during STW, at which point using atomics is unnecessary.
This change also documents this condition in mstats.go.

Updates #35112.

Change-Id: I87d0b6c27b98c88099acd2563ea23f8da1239b66
Reviewed-on: https://go-review.googlesource.com/c/go/+/196638
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
1 parent 814c505 commit ae4534e
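To make the diffs below easier to read, here is a minimal, user-space sketch of what the mSysStat{Inc,Dec} helpers amount to. The real versions live in src/runtime/mstats.go and use runtime/internal/atomic (with extra handling for 32-bit platforms and overflow checks); this sketch uses the exported sync/atomic package as a stand-in, and the counter in main is illustrative, not runtime code.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// mSysStatInc sketches the runtime helper: a single atomic
// read-modify-write instead of a plain += under the heap lock.
func mSysStatInc(sysStat *uint64, n uintptr) {
	atomic.AddUint64(sysStat, uint64(n))
}

// mSysStatDec sketches the decrement side. Adding the
// two's-complement negation subtracts n atomically.
func mSysStatDec(sysStat *uint64, n uintptr) {
	atomic.AddUint64(sysStat, ^uint64(n-1))
}

func main() {
	var heapReleased uint64
	mSysStatInc(&heapReleased, 2*8192) // e.g. release two 8 KiB pages
	mSysStatDec(&heapReleased, 8192)   // one page is put back to use
	fmt.Println(heapReleased)          // 8192
}
```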

3 files changed (+19, -18)

src/runtime/mgcscavenge.go (3 additions, 4 deletions)

```diff
@@ -57,6 +57,7 @@ package runtime
 
 import (
 	"math/bits"
+	"runtime/internal/atomic"
 	"unsafe"
 )
 
@@ -78,10 +79,8 @@ const (
 )
 
 // heapRetained returns an estimate of the current heap RSS.
-//
-// mheap_.lock must be held or the world must be stopped.
 func heapRetained() uint64 {
-	return memstats.heap_sys - memstats.heap_released
+	return atomic.Load64(&memstats.heap_sys) - atomic.Load64(&memstats.heap_released)
 }
 
 // gcPaceScavenger updates the scavenger's pacing, particularly
@@ -489,7 +488,7 @@ func (s *pageAlloc) scavengeRangeLocked(ci chunkIdx, base, npages uint) {
 
 	// Update global accounting only when not in test, otherwise
 	// the runtime's accounting will be wrong.
-	memstats.heap_released += uint64(npages) * pageSize
+	mSysStatInc(&memstats.heap_released, uintptr(npages)*pageSize)
 }
 
 // fillAligned returns x but with all zeroes in m-aligned
```
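The heapRetained hunk swaps plain 64-bit loads for atomic.Load64, which lets the function run without the heap lock. A minimal user-space sketch of the same pattern, using sync/atomic as a stand-in for runtime/internal/atomic; the heapStats type and its field names are illustrative, not the runtime's:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// heapStats mirrors the shape of memstats.heap_sys and
// memstats.heap_released for illustration only.
type heapStats struct {
	heapSys      uint64
	heapReleased uint64
}

// retained mirrors the patched heapRetained: each counter is loaded
// atomically, so a concurrent updater can never expose a torn 64-bit
// value (a real hazard for plain loads on 32-bit platforms).
func (s *heapStats) retained() uint64 {
	return atomic.LoadUint64(&s.heapSys) - atomic.LoadUint64(&s.heapReleased)
}

func main() {
	s := &heapStats{}
	atomic.AddUint64(&s.heapSys, 64<<20)     // pretend 64 MiB mapped for the heap
	atomic.AddUint64(&s.heapReleased, 8<<20) // pretend 8 MiB returned to the OS
	fmt.Println(s.retained()>>20, "MiB")     // 56 MiB
}
```

Note that the two loads are not a single atomic snapshot; that is acceptable here because heapRetained is documented as returning an estimate, which is why the old "mheap_.lock must be held" requirement could be deleted.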

src/runtime/mheap.go (12 additions, 10 deletions)

```diff
@@ -1003,7 +1003,7 @@ func (h *mheap) allocManual(npage uintptr, stat *uint64) *mspan {
 		s.limit = s.base() + s.npages<<_PageShift
 		s.state.set(mSpanManual) // Publish the span
 		// Manually managed memory doesn't count toward heap_sys.
-		memstats.heap_sys -= uint64(s.npages << _PageShift)
+		mSysStatDec(&memstats.heap_sys, s.npages*pageSize)
 	}
 
 	// This unlock acts as a release barrier. See mheap.alloc_m.
@@ -1113,7 +1113,7 @@ HaveBase:
 		// sysUsed all the pages that are actually available
 		// in the span.
 		sysUsed(unsafe.Pointer(base), npage*pageSize)
-		memstats.heap_released -= uint64(scav)
+		mSysStatDec(&memstats.heap_released, scav)
 	}
 
 	s := (*mspan)(h.spanalloc.alloc())
@@ -1123,8 +1123,10 @@ HaveBase:
 	}
 	h.setSpans(s.base(), npage, s)
 
-	*stat += uint64(npage << _PageShift)
-	memstats.heap_idle -= uint64(npage << _PageShift)
+	// Update stats.
+	nbytes := npage * pageSize
+	mSysStatInc(stat, nbytes)
+	mSysStatDec(&memstats.heap_idle, nbytes)
 
 	return s
 }
@@ -1172,8 +1174,8 @@ func (h *mheap) grow(npage uintptr) bool {
 		// The allocation is always aligned to the heap arena
 		// size which is always > physPageSize, so its safe to
 		// just add directly to heap_released.
-		memstats.heap_released += uint64(asize)
-		memstats.heap_idle += uint64(asize)
+		mSysStatInc(&memstats.heap_released, asize)
+		mSysStatInc(&memstats.heap_idle, asize)
 
 		// Recalculate nBase
 		nBase = alignUp(h.curArena.base+ask, physPageSize)
@@ -1237,8 +1239,8 @@ func (h *mheap) freeSpan(s *mspan) {
 func (h *mheap) freeManual(s *mspan, stat *uint64) {
 	s.needzero = 1
 	lock(&h.lock)
-	*stat -= uint64(s.npages << _PageShift)
-	memstats.heap_sys += uint64(s.npages << _PageShift)
+	mSysStatDec(stat, s.npages*pageSize)
+	mSysStatInc(&memstats.heap_sys, s.npages*pageSize)
 	h.freeSpanLocked(s, false, true)
 	unlock(&h.lock)
 }
@@ -1264,10 +1266,10 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool) {
 	}
 
 	if acctinuse {
-		memstats.heap_inuse -= uint64(s.npages << _PageShift)
+		mSysStatDec(&memstats.heap_inuse, s.npages*pageSize)
 	}
 	if acctidle {
-		memstats.heap_idle += uint64(s.npages << _PageShift)
+		mSysStatInc(&memstats.heap_idle, s.npages*pageSize)
 	}
 
 	// Mark the space as free.
```
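Alongside the switch to atomics, these hunks also rewrite expressions like npage << _PageShift as npage * pageSize. Since the runtime defines pageSize as 1 << _PageShift, the two forms compute the same byte count; a quick sanity check with local stand-in constants (the runtime's _PageShift is commonly 13, i.e. 8 KiB pages):

```go
package main

import "fmt"

// Local stand-ins for the runtime's page-size constants.
const (
	_PageShift = 13
	pageSize   = 1 << _PageShift
)

func main() {
	for _, npage := range []uintptr{1, 5, 1024} {
		// Multiplying by the page size equals shifting by the page shift.
		fmt.Println(npage*pageSize == npage<<_PageShift) // true, true, true
	}
}
```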

src/runtime/mstats.go (4 additions, 4 deletions)

```diff
@@ -31,7 +31,7 @@ type mstats struct {
 	nfree uint64 // number of frees
 
 	// Statistics about malloc heap.
-	// Protected by mheap.lock
+	// Updated atomically, or with the world stopped.
 	//
 	// Like MemStats, heap_sys and heap_inuse do not count memory
 	// in manually-managed spans.
@@ -47,15 +47,15 @@ type mstats struct {
 
 	// Statistics about allocation of low-level fixed-size structures.
 	// Protected by FixAlloc locks.
-	stacks_inuse uint64 // bytes in manually-managed stack spans
+	stacks_inuse uint64 // bytes in manually-managed stack spans; updated atomically or during STW
 	stacks_sys   uint64 // only counts newosproc0 stack in mstats; differs from MemStats.StackSys
 	mspan_inuse  uint64 // mspan structures
 	mspan_sys    uint64
 	mcache_inuse uint64 // mcache structures
 	mcache_sys   uint64
 	buckhash_sys uint64 // profiling bucket hash table
-	gc_sys       uint64
-	other_sys    uint64
+	gc_sys       uint64 // updated atomically or during STW
+	other_sys    uint64 // updated atomically or during STW
 
 	// Statistics about garbage collector.
 	// Protected by mheap or stopping the world during GC.
```
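The new "updated atomically, or with the world stopped" rule rests on the fact that no other goroutine can run, and hence touch the stats, while the world is stopped. Here is a user-space analogy of that reasoning (a sketch, not runtime code), with WaitGroup.Wait standing in for stopping the world: while writers are running, updates must be atomic; once every writer has stopped (Wait establishes happens-before), a plain access to the same word is race-free.

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var stat uint64
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				atomic.AddUint64(&stat, 1) // concurrent phase: atomics required
			}
		}()
	}
	wg.Wait()         // "world stopped": no concurrent writers remain
	stat += 100       // plain update is now safe without atomics
	fmt.Println(stat) // 4100
}
```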
