
Commit 81640ea

runtime: add page cache and tests

This change adds a page cache structure which owns a chunk of free pages
at a given base address. It also adds code to allocate to this cache from
the page allocator. Finally, it adds tests for both. Notably, this change
does not yet integrate the code into the runtime, just into runtime tests.

Updates #35112.

Change-Id: Ibe121498d5c3be40390fab58a3816295601670df
Reviewed-on: https://go-review.googlesource.com/c/go/+/196643
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
1 parent c444ec3 commit 81640ea

4 files changed, +552 -0 lines


src/runtime/export_test.go (+22)
@@ -684,6 +684,25 @@ func (d *PallocData) Scavenged() *PallocBits {
 // Expose fillAligned for testing.
 func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }
 
+// Expose pageCache for testing.
+type PageCache pageCache
+
+const PageCachePages = pageCachePages
+
+func NewPageCache(base uintptr, cache, scav uint64) PageCache {
+	return PageCache(pageCache{base: base, cache: cache, scav: scav})
+}
+func (c *PageCache) Empty() bool   { return (*pageCache)(c).empty() }
+func (c *PageCache) Base() uintptr { return (*pageCache)(c).base }
+func (c *PageCache) Cache() uint64 { return (*pageCache)(c).cache }
+func (c *PageCache) Scav() uint64  { return (*pageCache)(c).scav }
+func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
+	return (*pageCache)(c).alloc(npages)
+}
+func (c *PageCache) Flush(s *PageAlloc) {
+	(*pageCache)(c).flush((*pageAlloc)(s))
+}
+
 // Expose chunk index type.
 type ChunkIdx chunkIdx
 
@@ -694,6 +713,9 @@ type PageAlloc pageAlloc
 func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
 	return (*pageAlloc)(p).alloc(npages)
 }
+func (p *PageAlloc) AllocToCache() PageCache {
+	return PageCache((*pageAlloc)(p).allocToCache())
+}
 func (p *PageAlloc) Free(base, npages uintptr) {
 	(*pageAlloc)(p).free(base, npages)
 }
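
These shims let tests in package runtime_test drive the new structures directly. As a rough illustration, a test could construct a cache by hand and watch the bitmaps drain. This is a hypothetical sketch, not part of this commit, and it assumes export_test.go also exposes PageSize (as the page-allocator tests rely on):

package runtime_test

import (
	"testing"

	. "runtime"
)

// TestPageCacheSketch is a hypothetical example, not part of this CL.
// It builds a PageCache by hand and checks the single-page alloc path.
func TestPageCacheSketch(t *testing.T) {
	base := uintptr(PageSize * 64) // an arbitrary 64-page-aligned base
	// Pages 0 and 1 are free; page 1 is also marked scavenged.
	c := NewPageCache(base, 0b11, 0b10)
	if c.Empty() {
		t.Fatal("cache with free pages reported empty")
	}
	if addr, scav := c.Alloc(1); addr != base || scav != 0 {
		t.Errorf("first alloc: got (%#x, %d), want (%#x, 0)", addr, scav, base)
	}
	if addr, scav := c.Alloc(1); addr != base+PageSize || scav != PageSize {
		t.Errorf("second alloc: got (%#x, %d)", addr, scav)
	}
	if !c.Empty() {
		t.Error("cache should be empty once both pages are allocated")
	}
}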

src/runtime/mpagecache.go (+154, new file)
@@ -0,0 +1,154 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"math/bits"
	"unsafe"
)

const pageCachePages = 8 * unsafe.Sizeof(pageCache{}.cache)

// pageCache represents a per-p cache of pages the allocator can
// allocate from without a lock. More specifically, it represents
// a pageCachePages*pageSize chunk of memory with 0 or more free
// pages in it.
type pageCache struct {
	base  uintptr // base address of the chunk
	cache uint64  // 64-bit bitmap representing free pages (1 means free)
	scav  uint64  // 64-bit bitmap representing scavenged pages (1 means scavenged)
}
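
Because cache is a single uint64, pageCachePages evaluates to 64: one bit per page, covering a 64-page window starting at base. A standalone sketch of the encoding follows (hypothetical helper, not part of the commit; the 8 KiB page size is just an example value):

package main

import "fmt"

// freePages lists the page addresses whose bits are set in a cache
// bitmap: bit i of cache corresponds to the page at base + i*pageSize.
func freePages(base uintptr, cache uint64, pageSize uintptr) []uintptr {
	var pages []uintptr
	for i := uintptr(0); i < 64; i++ {
		if cache&(1<<i) != 0 {
			pages = append(pages, base+i*pageSize)
		}
	}
	return pages
}

func main() {
	// Pages 0 and 2 free in a chunk based at 0x100000, with 8 KiB pages.
	fmt.Printf("%#x\n", freePages(0x100000, 0b101, 8192))
	// Output: [0x100000 0x104000]
}
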
// empty returns true if the pageCache has no free pages, and false
// otherwise.
func (c *pageCache) empty() bool {
	return c.cache == 0
}

// alloc allocates npages from the page cache and is the main entry
// point for allocation.
//
// Returns a base address and the amount of scavenged memory in the
// allocated region in bytes.
//
// Returns a base address of zero on failure, in which case the
// amount of scavenged memory should be ignored.
func (c *pageCache) alloc(npages uintptr) (uintptr, uintptr) {
	if c.cache == 0 {
		return 0, 0
	}
	if npages == 1 {
		i := uintptr(bits.TrailingZeros64(c.cache))
		scav := (c.scav >> i) & 1
		c.cache &^= 1 << i // clear free bit to mark in-use
		c.scav &^= 1 << i  // clear scav bit to mark unscavenged
		return c.base + i*pageSize, uintptr(scav) * pageSize
	}
	return c.allocN(npages)
}

// allocN is a helper which attempts to allocate npages worth of pages
// from the cache. It represents the general case for allocating from
// the page cache.
//
// Returns a base address and the amount of scavenged memory in the
// allocated region in bytes.
func (c *pageCache) allocN(npages uintptr) (uintptr, uintptr) {
	i := findBitRange64(c.cache, uint(npages))
	if i >= 64 {
		return 0, 0
	}
	mask := ((uint64(1) << npages) - 1) << i
	scav := bits.OnesCount64(c.scav & mask)
	c.cache &^= mask // mark in-use bits
	c.scav &^= mask  // clear scavenged bits
	return c.base + uintptr(i*pageSize), uintptr(scav) * pageSize
}
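
To make the mask arithmetic in allocN concrete: for a run of npages bits starting at bit i, ((1<<npages)-1)<<i selects exactly that range, and popcounting the scavenged bitmap under that mask yields the scavenged-page count. A quick standalone check (hypothetical snippet, not from the CL):

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// Suppose findBitRange64 found a 3-page run starting at bit 5.
	npages, i := uint64(3), uint64(5)
	mask := ((uint64(1) << npages) - 1) << i
	fmt.Printf("%#b\n", mask) // 0b11100000: bits 5, 6, and 7

	// If pages 5 and 7 happen to be scavenged, two of the allocated
	// pages were scavenged, so the caller is charged 2*pageSize bytes.
	scav := uint64(0b10100000)
	fmt.Println(bits.OnesCount64(scav & mask)) // 2
}
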
// flush empties out unallocated free pages in the given cache
// into s. Then, it clears the cache, such that empty returns
// true.
//
// s.mheapLock must be held or the world must be stopped.
func (c *pageCache) flush(s *pageAlloc) {
	if c.empty() {
		return
	}
	ci := chunkIndex(c.base)
	pi := chunkPageIndex(c.base)

	// This method is called very infrequently, so just do the
	// slower, safer thing by iterating over each bit individually.
	for i := uint(0); i < 64; i++ {
		if c.cache&(1<<i) != 0 {
			s.chunks[ci].free1(pi + i)
		}
		if c.scav&(1<<i) != 0 {
			s.chunks[ci].scavenged.setRange(pi+i, 1)
		}
	}
	// Since this is a lot like a free, we need to make sure
	// we update the searchAddr just like free does.
	if s.compareSearchAddrTo(c.base) < 0 {
		s.searchAddr = c.base
	}
	s.update(c.base, pageCachePages, false, false)
	*c = pageCache{}
}

// allocToCache acquires a pageCachePages-aligned chunk of free pages which
// may not be contiguous, and returns a pageCache structure which owns the
// chunk.
//
// s.mheapLock must be held.
func (s *pageAlloc) allocToCache() pageCache {
	// If the searchAddr refers to a region which has a higher address than
	// any known chunk, then we know we're out of memory.
	if chunkIndex(s.searchAddr) >= s.end {
		return pageCache{}
	}
	c := pageCache{}
	ci := chunkIndex(s.searchAddr) // chunk index
	if s.summary[len(s.summary)-1][ci] != 0 {
		// Fast path: there are free pages at or near the searchAddr.
		j, _ := s.chunks[ci].find(1, chunkPageIndex(s.searchAddr))
		if j < 0 {
			throw("bad summary data")
		}
		c = pageCache{
			base:  chunkBase(ci) + alignDown(uintptr(j), 64)*pageSize,
			cache: ^s.chunks[ci].pages64(j),
			scav:  s.chunks[ci].scavenged.block64(j),
		}
	} else {
		// Slow path: there was nothing at the searchAddr, so go find
		// the first free page the slow way.
		addr, _ := s.find(1)
		if addr == 0 {
			// We failed to find adequate free space, so mark the searchAddr as OoM
			// and return an empty pageCache.
			s.searchAddr = maxSearchAddr
			return pageCache{}
		}
		ci := chunkIndex(addr)
		c = pageCache{
			base:  alignDown(addr, 64*pageSize),
			cache: ^s.chunks[ci].pages64(chunkPageIndex(addr)),
			scav:  s.chunks[ci].scavenged.block64(chunkPageIndex(addr)),
		}
	}

	// Set the bits as allocated and clear the scavenged bits.
	s.allocRange(c.base, pageCachePages)

	// Update as an allocation, but note that it's not contiguous.
	s.update(c.base, pageCachePages, false, true)

	// We're always searching for the first free page, and we know that up to
	// pageCachePages bits will be allocated, so we can always move the
	// searchAddr past the cache.
	s.searchAddr = c.base + pageSize*pageCachePages
	return c
}
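
End to end, the lifecycle this commit enables is: pull a 64-page window out of the page allocator, satisfy allocations from it without a lock, then flush the remainder back. Per the commit message this isn't wired into the runtime yet; the following is a hypothetical sketch against the exported test shims above, not code from the CL:

// pageCacheLifecycle is a hypothetical sketch, not part of this CL.
func pageCacheLifecycle(p *PageAlloc) {
	// Acquire a pageCachePages-sized window of free pages (inside the
	// runtime this happens with the heap lock held).
	c := p.AllocToCache()

	// Satisfy single-page allocations from the cache without locks.
	for !c.Empty() {
		addr, scav := c.Alloc(1)
		if addr == 0 {
			break
		}
		_ = scav // bytes of scavenged memory in the returned page
		// ... hand the page at addr to the caller ...
	}

	// Return whatever pages remain unallocated to the page allocator.
	c.Flush(p)
}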
