@@ -30,13 +30,11 @@ const minPhysPageSize = 4096
 //go:notinheap
 type mheap struct {
 	lock      mutex
-	free      [_MaxMHeapList]mSpanList // free lists of given length up to _MaxMHeapList
-	freelarge mTreap                   // free treap of length >= _MaxMHeapList
-	busy      [_MaxMHeapList]mSpanList // busy lists of large spans of given length
-	busylarge mSpanList                // busy lists of large spans length >= _MaxMHeapList
-	sweepgen  uint32                   // sweep generation, see comment in mspan
-	sweepdone uint32                   // all spans are swept
-	sweepers  uint32                   // number of active sweepone calls
+	free      mTreap    // free treap of spans
+	busy      mSpanList // busy list of spans
+	sweepgen  uint32    // sweep generation, see comment in mspan
+	sweepdone uint32    // all spans are swept
+	sweepers  uint32    // number of active sweepone calls

 	// allspans is a slice of all mspans ever created. Each mspan
 	// appears exactly once.
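The struct change above is the heart of the commit: the fixed-size `free` array of lists plus the `freelarge` treap collapse into one `mTreap` keyed by span length, and the two busy structures collapse into one list. What the treap must support is a best-fit lookup: `h.free.remove(npage)` returns the smallest free span with at least `npage` pages. The sketch below is a minimal, hypothetical stand-in for that operation, a plain size-ordered BST; the runtime's real `mTreap` additionally carries random priorities and rotates on insert to stay balanced.

```go
package main

import "fmt"

// span stands in for the runtime's mspan; only the page count matters here.
type span struct {
	npages uintptr
}

// node is one tree node. The real treap also stores a random priority;
// that balancing machinery is omitted from this sketch.
type node struct {
	s           *span
	left, right *node
}

type bestFitTree struct {
	root *node
}

// insert adds a free span, ordered by page count.
func (t *bestFitTree) insert(s *span) {
	link := &t.root
	for *link != nil {
		if s.npages < (*link).s.npages {
			link = &(*link).left
		} else {
			link = &(*link).right
		}
	}
	*link = &node{s: s}
}

// remove finds and unlinks the smallest span with at least npages pages,
// mirroring what h.free.remove(npage) does for allocSpanLocked.
// It returns nil if no span is large enough.
func (t *bestFitTree) remove(npages uintptr) *span {
	// Walk down, remembering the best (smallest sufficient) candidate.
	var bestLink **node
	link := &t.root
	for *link != nil {
		if (*link).s.npages < npages {
			link = &(*link).right // too small: only larger spans can fit
		} else {
			bestLink = link // fits: remember it, then look for a tighter fit
			link = &(*link).left
		}
	}
	if bestLink == nil {
		return nil
	}
	// Unlink the chosen node (standard BST deletion).
	n := *bestLink
	switch {
	case n.left == nil:
		*bestLink = n.right
	case n.right == nil:
		*bestLink = n.left
	default:
		// Replace with the successor: leftmost node of the right subtree.
		succLink := &n.right
		for (*succLink).left != nil {
			succLink = &(*succLink).left
		}
		succ := *succLink
		*succLink = succ.right
		succ.left, succ.right = n.left, n.right
		*bestLink = succ
	}
	return n.s
}

func main() {
	var t bestFitTree
	for _, np := range []uintptr{8, 3, 32, 5} {
		t.insert(&span{npages: np})
	}
	fmt.Println(t.remove(4).npages) // best fit for 4 pages: the 5-page span
	fmt.Println(t.remove(64))       // nothing large enough: <nil>
}
```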
@@ -599,12 +597,7 @@ func (h *mheap) init() {
 	h.spanalloc.zero = false

 	// h->mapcache needs no init
-	for i := range h.free {
-		h.free[i].init()
-		h.busy[i].init()
-	}
-
-	h.busylarge.init()
+	h.busy.init()

 	for i := range h.central {
 		h.central[i].mcentral.init(spanClass(i))
 	}
@@ -647,30 +640,12 @@ retry:
 // Sweeps and reclaims at least npage pages into heap.
 // Called before allocating npage pages.
 func (h *mheap) reclaim(npage uintptr) {
-	// First try to sweep busy spans with large objects of size >= npage,
-	// this has good chances of reclaiming the necessary space.
-	for i := int(npage); i < len(h.busy); i++ {
-		if h.reclaimList(&h.busy[i], npage) != 0 {
-			return // Bingo!
-		}
-	}
-
-	// Then -- even larger objects.
-	if h.reclaimList(&h.busylarge, npage) != 0 {
+	if h.reclaimList(&h.busy, npage) != 0 {
 		return // Bingo!
 	}

-	// Now try smaller objects.
-	// One such object is not enough, so we need to reclaim several of them.
-	reclaimed := uintptr(0)
-	for i := 0; i < int(npage) && i < len(h.busy); i++ {
-		reclaimed += h.reclaimList(&h.busy[i], npage-reclaimed)
-		if reclaimed >= npage {
-			return
-		}
-	}
-
 	// Now sweep everything that is not yet swept.
+	var reclaimed uintptr
 	unlock(&h.lock)
 	for {
 		n := sweepone()
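The old `reclaim` made up to three passes over the size-bucketed busy lists: exact-or-larger buckets, then `busylarge`, then smaller buckets while accumulating into `reclaimed`. With a single busy list, one `reclaimList` call subsumes all three, and `reclaimed` only survives to meter the sweep-everything fallback. Below is a runnable sketch of the resulting two-phase shape, using hypothetical stand-ins (slices of per-span page counts) rather than the runtime's span lists and `sweepone`.

```go
package main

import "fmt"

// heap is a hypothetical stand-in: busy holds the pages reclaimable from
// the single busy list, rest what sweeping the remainder of the heap frees.
type heap struct {
	busy []uintptr // pages freed per span on the busy list
	rest []uintptr // pages freed per span by the global sweeper
}

// reclaim mirrors the post-change shape: one pass over the busy list,
// then sweep span by span until npage pages have been reclaimed.
func (h *heap) reclaim(npage uintptr) uintptr {
	if n := h.reclaimList(npage); n != 0 {
		return n // Bingo! the busy list alone covered it
	}
	var reclaimed uintptr
	for reclaimed < npage && len(h.rest) > 0 {
		reclaimed += h.rest[0] // plays the role of sweepone()
		h.rest = h.rest[1:]
	}
	return reclaimed
}

// reclaimList frees spans off the busy list until npage pages are
// reclaimed; simplified so that 0 means "did not cover the request".
func (h *heap) reclaimList(npage uintptr) uintptr {
	var freed uintptr
	for len(h.busy) > 0 && freed < npage {
		freed += h.busy[0]
		h.busy = h.busy[1:]
	}
	if freed < npage {
		return 0
	}
	return freed
}

func main() {
	h := &heap{busy: []uintptr{8}, rest: []uintptr{4, 4}}
	fmt.Println(h.reclaim(8)) // 8: the busy list covered it
	fmt.Println(h.reclaim(8)) // 8: busy list now empty, sweeping covers it
}
```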
@@ -752,11 +727,7 @@ func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan {
 			mheap_.nlargealloc++
 			atomic.Xadd64(&memstats.heap_live, int64(npage<<_PageShift))
 			// Swept spans are at the end of lists.
-			if s.npages < uintptr(len(h.busy)) {
-				h.busy[s.npages].insertBack(s)
-			} else {
-				h.busylarge.insertBack(s)
-			}
+			h.busy.insertBack(s)
 		}
 	}
 	// heap_scan and heap_live were updated.
@@ -867,31 +838,20 @@ func (h *mheap) setSpans(base, npage uintptr, s *mspan) {
 // The returned span has been removed from the
 // free list, but its state is still mSpanFree.
 func (h *mheap) allocSpanLocked(npage uintptr, stat *uint64) *mspan {
-	var list *mSpanList
 	var s *mspan

-	// Try in fixed-size lists up to max.
-	for i := int(npage); i < len(h.free); i++ {
-		list = &h.free[i]
-		if !list.isEmpty() {
-			s = list.first
-			list.remove(s)
-			goto HaveSpan
-		}
-	}
-	// Best fit in list of large spans.
-	s = h.allocLarge(npage) // allocLarge removed s from h.freelarge for us
+	// Best fit in the treap of spans.
+	s = h.free.remove(npage)
 	if s == nil {
 		if !h.grow(npage) {
 			return nil
 		}
-		s = h.allocLarge(npage)
+		s = h.free.remove(npage)
 		if s == nil {
 			return nil
 		}
 	}

-HaveSpan:
 	// Mark span in use.
 	if s.state != mSpanFree {
 		throw("MHeap_AllocLocked - MSpan not free")
@@ -933,21 +893,6 @@ HaveSpan:
 	return s
 }

-// Large spans have a minimum size of 1MByte. The maximum number of large spans to support
-// 1TBytes is 1 million, experimentation using random sizes indicates that the depth of
-// the tree is less that 2x that of a perfectly balanced tree. For 1TByte can be referenced
-// by a perfectly balanced tree with a depth of 20. Twice that is an acceptable 40.
-func (h *mheap) isLargeSpan(npages uintptr) bool {
-	return npages >= uintptr(len(h.free))
-}
-
-// allocLarge allocates a span of at least npage pages from the treap of large spans.
-// Returns nil if no such span currently exists.
-func (h *mheap) allocLarge(npage uintptr) *mspan {
-	// Search treap for smallest span with >= npage pages.
-	return h.freelarge.remove(npage)
-}
-
 // Try to add at least npage pages of memory to the heap,
 // returning whether it worked.
 //
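The deleted comment above `isLargeSpan` is the sizing argument for keeping free spans in a treap at all, and its arithmetic is worth restating cleanly: large spans were at least 1 MB, so a 1 TB heap holds at most 2^40 / 2^20 = 2^20, about one million, of them; a perfectly balanced binary tree over 2^20 nodes has depth 20, and the comment reports experiments showing a randomized treap stays within about twice that, i.e. depth roughly 40. A quick check of those numbers:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	const spanBytes = 1 << 20 // minimum large-span size: 1 MB
	const heapBytes = 1 << 40 // heap size the comment budgets for: 1 TB
	spans := float64(heapBytes / spanBytes)
	balanced := math.Log2(spans) // depth of a perfectly balanced tree
	fmt.Println(spans, balanced, 2*balanced) // 1.048576e+06 20 40
}
```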
@@ -1023,7 +968,7 @@ func (h *mheap) freeManual(s *mspan, stat *uint64) {
 	unlock(&h.lock)
 }

-// s must be on a busy list (h.busy or h.busylarge) or unlinked.
+// s must be on the busy list or unlinked.
 func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince int64) {
 	switch s.state {
 	case mSpanManual:
@@ -1048,7 +993,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
 	}
 	s.state = mSpanFree
 	if s.inList() {
-		h.busyList(s.npages).remove(s)
+		h.busy.remove(s)
 	}

 	// Stamp newly unused spans. The scavenger will use that
@@ -1069,12 +1014,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
 		h.setSpan(before.base(), s)
 		// The size is potentially changing so the treap needs to delete adjacent nodes and
 		// insert back as a combined node.
-		if h.isLargeSpan(before.npages) {
-			// We have a t, it is large so it has to be in the treap so we can remove it.
-			h.freelarge.removeSpan(before)
-		} else {
-			h.freeList(before.npages).remove(before)
-		}
+		h.free.removeSpan(before)
 		before.state = mSpanDead
 		h.spanalloc.free(unsafe.Pointer(before))
 	}
@@ -1085,32 +1025,13 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
 		s.npreleased += after.npreleased
 		s.needzero |= after.needzero
 		h.setSpan(s.base()+s.npages*pageSize-1, s)
-		if h.isLargeSpan(after.npages) {
-			h.freelarge.removeSpan(after)
-		} else {
-			h.freeList(after.npages).remove(after)
-		}
+		h.free.removeSpan(after)
 		after.state = mSpanDead
 		h.spanalloc.free(unsafe.Pointer(after))
 	}

-	// Insert s into appropriate list or treap.
-	if h.isLargeSpan(s.npages) {
-		h.freelarge.insert(s)
-	} else {
-		h.freeList(s.npages).insert(s)
-	}
-}
-
-func (h *mheap) freeList(npages uintptr) *mSpanList {
-	return &h.free[npages]
-}
-
-func (h *mheap) busyList(npages uintptr) *mSpanList {
-	if npages < uintptr(len(h.busy)) {
-		return &h.busy[npages]
-	}
-	return &h.busylarge
+	// Insert s into the free treap.
+	h.free.insert(s)
 }

 func scavengeTreapNode(t *treapNode, now, limit uint64) uintptr {
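With every free span in one treap, `freeSpanLocked`'s coalescing no longer branches on where a neighbor lives: remove each free neighbor from the treap, fold it into `s`, and insert the combined span. The sketch below reproduces just that merge step over a hypothetical free set keyed by base address; the runtime actually finds neighbors through the span map maintained by `setSpan`, not a map like this.

```go
package main

import "fmt"

// span is a hypothetical stand-in owning npages contiguous pages starting
// at base (page size 1 for simplicity).
type span struct{ base, npages uintptr }

// freeSet stands in for h.free: free spans indexed by base address.
type freeSet map[uintptr]*span

// coalesce mirrors the shape of freeSpanLocked's merge step: absorb a free
// neighbor on each side into s, removing the neighbor from the free set,
// then insert the combined span.
func (f freeSet) coalesce(s *span) {
	// Merge with a free span ending exactly where s begins.
	for base, before := range f {
		if base+before.npages == s.base {
			s.base = before.base
			s.npages += before.npages
			delete(f, base) // h.free.removeSpan(before)
			break
		}
	}
	// Merge with a free span starting exactly where s ends.
	if after, ok := f[s.base+s.npages]; ok {
		s.npages += after.npages
		delete(f, after.base) // h.free.removeSpan(after)
	}
	f[s.base] = s // h.free.insert(s)
}

func main() {
	f := freeSet{}
	f.coalesce(&span{base: 0, npages: 4})
	f.coalesce(&span{base: 8, npages: 4})
	f.coalesce(&span{base: 4, npages: 4}) // bridges both neighbors
	for _, s := range f {
		fmt.Println(s.base, s.npages) // 0 12
	}
}
```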
@@ -1123,33 +1044,14 @@ func scavengeTreapNode(t *treapNode, now, limit uint64) uintptr {
 	return 0
 }

-func scavengelist(list *mSpanList, now, limit uint64) uintptr {
-	if list.isEmpty() {
-		return 0
-	}
-
-	var sumreleased uintptr
-	for s := list.first; s != nil; s = s.next {
-		if (now-uint64(s.unusedsince)) <= limit || s.npreleased == s.npages {
-			continue
-		}
-		sumreleased += s.scavenge()
-	}
-	return sumreleased
-}
-
 func (h *mheap) scavenge(k int32, now, limit uint64) {
 	// Disallow malloc or panic while holding the heap lock. We do
 	// this here because this is an non-mallocgc entry-point to
 	// the mheap API.
 	gp := getg()
 	gp.m.mallocing++
 	lock(&h.lock)
-	var sumreleased uintptr
-	for i := 0; i < len(h.free); i++ {
-		sumreleased += scavengelist(&h.free[i], now, limit)
-	}
-	sumreleased += scavengetreap(h.freelarge.treap, now, limit)
+	sumreleased := scavengetreap(h.free.treap, now, limit)
 	unlock(&h.lock)
 	gp.m.mallocing--
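Scavenging likewise shrinks from a `scavengelist` pass over every bucket plus a treap walk to a single `scavengetreap` traversal. `scavengeTreapNode` (visible above) applies the same predicate the deleted `scavengelist` used: skip spans used within the last `limit` nanoseconds and spans whose pages are already fully released. A self-contained sketch of that recursive walk over a hypothetical node type:

```go
package main

import "fmt"

// treapNode is a hypothetical stand-in for the runtime's free-treap node;
// only the fields the scavenger consults are modeled.
type treapNode struct {
	left, right *treapNode
	unusedsince uint64  // when the span last stopped being used
	npages      uintptr // size of the span in pages
	npreleased  uintptr // pages already returned to the OS
}

// scavengeTreap visits every node and releases the pages of spans that
// have sat unused for longer than limit, mirroring scavengetreap +
// scavengeTreapNode: recurse into both children, apply the predicate
// at each node, and sum the released pages.
func scavengeTreap(t *treapNode, now, limit uint64) uintptr {
	if t == nil {
		return 0
	}
	sum := scavengeTreap(t.left, now, limit) + scavengeTreap(t.right, now, limit)
	if now-t.unusedsince > limit && t.npreleased != t.npages {
		sum += t.npages - t.npreleased // "scavenge": release the rest
		t.npreleased = t.npages
	}
	return sum
}

func main() {
	root := &treapNode{
		unusedsince: 0, npages: 8,
		left:  &treapNode{unusedsince: 90, npages: 4},
		right: &treapNode{unusedsince: 10, npages: 2, npreleased: 2},
	}
	// now=100, limit=50: the root is old enough and still releasable;
	// the left child is too recent; the right child is already released.
	fmt.Println(scavengeTreap(root, 100, 50)) // 8
}
```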