
Source file src/pkg/runtime/malloc.go

     1	// Copyright 2014 The Go Authors. All rights reserved.
     2	// Use of this source code is governed by a BSD-style
     3	// license that can be found in the LICENSE file.
     4	
     5	// Memory allocator.
     6	//
     7	// This was originally based on tcmalloc, but has diverged quite a bit.
     8	// http://goog-perftools.sourceforge.net/doc/tcmalloc.html
     9	
    10	// The main allocator works in runs of pages.
    11	// Small allocation sizes (up to and including 32 kB) are
    12	// rounded to one of about 70 size classes, each of which
    13	// has its own free set of objects of exactly that size.
    14	// Any free page of memory can be split into a set of objects
    15	// of one size class, which are then managed using a free bitmap.
    16	//
    17	// The allocator's data structures are:
    18	//
    19	//	fixalloc: a free-list allocator for fixed-size off-heap objects,
    20	//		used to manage storage used by the allocator.
    21	//	mheap: the malloc heap, managed at page (8192-byte) granularity.
    22	//	mspan: a run of pages managed by the mheap.
    23	//	mcentral: collects all spans of a given size class.
    24	//	mcache: a per-P cache of mspans with free space.
    25	//	mstats: allocation statistics.
    26	//
    27	// Allocating a small object proceeds up a hierarchy of caches:
    28	//
    29	//	1. Round the size up to one of the small size classes
    30	//	   and look in the corresponding mspan in this P's mcache.
    31	//	   Scan the mspan's free bitmap to find a free slot.
    32	//	   If there is a free slot, allocate it.
    33	//	   This can all be done without acquiring a lock.
    34	//
    35	//	2. If the mspan has no free slots, obtain a new mspan
    36	//	   from the mcentral's list of mspans of the required size
    37	//	   class that have free space.
    38	//	   Obtaining a whole span amortizes the cost of locking
    39	//	   the mcentral.
    40	//
    41	//	3. If the mcentral's mspan list is empty, obtain a run
    42	//	   of pages from the mheap to use for the mspan.
    43	//
    44	//	4. If the mheap is empty or has no page runs large enough,
    45	//	   allocate a new group of pages (at least 1MB) from the
    46	//	   operating system. Allocating a large run of pages
    47	//	   amortizes the cost of talking to the operating system.
    48	//
    49	// Sweeping an mspan and freeing objects on it proceeds up a similar
    50	// hierarchy:
    51	//
    52	//	1. If the mspan is being swept in response to allocation, it
    53	//	   is returned to the mcache to satisfy the allocation.
    54	//
    55	//	2. Otherwise, if the mspan still has allocated objects in it,
    56	//	   it is placed on the mcentral free list for the mspan's size
    57	//	   class.
    58	//
    59	//	3. Otherwise, if all objects in the mspan are free, the mspan
    60	//	   is now "idle", so it is returned to the mheap and no longer
    61	//	   has a size class.
    62	//	   This may coalesce it with adjacent idle mspans.
    63	//
    64	//	4. If an mspan remains idle for long enough, return its pages
    65	//	   to the operating system.
    66	//
    67	// Allocating and freeing a large object uses the mheap
    68	// directly, bypassing the mcache and mcentral.
    69	//
    70	// Free object slots in an mspan are zeroed only if mspan.needzero is
    71	// false. If needzero is true, objects are zeroed as they are
    72	// allocated. There are various benefits to delaying zeroing this way:
    73	//
    74	//	1. Stack frame allocation can avoid zeroing altogether.
    75	//
    76	//	2. It exhibits better temporal locality, since the program is
    77	//	   probably about to write to the memory.
    78	//
    79	//	3. We don't zero pages that never get reused.
    80	
    81	// Virtual memory layout
    82	//
    83	// The heap consists of a set of arenas, which are 64MB on 64-bit and
    84	// 4MB on 32-bit (heapArenaBytes). Each arena's start address is also
    85	// aligned to the arena size.
    86	//
    87	// Each arena has an associated heapArena object that stores the
    88	// metadata for that arena: the heap bitmap for all words in the arena
    89	// and the span map for all pages in the arena. heapArena objects are
    90	// themselves allocated off-heap.
    91	//
    92	// Since arenas are aligned, the address space can be viewed as a
    93	// series of arena frames. The arena map (mheap_.arenas) maps from
    94	// arena frame number to *heapArena, or nil for parts of the address
    95	// space not backed by the Go heap. The arena map is structured as a
    96	// two-level array consisting of an "L1" arena map and many "L2" arena
    97	// maps; however, since arenas are large, on many architectures, the
    98	// arena map consists of a single, large L2 map.
    99	//
   100	// The arena map covers the entire possible address space, allowing
   101	// the Go heap to use any part of the address space. The allocator
   102	// attempts to keep arenas contiguous so that large spans (and hence
   103	// large objects) can cross arenas.
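//
// For illustration only (a hedged sketch, not code from this file), the
// address-to-arena lookup implied by this layout, assuming the real helpers
// arenaIndex, arenaIdx.l1, and arenaIdx.l2 in mheap.go behave as described
// above:
//
//	// hypothetical, simplified lookup
//	func arenaForAddr(p uintptr) *heapArena {
//		ri := (p + arenaBaseOffset) / heapArenaBytes // arena frame number
//		l1 := ri >> arenaL1Shift                     // index into the L1 arena map
//		l2 := ri & (1<<arenaL2Bits - 1)              // index into an L2 arena map
//		if mheap_.arenas[l1] == nil {
//			return nil // not backed by the Go heap
//		}
//		return mheap_.arenas[l1][l2]
//	}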
   104	
   105	package runtime
   106	
   107	import (
   108		"runtime/internal/atomic"
   109		"runtime/internal/math"
   110		"runtime/internal/sys"
   111		"unsafe"
   112	)
   113	
   114	const (
   115		debugMalloc = false
   116	
   117		maxTinySize   = _TinySize
   118		tinySizeClass = _TinySizeClass
   119		maxSmallSize  = _MaxSmallSize
   120	
   121		pageShift = _PageShift
   122		pageSize  = _PageSize
   123		pageMask  = _PageMask
   124		// By construction, single page spans of the smallest object class
   125		// have the most objects per span.
   126		maxObjsPerSpan = pageSize / 8
   127	
   128		concurrentSweep = _ConcurrentSweep
   129	
   130		_PageSize = 1 << _PageShift
   131		_PageMask = _PageSize - 1
   132	
   133		// _64bit = 1 on 64-bit systems, 0 on 32-bit systems
   134		_64bit = 1 << (^uintptr(0) >> 63) / 2
   135	
   136		// Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
   137		_TinySize      = 16
   138		_TinySizeClass = int8(2)
   139	
   140		_FixAllocChunk = 16 << 10 // Chunk size for FixAlloc
   141	
   142		// Per-P, per order stack segment cache size.
   143		_StackCacheSize = 32 * 1024
   144	
   145		// Number of orders that get caching. Order 0 is FixedStack
   146		// and each successive order is twice as large.
   147		// We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
   148		// will be allocated directly.
   149		// Since FixedStack is different on different systems, we
   150		// must vary NumStackOrders to keep the same maximum cached size.
   151		//   OS               | FixedStack | NumStackOrders
   152		//   -----------------+------------+---------------
   153		//   linux/darwin/bsd | 2KB        | 4
   154		//   windows/32       | 4KB        | 3
   155		//   windows/64       | 8KB        | 2
   156		//   plan9            | 4KB        | 3
   157		_NumStackOrders = 4 - sys.PtrSize/4*sys.GoosWindows - 1*sys.GoosPlan9
   158	
   159		// heapAddrBits is the number of bits in a heap address. On
   160		// amd64, addresses are sign-extended beyond heapAddrBits. On
   161		// other arches, they are zero-extended.
   162		//
   163		// On most 64-bit platforms, we limit this to 48 bits based on a
   164		// combination of hardware and OS limitations.
   165		//
   166		// amd64 hardware limits addresses to 48 bits, sign-extended
   167		// to 64 bits. Addresses where the top 16 bits are not either
   168		// all 0 or all 1 are "non-canonical" and invalid. Because of
   169		// these "negative" addresses, we offset addresses by 1<<47
   170		// (arenaBaseOffset) on amd64 before computing indexes into
   171		// the heap arenas index. In 2017, amd64 hardware added
   172		// support for 57 bit addresses; however, currently only Linux
   173		// supports this extension and the kernel will never choose an
   174		// address above 1<<47 unless mmap is called with a hint
   175		// address above 1<<47 (which we never do).
   176		//
   177		// arm64 hardware (as of ARMv8) limits user addresses to 48
   178		// bits, in the range [0, 1<<48).
   179		//
   180		// ppc64, mips64, and s390x support arbitrary 64 bit addresses
   181		// in hardware. On Linux, Go leans on stricter OS limits. Based
   182		// on Linux's processor.h, the user address space is limited as
   183		// follows on 64-bit architectures:
   184		//
   185		// Architecture  Name              Maximum Value (exclusive)
   186		// ---------------------------------------------------------------------
   187		// amd64         TASK_SIZE_MAX     0x007ffffffff000 (47 bit addresses)
   188		// arm64         TASK_SIZE_64      0x01000000000000 (48 bit addresses)
   189		// ppc64{,le}    TASK_SIZE_USER64  0x00400000000000 (46 bit addresses)
   190		// mips64{,le}   TASK_SIZE64       0x00010000000000 (40 bit addresses)
   191		// s390x         TASK_SIZE         1<<64 (64 bit addresses)
   192		//
   193		// These limits may increase over time, but are currently at
   194		// most 48 bits except on s390x. On all architectures, Linux
   195		// starts placing mmap'd regions at addresses that are
   196		// significantly below 48 bits, so even if it's possible to
   197		// exceed Go's 48 bit limit, it's extremely unlikely in
   198		// practice.
   199		//
    200	// On aix/ppc64, the limit is increased to 1<<60 to accept addresses
    201	// returned by the mmap syscall. These are in the range:
    202	//  0x0a00000000000000 - 0x0affffffffffffff
   203		//
   204		// On 32-bit platforms, we accept the full 32-bit address
   205		// space because doing so is cheap.
   206		// mips32 only has access to the low 2GB of virtual memory, so
   207		// we further limit it to 31 bits.
   208		//
   209		// WebAssembly currently has a limit of 4GB linear memory.
   210		heapAddrBits = (_64bit*(1-sys.GoarchWasm)*(1-sys.GoosAix))*48 + (1-_64bit+sys.GoarchWasm)*(32-(sys.GoarchMips+sys.GoarchMipsle)) + 60*sys.GoosAix
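	// For example: on linux/amd64, _64bit = 1 and the wasm and aix terms
	// vanish, giving 48; on js/wasm the first term vanishes and the second
	// term gives 32; on mips/mipsle the second term gives 32-1 = 31; and on
	// aix/ppc64 only the final 60*sys.GoosAix term survives, giving 60.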
   211	
   212		// maxAlloc is the maximum size of an allocation. On 64-bit,
   213		// it's theoretically possible to allocate 1<<heapAddrBits bytes. On
   214		// 32-bit, however, this is one less than 1<<32 because the
   215		// number of bytes in the address space doesn't actually fit
   216		// in a uintptr.
   217		maxAlloc = (1 << heapAddrBits) - (1-_64bit)*1
   218	
   219		// The number of bits in a heap address, the size of heap
   220		// arenas, and the L1 and L2 arena map sizes are related by
   221		//
   222		//   (1 << addr bits) = arena size * L1 entries * L2 entries
   223		//
   224		// Currently, we balance these as follows:
   225		//
   226		//       Platform  Addr bits  Arena size  L1 entries   L2 entries
   227		// --------------  ---------  ----------  ----------  -----------
   228		//       */64-bit         48        64MB           1    4M (32MB)
   229		//     aix/64-bit         60       256MB        4096    4M (32MB)
   230		// windows/64-bit         48         4MB          64    1M  (8MB)
   231		//       */32-bit         32         4MB           1  1024  (4KB)
   232		//     */mips(le)         31         4MB           1   512  (2KB)
   233	
   234		// heapArenaBytes is the size of a heap arena. The heap
   235		// consists of mappings of size heapArenaBytes, aligned to
   236		// heapArenaBytes. The initial heap mapping is one arena.
   237		//
   238		// This is currently 64MB on 64-bit non-Windows and 4MB on
   239		// 32-bit and on Windows. We use smaller arenas on Windows
   240		// because all committed memory is charged to the process,
   241		// even if it's not touched. Hence, for processes with small
   242		// heaps, the mapped arena space needs to be commensurate.
   243		// This is particularly important with the race detector,
   244		// since it significantly amplifies the cost of committed
   245		// memory.
   246		heapArenaBytes = 1 << logHeapArenaBytes
   247	
   248		// logHeapArenaBytes is log_2 of heapArenaBytes. For clarity,
   249		// prefer using heapArenaBytes where possible (we need the
   250		// constant to compute some other constants).
   251		logHeapArenaBytes = (6+20)*(_64bit*(1-sys.GoosWindows)*(1-sys.GoosAix)*(1-sys.GoarchWasm)) + (2+20)*(_64bit*sys.GoosWindows) + (2+20)*(1-_64bit) + (8+20)*sys.GoosAix + (2+20)*sys.GoarchWasm
   252	
   253		// heapArenaBitmapBytes is the size of each heap arena's bitmap.
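	// (The bitmap stores two bits per pointer-sized word of the arena:
	// heapArenaBytes/PtrSize words at 2 bits each, packed 8 bits per byte,
	// which is the divisor below.)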
   254		heapArenaBitmapBytes = heapArenaBytes / (sys.PtrSize * 8 / 2)
   255	
   256		pagesPerArena = heapArenaBytes / pageSize
   257	
   258		// arenaL1Bits is the number of bits of the arena number
   259		// covered by the first level arena map.
   260		//
   261		// This number should be small, since the first level arena
   262		// map requires PtrSize*(1<<arenaL1Bits) of space in the
   263		// binary's BSS. It can be zero, in which case the first level
   264		// index is effectively unused. There is a performance benefit
   265		// to this, since the generated code can be more efficient,
    266	// but this comes at the cost of having a large L2 mapping.
   267		//
   268		// We use the L1 map on 64-bit Windows because the arena size
   269		// is small, but the address space is still 48 bits, and
   270		// there's a high cost to having a large L2.
   271		//
   272		// We use the L1 map on aix/ppc64 to keep the same L2 value
   273		// as on Linux.
   274		arenaL1Bits = 6*(_64bit*sys.GoosWindows) + 12*sys.GoosAix
   275	
   276		// arenaL2Bits is the number of bits of the arena number
   277		// covered by the second level arena index.
   278		//
   279		// The size of each arena map allocation is proportional to
   280		// 1<<arenaL2Bits, so it's important that this not be too
   281		// large. 48 bits leads to 32MB arena index allocations, which
   282		// is about the practical threshold.
   283		arenaL2Bits = heapAddrBits - logHeapArenaBytes - arenaL1Bits
   284	
   285		// arenaL1Shift is the number of bits to shift an arena frame
   286		// number by to compute an index into the first level arena map.
   287		arenaL1Shift = arenaL2Bits
   288	
   289		// arenaBits is the total bits in a combined arena map index.
   290		// This is split between the index into the L1 arena map and
   291		// the L2 arena map.
   292		arenaBits = arenaL1Bits + arenaL2Bits
   293	
   294		// arenaBaseOffset is the pointer value that corresponds to
   295		// index 0 in the heap arena map.
   296		//
   297		// On amd64, the address space is 48 bits, sign extended to 64
   298		// bits. This offset lets us handle "negative" addresses (or
   299		// high addresses if viewed as unsigned).
   300		//
   301		// On other platforms, the user address space is contiguous
   302		// and starts at 0, so no offset is necessary.
   303		arenaBaseOffset uintptr = sys.GoarchAmd64 * (1 << 47)
   304	
   305		// Max number of threads to run garbage collection.
   306		// 2, 3, and 4 are all plausible maximums depending
   307		// on the hardware details of the machine. The garbage
   308		// collector scales well to 32 cpus.
   309		_MaxGcproc = 32
   310	
   311		// minLegalPointer is the smallest possible legal pointer.
   312		// This is the smallest possible architectural page size,
   313		// since we assume that the first page is never mapped.
   314		//
   315		// This should agree with minZeroPage in the compiler.
   316		minLegalPointer uintptr = 4096
   317	)
   318	
   319	// physPageSize is the size in bytes of the OS's physical pages.
   320	// Mapping and unmapping operations must be done at multiples of
   321	// physPageSize.
   322	//
   323	// This must be set by the OS init code (typically in osinit) before
   324	// mallocinit.
   325	var physPageSize uintptr
   326	
   327	// physHugePageSize is the size in bytes of the OS's default physical huge
   328	// page size whose allocation is opaque to the application. It is assumed
   329	// and verified to be a power of two.
   330	//
   331	// If set, this must be set by the OS init code (typically in osinit) before
   332	// mallocinit. However, setting it at all is optional, and leaving the default
   333	// value is always safe (though potentially less efficient).
   334	//
   335	// Since physHugePageSize is always assumed to be a power of two,
   336	// physHugePageShift is defined as physHugePageSize == 1 << physHugePageShift.
   337	// The purpose of physHugePageShift is to avoid doing divisions in
   338	// performance critical functions.
   339	var (
   340		physHugePageSize  uintptr
   341		physHugePageShift uint
   342	)
   343	
   344	// OS memory management abstraction layer
   345	//
   346	// Regions of the address space managed by the runtime may be in one of four
   347	// states at any given time:
   348	// 1) None - Unreserved and unmapped, the default state of any region.
   349	// 2) Reserved - Owned by the runtime, but accessing it would cause a fault.
   350	//               Does not count against the process' memory footprint.
   351	// 3) Prepared - Reserved, intended not to be backed by physical memory (though
   352	//               an OS may implement this lazily). Can transition efficiently to
   353	//               Ready. Accessing memory in such a region is undefined (may
   354	//               fault, may give back unexpected zeroes, etc.).
    355	// 4) Ready - May be accessed safely.
   356	//
   357	// This set of states is more than is strictly necessary to support all the
   358	// currently supported platforms. One could get by with just None, Reserved, and
   359	// Ready. However, the Prepared state gives us flexibility for performance
   360	// purposes. For example, on POSIX-y operating systems, Reserved is usually a
   361	// private anonymous mmap'd region with PROT_NONE set, and to transition
    362	// to Ready would require setting PROT_READ|PROT_WRITE. However, the
    363	// underspecification of Prepared lets us use just MADV_FREE to transition from
    364	// Ready to Prepared. Thus with the Prepared state we can set the permission
    365	// bits just once early on, and efficiently tell the OS that it's free to
    366	// take pages away from us when we don't strictly need them.
   367	//
   368	// For each OS there is a common set of helpers defined that transition
   369	// memory regions between these states. The helpers are as follows:
   370	//
   371	// sysAlloc transitions an OS-chosen region of memory from None to Ready.
   372	// More specifically, it obtains a large chunk of zeroed memory from the
   373	// operating system, typically on the order of a hundred kilobytes
   374	// or a megabyte. This memory is always immediately available for use.
   375	//
   376	// sysFree transitions a memory region from any state to None. Therefore, it
   377	// returns memory unconditionally. It is used if an out-of-memory error has been
   378	// detected midway through an allocation or to carve out an aligned section of
    379	// the address space. It is okay for sysFree to be a no-op, but only if
    380	// sysReserve always returns memory regions aligned to the heap allocator's
    381	// alignment restrictions.
   382	//
   383	// sysReserve transitions a memory region from None to Reserved. It reserves
   384	// address space in such a way that it would cause a fatal fault upon access
   385	// (either via permissions or not committing the memory). Such a reservation is
   386	// thus never backed by physical memory.
   387	// If the pointer passed to it is non-nil, the caller wants the
   388	// reservation there, but sysReserve can still choose another
   389	// location if that one is unavailable.
   390	// NOTE: sysReserve returns OS-aligned memory, but the heap allocator
   391	// may use larger alignment, so the caller must be careful to realign the
   392	// memory obtained by sysReserve.
   393	//
   394	// sysMap transitions a memory region from Reserved to Prepared. It ensures the
   395	// memory region can be efficiently transitioned to Ready.
   396	//
   397	// sysUsed transitions a memory region from Prepared to Ready. It notifies the
   398	// operating system that the memory region is needed and ensures that the region
   399	// may be safely accessed. This is typically a no-op on systems that don't have
   400	// an explicit commit step and hard over-commit limits, but is critical on
   401	// Windows, for example.
   402	//
   403	// sysUnused transitions a memory region from Ready to Prepared. It notifies the
   404	// operating system that the physical pages backing this memory region are no
   405	// longer needed and can be reused for other purposes. The contents of a
   406	// sysUnused memory region are considered forfeit and the region must not be
   407	// accessed again until sysUsed is called.
   408	//
   409	// sysFault transitions a memory region from Ready or Prepared to Reserved. It
   410	// marks a region such that it will always fault if accessed. Used only for
   411	// debugging the runtime.
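//
// As a rough, hedged illustration only (the runtime's real implementations
// live in the OS-specific mem_*.go files), the POSIX flavor of these
// transitions looks approximately like the following, using the standard
// syscall package (n is a placeholder for the desired length in bytes):
//
//	// None -> Reserved: PROT_NONE anonymous mapping.
//	mem, _ := syscall.Mmap(-1, 0, n, syscall.PROT_NONE,
//		syscall.MAP_ANON|syscall.MAP_PRIVATE)
//
//	// Reserved -> Prepared -> Ready: make the region accessible.
//	_ = syscall.Mprotect(mem, syscall.PROT_READ|syscall.PROT_WRITE)
//
//	// Ready -> Prepared: let the OS reclaim the physical pages
//	// (the runtime prefers MADV_FREE where available).
//	_ = syscall.Madvise(mem, syscall.MADV_DONTNEED)
//
//	// Any state -> None: unmap the region entirely.
//	_ = syscall.Munmap(mem)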
   412	
   413	func mallocinit() {
   414		if class_to_size[_TinySizeClass] != _TinySize {
   415			throw("bad TinySizeClass")
   416		}
   417	
   418		testdefersizes()
   419	
   420		if heapArenaBitmapBytes&(heapArenaBitmapBytes-1) != 0 {
   421			// heapBits expects modular arithmetic on bitmap
   422			// addresses to work.
   423			throw("heapArenaBitmapBytes not a power of 2")
   424		}
   425	
   426		// Copy class sizes out for statistics table.
   427		for i := range class_to_size {
   428			memstats.by_size[i].size = uint32(class_to_size[i])
   429		}
   430	
   431		// Check physPageSize.
   432		if physPageSize == 0 {
   433			// The OS init code failed to fetch the physical page size.
   434			throw("failed to get system page size")
   435		}
   436		if physPageSize < minPhysPageSize {
   437			print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n")
   438			throw("bad system page size")
   439		}
   440		if physPageSize&(physPageSize-1) != 0 {
   441			print("system page size (", physPageSize, ") must be a power of 2\n")
   442			throw("bad system page size")
   443		}
   444		if physHugePageSize&(physHugePageSize-1) != 0 {
   445			print("system huge page size (", physHugePageSize, ") must be a power of 2\n")
   446			throw("bad system huge page size")
   447		}
   448		if physHugePageSize != 0 {
   449			// Since physHugePageSize is a power of 2, it suffices to increase
   450			// physHugePageShift until 1<<physHugePageShift == physHugePageSize.
   451			for 1<<physHugePageShift != physHugePageSize {
   452				physHugePageShift++
   453			}
   454		}
   455	
   456		// Initialize the heap.
   457		mheap_.init()
   458		_g_ := getg()
   459		_g_.m.mcache = allocmcache()
   460	
   461		// Create initial arena growth hints.
   462		if sys.PtrSize == 8 {
   463			// On a 64-bit machine, we pick the following hints
   464			// because:
   465			//
   466			// 1. Starting from the middle of the address space
   467			// makes it easier to grow out a contiguous range
   468			// without running in to some other mapping.
   469			//
   470			// 2. This makes Go heap addresses more easily
   471			// recognizable when debugging.
   472			//
   473			// 3. Stack scanning in gccgo is still conservative,
   474			// so it's important that addresses be distinguishable
   475			// from other data.
   476			//
   477			// Starting at 0x00c0 means that the valid memory addresses
    478			// will begin with 0x00c0, 0x00c1, ...
   479			// In little-endian, that's c0 00, c1 00, ... None of those are valid
   480			// UTF-8 sequences, and they are otherwise as far away from
   481			// ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
   482			// addresses. An earlier attempt to use 0x11f8 caused out of memory errors
   483			// on OS X during thread allocations.  0x00c0 causes conflicts with
   484			// AddressSanitizer which reserves all memory up to 0x0100.
   485			// These choices reduce the odds of a conservative garbage collector
   486			// not collecting memory because some non-pointer block of memory
   487			// had a bit pattern that matched a memory address.
   488			//
   489			// However, on arm64, we ignore all this advice above and slam the
   490			// allocation at 0x40 << 32 because when using 4k pages with 3-level
    491			// translation buffers, the user address space is limited to 39 bits.
    492			// On darwin/arm64, the address space is even smaller.
    493			// On AIX, mmap starts at 0x0A00000000000000 for 64-bit
    494			// processes.
   495			for i := 0x7f; i >= 0; i-- {
   496				var p uintptr
   497				switch {
   498				case GOARCH == "arm64" && GOOS == "darwin":
   499					p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
   500				case GOARCH == "arm64":
   501					p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
   502				case GOOS == "aix":
   503					if i == 0 {
   504						// We don't use addresses directly after 0x0A00000000000000
    505					// to avoid collisions with other mmaps done by non-Go programs.
   506						continue
   507					}
   508					p = uintptr(i)<<40 | uintptrMask&(0xa0<<52)
   509				case raceenabled:
   510					// The TSAN runtime requires the heap
   511					// to be in the range [0x00c000000000,
   512					// 0x00e000000000).
   513					p = uintptr(i)<<32 | uintptrMask&(0x00c0<<32)
   514					if p >= uintptrMask&0x00e000000000 {
   515						continue
   516					}
   517				default:
   518					p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
   519				}
   520				hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
   521				hint.addr = p
   522				hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
   523			}
   524		} else {
   525			// On a 32-bit machine, we're much more concerned
   526			// about keeping the usable heap contiguous.
   527			// Hence:
   528			//
   529			// 1. We reserve space for all heapArenas up front so
   530			// they don't get interleaved with the heap. They're
   531			// ~258MB, so this isn't too bad. (We could reserve a
   532			// smaller amount of space up front if this is a
   533			// problem.)
   534			//
   535			// 2. We hint the heap to start right above the end of
   536			// the binary so we have the best chance of keeping it
   537			// contiguous.
   538			//
   539			// 3. We try to stake out a reasonably large initial
   540			// heap reservation.
   541	
   542			const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
   543			meta := uintptr(sysReserve(nil, arenaMetaSize))
   544			if meta != 0 {
   545				mheap_.heapArenaAlloc.init(meta, arenaMetaSize)
   546			}
   547	
   548			// We want to start the arena low, but if we're linked
   549			// against C code, it's possible global constructors
   550			// have called malloc and adjusted the process' brk.
   551			// Query the brk so we can avoid trying to map the
   552			// region over it (which will cause the kernel to put
   553			// the region somewhere else, likely at a high
   554			// address).
   555			procBrk := sbrk0()
   556	
   557			// If we ask for the end of the data segment but the
   558			// operating system requires a little more space
   559			// before we can start allocating, it will give out a
   560			// slightly higher pointer. Except QEMU, which is
   561			// buggy, as usual: it won't adjust the pointer
   562			// upward. So adjust it upward a little bit ourselves:
   563			// 1/4 MB to get away from the running binary image.
   564			p := firstmoduledata.end
   565			if p < procBrk {
   566				p = procBrk
   567			}
   568			if mheap_.heapArenaAlloc.next <= p && p < mheap_.heapArenaAlloc.end {
   569				p = mheap_.heapArenaAlloc.end
   570			}
   571			p = round(p+(256<<10), heapArenaBytes)
   572			// Because we're worried about fragmentation on
   573			// 32-bit, we try to make a large initial reservation.
   574			arenaSizes := []uintptr{
   575				512 << 20,
   576				256 << 20,
   577				128 << 20,
   578			}
   579			for _, arenaSize := range arenaSizes {
   580				a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes)
   581				if a != nil {
   582					mheap_.arena.init(uintptr(a), size)
   583					p = uintptr(a) + size // For hint below
   584					break
   585				}
   586			}
   587			hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
   588			hint.addr = p
   589			hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
   590		}
   591	}
   592	
   593	// sysAlloc allocates heap arena space for at least n bytes. The
   594	// returned pointer is always heapArenaBytes-aligned and backed by
   595	// h.arenas metadata. The returned size is always a multiple of
   596	// heapArenaBytes. sysAlloc returns nil on failure.
   597	// There is no corresponding free function.
   598	//
   599	// sysAlloc returns a memory region in the Prepared state. This region must
   600	// be transitioned to Ready before use.
   601	//
   602	// h must be locked.
   603	func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) {
   604		n = round(n, heapArenaBytes)
   605	
   606		// First, try the arena pre-reservation.
   607		v = h.arena.alloc(n, heapArenaBytes, &memstats.heap_sys)
   608		if v != nil {
   609			size = n
   610			goto mapped
   611		}
   612	
   613		// Try to grow the heap at a hint address.
   614		for h.arenaHints != nil {
   615			hint := h.arenaHints
   616			p := hint.addr
   617			if hint.down {
   618				p -= n
   619			}
   620			if p+n < p {
   621				// We can't use this, so don't ask.
   622				v = nil
   623			} else if arenaIndex(p+n-1) >= 1<<arenaBits {
   624				// Outside addressable heap. Can't use.
   625				v = nil
   626			} else {
   627				v = sysReserve(unsafe.Pointer(p), n)
   628			}
   629			if p == uintptr(v) {
   630				// Success. Update the hint.
   631				if !hint.down {
   632					p += n
   633				}
   634				hint.addr = p
   635				size = n
   636				break
   637			}
   638			// Failed. Discard this hint and try the next.
   639			//
   640			// TODO: This would be cleaner if sysReserve could be
   641			// told to only return the requested address. In
   642			// particular, this is already how Windows behaves, so
   643			// it would simplify things there.
   644			if v != nil {
   645				sysFree(v, n, nil)
   646			}
   647			h.arenaHints = hint.next
   648			h.arenaHintAlloc.free(unsafe.Pointer(hint))
   649		}
   650	
   651		if size == 0 {
   652			if raceenabled {
   653				// The race detector assumes the heap lives in
   654				// [0x00c000000000, 0x00e000000000), but we
   655				// just ran out of hints in this region. Give
   656				// a nice failure.
   657				throw("too many address space collisions for -race mode")
   658			}
   659	
   660			// All of the hints failed, so we'll take any
   661			// (sufficiently aligned) address the kernel will give
   662			// us.
   663			v, size = sysReserveAligned(nil, n, heapArenaBytes)
   664			if v == nil {
   665				return nil, 0
   666			}
   667	
   668			// Create new hints for extending this region.
   669			hint := (*arenaHint)(h.arenaHintAlloc.alloc())
   670			hint.addr, hint.down = uintptr(v), true
   671			hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
   672			hint = (*arenaHint)(h.arenaHintAlloc.alloc())
   673			hint.addr = uintptr(v) + size
   674			hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
   675		}
   676	
   677		// Check for bad pointers or pointers we can't use.
   678		{
   679			var bad string
   680			p := uintptr(v)
   681			if p+size < p {
   682				bad = "region exceeds uintptr range"
   683			} else if arenaIndex(p) >= 1<<arenaBits {
   684				bad = "base outside usable address space"
   685			} else if arenaIndex(p+size-1) >= 1<<arenaBits {
   686				bad = "end outside usable address space"
   687			}
   688			if bad != "" {
   689				// This should be impossible on most architectures,
   690				// but it would be really confusing to debug.
   691				print("runtime: memory allocated by OS [", hex(p), ", ", hex(p+size), ") not in usable address space: ", bad, "\n")
   692				throw("memory reservation exceeds address space limit")
   693			}
   694		}
   695	
   696		if uintptr(v)&(heapArenaBytes-1) != 0 {
   697			throw("misrounded allocation in sysAlloc")
   698		}
   699	
   700		// Transition from Reserved to Prepared.
   701		sysMap(v, size, &memstats.heap_sys)
   702	
   703	mapped:
   704		// Create arena metadata.
   705		for ri := arenaIndex(uintptr(v)); ri <= arenaIndex(uintptr(v)+size-1); ri++ {
   706			l2 := h.arenas[ri.l1()]
   707			if l2 == nil {
   708				// Allocate an L2 arena map.
   709				l2 = (*[1 << arenaL2Bits]*heapArena)(persistentalloc(unsafe.Sizeof(*l2), sys.PtrSize, nil))
   710				if l2 == nil {
   711					throw("out of memory allocating heap arena map")
   712				}
   713				atomic.StorepNoWB(unsafe.Pointer(&h.arenas[ri.l1()]), unsafe.Pointer(l2))
   714			}
   715	
   716			if l2[ri.l2()] != nil {
   717				throw("arena already initialized")
   718			}
   719			var r *heapArena
   720			r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gc_sys))
   721			if r == nil {
   722				r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gc_sys))
   723				if r == nil {
   724					throw("out of memory allocating heap arena metadata")
   725				}
   726			}
   727	
   728			// Add the arena to the arenas list.
   729			if len(h.allArenas) == cap(h.allArenas) {
   730				size := 2 * uintptr(cap(h.allArenas)) * sys.PtrSize
   731				if size == 0 {
   732					size = physPageSize
   733				}
   734				newArray := (*notInHeap)(persistentalloc(size, sys.PtrSize, &memstats.gc_sys))
   735				if newArray == nil {
   736					throw("out of memory allocating allArenas")
   737				}
   738				oldSlice := h.allArenas
   739				*(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / sys.PtrSize)}
   740				copy(h.allArenas, oldSlice)
   741				// Do not free the old backing array because
   742				// there may be concurrent readers. Since we
   743				// double the array each time, this can lead
   744				// to at most 2x waste.
   745			}
   746			h.allArenas = h.allArenas[:len(h.allArenas)+1]
   747			h.allArenas[len(h.allArenas)-1] = ri
   748	
   749			// Store atomically just in case an object from the
   750			// new heap arena becomes visible before the heap lock
   751			// is released (which shouldn't happen, but there's
   752			// little downside to this).
   753			atomic.StorepNoWB(unsafe.Pointer(&l2[ri.l2()]), unsafe.Pointer(r))
   754		}
   755	
   756		// Tell the race detector about the new heap memory.
   757		if raceenabled {
   758			racemapshadow(v, size)
   759		}
   760	
   761		return
   762	}
   763	
   764	// sysReserveAligned is like sysReserve, but the returned pointer is
   765	// aligned to align bytes. It may reserve either n or n+align bytes,
   766	// so it returns the size that was reserved.
   767	func sysReserveAligned(v unsafe.Pointer, size, align uintptr) (unsafe.Pointer, uintptr) {
   768		// Since the alignment is rather large in uses of this
   769		// function, we're not likely to get it by chance, so we ask
   770		// for a larger region and remove the parts we don't need.
   771		retries := 0
   772	retry:
   773		p := uintptr(sysReserve(v, size+align))
   774		switch {
   775		case p == 0:
   776			return nil, 0
   777		case p&(align-1) == 0:
   778			// We got lucky and got an aligned region, so we can
   779			// use the whole thing.
   780			return unsafe.Pointer(p), size + align
   781		case GOOS == "windows":
   782			// On Windows we can't release pieces of a
   783			// reservation, so we release the whole thing and
   784			// re-reserve the aligned sub-region. This may race,
   785			// so we may have to try again.
   786			sysFree(unsafe.Pointer(p), size+align, nil)
   787			p = round(p, align)
   788			p2 := sysReserve(unsafe.Pointer(p), size)
   789			if p != uintptr(p2) {
   790				// Must have raced. Try again.
   791				sysFree(p2, size, nil)
   792				if retries++; retries == 100 {
   793					throw("failed to allocate aligned heap memory; too many retries")
   794				}
   795				goto retry
   796			}
   797			// Success.
   798			return p2, size
   799		default:
   800			// Trim off the unaligned parts.
   801			pAligned := round(p, align)
   802			sysFree(unsafe.Pointer(p), pAligned-p, nil)
   803			end := pAligned + size
   804			endLen := (p + size + align) - end
   805			if endLen > 0 {
   806				sysFree(unsafe.Pointer(end), endLen, nil)
   807			}
   808			return unsafe.Pointer(pAligned), size
   809		}
   810	}
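// Worked example (illustrative): with align = 64MB, if sysReserve returns
// p = 0x00c000100000 for the size+align request, then pAligned =
// 0x00c004000000; the 63MB prefix [p, pAligned) and the 1MB tail beyond
// pAligned+size are freed, leaving exactly size bytes aligned to 64MB.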
   811	
   812	// base address for all 0-byte allocations
   813	var zerobase uintptr
   814	
   815	// nextFreeFast returns the next free object if one is quickly available.
   816	// Otherwise it returns 0.
   817	func nextFreeFast(s *mspan) gclinkptr {
   818		theBit := sys.Ctz64(s.allocCache) // Is there a free object in the allocCache?
   819		if theBit < 64 {
   820			result := s.freeindex + uintptr(theBit)
   821			if result < s.nelems {
   822				freeidx := result + 1
   823				if freeidx%64 == 0 && freeidx != s.nelems {
   824					return 0
   825				}
   826				s.allocCache >>= uint(theBit + 1)
   827				s.freeindex = freeidx
   828				s.allocCount++
   829				return gclinkptr(result*s.elemsize + s.base())
   830			}
   831		}
   832		return 0
   833	}
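// Worked example (illustrative): suppose s.freeindex is 64 and s.allocCache
// ends in ...11111010 in binary, meaning slots 64 and 66 are already taken.
// Ctz64 returns 1, so the object at index 64+1 = 65 is handed out, the cache
// is shifted right by 2 so that bit 0 now describes slot 66, and freeindex
// becomes 66. Outside the runtime the same bit trick could be sketched with
// the standard library (math/bits.TrailingZeros64 also returns 64 for a zero
// word):
//
//	theBit := bits.TrailingZeros64(allocCache) // 64 if nothing free is cached
//	if theBit < 64 {
//		slot = freeindex + uintptr(theBit) // first free slot
//	}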
   834	
   835	// nextFree returns the next free object from the cached span if one is available.
   836	// Otherwise it refills the cache with a span with an available object and
    837	// returns that object along with a flag indicating that this was a heavyweight
    838	// allocation. If it is a heavyweight allocation, the caller must determine
    839	// whether a new GC cycle needs to be started or, if the GC is active,
    840	// whether this goroutine needs to assist the GC.
   841	//
   842	// Must run in a non-preemptible context since otherwise the owner of
   843	// c could change.
   844	func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bool) {
   845		s = c.alloc[spc]
   846		shouldhelpgc = false
   847		freeIndex := s.nextFreeIndex()
   848		if freeIndex == s.nelems {
   849			// The span is full.
   850			if uintptr(s.allocCount) != s.nelems {
   851				println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
   852				throw("s.allocCount != s.nelems && freeIndex == s.nelems")
   853			}
   854			c.refill(spc)
   855			shouldhelpgc = true
   856			s = c.alloc[spc]
   857	
   858			freeIndex = s.nextFreeIndex()
   859		}
   860	
   861		if freeIndex >= s.nelems {
   862			throw("freeIndex is not valid")
   863		}
   864	
   865		v = gclinkptr(freeIndex*s.elemsize + s.base())
   866		s.allocCount++
   867		if uintptr(s.allocCount) > s.nelems {
   868			println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
   869			throw("s.allocCount > s.nelems")
   870		}
   871		return
   872	}
   873	
   874	// Allocate an object of size bytes.
   875	// Small objects are allocated from the per-P cache's free lists.
   876	// Large objects (> 32 kB) are allocated straight from the heap.
   877	func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
   878		if gcphase == _GCmarktermination {
   879			throw("mallocgc called with gcphase == _GCmarktermination")
   880		}
   881	
   882		if size == 0 {
   883			return unsafe.Pointer(&zerobase)
   884		}
   885	
   886		if debug.sbrk != 0 {
   887			align := uintptr(16)
   888			if typ != nil {
   889				// TODO(austin): This should be just
   890				//   align = uintptr(typ.align)
   891				// but that's only 4 on 32-bit platforms,
   892				// even if there's a uint64 field in typ (see #599).
   893				// This causes 64-bit atomic accesses to panic.
   894				// Hence, we use stricter alignment that matches
   895				// the normal allocator better.
   896				if size&7 == 0 {
   897					align = 8
   898				} else if size&3 == 0 {
   899					align = 4
   900				} else if size&1 == 0 {
   901					align = 2
   902				} else {
   903					align = 1
   904				}
   905			}
   906			return persistentalloc(size, align, &memstats.other_sys)
   907		}
   908	
   909		// assistG is the G to charge for this allocation, or nil if
   910		// GC is not currently active.
   911		var assistG *g
   912		if gcBlackenEnabled != 0 {
   913			// Charge the current user G for this allocation.
   914			assistG = getg()
   915			if assistG.m.curg != nil {
   916				assistG = assistG.m.curg
   917			}
   918			// Charge the allocation against the G. We'll account
   919			// for internal fragmentation at the end of mallocgc.
   920			assistG.gcAssistBytes -= int64(size)
   921	
   922			if assistG.gcAssistBytes < 0 {
   923				// This G is in debt. Assist the GC to correct
   924				// this before allocating. This must happen
   925				// before disabling preemption.
   926				gcAssistAlloc(assistG)
   927			}
   928		}
   929	
   930		// Set mp.mallocing to keep from being preempted by GC.
   931		mp := acquirem()
   932		if mp.mallocing != 0 {
   933			throw("malloc deadlock")
   934		}
   935		if mp.gsignal == getg() {
   936			throw("malloc during signal")
   937		}
   938		mp.mallocing = 1
   939	
   940		shouldhelpgc := false
   941		dataSize := size
   942		c := gomcache()
   943		var x unsafe.Pointer
   944		noscan := typ == nil || typ.ptrdata == 0
   945		if size <= maxSmallSize {
   946			if noscan && size < maxTinySize {
   947				// Tiny allocator.
   948				//
    949				// The tiny allocator combines several tiny allocation requests
    950				// into a single memory block. The resulting memory block
    951				// is freed when all subobjects are unreachable. The subobjects
    952				// must be noscan (have no pointers); this ensures that
    953				// the amount of potentially wasted memory is bounded.
   954				//
    955				// The size of the memory block used for combining (maxTinySize) is tunable.
    956				// The current setting is 16 bytes, which corresponds to 2x worst case memory
    957				// wastage (when all but one of the subobjects are unreachable).
    958				// 8 bytes would result in no wastage at all, but provides fewer
    959				// opportunities for combining.
    960				// 32 bytes provides more opportunities for combining,
    961				// but can lead to 4x worst case wastage.
    962				// The best case saving is 8x regardless of block size.
   963				//
    964				// Objects obtained from the tiny allocator must not be freed explicitly.
    965				// So when an object will be freed explicitly, we ensure that
    966				// its size >= maxTinySize.
    967				//
    968				// SetFinalizer has a special case for objects potentially coming
    969				// from the tiny allocator; in that case it allows setting finalizers
    970				// for an inner byte of a memory block.
   971				//
    972				// The main targets of the tiny allocator are small strings and
    973				// standalone escaping variables. On a json benchmark
    974				// the allocator reduces the number of allocations by ~12% and
   975				// reduces heap size by ~20%.
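			//
			// Worked example (illustrative): starting with a fresh 16-byte
			// tiny block, a 5-byte noscan allocation sets tinyoffset to 5;
			// a following 4-byte allocation is aligned up to offset 8 and
			// sets tinyoffset to 12; a 3-byte allocation then fits at
			// offset 12, leaving tinyoffset at 15 of 16. All three objects
			// share one block, which is freed only when none of them is
			// reachable.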
   976				off := c.tinyoffset
   977				// Align tiny pointer for required (conservative) alignment.
   978				if size&7 == 0 {
   979					off = round(off, 8)
   980				} else if size&3 == 0 {
   981					off = round(off, 4)
   982				} else if size&1 == 0 {
   983					off = round(off, 2)
   984				}
   985				if off+size <= maxTinySize && c.tiny != 0 {
   986					// The object fits into existing tiny block.
   987					x = unsafe.Pointer(c.tiny + off)
   988					c.tinyoffset = off + size
   989					c.local_tinyallocs++
   990					mp.mallocing = 0
   991					releasem(mp)
   992					return x
   993				}
   994				// Allocate a new maxTinySize block.
   995				span := c.alloc[tinySpanClass]
   996				v := nextFreeFast(span)
   997				if v == 0 {
   998					v, _, shouldhelpgc = c.nextFree(tinySpanClass)
   999				}
  1000				x = unsafe.Pointer(v)
  1001				(*[2]uint64)(x)[0] = 0
  1002				(*[2]uint64)(x)[1] = 0
  1003				// See if we need to replace the existing tiny block with the new one
  1004				// based on amount of remaining free space.
  1005				if size < c.tinyoffset || c.tiny == 0 {
  1006					c.tiny = uintptr(x)
  1007					c.tinyoffset = size
  1008				}
  1009				size = maxTinySize
  1010			} else {
  1011				var sizeclass uint8
  1012				if size <= smallSizeMax-8 {
  1013					sizeclass = size_to_class8[(size+smallSizeDiv-1)/smallSizeDiv]
  1014				} else {
  1015					sizeclass = size_to_class128[(size-smallSizeMax+largeSizeDiv-1)/largeSizeDiv]
  1016				}
  1017				size = uintptr(class_to_size[sizeclass])
  1018				spc := makeSpanClass(sizeclass, noscan)
  1019				span := c.alloc[spc]
  1020				v := nextFreeFast(span)
  1021				if v == 0 {
  1022					v, span, shouldhelpgc = c.nextFree(spc)
  1023				}
  1024				x = unsafe.Pointer(v)
  1025				if needzero && span.needzero != 0 {
  1026					memclrNoHeapPointers(unsafe.Pointer(v), size)
  1027				}
  1028			}
  1029		} else {
  1030			var s *mspan
  1031			shouldhelpgc = true
  1032			systemstack(func() {
  1033				s = largeAlloc(size, needzero, noscan)
  1034			})
  1035			s.freeindex = 1
  1036			s.allocCount = 1
  1037			x = unsafe.Pointer(s.base())
  1038			size = s.elemsize
  1039		}
  1040	
  1041		var scanSize uintptr
  1042		if !noscan {
  1043			// If allocating a defer+arg block, now that we've picked a malloc size
  1044			// large enough to hold everything, cut the "asked for" size down to
  1045			// just the defer header, so that the GC bitmap will record the arg block
  1046			// as containing nothing at all (as if it were unused space at the end of
  1047			// a malloc block caused by size rounding).
  1048			// The defer arg areas are scanned as part of scanstack.
  1049			if typ == deferType {
  1050				dataSize = unsafe.Sizeof(_defer{})
  1051			}
  1052			heapBitsSetType(uintptr(x), size, dataSize, typ)
  1053			if dataSize > typ.size {
  1054				// Array allocation. If there are any
  1055				// pointers, GC has to scan to the last
  1056				// element.
  1057				if typ.ptrdata != 0 {
  1058					scanSize = dataSize - typ.size + typ.ptrdata
  1059				}
  1060			} else {
  1061				scanSize = typ.ptrdata
  1062			}
  1063			c.local_scan += scanSize
  1064		}
  1065	
  1066		// Ensure that the stores above that initialize x to
  1067		// type-safe memory and set the heap bits occur before
  1068		// the caller can make x observable to the garbage
  1069		// collector. Otherwise, on weakly ordered machines,
  1070		// the garbage collector could follow a pointer to x,
  1071		// but see uninitialized memory or stale heap bits.
  1072		publicationBarrier()
  1073	
  1074		// Allocate black during GC.
  1075		// All slots hold nil so no scanning is needed.
  1076		// This may be racing with GC so do it atomically if there can be
  1077		// a race marking the bit.
  1078		if gcphase != _GCoff {
  1079			gcmarknewobject(uintptr(x), size, scanSize)
  1080		}
  1081	
  1082		if raceenabled {
  1083			racemalloc(x, size)
  1084		}
  1085	
  1086		if msanenabled {
  1087			msanmalloc(x, size)
  1088		}
  1089	
  1090		mp.mallocing = 0
  1091		releasem(mp)
  1092	
  1093		if debug.allocfreetrace != 0 {
  1094			tracealloc(x, size, typ)
  1095		}
  1096	
  1097		if rate := MemProfileRate; rate > 0 {
  1098			if rate != 1 && size < c.next_sample {
  1099				c.next_sample -= size
  1100			} else {
  1101				mp := acquirem()
  1102				profilealloc(mp, x, size)
  1103				releasem(mp)
  1104			}
  1105		}
  1106	
  1107		if assistG != nil {
  1108			// Account for internal fragmentation in the assist
  1109			// debt now that we know it.
  1110			assistG.gcAssistBytes -= int64(size - dataSize)
  1111		}
  1112	
  1113		if shouldhelpgc {
  1114			if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  1115				gcStart(t)
  1116			}
  1117		}
  1118	
  1119		return x
  1120	}
  1121	
  1122	func largeAlloc(size uintptr, needzero bool, noscan bool) *mspan {
  1123		// print("largeAlloc size=", size, "\n")
  1124	
  1125		if size+_PageSize < size {
  1126			throw("out of memory")
  1127		}
  1128		npages := size >> _PageShift
  1129		if size&_PageMask != 0 {
  1130			npages++
  1131		}
  1132	
  1133		// Deduct credit for this span allocation and sweep if
   1134	// necessary. mheap_.alloc will also sweep npages, so this only
   1135	// pays the debt down to npages pages.
  1136		deductSweepCredit(npages*_PageSize, npages)
  1137	
  1138		s := mheap_.alloc(npages, makeSpanClass(0, noscan), true, needzero)
  1139		if s == nil {
  1140			throw("out of memory")
  1141		}
  1142		s.limit = s.base() + size
  1143		heapBitsForAddr(s.base()).initSpan(s)
  1144		return s
  1145	}
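// Worked example (illustrative): a 40000-byte allocation exceeds
// maxSmallSize (32KB), so it takes this path. With 8KB pages,
// 40000>>_PageShift is 4 and 40000&_PageMask is nonzero, so npages is
// rounded up to 5, i.e. a 40960-byte span.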
  1146	
   1147	// newobject is the implementation of the new builtin.
   1148	// The compiler (both frontend and SSA backend) knows the signature
   1149	// of this function.
  1150	func newobject(typ *_type) unsafe.Pointer {
  1151		return mallocgc(typ.size, typ, true)
  1152	}
  1153	
  1154	//go:linkname reflect_unsafe_New reflect.unsafe_New
  1155	func reflect_unsafe_New(typ *_type) unsafe.Pointer {
  1156		return mallocgc(typ.size, typ, true)
  1157	}
  1158	
  1159	//go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New
  1160	func reflectlite_unsafe_New(typ *_type) unsafe.Pointer {
  1161		return mallocgc(typ.size, typ, true)
  1162	}
  1163	
  1164	// newarray allocates an array of n elements of type typ.
  1165	func newarray(typ *_type, n int) unsafe.Pointer {
  1166		if n == 1 {
  1167			return mallocgc(typ.size, typ, true)
  1168		}
  1169		mem, overflow := math.MulUintptr(typ.size, uintptr(n))
  1170		if overflow || mem > maxAlloc || n < 0 {
  1171			panic(plainError("runtime: allocation size out of range"))
  1172		}
  1173		return mallocgc(mem, typ, true)
  1174	}
  1175	
  1176	//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
  1177	func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
  1178		return newarray(typ, n)
  1179	}
  1180	
  1181	func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
  1182		mp.mcache.next_sample = nextSample()
  1183		mProf_Malloc(x, size)
  1184	}
  1185	
  1186	// nextSample returns the next sampling point for heap profiling. The goal is
  1187	// to sample allocations on average every MemProfileRate bytes, but with a
  1188	// completely random distribution over the allocation timeline; this
  1189	// corresponds to a Poisson process with parameter MemProfileRate. In Poisson
  1190	// processes, the distance between two samples follows the exponential
   1191	// distribution (with mean MemProfileRate), so the best return value is a random
  1192	// number taken from an exponential distribution whose mean is MemProfileRate.
  1193	func nextSample() uintptr {
  1194		if GOOS == "plan9" {
  1195			// Plan 9 doesn't support floating point in note handler.
  1196			if g := getg(); g == g.m.gsignal {
  1197				return nextSampleNoFP()
  1198			}
  1199		}
  1200	
  1201		return uintptr(fastexprand(MemProfileRate))
  1202	}
  1203	
  1204	// fastexprand returns a random number from an exponential distribution with
  1205	// the specified mean.
  1206	func fastexprand(mean int) int32 {
  1207		// Avoid overflow. Maximum possible step is
  1208		// -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean.
  1209		switch {
  1210		case mean > 0x7000000:
  1211			mean = 0x7000000
  1212		case mean == 0:
  1213			return 0
  1214		}
  1215	
   1216	// Take a random sample of the exponential distribution exp(-x/mean).
   1217	// The probability density function is (1/mean)*exp(-x/mean), so the CDF is
   1218	// p = 1 - exp(-x/mean), so
   1219	// q = 1 - p == exp(-x/mean)
   1220	// log_e(q) = -x/mean
   1221	// -log_e(q)*mean = x
   1222	// x = -log_e(q) * mean
   1223	// x = log_2(q) * (-log_e(2)) * mean    ; Using log_2 for efficiency
  1224		const randomBitCount = 26
  1225		q := fastrand()%(1<<randomBitCount) + 1
  1226		qlog := fastlog2(float64(q)) - randomBitCount
  1227		if qlog > 0 {
  1228			qlog = 0
  1229		}
  1230		const minusLog2 = -0.6931471805599453 // -ln(2)
  1231		return int32(qlog*(minusLog2*float64(mean))) + 1
  1232	}
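// For comparison, a plain floating-point version of the same sampling
// (illustrative only; it uses math and math/rand instead of the runtime's
// fastrand and fastlog2) would be:
//
//	// expSample returns a value that is exponentially distributed
//	// with the given mean.
//	func expSample(mean float64) float64 {
//		u := rand.Float64()
//		for u == 0 {
//			u = rand.Float64()
//		}
//		return -math.Log(u) * mean
//	}
//
// fastexprand computes the same -log_e(u)*mean as log_2(u)*(-log_e(2))*mean
// using the cheaper fastlog2, and adds 1 so the result is never zero.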
  1233	
  1234	// nextSampleNoFP is similar to nextSample, but uses older,
  1235	// simpler code to avoid floating point.
  1236	func nextSampleNoFP() uintptr {
  1237		// Set first allocation sample size.
  1238		rate := MemProfileRate
  1239		if rate > 0x3fffffff { // make 2*rate not overflow
  1240			rate = 0x3fffffff
  1241		}
  1242		if rate != 0 {
  1243			return uintptr(fastrand() % uint32(2*rate))
  1244		}
  1245		return 0
  1246	}
  1247	
  1248	type persistentAlloc struct {
  1249		base *notInHeap
  1250		off  uintptr
  1251	}
  1252	
  1253	var globalAlloc struct {
  1254		mutex
  1255		persistentAlloc
  1256	}
  1257	
  1258	// persistentChunkSize is the number of bytes we allocate when we grow
  1259	// a persistentAlloc.
  1260	const persistentChunkSize = 256 << 10
  1261	
  1262	// persistentChunks is a list of all the persistent chunks we have
  1263	// allocated. The list is maintained through the first word in the
  1264	// persistent chunk. This is updated atomically.
  1265	var persistentChunks *notInHeap
  1266	
  1267	// Wrapper around sysAlloc that can allocate small chunks.
  1268	// There is no associated free operation.
  1269	// Intended for things like function/type/debug-related persistent data.
  1270	// If align is 0, uses default align (currently 8).
  1271	// The returned memory will be zeroed.
  1272	//
  1273	// Consider marking persistentalloc'd types go:notinheap.
  1274	func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
  1275		var p *notInHeap
  1276		systemstack(func() {
  1277			p = persistentalloc1(size, align, sysStat)
  1278		})
  1279		return unsafe.Pointer(p)
  1280	}
  1281	
  1282	// Must run on system stack because stack growth can (re)invoke it.
  1283	// See issue 9174.
  1284	//go:systemstack
  1285	func persistentalloc1(size, align uintptr, sysStat *uint64) *notInHeap {
  1286		const (
  1287			maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
  1288		)
  1289	
  1290		if size == 0 {
  1291			throw("persistentalloc: size == 0")
  1292		}
  1293		if align != 0 {
  1294			if align&(align-1) != 0 {
  1295				throw("persistentalloc: align is not a power of 2")
  1296			}
  1297			if align > _PageSize {
  1298				throw("persistentalloc: align is too large")
  1299			}
  1300		} else {
  1301			align = 8
  1302		}
  1303	
  1304		if size >= maxBlock {
  1305			return (*notInHeap)(sysAlloc(size, sysStat))
  1306		}
  1307	
  1308		mp := acquirem()
  1309		var persistent *persistentAlloc
  1310		if mp != nil && mp.p != 0 {
  1311			persistent = &mp.p.ptr().palloc
  1312		} else {
  1313			lock(&globalAlloc.mutex)
  1314			persistent = &globalAlloc.persistentAlloc
  1315		}
  1316		persistent.off = round(persistent.off, align)
  1317		if persistent.off+size > persistentChunkSize || persistent.base == nil {
  1318			persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
  1319			if persistent.base == nil {
  1320				if persistent == &globalAlloc.persistentAlloc {
  1321					unlock(&globalAlloc.mutex)
  1322				}
  1323				throw("runtime: cannot allocate memory")
  1324			}
  1325	
  1326			// Add the new chunk to the persistentChunks list.
  1327			for {
  1328				chunks := uintptr(unsafe.Pointer(persistentChunks))
  1329				*(*uintptr)(unsafe.Pointer(persistent.base)) = chunks
  1330				if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) {
  1331					break
  1332				}
  1333			}
  1334			persistent.off = round(sys.PtrSize, align)
  1335		}
  1336		p := persistent.base.add(persistent.off)
  1337		persistent.off += size
  1338		releasem(mp)
  1339		if persistent == &globalAlloc.persistentAlloc {
  1340			unlock(&globalAlloc.mutex)
  1341		}
  1342	
  1343		if sysStat != &memstats.other_sys {
  1344			mSysStatInc(sysStat, size)
  1345			mSysStatDec(&memstats.other_sys, size)
  1346		}
  1347		return p
  1348	}
  1349	
  1350	// inPersistentAlloc reports whether p points to memory allocated by
  1351	// persistentalloc. This must be nosplit because it is called by the
  1352	// cgo checker code, which is called by the write barrier code.
  1353	//go:nosplit
  1354	func inPersistentAlloc(p uintptr) bool {
  1355		chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks)))
  1356		for chunk != 0 {
  1357			if p >= chunk && p < chunk+persistentChunkSize {
  1358				return true
  1359			}
  1360			chunk = *(*uintptr)(unsafe.Pointer(chunk))
  1361		}
  1362		return false
  1363	}
  1364	
  1365	// linearAlloc is a simple linear allocator that pre-reserves a region
  1366	// of memory and then maps that region into the Ready state as needed. The
  1367	// caller is responsible for locking.
  1368	type linearAlloc struct {
  1369		next   uintptr // next free byte
  1370		mapped uintptr // one byte past end of mapped space
  1371		end    uintptr // end of reserved space
  1372	}
  1373	
  1374	func (l *linearAlloc) init(base, size uintptr) {
  1375		l.next, l.mapped = base, base
  1376		l.end = base + size
  1377	}
  1378	
  1379	func (l *linearAlloc) alloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
  1380		p := round(l.next, align)
  1381		if p+size > l.end {
  1382			return nil
  1383		}
  1384		l.next = p + size
  1385		if pEnd := round(l.next-1, physPageSize); pEnd > l.mapped {
  1386			// Transition from Reserved to Prepared to Ready.
  1387			sysMap(unsafe.Pointer(l.mapped), pEnd-l.mapped, sysStat)
  1388			sysUsed(unsafe.Pointer(l.mapped), pEnd-l.mapped)
  1389			l.mapped = pEnd
  1390		}
  1391		return unsafe.Pointer(p)
  1392	}
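// Illustrative use (hypothetical sizes): the 32-bit path of mallocinit
// reserves a region up front and then hands it to a linearAlloc, roughly:
//
//	var la linearAlloc
//	base := uintptr(sysReserve(nil, 1<<20)) // region is merely Reserved
//	la.init(base, 1<<20)
//	p := la.alloc(4096, 8, &memstats.other_sys) // maps pages as needed
//	_ = p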
  1393	
  1394	// notInHeap is off-heap memory allocated by a lower-level allocator
  1395	// like sysAlloc or persistentAlloc.
  1396	//
  1397	// In general, it's better to use real types marked as go:notinheap,
  1398	// but this serves as a generic type for situations where that isn't
  1399	// possible (like in the allocators).
  1400	//
  1401	// TODO: Use this as the return type of sysAlloc, persistentAlloc, etc?
  1402	//
  1403	//go:notinheap
  1404	type notInHeap struct{}
  1405	
  1406	func (p *notInHeap) add(bytes uintptr) *notInHeap {
  1407		return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
  1408	}
  1409	
