// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory statistics

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Statistics.
// If you edit this structure, also edit type MemStats below.
// Their layouts must match exactly.
//
// For detailed descriptions see the documentation for MemStats.
// Fields that differ from MemStats are further documented here.
//
// Many of these fields are updated on the fly, while others are only
// updated when updatememstats is called.
type mstats struct {
	// General statistics.
	alloc       uint64 // bytes allocated and not yet freed
	total_alloc uint64 // bytes allocated (even if freed)
	sys         uint64 // bytes obtained from system (should be sum of xxx_sys below, no locking, approximate)
	nlookup     uint64 // number of pointer lookups (unused)
	nmalloc     uint64 // number of mallocs
	nfree       uint64 // number of frees

	// Statistics about malloc heap.
	// Protected by mheap.lock
	//
	// Like MemStats, heap_sys and heap_inuse do not count memory
	// in manually-managed spans.
	heap_alloc    uint64 // bytes allocated and not yet freed (same as alloc above)
	heap_sys      uint64 // virtual address space obtained from system for GC'd heap
	heap_idle     uint64 // bytes in idle spans
	heap_inuse    uint64 // bytes in mSpanInUse spans
	heap_released uint64 // bytes released to the os
	heap_objects  uint64 // total number of allocated objects

	// Statistics about allocation of low-level fixed-size structures.
	// Protected by FixAlloc locks.
	stacks_inuse uint64 // bytes in manually-managed stack spans
	stacks_sys   uint64 // only counts newosproc0 stack in mstats; differs from MemStats.StackSys
	mspan_inuse  uint64 // mspan structures
	mspan_sys    uint64
	mcache_inuse uint64 // mcache structures
	mcache_sys   uint64
	buckhash_sys uint64 // profiling bucket hash table
	gc_sys       uint64
	other_sys    uint64

	// Statistics about garbage collector.
	// Protected by mheap or stopping the world during GC.
	next_gc         uint64 // goal heap_live for when next GC ends; ^0 if disabled
	last_gc_unix    uint64 // last gc (in unix time)
	pause_total_ns  uint64
	pause_ns        [256]uint64 // circular buffer of recent gc pause lengths
	pause_end       [256]uint64 // circular buffer of recent gc end times (nanoseconds since 1970)
	numgc           uint32
	numforcedgc     uint32  // number of user-forced GCs
	gc_cpu_fraction float64 // fraction of CPU time used by GC
	enablegc        bool
	debuggc         bool

	// Statistics about allocation size classes.

	by_size [_NumSizeClasses]struct {
		size    uint32
		nmalloc uint64
		nfree   uint64
	}

	// Statistics below here are not exported to MemStats directly.

	last_gc_nanotime uint64 // last gc (monotonic time)
	tinyallocs       uint64 // number of tiny allocations that didn't cause actual allocation; not exported to go directly
	last_next_gc     uint64 // next_gc for the previous GC
	last_heap_inuse  uint64 // heap_inuse at mark termination of the previous GC

	// triggerRatio is the heap growth ratio that triggers marking.
	//
	// E.g., if this is 0.6, then GC should start when the live
	// heap has reached 1.6 times the heap size marked by the
	// previous cycle. This should be ≤ GOGC/100 so the trigger
	// heap size is less than the goal heap size. This is set
	// during mark termination for the next cycle's trigger.
	triggerRatio float64

	// gc_trigger is the heap size that triggers marking.
	//
	// When heap_live ≥ gc_trigger, the mark phase will start.
	// This is also the heap size by which proportional sweeping
	// must be complete.
	//
	// This is computed from triggerRatio during mark termination
	// for the next cycle's trigger.
	gc_trigger uint64
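
	// Illustrative only, not part of the runtime: a rough sketch of
	// how triggerRatio and gc_trigger relate, assuming GOGC=100 and
	// the example values shown. The real computation also applies
	// lower and upper bounds to the trigger.
	//
	//	heapMarked := uint64(100 << 20) // 100 MiB retained by the previous GC
	//	triggerRatio := 0.6             // kept below GOGC/100 == 1.0
	//	gcTrigger := uint64(float64(heapMarked) * (1 + triggerRatio))
	//	// gcTrigger is ~160 MiB: marking starts when heap_live reaches
	//	// it, leaving headroom below the ~200 MiB goal (next_gc).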

	// heap_live is the number of bytes considered live by the GC.
	// That is: retained by the most recent GC plus allocated
	// since then. heap_live <= heap_alloc, since heap_alloc
	// includes unmarked objects that have not yet been swept (and
	// hence goes up as we allocate and down as we sweep) while
	// heap_live excludes these objects (and hence only goes up
	// between GCs).
	//
	// This is updated atomically without locking. To reduce
	// contention, this is updated only when obtaining a span from
	// an mcentral and at this point it counts all of the
	// unallocated slots in that span (which will be allocated
	// before that mcache obtains another span from that
	// mcentral). Hence, it slightly overestimates the "true" live
	// heap size. It's better to overestimate than to
	// underestimate because 1) this triggers the GC earlier than
	// necessary rather than potentially too late and 2) this
	// leads to a conservative GC rate rather than a GC rate that
	// is potentially too low.
	//
	// Reads should likewise be atomic (or during STW).
	//
	// Whenever this is updated, call traceHeapAlloc() and
	// gcController.revise().
	heap_live uint64

	// heap_scan is the number of bytes of "scannable" heap. This
	// is the live heap (as counted by heap_live), but omitting
	// no-scan objects and no-scan tails of objects.
	//
	// Whenever this is updated, call gcController.revise().
	heap_scan uint64

	// heap_marked is the number of bytes marked by the previous
	// GC. After mark termination, heap_live == heap_marked, but
	// unlike heap_live, heap_marked does not change until the
	// next mark termination.
	heap_marked uint64
}

var memstats mstats

// A MemStats records statistics about the memory allocator.
type MemStats struct {
	// General statistics.

	// Alloc is bytes of allocated heap objects.
	//
	// This is the same as HeapAlloc (see below).
	Alloc uint64

	// TotalAlloc is cumulative bytes allocated for heap objects.
	//
	// TotalAlloc increases as heap objects are allocated, but
	// unlike Alloc and HeapAlloc, it does not decrease when
	// objects are freed.
	TotalAlloc uint64

	// Sys is the total bytes of memory obtained from the OS.
	//
	// Sys is the sum of the XSys fields below. Sys measures the
	// virtual address space reserved by the Go runtime for the
	// heap, stacks, and other internal data structures. It's
	// likely that not all of the virtual address space is backed
	// by physical memory at any given moment, though in general
	// it all was at some point.
	Sys uint64
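
	// Illustrative only, not part of the runtime: the identity
	// described above can be checked against a single ReadMemStats
	// snapshot (the XSys fields are defined below).
	//
	//	var m MemStats
	//	ReadMemStats(&m)
	//	sum := m.HeapSys + m.StackSys + m.MSpanSys + m.MCacheSys +
	//		m.BuckHashSys + m.GCSys + m.OtherSys
	//	// sum == m.Sys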

	// Lookups is the number of pointer lookups performed by the
	// runtime.
	//
	// This is primarily useful for debugging runtime internals.
	Lookups uint64

	// Mallocs is the cumulative count of heap objects allocated.
	// The number of live objects is Mallocs - Frees.
	Mallocs uint64

	// Frees is the cumulative count of heap objects freed.
	Frees uint64

	// Heap memory statistics.
	//
	// Interpreting the heap statistics requires some knowledge of
	// how Go organizes memory. Go divides the virtual address
	// space of the heap into "spans", which are contiguous
	// regions of memory 8K or larger. A span may be in one of
	// three states:
	//
	// An "idle" span contains no objects or other data. The
	// physical memory backing an idle span can be released back
	// to the OS (but the virtual address space never is), or it
	// can be converted into an "in use" or "stack" span.
	//
	// An "in use" span contains at least one heap object and may
	// have free space available to allocate more heap objects.
	//
	// A "stack" span is used for goroutine stacks. Stack spans
	// are not considered part of the heap. A span can change
	// between heap and stack memory; it is never used for both
	// simultaneously.

	// HeapAlloc is bytes of allocated heap objects.
	//
	// "Allocated" heap objects include all reachable objects, as
	// well as unreachable objects that the garbage collector has
	// not yet freed. Specifically, HeapAlloc increases as heap
	// objects are allocated and decreases as the heap is swept
	// and unreachable objects are freed. Sweeping occurs
	// incrementally between GC cycles, so these two processes
	// occur simultaneously, and as a result HeapAlloc tends to
	// change smoothly (in contrast with the sawtooth that is
	// typical of stop-the-world garbage collectors).
	HeapAlloc uint64

	// HeapSys is bytes of heap memory obtained from the OS.
	//
	// HeapSys measures the amount of virtual address space
	// reserved for the heap. This includes virtual address space
	// that has been reserved but not yet used, which consumes no
	// physical memory, but tends to be small, as well as virtual
	// address space for which the physical memory has been
	// returned to the OS after it became unused (see HeapReleased
	// for a measure of the latter).
	//
	// HeapSys estimates the largest size the heap has had.
	HeapSys uint64

	// HeapIdle is bytes in idle (unused) spans.
	//
	// Idle spans have no objects in them. These spans could be
	// (and may already have been) returned to the OS, or they can
	// be reused for heap allocations, or they can be reused as
	// stack memory.
	//
	// HeapIdle minus HeapReleased estimates the amount of memory
	// that could be returned to the OS, but is being retained by
	// the runtime so it can grow the heap without requesting more
	// memory from the OS. If this difference is significantly
	// larger than the heap size, it indicates there was a recent
	// transient spike in live heap size.
	HeapIdle uint64

	// HeapInuse is bytes in in-use spans.
	//
	// In-use spans have at least one object in them. These spans
	// can only be used for other objects of roughly the same
	// size.
	//
	// HeapInuse minus HeapAlloc estimates the amount of memory
	// that has been dedicated to particular size classes, but is
	// not currently being used. This is an upper bound on
	// fragmentation, but in general this memory can be reused
	// efficiently.
	HeapInuse uint64
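
	// Illustrative only, not part of the runtime: the two derived
	// estimates described above, computed from a ReadMemStats
	// snapshot.
	//
	//	var m MemStats
	//	ReadMemStats(&m)
	//	retained := m.HeapIdle - m.HeapReleased // kept by the runtime for reuse rather than returned to the OS
	//	slack := m.HeapInuse - m.HeapAlloc      // upper bound on fragmentation within in-use spans
	//	_, _ = retained, slack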

	// HeapReleased is bytes of physical memory returned to the OS.
	//
	// This counts heap memory from idle spans that was returned
	// to the OS and has not yet been reacquired for the heap.
	HeapReleased uint64

	// HeapObjects is the number of allocated heap objects.
	//
	// Like HeapAlloc, this increases as objects are allocated and
	// decreases as the heap is swept and unreachable objects are
	// freed.
	HeapObjects uint64

	// Stack memory statistics.
	//
	// Stacks are not considered part of the heap, but the runtime
	// can reuse a span of heap memory for stack memory, and
	// vice-versa.

	// StackInuse is bytes in stack spans.
	//
	// In-use stack spans have at least one stack in them. These
	// spans can only be used for other stacks of the same size.
	//
	// There is no StackIdle because unused stack spans are
	// returned to the heap (and hence counted toward HeapIdle).
	StackInuse uint64

	// StackSys is bytes of stack memory obtained from the OS.
	//
	// StackSys is StackInuse, plus any memory obtained directly
	// from the OS for OS thread stacks (which should be minimal).
	StackSys uint64

	// Off-heap memory statistics.
	//
	// The following statistics measure runtime-internal
	// structures that are not allocated from heap memory (usually
	// because they are part of implementing the heap). Unlike
	// heap or stack memory, any memory allocated to these
	// structures is dedicated to these structures.
	//
	// These are primarily useful for debugging runtime memory
	// overheads.

	// MSpanInuse is bytes of allocated mspan structures.
	MSpanInuse uint64

	// MSpanSys is bytes of memory obtained from the OS for mspan
	// structures.
	MSpanSys uint64

	// MCacheInuse is bytes of allocated mcache structures.
	MCacheInuse uint64

	// MCacheSys is bytes of memory obtained from the OS for
	// mcache structures.
	MCacheSys uint64

	// BuckHashSys is bytes of memory in profiling bucket hash tables.
	BuckHashSys uint64

	// GCSys is bytes of memory in garbage collection metadata.
	GCSys uint64

	// OtherSys is bytes of memory in miscellaneous off-heap
	// runtime allocations.
	OtherSys uint64

	// Garbage collector statistics.

	// NextGC is the target heap size of the next GC cycle.
	//
	// The garbage collector's goal is to keep HeapAlloc ≤ NextGC.
	// At the end of each GC cycle, the target for the next cycle
	// is computed based on the amount of reachable data and the
	// value of GOGC.
	NextGC uint64

	// LastGC is the time the last garbage collection finished, as
	// nanoseconds since 1970 (the UNIX epoch).
	LastGC uint64

	// PauseTotalNs is the cumulative nanoseconds in GC
	// stop-the-world pauses since the program started.
	//
	// During a stop-the-world pause, all goroutines are paused
	// and only the garbage collector can run.
	PauseTotalNs uint64

	// PauseNs is a circular buffer of recent GC stop-the-world
	// pause times in nanoseconds.
	//
	// The most recent pause is at PauseNs[(NumGC+255)%256]. In
	// general, PauseNs[N%256] records the time paused in the most
	// recent N%256th GC cycle. There may be multiple pauses per
	// GC cycle; this is the sum of all pauses during a cycle.
	PauseNs [256]uint64
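
	// Illustrative only, not part of the runtime: walking the
	// circular buffer from the most recent pause backward, following
	// the indexing described above (only the last 256 cycles are
	// retained).
	//
	//	var m MemStats
	//	ReadMemStats(&m)
	//	n := m.NumGC
	//	if n > 256 {
	//		n = 256
	//	}
	//	for i := uint32(0); i < n; i++ {
	//		pause := m.PauseNs[(m.NumGC-1-i)%256] // pause of the (NumGC-i)th cycle, newest first
	//		_ = pause
	//	}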

	// PauseEnd is a circular buffer of recent GC pause end times,
	// as nanoseconds since 1970 (the UNIX epoch).
	//
	// This buffer is filled the same way as PauseNs. There may be
	// multiple pauses per GC cycle; this records the end of the
	// last pause in a cycle.
	PauseEnd [256]uint64

	// NumGC is the number of completed GC cycles.
	NumGC uint32

	// NumForcedGC is the number of GC cycles that were forced by
	// the application calling the GC function.
	NumForcedGC uint32

	// GCCPUFraction is the fraction of this program's available
	// CPU time used by the GC since the program started.
	//
	// GCCPUFraction is expressed as a number between 0 and 1,
	// where 0 means GC has consumed none of this program's CPU. A
	// program's available CPU time is defined as the integral of
	// GOMAXPROCS since the program started. That is, if
	// GOMAXPROCS is 2 and a program has been running for 10
	// seconds, its "available CPU" is 20 seconds. GCCPUFraction
	// does not include CPU time used for write barrier activity.
	//
	// This is the same as the fraction of CPU reported by
	// GODEBUG=gctrace=1.
	GCCPUFraction float64

	// EnableGC indicates that GC is enabled. It is always true,
	// even if GOGC=off.
	EnableGC bool

	// DebugGC is currently unused.
	DebugGC bool

	// BySize reports per-size class allocation statistics.
	//
	// BySize[N] gives statistics for allocations of size S where
	// BySize[N-1].Size < S ≤ BySize[N].Size.
	//
	// This does not report allocations larger than BySize[60].Size.
	BySize [61]struct {
		// Size is the maximum byte size of an object in this
		// size class.
		Size uint32

		// Mallocs is the cumulative count of heap objects
		// allocated in this size class. The cumulative bytes
		// of allocation is Size*Mallocs. The number of live
		// objects in this size class is Mallocs - Frees.
		Mallocs uint64

		// Frees is the cumulative count of heap objects freed
		// in this size class.
		Frees uint64
	}
}
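
// Illustrative only, not part of the runtime: locating the BySize
// entry that covers an allocation of a given size s, per the
// invariant documented on BySize (entries are sorted by Size).
//
//	var m MemStats
//	ReadMemStats(&m)
//	s := uint32(48) // hypothetical object size
//	for _, b := range m.BySize {
//		if s <= b.Size {
//			live := b.Mallocs - b.Frees // live objects in this size class
//			_ = live
//			break
//		}
//	}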

// Size of the trailing by_size array differs between mstats and MemStats,
// and all data after by_size is local to runtime, not exported.
// NumSizeClasses was changed, but we cannot change MemStats because of backward compatibility.
// sizeof_C_MStats is the size of the prefix of mstats that
// corresponds to MemStats. It should match Sizeof(MemStats{}).
var sizeof_C_MStats = unsafe.Offsetof(memstats.by_size) + 61*unsafe.Sizeof(memstats.by_size[0])

func init() {
	var memStats MemStats
	if sizeof_C_MStats != unsafe.Sizeof(memStats) {
		println(sizeof_C_MStats, unsafe.Sizeof(memStats))
		throw("MStats vs MemStatsType size mismatch")
	}

	if unsafe.Offsetof(memstats.heap_live)%8 != 0 {
		println(unsafe.Offsetof(memstats.heap_live))
		throw("memstats.heap_live not aligned to 8 bytes")
	}
}

// ReadMemStats populates m with memory allocator statistics.
//
// The returned memory allocator statistics are up to date as of the
// call to ReadMemStats. This is in contrast with a heap profile,
// which is a snapshot as of the most recently completed garbage
// collection cycle.
func ReadMemStats(m *MemStats) {
	stopTheWorld("read mem stats")

	systemstack(func() {
		readmemstats_m(m)
	})

	startTheWorld()
}
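
// Illustrative only, not part of the runtime: minimal caller-side use
// of ReadMemStats. Because it stops the world, it should be called
// sparingly in latency-sensitive programs.
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func main() {
//		var m runtime.MemStats
//		runtime.ReadMemStats(&m)
//		fmt.Printf("live heap %d B, from OS %d B, GC cycles %d\n",
//			m.HeapAlloc, m.Sys, m.NumGC)
//	}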

func readmemstats_m(stats *MemStats) {
	updatememstats()

	// The size of the trailing by_size array differs between
	// mstats and MemStats. NumSizeClasses was changed, but we
	// cannot change MemStats because of backward compatibility.
	memmove(unsafe.Pointer(stats), unsafe.Pointer(&memstats), sizeof_C_MStats)

	// memstats.stacks_sys is only memory mapped directly for OS stacks.
	// Add in heap-allocated stack memory for user consumption.
	stats.StackSys += stats.StackInuse
}

//go:linkname readGCStats runtime/debug.readGCStats
func readGCStats(pauses *[]uint64) {
	systemstack(func() {
		readGCStats_m(pauses)
	})
}

// readGCStats_m must be called on the system stack because it acquires the heap
// lock. See mheap for details.
//go:systemstack
func readGCStats_m(pauses *[]uint64) {
	p := *pauses
	// Calling code in runtime/debug should make the slice large enough.
	if cap(p) < len(memstats.pause_ns)+3 {
		throw("short slice passed to readGCStats")
	}

	// Pass back: pauses, pause ends, last gc (absolute time), number of gc, total pause ns.
	lock(&mheap_.lock)

	n := memstats.numgc
	if n > uint32(len(memstats.pause_ns)) {
		n = uint32(len(memstats.pause_ns))
	}

	// The pause buffer is circular. The most recent pause is at
	// pause_ns[(numgc-1)%len(pause_ns)], and then backward
	// from there to go back farther in time. We deliver the times
	// most recent first (in p[0]).
	p = p[:cap(p)]
	for i := uint32(0); i < n; i++ {
		j := (memstats.numgc - 1 - i) % uint32(len(memstats.pause_ns))
		p[i] = memstats.pause_ns[j]
		p[n+i] = memstats.pause_end[j]
	}

	p[n+n] = memstats.last_gc_unix
	p[n+n+1] = uint64(memstats.numgc)
	p[n+n+2] = memstats.pause_total_ns
	unlock(&mheap_.lock)
	*pauses = p[:n+n+3]
}
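
// Illustrative only, not part of the runtime: how a consumer such as
// runtime/debug can interpret the slice returned through *pauses,
// with n = min(numgc, 256) entries delivered.
//
//	p := *pauses            // len(p) == 2*n+3
//	pauseTimes := p[:n]     // pause durations, most recent first
//	pauseEnds := p[n : 2*n] // matching pause end times, most recent first
//	lastGC := p[2*n]        // end of last GC, nanoseconds since 1970
//	numGC := p[2*n+1]       // total number of completed GC cycles
//	totalPause := p[2*n+2]  // cumulative stop-the-world pause, in nanoseconds
//	_, _, _, _, _ = pauseTimes, pauseEnds, lastGC, numGC, totalPause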

//go:nowritebarrier
func updatememstats() {
	memstats.mcache_inuse = uint64(mheap_.cachealloc.inuse)
	memstats.mspan_inuse = uint64(mheap_.spanalloc.inuse)
	memstats.sys = memstats.heap_sys + memstats.stacks_sys + memstats.mspan_sys +
		memstats.mcache_sys + memstats.buckhash_sys + memstats.gc_sys + memstats.other_sys

	// We also count stacks_inuse as sys memory.
	memstats.sys += memstats.stacks_inuse

	// Calculate memory allocator stats.
	// During program execution we only count number of frees and amount of freed memory.
	// Current number of alive objects in the heap and amount of alive heap memory
	// are calculated by scanning all spans.
	// Total number of mallocs is calculated as number of frees plus number of alive objects.
	// Similarly, total amount of allocated memory is calculated as amount of freed memory
	// plus amount of alive heap memory.
	memstats.alloc = 0
	memstats.total_alloc = 0
	memstats.nmalloc = 0
	memstats.nfree = 0
	for i := 0; i < len(memstats.by_size); i++ {
		memstats.by_size[i].nmalloc = 0
		memstats.by_size[i].nfree = 0
	}

	// Flush mcaches to mcentral.
	systemstack(flushallmcaches)

	// Aggregate local stats.
	cachestats()

	// Collect allocation stats. This is safe and consistent
	// because the world is stopped.
	var smallFree, totalAlloc, totalFree uint64
	// Collect per-spanclass stats.
	for spc := range mheap_.central {
		// The mcaches are now empty, so mcentral stats are
		// up-to-date.
		c := &mheap_.central[spc].mcentral
		memstats.nmalloc += c.nmalloc
		i := spanClass(spc).sizeclass()
		memstats.by_size[i].nmalloc += c.nmalloc
		totalAlloc += c.nmalloc * uint64(class_to_size[i])
	}
	// Collect per-sizeclass stats.
	for i := 0; i < _NumSizeClasses; i++ {
		if i == 0 {
			memstats.nmalloc += mheap_.nlargealloc
			totalAlloc += mheap_.largealloc
			totalFree += mheap_.largefree
			memstats.nfree += mheap_.nlargefree
			continue
		}

		// The mcache stats have been flushed to mheap_.
		memstats.nfree += mheap_.nsmallfree[i]
		memstats.by_size[i].nfree = mheap_.nsmallfree[i]
		smallFree += mheap_.nsmallfree[i] * uint64(class_to_size[i])
	}
	totalFree += smallFree

	memstats.nfree += memstats.tinyallocs
	memstats.nmalloc += memstats.tinyallocs

	// Calculate derived stats.
	memstats.total_alloc = totalAlloc
	memstats.alloc = totalAlloc - totalFree
	memstats.heap_alloc = memstats.alloc
	memstats.heap_objects = memstats.nmalloc - memstats.nfree
}

// cachestats flushes all mcache stats.
//
// The world must be stopped.
//
//go:nowritebarrier
func cachestats() {
	for _, p := range allp {
		c := p.mcache
		if c == nil {
			continue
		}
		purgecachedstats(c)
	}
}

// flushmcache flushes the mcache of allp[i].
//
// The world must be stopped.
//
//go:nowritebarrier
func flushmcache(i int) {
	p := allp[i]
	c := p.mcache
	if c == nil {
		return
	}
	c.releaseAll()
	stackcache_clear(c)
}

// flushallmcaches flushes the mcaches of all Ps.
//
// The world must be stopped.
//
//go:nowritebarrier
func flushallmcaches() {
	for i := 0; i < int(gomaxprocs); i++ {
		flushmcache(i)
	}
}

//go:nosplit
func purgecachedstats(c *mcache) {
	// Protected by either heap or GC lock.
	h := &mheap_
	memstats.heap_scan += uint64(c.local_scan)
	c.local_scan = 0
	memstats.tinyallocs += uint64(c.local_tinyallocs)
	c.local_tinyallocs = 0
	h.largefree += uint64(c.local_largefree)
	c.local_largefree = 0
	h.nlargefree += uint64(c.local_nlargefree)
	c.local_nlargefree = 0
	for i := 0; i < len(c.local_nsmallfree); i++ {
		h.nsmallfree[i] += uint64(c.local_nsmallfree[i])
		c.local_nsmallfree[i] = 0
	}
}

// Atomically increases a given *system* memory stat. We are counting on this
// stat never overflowing a uintptr, so this function must only be used for
// system memory stats.
//
// The current implementation for little endian architectures is based on
// xadduintptr(), which is less than ideal: xadd64() should really be used.
// Using xadduintptr() is a stop-gap solution until arm supports xadd64() that
// doesn't use locks. (Locks are a problem as they require a valid G, which
// restricts their usability.)
//
// A side-effect of using xadduintptr() is that we need to check for
// overflow errors.
//go:nosplit
func mSysStatInc(sysStat *uint64, n uintptr) {
	if sysStat == nil {
		return
	}
	if sys.BigEndian {
		atomic.Xadd64(sysStat, int64(n))
		return
	}
	if val := atomic.Xadduintptr((*uintptr)(unsafe.Pointer(sysStat)), n); val < n {
		print("runtime: stat overflow: val ", val, ", n ", n, "\n")
		exit(2)
	}
}

// Atomically decreases a given *system* memory stat. Same comments as
// mSysStatInc apply.
//go:nosplit
func mSysStatDec(sysStat *uint64, n uintptr) {
	if sysStat == nil {
		return
	}
	if sys.BigEndian {
		atomic.Xadd64(sysStat, -int64(n))
		return
	}
	if val := atomic.Xadduintptr((*uintptr)(unsafe.Pointer(sysStat)), uintptr(-int64(n))); val+n < n {
		print("runtime: stat underflow: val ", val, ", n ", n, "\n")
		exit(2)
	}
}
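
// Illustrative only, not part of the runtime: the intended usage
// pattern for mSysStatInc/mSysStatDec. Low-level allocation paths
// pass a pointer to the relevant *_sys field; the call sites below
// are hypothetical.
//
//	mSysStatInc(&memstats.heap_sys, n) // after obtaining n bytes from the OS
//	mSysStatDec(&memstats.heap_sys, n) // after returning n bytes to the OS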