// The function Add adds len to ptr and returns the updated pointer
// Pointer(uintptr(ptr) + uintptr(len)).
// The len argument must be of integer type or an untyped constant.
// A constant len argument must be representable by a value of type int;
// if it is an untyped constant it is given type int.
// The rules for valid uses of Pointer still apply.
func Add(ptr Pointer, len IntegerType) Pointer
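A minimal sketch (the array arr and the two-element step are illustrative) of how Add replaces manual uintptr arithmetic when walking contiguous memory:

package main

import (
    "fmt"
    "unsafe"
)

func main() {
    arr := [4]int32{10, 20, 30, 40}
    p := unsafe.Pointer(&arr[0])
    // Advance by two elements. Equivalent to
    // unsafe.Pointer(uintptr(p) + 2*unsafe.Sizeof(arr[0])),
    // but the value stays typed as unsafe.Pointer throughout (Go 1.17+).
    p2 := unsafe.Add(p, 2*unsafe.Sizeof(arr[0]))
    fmt.Println(*(*int32)(p2)) // 30
}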
// The function Slice returns a slice whose underlying array starts at ptr
// and whose length and capacity are len.
// Slice(ptr, len) is equivalent to
//     (*[len]ArbitraryType)(unsafe.Pointer(ptr))[:]
// except that, as a special case, if ptr is nil and len is zero,
// Slice returns nil.
// The len argument must be of integer type or an untyped constant.
// A constant len argument must be non-negative and representable by a
// value of type int; if it is an untyped constant it is given type int.
// At run time, if len is negative, or if ptr is nil and len is not zero,
// a run-time panic occurs.
func Slice(ptr *ArbitraryType, len IntegerType) []ArbitraryType
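A minimal sketch (buf and its length are illustrative) of how Slice builds a slice header over existing memory without copying it:

package main

import (
    "fmt"
    "unsafe"
)

func main() {
    var buf [8]byte
    for i := range buf {
        buf[i] = byte(i)
    }
    // Build a []byte of length and capacity 8 whose backing array
    // starts at &buf[0]; the array is shared, not copied (Go 1.17+).
    s := unsafe.Slice(&buf[0], len(buf))
    s[0] = 42
    fmt.Println(buf[0], len(s), cap(s)) // 42 8 8
}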
// $GOROOT/src/reflect/value.go

// emptyInterface is the header for an interface{} value.
type emptyInterface struct {
    typ  *rtype
    word unsafe.Pointer
}
// unpackEface converts the empty interface i to a Value.
func unpackEface(i any) Value {
    e := (*emptyInterface)(unsafe.Pointer(&i))
    // NOTE: don't read e.word until we know whether it is really a pointer or not.
    t := e.typ
    if t == nil {
        return Value{}
    }
    f := flag(t.Kind())
    if ifaceIndir(t) {
        f |= flagIndir
    }
    return Value{t, e.word, f}
}
// $GOROOT/src/reflect/type.go

// TypeOf returns the reflection Type that represents the dynamic type of i.
// If i is a nil interface value, TypeOf returns nil.
func TypeOf(i any) Type {
    eface := *(*emptyInterface)(unsafe.Pointer(&i))
    return toType(eface.typ)
}
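The same two-word layout can be observed from user code. The sketch below declares a local eface struct that mirrors reflect's emptyInterface; it depends on unexported runtime/reflect internals and is shown only to illustrate what unpackEface and TypeOf read, not as a supported API:

package main

import (
    "fmt"
    "reflect"
    "unsafe"
)

// eface mirrors the two-word interface{} header: a type descriptor
// pointer followed by the data word.
type eface struct {
    typ  unsafe.Pointer
    word unsafe.Pointer
}

func main() {
    var i any = 12345
    e := (*eface)(unsafe.Pointer(&i))
    fmt.Printf("type word=%p data word=%p\n", e.typ, e.word)
    fmt.Println(reflect.TypeOf(i)) // int
}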
// $GOROOT/src/runtime/stack.go

// Stack describes a Go execution stack.
// The bounds of the stack are exactly [lo, hi),
// with no implicit data structures on either side.
type stack struct {
    lo uintptr
    hi uintptr
}
// $GOROOT/src/runtime/stack.go
func stackalloc(n uint32) stack {
    // Stackalloc must be called on scheduler stack, so that we
    // never try to grow the stack during the code that stackalloc runs.
    // Doing so would cause a deadlock (issue 1547).
    thisg := getg()
    if thisg != thisg.m.g0 {
        throw("stackalloc not on scheduler stack")
    }
    if n&(n-1) != 0 {
        throw("stack size not a power of 2")
    }
    if stackDebug >= 1 {
        print("stackalloc ", n, "\n")
    }
    if debug.efence != 0 || stackFromSystem != 0 {
        n = uint32(alignUp(uintptr(n), physPageSize))
        v := sysAlloc(uintptr(n), &memstats.stacks_sys)
        if v == nil {
            throw("out of memory (stackalloc)")
        }
        return stack{uintptr(v), uintptr(v) + uintptr(n)}
    }
    // Small stacks are allocated with a fixed-size free-list allocator.
    // If we need a stack of a bigger size, we fall back on allocating
    // a dedicated span.
    var v unsafe.Pointer
    if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
        order := uint8(0)
        n2 := n
        for n2 > _FixedStack {
            order++
            n2 >>= 1
        }
        var x gclinkptr
        if stackNoCache != 0 || thisg.m.p == 0 || thisg.m.preemptoff != "" {
            // thisg.m.p == 0 can happen in the guts of exitsyscall
            // or procresize. Just get a stack from the global pool.
            // Also don't touch stackcache during gc
            // as it's flushed concurrently.
            lock(&stackpool[order].item.mu)
            x = stackpoolalloc(order)
            unlock(&stackpool[order].item.mu)
        } else {
            c := thisg.m.p.ptr().mcache
            x = c.stackcache[order].list
            if x.ptr() == nil {
                stackcacherefill(c, order)
                x = c.stackcache[order].list
            }
            c.stackcache[order].list = x.ptr().next
            c.stackcache[order].size -= uintptr(n)
        }
        v = unsafe.Pointer(x)
    } else {
        var s *mspan
        npage := uintptr(n) >> _PageShift
        log2npage := stacklog2(npage)
        // Try to get a stack from the large stack cache.
        lock(&stackLarge.lock)
        if !stackLarge.free[log2npage].isEmpty() {
            s = stackLarge.free[log2npage].first
            stackLarge.free[log2npage].remove(s)
        }
        unlock(&stackLarge.lock)
        if s == nil {
            // Allocate a new stack from the heap.
            s = mheap_.allocManual(npage, spanAllocStack)
            if s == nil {
                throw("out of memory")
            }
            osStackAlloc(s)
            s.elemsize = uintptr(n)
        }
        v = unsafe.Pointer(s.base())
    }
    if raceenabled {
        racemalloc(v, uintptr(n))
    }
    if msanenabled {
        msanmalloc(v, uintptr(n))
    }
    if asanenabled {
        asanunpoison(v, uintptr(n))
    }
    if stackDebug >= 1 {
        print("  allocated ", v, "\n")
    }
    return stack{uintptr(v), uintptr(v) + uintptr(n)}
}
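A small, hedged experiment (the grow helper, its padding size, and the recursion depth are all illustrative) that makes this allocation path visible from user code: each recursive frame consumes roughly half a kilobyte, so the goroutine quickly outgrows its current stack, and the runtime copies it onto progressively larger stacks obtained through stackalloc. The printed frame addresses jump to a different region each time that happens; exact output depends on the Go version and platform:

package main

import (
    "fmt"
    "unsafe"
)

// grow recurses with a sizable frame and periodically prints the address
// of a frame-local array. When the current stack is exhausted, the runtime
// allocates a bigger one and copies every frame over, so the printed
// addresses move to a completely different range.
func grow(depth int) byte {
    var pad [512]byte
    pad[0] = byte(depth)
    if depth%64 == 0 {
        fmt.Printf("depth %4d: frame local at %#x\n",
            depth, uintptr(unsafe.Pointer(&pad[0])))
    }
    if depth == 0 {
        return pad[0]
    }
    return grow(depth-1) + pad[0]
}

func main() {
    grow(512)
}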