源码阅读
👨🏼‍💻

源码阅读

 

备注:

源码版本:const TheVersion = `go1.16.4`
 

源码起源:

runtime.main()
// 文件路径:src/runtime/proc/mian() // The main goroutine. func main() { g := getg() /* // getg returns the pointer to the current g. // The compiler rewrites calls to this function into instructions // that fetch the g directly (from TLS or from the dedicated register). // getg 返回指向当前 g 的指针。 // 编译器将对这个函数的调用重写为指令 // 直接获取 g(从 TLS 或从专用寄存器)。 func getg() *g */ // Racectx of m0->g0 is used only as the parent of the main goroutine. // It must not be used for anything else. // m0->g0 的 Racectx 仅用作主 goroutine 的父级。 // 它不能用于其他任何事情。 g.m.g0.racectx = 0 // Max stack size is 1 GB on 64-bit, 250 MB on 32-bit. // Using decimal instead of binary GB and MB because // they look nicer in the stack overflow failure message. // 最大堆栈大小在 64 位上为 1 GB,在 32 位上为 250 MB。 // 使用十进制代替二进制 GB 和 MB,因为 // 在堆栈溢出失败消息中它们看起来更好。 if sys.PtrSize == 8 { maxstacksize = 1000000000 } else { maxstacksize = 250000000 } // An upper limit for max stack size. Used to avoid random crashes // after calling SetMaxStack and trying to allocate a stack that is too big, // since stackalloc works with 32-bit sizes. // 最大堆栈大小的上限。 用于防止随机崩溃 // 在调用 SetMaxStack 并试图分配一个太大的堆栈后, // 因为 stackalloc 可以使用 32 位大小。(修改后)恢复原文 maxstackceiling = 2 * maxstacksize /* var maxstacksize uintptr = 1 << 20 // 足够了,直到 runtime.main 将它设置为真正的 var maxstackceiling = maxstacksize */ // Allow newproc to start new Ms. // 允许 newproc 启动新的 Ms。 mainStarted = true /* // mainStarted indicates that the main M has started. // mainStarted 表示主 M 已启动。 var mainStarted bool */ /* wasm 架构没有线程,不支持 */ if GOARCH != "wasm" { // no threads on wasm yet, so no sysmon // For runtime_syscall_doAllThreadsSyscall, we // register sysmon is not ready for the world to be // stopped. atomic.Store(&sched.sysmonStarting, 1) systemstack(func() { newm(sysmon, nil, -1) }) } // Lock the main goroutine onto this, the main OS thread, // during initialization. Most programs won't care, but a few // do require certain calls to be made by the main thread. 
// Those can arrange for main.main to run in the main thread // by calling runtime.LockOSThread during initialization // to preserve the lock. /* 给主M上锁 */ lockOSThread() /* 判断当前的线程M 是否是M0 */ if g.m != &m0 { throw("runtime.main not on m0") } /* 标记为非P运行的M */ m0.doesPark = true // Record when the world started. // Must be before doInit for tracing init. // 获取启动时间 // 需要在 doInit() 函数之前 runtimeInitTime = nanotime() if runtimeInitTime == 0 { throw("nanotime returning zero") } /* 判断是否开启调试 */ if debug.inittrace != 0 { inittrace.id = getg().goid inittrace.active = true } doInit(&runtime_inittask) // Must be before defer.(需要在defer()函数之前) // Defer unlock so that runtime.Goexit during init does the unlock too. // derer 函数中解锁,初始化的时候也能解锁 needUnlock := true defer func() { if needUnlock { unlockOSThread() } }() /* gc启动 */ gcenable() /* // gcenable is called after the bulk of the runtime initialization, // just before we're about to start letting user code run. // It kicks off the background sweeper goroutine, the background // scavenger goroutine, and enables GC. // gcenable 在大部分运行时初始化之后被调用, // 就在我们即将开始让用户代码运行之前。 // 它启动背景清扫器 goroutine,背景 // scavenger goroutine,并启用 GC。 func gcenable() { // Kick off sweeping and scavenging. c := make(chan int, 2) go bgsweep(c) go bgscavenge(c) <-c <-c memstats.enablegc = true // now that runtime is initialized, GC is okay } */ main_init_done = make(chan bool) /* 判断是开启cgo */ if iscgo { if _cgo_thread_start == nil { throw("_cgo_thread_start missing") } if GOOS != "windows" { if _cgo_setenv == nil { throw("_cgo_setenv missing") } if _cgo_unsetenv == nil { throw("_cgo_unsetenv missing") } } if _cgo_notify_runtime_init_done == nil { throw("_cgo_notify_runtime_init_done missing") } // Start the template thread in case we enter Go from // a C-created thread and need to create a new thread. 
startTemplateThread() cgocall(_cgo_notify_runtime_init_done, nil) } /* 调用所有包的初始化init()函数 */ doInit(&main_inittask) // Disable init tracing after main init done to avoid overhead // of collecting statistics in malloc and newproc // 在主初始化完成后禁用初始化跟踪以避免开销 // 在 malloc 和 newproc 中收集统计信息 inittrace.active = false /* 通知CGO那边可以继续执行了 -> runtime/cgocall/cgocallbackg1()函数中有对这个管道的检测 */ close(main_init_done) /* 将defer函数中的解锁关闭 */ needUnlock = false unlockOSThread() /* 判断是否是静态库或者动态库 */ if isarchive || islibrary { // A program compiled with -buildmode=c-archive or c-shared // has a main, but it is not executed. // 使用 -buildmode=c-archive 或 c-shared 编译的程序 // 有一个 main,但它没有被执行。 return } fn := main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime // 进行间接调用,因为链接器在放置运行时时不知道主包的地址 fn() /* 虚拟竞争检测 API,在未使用 -race 构建时使用。 */ if raceenabled { racefini() } // Make racy client program work: if panicking on // another goroutine at the same time as main returns, // let the other goroutine finish printing the panic trace. // Once it does, it will exit. See issues 3934 and 20018. if atomic.Load(&runningPanicDefers) != 0 { // Running deferred functions should not take long. for c := 0; c < 1000; c++ { if atomic.Load(&runningPanicDefers) == 0 { break } Gosched() } } if atomic.Load(&panicking) != 0 { gopark(nil, nil, waitReasonPanicWait, traceEvGoStop, 1) } exit(0) for { var x *int32 *x = 0 } }
 
 

调度

runtime.schedule()
// One round of scheduler: find a runnable goroutine and execute it. // Never returns. func schedule() { _g_ := getg() if _g_.m.locks != 0 { throw("schedule: holding locks") } if _g_.m.lockedg != 0 { stoplockedm() execute(_g_.m.lockedg.ptr(), false) // Never returns. } // We should not schedule away from a g that is executing a cgo call, // since the cgo call is using the m's g0 stack. if _g_.m.incgo { throw("schedule: in cgo") } top: pp := _g_.m.p.ptr() pp.preempt = false if sched.gcwaiting != 0 { gcstopm() goto top } if pp.runSafePointFn != 0 { runSafePointFn() } // Sanity check: if we are spinning, the run queue should be empty. // Check this before calling checkTimers, as that might call // goready to put a ready goroutine on the local run queue. if _g_.m.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) { throw("schedule: spinning with local work") } checkTimers(pp, 0) var gp *g var inheritTime bool // Normal goroutines will check for need to wakeP in ready, // but GCworkers and tracereaders will not, so the check must // be done here instead. tryWakeP := false if trace.enabled || trace.shutdown { gp = traceReader() if gp != nil { casgstatus(gp, _Gwaiting, _Grunnable) traceGoUnpark(gp, 0) tryWakeP = true } } if gp == nil && gcBlackenEnabled != 0 { gp = gcController.findRunnableGCWorker(_g_.m.p.ptr()) tryWakeP = tryWakeP || gp != nil } if gp == nil { // Check the global runnable queue once in a while to ensure fairness. // Otherwise two goroutines can completely occupy the local runqueue // by constantly respawning each other. if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 { lock(&sched.lock) gp = globrunqget(_g_.m.p.ptr(), 1) unlock(&sched.lock) } } if gp == nil { gp, inheritTime = runqget(_g_.m.p.ptr()) // We can see gp != nil here even if the M is spinning, // if checkTimers added a local goroutine via goready. 
} if gp == nil { gp, inheritTime = findrunnable() // blocks until work is available } // This thread is going to run a goroutine and is not spinning anymore, // so if it was marked as spinning we need to reset it now and potentially // start a new spinning M. if _g_.m.spinning { resetspinning() } if sched.disable.user && !schedEnabled(gp) { // Scheduling of this goroutine is disabled. Put it on // the list of pending runnable goroutines for when we // re-enable user scheduling and look again. lock(&sched.lock) if schedEnabled(gp) { // Something re-enabled scheduling while we // were acquiring the lock. unlock(&sched.lock) } else { sched.disable.runnable.pushBack(gp) sched.disable.n++ unlock(&sched.lock) goto top } } // If about to schedule a not-normal goroutine (a GCworker or tracereader), // wake a P if there is one. if tryWakeP { wakep() } if gp.lockedm != 0 { // Hands off own p to the locked m, // then blocks waiting for a new p. startlockedm(gp) goto top } execute(gp, inheritTime) }

调度 启动/暂停 文档说明 (内容来自于 runtime/proc.go 文件中上面的注释)

我们需要在保持足够的运行工作线程以利用可用的硬件并行性和停放过多运行的工作线程以节省 CPU 资源和功率之间取得平衡。这并不简单,原因有二:
  • 调度器状态是有意分布的(特别是每 P 个工作队列),因此不可能在快速路径上计算全局谓词;
  • 为了优化线程管理,我们需要知道未来(在不久的将来准备好新的 goroutine 时不要停放工作线程)。

三种被否决的方法(它们的效果都不好):

  1. 集中所有调度器状态(会抑制可扩展性)。
  2. 直接 goroutine 移交(handoff)。也就是说,当我们准备好一个新的 goroutine 并且有一个空闲的 P 时,唤醒(unpark)一个线程,并把 P 和 goroutine 移交给它。这会导致线程状态抖动,因为准备好 goroutine 的线程可能下一刻就无事可做,我们又需要把它停放(park)。此外,它会破坏计算的局部性,因为我们希望把相互依赖的 goroutine 留在同一个线程上;并且会引入额外的延迟。
  3. 当我们准备好一个 goroutine 并且有一个空闲的 P 时,唤醒一个额外的线程,但不做移交。这会导致过多的线程停放/唤醒,因为这个额外的线程会立即停放,而没有发现任何可做的工作。

目前的做法:

当我们准备好一个 goroutine 时,如果有一个空闲的 P 并且没有“自旋”的工作线程,我们就会唤醒(unpark)一个额外的线程。如果工作线程本地没有工作,并且在全局运行队列/netpoller 中也没有找到工作,则认为它正在自旋;自旋状态在 m.spinning 和 sched.nmspinning 中表示。以这种方式唤醒的线程也被视为自旋线程;我们不做 goroutine 移交,因此这些线程最初是没有工作的。自旋线程在停放(park)之前,会先在各个 per-P 运行队列中自旋寻找一些工作。如果一个自旋线程找到工作,它会将自己从自旋状态中取出并继续执行。如果它没有找到工作,它会自行退出自旋状态,然后停放。如果已经至少有一个自旋线程(sched.nmspinning>1),我们在准备好 goroutine 时就不会再唤醒新线程。为了弥补这一点,如果最后一个自旋线程找到工作并停止自旋,它必须唤醒(unpark)一个新的自旋线程。这种方法消除了线程唤醒数量的不合理峰值,同时保证了最终的最大 CPU 并行利用率。主要的实现复杂性是我们需要在自旋->非自旋线程转换过程中非常小心。这种转换可能与新 goroutine 的提交发生竞争,此时其中一方需要唤醒另一个工作线程。如果双方都没有做到,就可能出现半持久的 CPU 利用不足。goroutine 准备的一般模式是:提交一个 goroutine 到本地工作队列,#StoreLoad 样式的内存屏障,检查 sched.nmspinning。spin->non-spinning 过渡的一般模式是:递减 nmspinning,#StoreLoad 风格的内存屏障,检查所有 per-P 工作队列是否有新工作。请注意,这些复杂性都不适用于全局运行队列,因为向全局队列提交工作时,我们对线程唤醒并不含糊。另请参阅 nmspinning 操作处的注释。
 
🗾
map 源码解读
🥏
docker create 源码解读