This article analyzes the system initialization, goroutine creation, and scheduling of GPM based on the go1.15.8 source code. The structs of the GPM scheduler live in runtime/runtime2.go, while the scheduling logic is concentrated mainly in runtime/proc.go.
For the structures and containers of the GPM scheduler, see:
https://arong.blog.csdn.net/article/details/117604004
1. System Initialization
Core Flow
The Go program's bootstrap code performs system initialization. The core steps are:
- Call osinit, which assigns the ncpu field based on the number of CPU cores reported by the operating system
- Call schedinit, which initializes the scheduler
- Call newproc, which creates a G and puts it into a run queue
- Call mstart, which starts initializing the M and associates it with a P to schedule Gs
During system initialization, osinit and schedinit initialize the runtime and the scheduler, while newproc and mstart perform the first round of scheduling, mainly to run g0 and the main goroutine that executes the main function. newproc and mstart are invoked again later whenever user Gs are created and scheduled.
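Two of these initialization results are directly observable from user code: ncpu backs runtime.NumCPU, and the P count chosen by schedinit can be read with runtime.GOMAXPROCS(0). A minimal sketch (the printed values depend on your machine and environment):

```go
package main

import (
	"fmt"
	"runtime"
)

func main() {
	// ncpu is set by osinit at startup and exposed via NumCPU.
	fmt.Println("CPU cores (osinit/ncpu):", runtime.NumCPU())
	// schedinit sizes the P list from ncpu unless the GOMAXPROCS
	// environment variable overrides it; passing 0 only reads the value.
	fmt.Println("P count (schedinit/GOMAXPROCS):", runtime.GOMAXPROCS(0))
}
```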
osinit
At startup, the Go program calls the osinit implementation of the OS-specific package for the platform it is running on. osinit not only determines the number of CPU cores, it also obtains the operating system's physical page size:
```go
func osinit() {
	// Get the number of CPU cores
	ncpu = getncpu()
	if physPageSize == 0 {
		physPageSize = getPageSize()
	}
}
```
schedinit
schedinit mainly initializes M and P: the maximum number of Ms is capped at 10000, and M instances are created on demand and destroyed when no longer needed. The number of Ps defaults to the CPU core count (ncpu), can be overridden by the GOMAXPROCS environment variable, and the Ps are initialized immediately.
```go
func schedinit() {
	// Get the current g pointer from TLS or a dedicated register
	_g_ := getg()
	// Cap the maximum number of Ms
	sched.maxmcount = 10000
	// Initialize the stack reuse pools
	stackinit()
	// Initialize the current M
	mcommoninit(_g_.m)
	// The number of Ps defaults to the CPU core count (ncpu),
	// unless GOMAXPROCS overrides it
	procs := ncpu
	if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
		procs = n
	}
	// Create the configured number of Ps
	if procresize(procs) != nil {
		throw("unknown runnable goroutine during bootstrap")
	}
}
```
mcommoninit
mcommoninit initializes the current M; in essence it links the M into the allm list by making it the new list head.
```go
func mcommoninit(mp *m) {
	// Get the current G
	_g_ := getg()
	// Check that the number of Ms does not exceed maxmcount
	checkmcount()
	// Link the current M in front of the old allm head...
	mp.alllink = allm
	// ...and publish it as the new head of allm
	// (NumCgoCall iterates over allm without schedlock,
	// so it must be published safely)
	setMNoWB(&allm, mp)
}
```
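The two assignments above are the classic head insertion on an intrusive singly linked list. A standalone sketch of the same pattern, with toy types (node, head, and push are illustrative names, not the runtime's):

```go
package main

import "fmt"

// node plays the role of m, next the role of m.alllink.
type node struct {
	id   int
	next *node
}

var head *node // plays the role of allm

// push makes n the new list head, as mcommoninit does with allm.
func push(n *node) {
	n.next = head // mp.alllink = allm
	head = n      // setMNoWB(&allm, mp), minus the write-barrier care
}

func main() {
	for i := 1; i <= 3; i++ {
		push(&node{id: i})
	}
	for n := head; n != nil; n = n.next {
		fmt.Println(n.id) // prints 3 2 1: newest first
	}
}
```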
procresize
procresize initializes or resizes the P list to the requested count and stores the Ps in allp. While procresize runs, the world is stopped and the scheduler cannot execute.
```go
func procresize(nprocs int32) *p {
	// Get the old number of Ps
	old := gomaxprocs
	if old < 0 || nprocs <= 0 {
		throw("procresize: invalid arg")
	}
	now := nanotime()
	if sched.procresizetime != 0 {
		// Accumulate the time spent running with the old P count
		sched.totaltime += int64(old) * (now - sched.procresizetime)
	}
	// Record when this resize happened
	sched.procresizetime = now
	// If more Ps are requested than allp currently holds, grow allp:
	// copy the old slice into nallp, then assign it back (allp = nallp)
	if nprocs > int32(len(allp)) {
		// Synchronize with retake, which could be running
		// concurrently since it doesn't run on a P.
		lock(&allpLock)
		if nprocs <= int32(cap(allp)) {
			allp = allp[:nprocs]
		} else {
			nallp := make([]*p, nprocs)
			// Copy everything up to allp's cap so we
			// never lose old allocated Ps.
			copy(nallp, allp[:cap(allp)])
			allp = nallp
		}
		unlock(&allpLock)
	}
	// Initialize the not-yet-initialized Ps in the grown allp
	for i := old; i < nprocs; i++ {
		pp := allp[i]
		if pp == nil {
			pp = new(p)
		}
		pp.init(i)
		atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
	}
	// Check the current G's associated P: if it still has a valid P,
	// keep running on it; otherwise take allp[0]
	_g_ := getg()
	if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs {
		_g_.m.p.ptr().status = _Prunning
		_g_.m.p.ptr().mcache.prepareForSweep()
	} else {
		// release the current P and acquire allp[0].
		//
		// We must do this before destroying our current P
		// because p.destroy itself has write barriers, so we
		// need to do that from a valid P.
		if _g_.m.p != 0 {
			if trace.enabled {
				// Pretend that we were descheduled
				// and then scheduled again to keep
				// the trace sane.
				traceGoSched()
				traceProcStop(_g_.m.p.ptr())
			}
			_g_.m.p.ptr().m = 0
		}
		_g_.m.p = 0
		_g_.m.mcache = nil
		p := allp[0]
		p.m = 0
		p.status = _Pidle
		acquirep(p)
		if trace.enabled {
			traceGoStart()
		}
	}
	// If the P list shrank, destroy the Ps that are no longer needed
	for i := nprocs; i < old; i++ {
		p := allp[i]
		p.destroy()
	}
	// Put Ps without local work on the idle list; link the ones
	// with runnable Gs into a list and return it
	var runnablePs *p
	for i := nprocs - 1; i >= 0; i-- {
		p := allp[i]
		if _g_.m.p.ptr() == p {
			continue
		}
		p.status = _Pidle
		if runqempty(p) {
			pidleput(p)
		} else {
			p.m.set(mget())
			p.link.set(runnablePs)
			runnablePs = p
		}
	}
	stealOrder.reset(uint32(nprocs))
	var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32
	atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
	return runnablePs
}
```
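procresize does not run only at startup: calling runtime.GOMAXPROCS with a positive argument also stops the world and resizes the P list through it. A small demonstration (the printed values depend on the machine):

```go
package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Read the current P count without changing it.
	fmt.Println("before:", runtime.GOMAXPROCS(0))
	// Setting a new value stops the world and goes through
	// procresize to grow or shrink allp.
	prev := runtime.GOMAXPROCS(2)
	fmt.Println("previous:", prev, "now:", runtime.GOMAXPROCS(0)) // now: 2
}
```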
2. Goroutine Creation
Core Flow
When a goroutine is started with the go func() syntax, the runtime actually calls the newproc function in runtime/proc.go to obtain a G, wraps the func() to execute in it, and calls startm to start an M and bind a P. The core steps in the source are as follows (see the compile-time sketch after this list):
- Call gfget to take an idle G from the local P's gFree list; if there is none, fetch idle Gs from the scheduler's global gFree.stack and gFree.noStack lists, stealing a batch of global idle Gs into the local P's free list in the process
- If no idle G is available, call malg to allocate a stack for a new G (2 KB by default), create the new G, and switch its status from _Gidle to _Gdead
- Put the newly created G into the global G list (runtime.allgs)
- Bind the passed-in func() to the G
- Switch the G's status from _Gdead to _Grunnable and call runqput, which installs it as p.runnext (kicking the previous runnext into the local queue) so it can run soon; when the local queue is full, Gs overflow to the scheduler's global runnable queue sched.runq
- Call wakep, which essentially calls startm to wake up or create an M and associate it with a P
- startm performs a series of memory allocations and kernel-thread binding operations for the M (newm) and finally runs mstart to enter the GPM scheduling loop
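The rewrite from go func() to a newproc call happens at compile time. A quick way to see it, assuming a Go toolchain is installed, is to disassemble a trivial program with `go tool compile -S main.go`; the output should contain a `CALL runtime.newproc(SB)` where the go statement appears (the exact listing varies by version and platform):

```go
package main

func main() {
	done := make(chan struct{})
	go func() { // the go statement compiles into a call to runtime.newproc
		println("hello from a new G")
		close(done)
	}()
	<-done // wait so main doesn't exit before the new G runs
}
```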
newproc
newproc obtains the address of the function arguments and the current G, uses getcallerpc to get the program counter of the parent G's go statement, and switches to g0 to run newproc1, which creates or reuses an available G:
```go
func newproc(siz int32, fn *funcval) {
	// Get the address of the arguments
	argp := add(unsafe.Pointer(&fn), sys.PtrSize)
	// Get the currently running G
	gp := getg()
	// Get the PC of the parent G's go statement
	pc := getcallerpc()
	systemstack(func() {
		// Run newproc1 on g0
		newproc1(fn, (*uint8)(argp), siz, gp, pc)
	})
}
```
newproc1
newproc1 initializes a G: it allocates memory, saves the call-site information, associates the func, and finally produces a runnable G, puts it into a queue, and wakes an M to execute it.
```go
func newproc1(fn *funcval, argp *uint8, narg int32, callergp *g, callerpc uintptr) {
	// Get the current G
	_g_ := getg()
	// The func to run must not be nil
	if fn == nil {
		_g_.m.throwing = -1 // do not dump full stacks
		throw("go of nil func value")
	}
	// Pin the current M so it cannot be preempted away
	acquirem()
	// Reject argument lists too large for a new goroutine stack
	siz := narg
	siz = (siz + 7) &^ 7
	if siz >= _StackMin-4*sys.RegSize-sys.RegSize {
		throw("newproc: function arguments too large for new goroutine")
	}
	// Look for a reusable G
	_p_ := _g_.m.p.ptr()
	newg := gfget(_p_)
	if newg == nil {
		// No reusable G: create a new one with a 2 KB stack
		newg = malg(_StackMin)
		// CAS the G's status to _Gdead
		casgstatus(newg, _Gidle, _Gdead)
		// Append the new G to the runtime.allgs list
		allgadd(newg)
	}
	if newg.stack.hi == 0 {
		throw("newproc1: newg missing stack")
	}
	if readgstatus(newg) != _Gdead {
		throw("newproc1: new g is not Gdead")
	}
	totalSize := 4*sys.RegSize + uintptr(siz) + sys.MinFrameSize // extra space in case of reads slightly beyond frame
	totalSize += -totalSize & (sys.SpAlign - 1)                  // align to spAlign
	sp := newg.stack.hi - totalSize
	spArg := sp
	if usesLR {
		// caller's LR
		*(*uintptr)(unsafe.Pointer(sp)) = 0
		prepGoExitFrame(sp)
		spArg += sys.MinFrameSize
	}
	// If the func takes arguments, copy them onto the new stack
	if narg > 0 {
		memmove(unsafe.Pointer(spArg), unsafe.Pointer(argp), uintptr(narg))
		// This is a stack-to-stack copy. If write barriers
		// are enabled and the source stack is grey (the
		// destination is always black), then perform a
		// barrier copy. We do this *after* the memmove
		// because the destination stack may have garbage on
		// it.
		if writeBarrier.needed && !_g_.m.curg.gcscandone {
			f := findfunc(fn.fn)
			stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stkmap.nbit > 0 {
				// We're in the prologue, so it's always stack map index 0.
				bv := stackmapdata(stkmap, 0)
				bulkBarrierBitmap(spArg, spArg, uintptr(bv.n)*sys.PtrSize, 0, bv.bytedata)
			}
		}
	}
	// Save the scheduling context and the stack-top pointer
	memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
	newg.sched.sp = sp
	newg.stktopsp = sp
	// Save the goexit instruction pointer so the G exits after running its func
	newg.sched.pc = funcPC(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function
	newg.sched.g = guintptr(unsafe.Pointer(newg))
	// Save the func pointer so the G can execute it
	gostartcallfn(&newg.sched, fn)
	newg.gopc = callerpc
	newg.ancestors = saveAncestors(callergp)
	newg.startpc = fn.fn
	if _g_.m.curg != nil {
		newg.labels = _g_.m.curg.labels
	}
	if isSystemGoroutine(newg, false) {
		atomic.Xadd(&sched.ngsys, +1)
	}
	newg.gcscanvalid = false
	// Initialization done: flip the G to _Grunnable so it can be scheduled
	casgstatus(newg, _Gdead, _Grunnable)
	if _p_.goidcache == _p_.goidcacheend {
		// Sched.goidgen is the last allocated id,
		// this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
		// At startup sched.goidgen=0, so main goroutine receives goid=1.
		_p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
		_p_.goidcache -= _GoidCacheBatch - 1
		_p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
	}
	newg.goid = int64(_p_.goidcache)
	_p_.goidcache++
	if raceenabled {
		newg.racectx = racegostart(callerpc)
	}
	if trace.enabled {
		traceGoCreate(newg, newg.startpc)
	}
	// Put the G into the P's local queue or the global queue
	runqput(_p_, newg, true)
	// If there is an idle P and no spinning M, wake an M to run Gs
	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && mainStarted {
		wakep()
	}
	// Done: unpin the M
	releasem(_g_.m)
}
```
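One line worth unpacking is `totalSize += -totalSize & (sys.SpAlign - 1)`: for a power-of-two alignment a, `-x & (a-1)` is exactly the padding that rounds x up to the next multiple of a. A standalone check of the arithmetic:

```go
package main

import "fmt"

// alignUp rounds x up to the next multiple of a (a must be a power of two),
// using the same bit trick newproc1 applies to totalSize.
func alignUp(x, a uintptr) uintptr {
	return x + (-x & (a - 1))
}

func main() {
	for _, x := range []uintptr{5, 8, 13} {
		fmt.Println(x, "->", alignUp(x, 8)) // 5 -> 8, 8 -> 8, 13 -> 16
	}
}
```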
gfget
gfget checks whether the current P has an idle G; if not, it refills from the scheduler's global free-G lists. It also verifies whether the obtained G still has a stack, and allocates a new one if the stack was freed.
```go
func gfget(_p_ *p) *g {
retry:
	if _p_.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
		lock(&sched.gFree.lock)
		// Move a batch of free Gs to the P.
		for _p_.gFree.n < 32 {
			// Prefer Gs with stacks.
			gp := sched.gFree.stack.pop()
			if gp == nil {
				gp = sched.gFree.noStack.pop()
				if gp == nil {
					break
				}
			}
			sched.gFree.n--
			_p_.gFree.push(gp)
			_p_.gFree.n++
		}
		unlock(&sched.gFree.lock)
		goto retry
	}
	gp := _p_.gFree.pop()
	if gp == nil {
		return nil
	}
	_p_.gFree.n--
	if gp.stack.lo == 0 {
		// Stack was deallocated in gfput. Allocate a new one.
		systemstack(func() {
			gp.stack = stackalloc(_FixedStack)
		})
		gp.stackguard0 = gp.stack.lo + _StackGuard
	} else {
		if raceenabled {
			racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
		}
		if msanenabled {
			msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
		}
	}
	return gp
}
```
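The structure here, a small per-P cache refilled in batches from a locked global pool, is a general technique for cutting lock contention. A simplified, generic sketch of the same pattern with toy types (the real lists are intrusive, and the local side needs no lock because it is owned by one P):

```go
package main

import "sync"

const batch = 32 // refill size, mirroring gfget's "for _p_.gFree.n < 32"

type pool struct {
	mu     sync.Mutex
	global []int // stands in for sched.gFree
}

type local struct {
	items []int // stands in for _p_.gFree; owned by one P
}

// get pops from the local cache, taking the global lock only when
// the cache is empty and a batch must be moved over.
func (l *local) get(p *pool) (int, bool) {
	if len(l.items) == 0 {
		p.mu.Lock()
		n := batch
		if n > len(p.global) {
			n = len(p.global)
		}
		l.items = append(l.items, p.global[len(p.global)-n:]...)
		p.global = p.global[:len(p.global)-n]
		p.mu.Unlock()
	}
	if len(l.items) == 0 {
		return 0, false
	}
	v := l.items[len(l.items)-1]
	l.items = l.items[:len(l.items)-1]
	return v, true
}

func main() {
	p := &pool{global: []int{1, 2, 3}}
	var l local
	for {
		v, ok := l.get(p)
		if !ok {
			break
		}
		println(v)
	}
}
```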
runqput
runqput puts a new G (already marked runnable) into p.runnext or the P's local queue; when the local queue is full, it falls back to runqputslow, which moves the G together with half of the local queue to the global queue:
```go
func runqput(_p_ *p, gp *g, next bool) {
	if randomizeScheduler && next && fastrand()%2 == 0 {
		next = false
	}
	// If next is true, install gp as p.runnext and kick out the old runnext
	if next {
	retryNext:
		oldnext := _p_.runnext
		if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
			goto retryNext
		}
		if oldnext == 0 {
			return
		}
		// Kick the old runnext out to the regular run queue.
		gp = oldnext.ptr()
	}
retry:
	h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers
	t := _p_.runqtail
	// runq is a 256-slot array serving as the P's local runnable queue;
	// if there is room, append the G to it
	if t-h < uint32(len(_p_.runq)) {
		_p_.runq[t%uint32(len(_p_.runq))].set(gp)
		atomic.Store(&_p_.runqtail, t+1) // store-release, makes the item available for consumption
		return
	}
	// The local queue is full: move the G (and half the queue) to the global queue
	if runqputslow(_p_, gp, h, t) {
		return
	}
	// the queue is not full, now the put above must succeed
	goto retry
}
```
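p.runq is a fixed-size ring buffer indexed by ever-growing head/tail counters taken modulo the array length, so `t-h` is the current occupancy even after the counters wrap. A minimal single-threaded sketch of that indexing scheme (omitting the atomics the runtime needs for its lock-free consumers):

```go
package main

import "fmt"

// ring mimics p.runq's indexing: head and tail grow without bound,
// and the slot is chosen by taking them modulo the array length.
type ring struct {
	buf        [4]int // the runtime uses 256 slots
	head, tail uint32
}

func (r *ring) put(v int) bool {
	if r.tail-r.head >= uint32(len(r.buf)) {
		return false // full: the runtime falls back to runqputslow here
	}
	r.buf[r.tail%uint32(len(r.buf))] = v
	r.tail++
	return true
}

func (r *ring) get() (int, bool) {
	if r.tail == r.head {
		return 0, false // empty
	}
	v := r.buf[r.head%uint32(len(r.buf))]
	r.head++
	return v, true
}

func main() {
	var r ring
	for i := 1; i <= 5; i++ {
		fmt.Println("put", i, "ok:", r.put(i)) // the 5th put reports full
	}
	for {
		v, ok := r.get()
		if !ok {
			break
		}
		fmt.Println("get", v)
	}
}
```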
wakep
After the G is placed on a runnable queue, the runtime tries wakep to wake an M to run Gs. Note that the woken M does not necessarily run this newly created G; that depends on what the scheduler finds.
```go
func wakep() {
	// Only one M needs to be woken at a time
	if !atomic.Cas(&sched.nmspinning, 0, 1) {
		return
	}
	// Call startm
	startm(nil, true)
}
```
startm
startm obtains an idle M (or creates a new one) and an idle P, and records the P as m.nextp. Once this M-P pairing is formed, the M can take Gs from the P and start executing their funcs:
```go
func startm(_p_ *p, spinning bool) {
	lock(&sched.lock)
	if _p_ == nil {
		// No P specified: take an idle P from sched.pidle
		_p_ = pidleget()
		if _p_ == nil {
			unlock(&sched.lock)
			// No idle P available: undo the nmspinning increment
			if spinning {
				// The caller incremented nmspinning, but there are no idle Ps,
				// so it's okay to just undo the increment and give up.
				if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
					throw("startm: negative nmspinning")
				}
			}
			return
		}
	}
	// Try to take an idle M from sched.midle
	mp := mget()
	unlock(&sched.lock)
	if mp == nil {
		// No idle M: create one with m.spinning = true, bind the P to it,
		// and return; an M will then be available at the next wakeup
		var fn func()
		if spinning {
			// The caller incremented nmspinning, so set m.spinning in the new M.
			fn = mspinning
		}
		// Create a kernel thread for the M and bind the P and fn to it
		newm(fn, _p_)
		return
	}
	// An M taken off the idle list must not already be spinning
	if mp.spinning {
		throw("startm: m is spinning")
	}
	// It must not already have a next P
	if mp.nextp != 0 {
		throw("startm: m has p")
	}
	// A spinning M must only get a P whose runq is empty
	if spinning && !runqempty(_p_) {
		throw("startm: p has runnable gs")
	}
	// Mark the M as spinning and hand it the P
	mp.spinning = spinning
	mp.nextp.set(_p_)
	// Wake the M
	notewakeup(&mp.park)
}
```
newm
newm is where the kernel thread is actually created and associated with the M; it binds the P and fn to the M and arranges for mstart to run, which starts the real scheduling process:
```go
func newm(fn func(), _p_ *p) {
	// Allocate a new M bound to the P and fn
	mp := allocm(_p_, fn)
	// Record the P as the M's next P
	mp.nextp.set(_p_)
	// Create the kernel thread and bind it to the M
	newm1(mp)
}

func newm1(mp *m) {
	if iscgo {
		var ts cgothreadstart
		if _cgo_thread_start == nil {
			throw("_cgo_thread_start missing")
		}
		ts.g.set(mp.g0)
		ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
		// The new thread starts by calling mstart
		ts.fn = unsafe.Pointer(funcPC(mstart))
		if msanenabled {
			msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
		}
		execLock.rlock() // Prevent process clone.
		asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
		execLock.runlock()
		return
	}
	execLock.rlock() // Prevent process clone.
	newosproc(mp)
	execLock.runlock()
}

func newosproc(mp *m) {
	stk := unsafe.Pointer(mp.g0.stack.hi)
	// Initialize an attribute object.
	var attr pthreadattr
	var err int32
	err = pthread_attr_init(&attr)
	// Finally, create the thread. It starts at mstart_stub, which does some low-level
	// setup and then calls mstart.
	var oset sigset
	sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
	// Create the kernel thread, starting it at mstart_stub, which then calls mstart
	err = pthread_create(&attr, funcPC(mstart_stub), unsafe.Pointer(mp))
	sigprocmask(_SIG_SETMASK, &oset, nil)
	if err != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}
}
```
3. The Scheduling Process
After system initialization and go func creation, a G wrapping the func to execute sits in one of three queues: p.runnext, p.runq, or sched.runq. The next step is mstart, which starts the real scheduling loop (startm only initializes the M; it does not schedule). Note that the scheduling loop runs in multiple M instances, and the schedule function is triggered in the following situations (a runnable example of the Gosched case follows this list):
- Go program initialization
- Starting a goroutine with the go keyword
- Calling runtime.Gosched, where the current G pauses to give other Gs a chance to run
- Calling runtime.Goexit, where the current G finishes and exits
- G state transitions
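For instance, runtime.Gosched re-enters the scheduler voluntarily. With GOMAXPROCS forced to 1, the yield is easy to observe (ordering is still scheduler-dependent in general):

```go
package main

import (
	"fmt"
	"runtime"
)

func main() {
	runtime.GOMAXPROCS(1) // single P, so the yield hands over the only P
	done := make(chan struct{})
	go func() {
		fmt.Println("other G runs")
		close(done)
	}()
	fmt.Println("main before yield")
	runtime.Gosched() // current G goes back to a queue; schedule() picks the next G
	<-done
	fmt.Println("main after yield")
}
```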
mstart
mstart is invoked during Go program initialization and when goroutines are started with the go keyword; ultimately it too ends up calling the schedule function:
```go
func mstart() {
	_g_ := getg()

	osStack := _g_.stack.lo == 0
	if osStack {
		// Initialize stack bounds from system stack.
		// Cgo may have left stack size in stack.hi.
		// minit may update the stack bounds.
		size := _g_.stack.hi
		if size == 0 {
			size = 8192 * sys.StackGuardMultiplier
		}
		_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
		_g_.stack.lo = _g_.stack.hi - size + 1024
	}
	// Initialize stack guard so that we can start calling regular
	// Go code.
	_g_.stackguard0 = _g_.stack.lo + _StackGuard
	// This is the g0, so we can also call go:systemstack
	// functions, which check stackguard1.
	_g_.stackguard1 = _g_.stackguard0
	mstart1()

	// Exit this thread.
	if GOOS == "windows" || GOOS == "solaris" || GOOS == "illumos" || GOOS == "plan9" || GOOS == "darwin" || GOOS == "aix" {
		// Windows, Solaris, illumos, Darwin, AIX and Plan 9 always system-allocate
		// the stack, but put it in _g_.stack before mstart,
		// so the logic above hasn't set osStack yet.
		osStack = true
	}
	mexit(osStack)
}

func mstart1() {
	_g_ := getg()
	// Save the caller's PC and SP
	save(getcallerpc(), getcallersp())
	asminit()
	// Initialize the M
	minit()
	// Run m0's extra setup
	if _g_.m == &m0 {
		mstartm0()
	}
	// If the M has a start function, run it
	if fn := _g_.m.mstartfn; fn != nil {
		fn()
	}
	// Enter the scheduling loop
	schedule()
}
```
schedule
schedule is also known as the one-round scheduling function. Its job is to find a runnable G by all available means and finally run it via execute. After the user code finishes, the goexit path runs and control reaches runtime.goexit0, which in turn calls schedule again, closing the scheduling loop.
The core flow of one scheduling round is shown below. Note that when the current M is locked to a specific G (lockedg), the locked M must be stopped until its locked G becomes runnable, and is then woken to run it. This is because a locked G must only ever run on its own kernel thread (the runtime.LockOSThread contract, typically used for thread-local state), so an unrelated M must never run a G that is locked to another M.
```go
func schedule() {
	_g_ := getg()

	if _g_.m.locks != 0 {
		throw("schedule: holding locks")
	}
	// The current M is locked to a G: stop this M
	if _g_.m.lockedg != 0 {
		// Park the M until its locked G is runnable
		stoplockedm()
		// Run the locked G
		execute(_g_.m.lockedg.ptr(), false) // Never returns.
	}
top:
	// GC is waiting for a stop-the-world: park the M
	if sched.gcwaiting != 0 {
		gcstopm()
		// Go back to top and look for a runnable G again
		goto top
	}
	// The G to run
	var gp *g
	// Whether to inherit the current time slice
	var inheritTime bool
	if gp == nil {
		// Every 61st tick, check the global queue first so it cannot starve
		if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 {
			lock(&sched.lock)
			gp = globrunqget(_g_.m.p.ptr(), 1)
			unlock(&sched.lock)
		}
	}
	if gp == nil {
		// Take a runnable G from the P's local queue
		gp, inheritTime = runqget(_g_.m.p.ptr())
		if gp != nil && _g_.m.spinning {
			throw("schedule: spinning with local work")
		}
	}
	if gp == nil {
		// Neither the global nor the local queue yielded a G:
		// search everywhere (netpoll, stealing from other Ps, ...)
		gp, inheritTime = findrunnable()
	}
	if _g_.m.spinning {
		// The M found work: leave the spinning state
		resetspinning()
	}
	if gp.lockedm != 0 {
		// The G is locked to another M: wake that M and hand the G over
		startlockedm(gp)
		// Go back to top and look for another runnable G
		goto top
	}
	// Run the G
	execute(gp, inheritTime)
}
```
execute
execute actually runs the G; once the G's func finishes, control lands on the goexit path:
```go
func execute(gp *g, inheritTime bool) {
	_g_ := getg()
	// Flip the G's status to _Grunning
	casgstatus(gp, _Grunnable, _Grunning)
	gp.waitsince = 0
	// Clear the preemption flag
	gp.preempt = false
	gp.stackguard0 = gp.stack.lo + _StackGuard
	if !inheritTime {
		// Count one more scheduling tick
		_g_.m.p.ptr().schedtick++
	}
	// Associate M -> G
	_g_.m.curg = gp
	// Associate G -> M
	gp.m = _g_.m
	// Jump into the G's code
	gogo(&gp.sched)
}
```
Goexit
In the final stage, after the G's code finishes, the exit chain runs: gogo -> goexit (the assembly stub that newproc1 planted in newg.sched.pc) -> runtime.goexit1 -> runtime.goexit0; the exported runtime.Goexit funnels into the same goexit1 after running deferred calls. goexit0 is where the finished G's status is actually flipped to _Gdead, and it then calls schedule again, entering the next scheduling round. This is the core execution loop of the whole GPM scheduler.
```go
func Goexit() {
	// ... elided ...
	goexit1()
}

func goexit1() {
	// ... elided ...
	// Switch to g0 and call goexit0
	mcall(goexit0)
}

func goexit0(gp *g) {
	_g_ := getg()
	// Flip the finished G's status to _Gdead
	casgstatus(gp, _Grunning, _Gdead)
	// ... elided ...
	schedule()
}
```
4. Summary of the Core Flow
From the source walked through above we can sketch the core goroutine-scheduling flow. To summarize: when a Go program starts, the scheduler is initialized and the Ms and Ps are stored in the GPM containers. When go func starts a goroutine, a G is obtained or reused, the func to execute is wrapped into it, and the schedule function starts the scheduler. Scheduling is a closed loop that runs inside each M instance, so every M can schedule independently: the scheduler searches for a runnable G and executes it, and when the G's code finishes, the goexit path ends the goroutine and jumps back into schedule to continue scheduling, closing the loop.