其实在看 go 源码的时候，发现除了 sync 包里有个 mutex 以外，runtime 包里也有一个 mutex，这个 mutex 在 runtime 很多地方都在用。 这个 runtime 包里面的 mutex 的结构如下：
目录: /runtime/runtime2.go
代码：
type mutex struct {lockRankStructkey uintptr
}
可以看到他有两个成员一个是lockRankStruct一个是key。
先看下lockRankStruct
lockRankStruct 这个结构体是个空的结构体。在网上有大佬这样描述： lockRankStruct 提供了一种运行时的静态锁排名的机制。静态锁排名会建立文件化的锁之间的总排序顺序，如果违反总的排序则会报错。只要锁排序是按照文档设计的顺序，锁排序死锁就不会发生。如果要让 Go 运行时使用这个机制，你需要设置 GOEXPERIMENT=staticlockranking。默认未开启，此时 lockRankStruct 是一个空结构体，lockWithRank() 等效于 lock()。 啥意思啊？按照文档设计顺序？文档是啥？文档顺序是啥？
其实这里是一种非常机翻的说法。他其实说的是：在 go 的配置中启用 GOEXPERIMENT=staticlockranking，就会按照一定的顺序检查锁，如果加锁不是按照这个顺序进行的，就可能出现死锁的情况，那么就会抛出 throw（在 runtime 包内部很多错误是 throw 不是 panic，throw 方法并没有暴露给用户使用，用户只能用 recover 接 panic，也就是说用户无法处理 throw 错误，项目会直接崩掉）。
那这个顺序是哪来的
这个顺序其实是已经规定好的，当然是 go 语言的开发人员提交的，提交这个 changes 的地址是 https://go-review.googlesource.com/c/go/+/207619
他这个顺序其实是一个map一个已经规定好的maplockPartialOrder位置是runtime包中的lockrank.go
var lockPartialOrder [][]lockRank [][]lockRank{lockRankSysmon: {},lockRankScavenge: {lockRankSysmon},lockRankForcegc: {lockRankSysmon},lockRankDefer: {},lockRankSweepWaiters: {},lockRankAssistQueue: {},lockRankSweep: {},lockRankPollDesc: {},lockRankCpuprof: {},lockRankSched: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof},lockRankAllg: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched},lockRankAllp: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched},lockRankTimers: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllp, lockRankTimers},lockRankNetpollInit: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllp, lockRankTimers},lockRankHchan: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankHchan},lockRankNotifyList: {},lockRankSudog: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankHchan, lockRankNotifyList},lockRankRwmutexW: {},lockRankRwmutexR: {lockRankSysmon, lockRankRwmutexW},lockRankRoot: {},lockRankItab: {},lockRankReflectOffs: {lockRankItab},lockRankUserArenaState: {},lockRankTraceBuf: {lockRankSysmon, lockRankScavenge},lockRankTraceStrings: {lockRankSysmon, lockRankScavenge, lockRankTraceBuf},lockRankFin: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, 
lockRankTraceBuf, lockRankTraceStrings},lockRankGcBitsArenas: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},lockRankMspanSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},lockRankSpanSetSpine: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},lockRankProfInsert: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},lockRankProfBlock: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, 
lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},lockRankProfMemActive: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},lockRankProfMemFuture: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankProfMemActive},lockRankGscan: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture},lockRankStackpool: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankSpanSetSpine, lockRankProfInsert, 
lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},lockRankStackLarge: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},lockRankHchanLeaf: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankHchanLeaf},lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankMspanSpecial, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, 
lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankMspanSpecial, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans},lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankMheapSpecial, lockRankMspanSpecial, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap},lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankMspanSpecial, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap},lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, 
lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankMspanSpecial, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace},lockRankPanic: {},lockRankDeadlock: {lockRankPanic, lockRankDeadlock},
}其实这玩意称之为“文档”多少有点不太直观但是你知道是什么意思就行不必纠结这个命名很多命名有一定的历史因素或者直译不够信雅达追求这个考古结果太浪费时间
那这个顺序是怎么验证的
其实验证这个顺序的方法也不复杂就在runtime/lockrank_on.go文件里的checkRanks方法里
func checkRanks(gp *g, prevRank, rank lockRank) {rankOK : falseif rank prevRank {// If rank prevRank, then we definitely have a rank errorrankOK false} else if rank lockRankLeafRank {// If new lock is a leaf lock, then the preceding lock can// be anything except another leaf lock.rankOK prevRank lockRankLeafRank} else {// Weve now verified the total lock ranking, but we// also enforce the partial ordering specified by// lockPartialOrder as well. Two locks with the same rank// can only be acquired at the same time if explicitly// listed in the lockPartialOrder table.list : lockPartialOrder[rank]for _, entry : range list {if entry prevRank {rankOK truebreak}}}if !rankOK {printlock()println(gp.m.procid, )printHeldLocks(gp)throw(lock ordering problem)}
}
不知道这个有啥影响么
没有。不开启 GOEXPERIMENT=staticlockranking 都不用关心这个问题，不看源码甚至不知道这个玩意，不必太在意，看到了当个乐子就行。 再看下 key
key的实现方式有两种
runtime/lock_futex.go 文件里的futex实现
//go:build dragonfly || freebsd || linux
主要是 dragonfly、FreeBSD、linux 系统
另一个就是 runtime/lock_sema.go 文件里的 sema实现
//go:build aix || darwin || netbsd || openbsd || plan9 || solaris || windows
主要是 aix darwin netbsd openbsd plan9 solaris windows系统
但是实现起来也是大差不差的。
以linux的为例毕竟日常开发还是linux的项目比较多。
首先锁的状态有三种
const (mutex_unlocked 0mutex_locked 1mutex_sleeping 2
mutex_unlocked 没有锁 mutex_locked 锁了 mutex_sleeping 表示有线程调用futexsleep阻塞了
然后看lock方法这个是获取锁的方法
func lock2(l *mutex) {// 获取当前运行该方法的协程Ggp : getg()// 锁都是负数了出问题了报错throw扔出去if gp.m.locks 0 {throw(runtime·lock: lock count)}// 当前协程G绑定的M上的lock计数1不懂的去看GMP结构gp.m.locks// 资源比较空闲时候第一次请求锁就成功了直接返回表示获取锁成功// Xchg直接交换值没有CAS操作Xchg比较简单理论上更快Cas还要做值对比Xchg更适合去抢锁v : atomic.Xchg(key32(l.key), mutex_locked)if v mutex_unlocked {return}// 大部分情况下你获取是失败的毕竟加锁的场景就是高并发引起的冲突不然你加个毛的锁// 当v是mutex_unlock和mutex_sleeping的时候也就说加锁没有成功// 修改名称把状态改成wait——等待wait : v// 自旋次数如果ncpu也就是cpu核心数大于1说明是多核cpu那么就自旋等待下active_spin实际上是4那就是自旋4次至于为什么是4我没深究spin : 0if ncpu 1 {// spin是spinning的意思“旋转的”你打红警的时候盖特机炮的台词就有spinspin active_spin}for {// 尝试获取锁for i : 0; i spin; i {for l.key mutex_unlocked {// Cas操作获取锁。这里的wait的值第一次是上面传入的第二次则是从for循环末尾拿到的if atomic.Cas(key32(l.key), mutex_unlocked, wait) {return}}// 这个方法其实是底层调用了CPU的PAUSE指令意在优化自旋等待的效率主要是针对赛扬和志强cpu的procyield(active_spin_cnt)}// passive_spin 1,passive -- 被动的被动自旋就是说上面4次自旋都没拿到再给你一次机会让你再获取一次锁所以整个获取锁的自旋次数是 4 1for i : 0; i passive_spin; i {for l.key mutex_unlocked {// 同样的CAS操作获取锁if atomic.Cas(key32(l.key), mutex_unlocked, wait) {return}}// 让出cpuosyield()}// 切换锁的状态为sleepingxchg返回原来的值v atomic.Xchg(key32(l.key), mutex_sleeping)// 如果原来的值是mutex_unlocked,也就说无锁那么mutex_sleeping 本身就算拿到锁了if v mutex_unlocked {return}// 重新设定wait值为mutex_sleeping为下次循环做准备wait mutex_sleeping// futexsleep调用futex函数进入睡眠。futexsleep(key32(l.key), mutex_sleeping, -1)}
}最后看unlock方法这个是解锁的方法
func unlock2(l *mutex) {// 设置 l.key mutex_unlocked。v是旧值v : atomic.Xchg(key32(l.key), mutex_unlocked)// 如果已经是unlocked 你还执行解锁那么就抛出throw异常if v mutex_unlocked {throw(unlock of unlocked lock)}// 如果旧状态是sleeping说明已经有其他协程在等待这个锁处于sleep状态这时候解锁的时候要把那个sleep的协程唤醒if v mutex_sleeping {// 执行futexwakeupfutexwakeup(key32(l.key), 1)}// 获取当前的goroutinegp : getg()// 当前g所属的M的锁计数器减一gp.m.locks--if gp.m.locks 0 {throw(runtime·unlock: lock count)}// m所有的锁已经释放且g本身的preempt位在解锁unlock之前就被被标记为true说明该g可以被抢占了if gp.m.locks 0 gp.preempt { // 既然需要被抢占那么就要设置stackguard0位置为stackPreempt,手动标记为需要检查栈溢出当调度器检查到栈溢出的时候会根据Goroutine的标记进行相应处理这里就是减产preempt并执行抢占gp.stackguard0 stackPreempt}
} 大致就是这样了