Sarsa-λ (Sarsa Lambda) is a variant of the Sarsa algorithm, where λ is a parameter between 0 and 1 that balances the importance of the current state against all previously visited states.
Sarsa is an incremental learning method closely related to Q-learning: by continuously exploring and learning in the actual environment, it gradually updates the policy and value functions in order to learn an optimal behavior policy.
Sarsa-λ builds on Sarsa by introducing a new idea, "λ decay", to balance the importance of the current state against all previously visited states. In Sarsa-λ we consider not only the reward of the current state and the Q-value of the next state, but also the Q-values of all previously visited state-action pairs, with the λ decay parameter weighting how important each of them is. This gives learning a longer horizon and lets earlier actions be credited and revisited more effectively.
In contrast, plain Sarsa only looks at the Q-values of the current state and the next state and never propagates credit back to earlier states, so its learning is less far-sighted and less fine-grained.
Overall, Sarsa-λ is better suited than Sarsa to tasks with long-range temporal dependencies and delayed rewards, at the cost of being more complex and computationally expensive.
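In update-rule form, this is my summary of the standard backward-view tabular Sarsa(λ), which is what the code below implements; $\alpha$ is the learning rate, $\gamma$ the reward decay, and $\lambda$ the trace decay (the notation is mine, not from the original post):

$\delta = r + \gamma\, Q(s', a') - Q(s, a)$   (TD error for the transition just taken)
$e(s, a) \leftarrow 1$   (mark the visited pair; an accumulating trace would use $e(s, a) \leftarrow e(s, a) + 1$)
$Q(\tilde{s}, \tilde{a}) \leftarrow Q(\tilde{s}, \tilde{a}) + \alpha\, \delta\, e(\tilde{s}, \tilde{a})$   for every entry of the table
$e(\tilde{s}, \tilde{a}) \leftarrow \gamma \lambda\, e(\tilde{s}, \tilde{a})$   for every entry of the table

When $\lambda = 0$ the traces vanish immediately and this reduces to ordinary one-step Sarsa; when $\lambda = 1$ every earlier step of the episode keeps receiving (discounted) credit.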
Without further ado, let's see how the code differs. First, the environment:
import numpy as np
import time
import tkinter as tk

# define some constants
UNIT = 40
WIDTH = 4
HIGHT = 4


class Palace(tk.Tk, object):
    def __init__(self):
        super(Palace, self).__init__()
        # action space
        self.action_space = ['u', 'd', 'l', 'r']
        # self.n_action = len(self.action_space)
        self.title('maze')
        # build the canvas
        self.geometry('{0}x{1}'.format(HIGHT * UNIT, WIDTH * UNIT))
        self.build_maze()

    def build_maze(self):
        self.canvas = tk.Canvas(self, bg='white', height=HIGHT * UNIT, width=WIDTH * UNIT)
        # draw the grid lines
        for i in range(0, WIDTH * UNIT, UNIT):
            x0, y0, x1, y1 = i, 0, i, WIDTH * UNIT
            self.canvas.create_line(x0, y0, x1, y1)
        for j in range(0, HIGHT * UNIT, UNIT):
            x0, y0, x1, y1 = 0, j, HIGHT * UNIT, j
            self.canvas.create_line(x0, y0, x1, y1)
        # create the "hells" in the maze
        hell_center1 = np.array([100, 20])
        self.hell1 = self.canvas.create_rectangle(hell_center1[0] - 15, hell_center1[1] - 15,
                                                  hell_center1[0] + 15, hell_center1[1] + 15, fill='black')
        hell_center2 = np.array([20, 100])
        self.hell2 = self.canvas.create_rectangle(hell_center2[0] - 15, hell_center2[1] - 15,
                                                  hell_center2[0] + 15, hell_center2[1] + 15, fill='green')
        # create the exit
        out_center = np.array([100, 100])
        self.oval = self.canvas.create_oval(out_center[0] - 15, out_center[1] - 15,
                                            out_center[0] + 15, out_center[1] + 15, fill='yellow')
        # the agent
        origin = np.array([20, 20])
        self.finder = self.canvas.create_rectangle(origin[0] - 15, origin[1] - 15,
                                                   origin[0] + 15, origin[1] + 15, fill='red')
        self.canvas.pack()  # do not forget the parentheses

    # one exploration step of the agent
    def step(self, action):
        s = self.canvas.coords(self.finder)  # current position of the agent
        # canvas.move() needs an offset, so define a base move distance
        base_action = np.array([0, 0])
        # decide the direction according to the action
        if action == 'u':
            if s[1] > UNIT:
                base_action[1] -= UNIT
        elif action == 'd':
            if s[1] < (HIGHT - 1) * UNIT:
                base_action[1] += UNIT
        elif action == 'l':
            if s[0] > UNIT:
                base_action[0] -= UNIT
        elif action == 'r':
            if s[0] < (WIDTH - 1) * UNIT:
                base_action[0] += UNIT
        # move
        self.canvas.move(self.finder, base_action[0], base_action[1])
        # record the new position after moving
        s_ = self.canvas.coords(self.finder)
        # give the reward; "terminal" is not assigned by hand, it is decided here
        if s_ == self.canvas.coords(self.oval):
            reward = 1
            done = True
            s_ = 'terminal'  # the episode is over
        elif s_ in (self.canvas.coords(self.hell2), self.canvas.coords(self.hell1)):
            reward = -1
            done = True
            s_ = 'terminal'
        else:
            reward = 0
            done = False
        # this function not only takes more arguments, it also returns more results
        return s_, reward, done

    def reset(self):
        self.update()
        time.sleep(0.5)
        self.canvas.delete(self.finder)
        origin = np.array([20, 20])
        self.finder = self.canvas.create_rectangle(origin[0] - 15, origin[1] - 15,
                                                   origin[0] + 15, origin[1] + 15,
                                                   fill='red')
        # return observation
        return self.canvas.coords(self.finder)

    def render(self):
        time.sleep(0.05)
        self.update()

The environment is basically unchanged. Next is the agent:

"""
This part of code is the Q learning brain, which is a brain of the agent.
All decisions are made in here.

View more on my tutorial page: https://morvanzhou.github.io/tutorials/
"""
import numpy as np
import pandas as pd


class RL(object):
    def __init__(self, action_space, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9):
        self.actions = action_space  # a list
        self.lr = learning_rate
        self.gamma = reward_decay
        self.epsilon = e_greedy
        self.q_table = pd.DataFrame(columns=self.actions, dtype=np.float64)

    def check_state_exist(self, state):
        if state not in self.q_table.index:
            # append new state to q table
            # (DataFrame.append requires pandas < 2.0; on newer pandas use pd.concat instead)
            self.q_table = self.q_table.append(
                pd.Series(
                    [0] * len(self.actions),
                    index=self.q_table.columns,
                    name=state,
                )
            )

    def choose_action(self, observation):
        self.check_state_exist(observation)
        # action selection
        if np.random.rand() < self.epsilon:
            # choose best action
            state_action = self.q_table.loc[observation, :]
            # some actions may have the same value, randomly choose one of these actions
            action = np.random.choice(state_action[state_action == np.max(state_action)].index)
        else:
            # choose random action
            action = np.random.choice(self.actions)
        return action

    def learn(self, *args):
        pass


# backward eligibility traces
class SarsaLambdaTable(RL):
    # note the extra parameter trace_decay: like the reward decay, it makes
    # state-action pairs that are further from the reward matter less
    def __init__(self, actions, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9, trace_decay=0.9):
        super(SarsaLambdaTable, self).__init__(actions, learning_rate, reward_decay, e_greedy)
        # backward view, eligibility trace.
        # here the lambda shows up; honestly I am not yet sure exactly what it does
        self.lambda_ = trace_decay
        # a copy of the q_table
        self.eligibility_trace = self.q_table.copy()

    def check_state_exist(self, state):
        if state not in self.q_table.index:
            # append new state to q table
            to_be_append = pd.Series(
                [0] * len(self.actions),
                index=self.q_table.columns,
                name=state,
            )
            self.q_table = self.q_table.append(to_be_append)
            # also update eligibility trace
            # the copied table is kept in sync with the original one
            self.eligibility_trace = self.eligibility_trace.append(to_be_append)

    def learn(self, s, a, r, s_, a_):
        # first check the state; if it is not in the table yet, add it
        self.check_state_exist(s_)
        q_predict = self.q_table.loc[s, a]
        if s_ != 'terminal':
            # this is the "reality"; q_target is the observed target
            q_target = r + self.gamma * self.q_table.loc[s_, a_]  # next state is not terminal
        else:
            q_target = r  # next state is terminal
        # do not update directly; compute the error and keep it for later
        error = q_target - q_predict

        # increase trace amount for visited state-action pair
        # lambda is mainly about the update rule: before, everything was a
        # single-step update, which is a bit slow; eligibility_trace keeps a
        # record of the trajectory of visited steps
        # Method 1:
        # self.eligibility_trace.loc[s, a] += 1
        # Method 2:
        self.eligibility_trace.loc[s, :] *= 0
        self.eligibility_trace.loc[s, a] = 1

        # Q update
        self.q_table += self.lr * error * self.eligibility_trace

        # decay eligibility trace after update
        self.eligibility_trace *= self.gamma * self.lambda_

        return self.q_table
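The "Method 1" and "Method 2" comments in learn() correspond to accumulating and replacing eligibility traces. A minimal sketch of the difference on a made-up two-state table (the state labels s0/s1 and the repeated visits are purely illustrative):

import numpy as np
import pandas as pd

# toy eligibility-trace table: 2 hypothetical states x 2 actions
trace = pd.DataFrame(np.zeros((2, 2)), index=['s0', 's1'], columns=['l', 'r'])

# Method 1: accumulating trace: revisiting (s0, 'l') keeps adding 1,
# so a pair visited several times can end up with a trace greater than 1
trace_acc = trace.copy()
for _ in range(3):
    trace_acc.loc['s0', 'l'] += 1
print(trace_acc.loc['s0', 'l'])   # 3.0

# Method 2: replacing trace: zero the whole row first, then set the
# visited action to 1, so the trace is capped at 1 per state
trace_rep = trace.copy()
trace_rep.loc['s0', :] *= 0
trace_rep.loc['s0', 'l'] = 1
print(trace_rep.loc['s0', 'l'])   # 1.0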
In reinforcement learning, eligibility usually refers to the contribution a state-action pair makes to the value function. Concretely, it describes how strongly that pair influences the value function, and it can be used to update the value function incrementally.
Eligibility traces are typically used in algorithms such as Sarsa-λ. In these algorithms, every state-action pair maintains an eligibility value indicating how much it contributes to the current value function; each time the value function is updated, the eligibility values are updated accordingly.
Normally the eligibility values decay over time: the contribution of earlier state-action pairs fades as time passes, while the current state-action pair contributes the most. Algorithms like Sarsa-λ use a decay parameter to control how fast the eligibility values shrink, balancing the contributions of past and present state-action pairs to the value function.
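To make that decay concrete, here is a tiny sketch (the numbers are mine, using the same gamma = lambda = 0.9 as the code above) of how much weight a visited state-action pair still receives in later updates:

gamma, lambda_ = 0.9, 0.9   # reward decay and trace decay from SarsaLambdaTable

trace = 1.0                 # trace value right after visiting a state-action pair
for step in range(5):
    print(f"{step} steps later the pair receives weight {trace:.3f} of the TD error")
    trace *= gamma * lambda_

This prints weights 1.000, 0.810, 0.656, 0.531, 0.430, so a pair visited five steps ago still gets less than half of the current TD error.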
Then run it:

"""
Sarsa is an online updating method for Reinforcement Learning.

Unlike Q-learning, which is an offline updating method, Sarsa updates while following the current trajectory.

You will see that Sarsa is more cowardly when the punishment is close, because it cares about all behaviours,
while Q-learning is braver because it only cares about the maximum behaviour.
"""
from maze_env import Palace  # the Palace environment defined above
from RL_brain import SarsaLambdaTable


def update():
    for episode in range(10):
        # initial observation
        observation = env.reset()

        # RL choose action based on observation
        action = RL.choose_action(str(observation))

        # initial all zero eligibility trace; it gets reset to zero at the start of every episode
        RL.eligibility_trace *= 0

        while True:
            # fresh env
            env.render()

            # RL take action and get next observation and reward
            observation_, reward, done = env.step(action)

            # RL choose action based on next observation
            action_ = RL.choose_action(str(observation_))

            # RL learn from this transition (s, a, r, s_, a_) ==> Sarsa
            q_table = RL.learn(str(observation), action, reward, str(observation_), action_)

            # swap observation and action
            observation = observation_
            action = action_

            # break while loop when end of this episode
            if done:
                break

    # end of game
    print('game over')
    print(q_table)
    q_table.to_csv('output.csv')
    env.destroy()


if __name__ == '__main__':
    env = Palace()
    # the environment's step() expects the string actions 'u', 'd', 'l', 'r',
    # so pass the action space itself rather than integer indices
    RL = SarsaLambdaTable(actions=env.action_space)

    env.after(10, update)
    env.mainloop()
For some reason, Sarsa-λ only sometimes performs better than plain Sarsa and is not very stable; I will keep studying this later.