The PPO (Proximal Policy Optimization) algorithm builds on A2C and improves how collected samples are managed (the same batch of experience is reused for several update epochs) and how the policy gradient is computed.
The objective function works by limiting how far the policy can move in a single update: if the probability ratio between the new and old policies leaves the preset range (i.e. rises above 1+ϵ or falls below 1−ϵ), it is clipped, which prevents the policy from changing too drastically.
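For reference, the clipped surrogate objective this describes can be written in its standard form (the notation below follows the PPO paper and is not from the original post):

$$L^{CLIP}(\theta) = \mathbb{E}_t\Big[\min\big(r_t(\theta)\,\hat{A}_t,\ \mathrm{clip}(r_t(\theta),\,1-\epsilon,\,1+\epsilon)\,\hat{A}_t\big)\Big], \qquad r_t(\theta) = \frac{\pi_\theta(a_t \mid s_t)}{\pi_{\theta_{\mathrm{old}}}(a_t \mid s_t)}$$

In the code below, r_t(θ) corresponds to ratios, the two terms inside the min are surr1 and surr2, and ϵ is eps_clip.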
import torch
import torch.nn as nn
from torch.distributions import Categorical
import gym

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

class Memory:
    def __init__(self):
        self.actions = []
        self.states = []
        self.logprobs = []
        self.rewards = []
        self.is_terminals = []

    def clear_memory(self):
        del self.actions[:]
        del self.states[:]
        del self.logprobs[:]
        del self.rewards[:]
        del self.is_terminals[:]
class ActorCritic(nn.Module):
    def __init__(self, state_dim, action_dim, n_latent_var):
        super(ActorCritic, self).__init__()
        # actor: maps a state to a probability distribution over actions
        self.action_layer = nn.Sequential(
            nn.Linear(state_dim, n_latent_var),
            nn.Tanh(),
            nn.Linear(n_latent_var, n_latent_var),
            nn.Tanh(),
            nn.Linear(n_latent_var, action_dim),
            nn.Softmax(dim=-1)
        )
        # critic: maps a state to a scalar state value
        self.value_layer = nn.Sequential(
            nn.Linear(state_dim, n_latent_var),
            nn.Tanh(),
            nn.Linear(n_latent_var, n_latent_var),
            nn.Tanh(),
            nn.Linear(n_latent_var, 1)
        )

    def forward(self):
        raise NotImplementedError

    def act(self, state, memory):
        state = torch.from_numpy(state).float().to(device)
        action_probs = self.action_layer(state)
        dist = Categorical(action_probs)  # sample an action from the given probability distribution
        action = dist.sample()
        memory.states.append(state)
        memory.actions.append(action)
        memory.logprobs.append(dist.log_prob(action))
        return action.item()

    def evaluate(self, state, action):
        action_probs = self.action_layer(state)
        dist = Categorical(action_probs)
        action_logprobs = dist.log_prob(action)
        dist_entropy = dist.entropy()
        state_value = self.value_layer(state)
        return action_logprobs, torch.squeeze(state_value), dist_entropy
class PPO:
    def __init__(self, state_dim, action_dim, n_latent_var, lr, betas, gamma, K_epochs, eps_clip):
        self.lr = lr
        self.betas = betas
        self.gamma = gamma
        self.eps_clip = eps_clip
        self.K_epochs = K_epochs
        self.policy = ActorCritic(state_dim, action_dim, n_latent_var).to(device)
        self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=lr, betas=betas)
        self.policy_old = ActorCritic(state_dim, action_dim, n_latent_var).to(device)
        self.policy_old.load_state_dict(self.policy.state_dict())
        self.MseLoss = nn.MSELoss()

    def update(self, memory):
        # Monte Carlo estimate of the discounted returns:
        rewards = []
        discounted_reward = 0
        for reward, is_terminal in zip(reversed(memory.rewards), reversed(memory.is_terminals)):
            if is_terminal:
                discounted_reward = 0
            discounted_reward = reward + (self.gamma * discounted_reward)
            rewards.insert(0, discounted_reward)
        # Normalizing the rewards:
        rewards = torch.tensor(rewards, dtype=torch.float32).to(device)
        rewards = (rewards - rewards.mean()) / (rewards.std() + 1e-5)
        # convert list to tensor
        old_states = torch.stack(memory.states).to(device).detach()
        old_actions = torch.stack(memory.actions).to(device).detach()
        old_logprobs = torch.stack(memory.logprobs).to(device).detach()
        # Optimize policy for K epochs (how many passes over the collected batch):
        for _ in range(self.K_epochs):
            # Evaluating old actions and values:
            logprobs, state_values, dist_entropy = self.policy.evaluate(old_states, old_actions)
            # Finding the ratio (pi_theta / pi_theta_old):
            ratios = torch.exp(logprobs - old_logprobs.detach())
            # Finding the surrogate loss; advantage = return - baseline (critic value):
            advantages = rewards - state_values.detach()
            surr1 = ratios * advantages
            surr2 = torch.clamp(ratios, 1 - self.eps_clip, 1 + self.eps_clip) * advantages
            loss = -torch.min(surr1, surr2) + 0.5 * self.MseLoss(state_values, rewards) - 0.01 * dist_entropy
            # take gradient step
            self.optimizer.zero_grad()
            loss.mean().backward()
            self.optimizer.step()
        # Copy new weights into old policy:
        self.policy_old.load_state_dict(self.policy.state_dict())
The PPO implementation is split into three main parts: the Memory class, the ActorCritic model class, and the PPO class.
(1) Memory class: buffers the states, actions, log-probabilities, rewards, and terminal flags collected while interacting with the environment.
(2) ActorCritic class: the model contains an Actor network and a Critic network, which compute the policy distribution and the state value respectively.
(3) PPO class: implements storing, updating, and optimizing the policy, and drives the main workflow of the whole algorithm.
def main():
    ############## Hyperparameters ##############
    env_name = "LunarLander-v2"  # LunarLander-v2 environment
    # creating environment
    # gym 0.26: pass render_mode='human' to gym.make to show the window;
    # the separate "render = True/False" flag is then unnecessary
    env = gym.make(env_name)
    state_dim = env.observation_space.shape[0]
    action_dim = 4
    render = True           # True shows the game window, False does not
    solved_reward = 100     # stop training if avg_reward > solved_reward
    log_interval = 20       # print avg reward in the interval
    max_episodes = 50000    # max training episodes (how many times the game is replayed)
    max_timesteps = 300     # max timesteps in one episode
    n_latent_var = 64       # number of neurons in the hidden layers
    update_timestep = 2000  # update policy every n timesteps
    lr = 0.002              # learning rate
    betas = (0.9, 0.999)    # Adam momentum parameters
    gamma = 0.99            # discount factor
    K_epochs = 4            # update policy for K epochs on the collected batch
    eps_clip = 0.2          # clip parameter for PPO: ratios clipped to [1-0.2, 1+0.2]
    random_seed = None
    #############################################
    if random_seed:
        torch.manual_seed(random_seed)
        env.reset(seed=random_seed)  # gym 0.26: seeding is done through reset()

    memory = Memory()
    ppo = PPO(state_dim, action_dim, n_latent_var, lr, betas, gamma, K_epochs, eps_clip)
    # print(lr, betas)

    # logging variables
    running_reward = 0
    avg_length = 0
    timestep = 0

    # training loop
    for i_episode in range(1, max_episodes + 1):
        state, _ = env.reset()  # gym 0.26: reset() returns (observation, info)
        for t in range(max_timesteps):
            timestep += 1
            # Running policy_old:
            action = ppo.policy_old.act(state, memory)
            # gym 0.23: state, reward, done, _ = env.step(action)
            # gym 0.26: step() returns 5 values (obs, reward, terminated, truncated, info),
            # so unpacking only 4 of them raises an error
            state, reward, done, truncated, _ = env.step(action)
            # Saving reward and is_terminal:
            memory.rewards.append(reward)
            memory.is_terminals.append(done)
            # update if it's time
            if timestep % update_timestep == 0:
                ppo.update(memory)
                memory.clear_memory()
                timestep = 0
            running_reward += reward
            if render:
                env.render()
            if done or truncated:
                break
        avg_length += t

        # stop training if avg_reward > solved_reward
        if running_reward > (log_interval * solved_reward):
            print("########## Solved! ##########")
            torch.save(ppo.policy.state_dict(), './PPO_20241105_{}.pth'.format(env_name))
            break

        # logging
        if i_episode % log_interval == 0:
            avg_length = int(avg_length / log_interval)
            running_reward = int((running_reward / log_interval))
            print('Episode {} \t avg length: {} \t reward: {}'.format(i_episode, avg_length, running_reward))
            running_reward = 0
            avg_length = 0

if __name__ == '__main__':
    main()
Model training results:
Model test results:
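The test results above can be reproduced with a short evaluation script along the following lines. This is only a sketch: it assumes the checkpoint path saved during training (./PPO_20241105_LunarLander-v2.pth), the gym 0.26 API, and the class definitions above; the function name, episode count, and default arguments are illustrative.

def test(env_name="LunarLander-v2", checkpoint="./PPO_20241105_LunarLander-v2.pth",
         n_episodes=5, max_timesteps=300, n_latent_var=64):
    # gym 0.26: render_mode='human' shows the game window during evaluation
    env = gym.make(env_name, render_mode="human")
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.n
    # rebuild the network and load the trained weights
    policy = ActorCritic(state_dim, action_dim, n_latent_var).to(device)
    policy.load_state_dict(torch.load(checkpoint, map_location=device))
    memory = Memory()  # act() appends transitions here; they are discarded after each episode
    for ep in range(1, n_episodes + 1):
        state, _ = env.reset()
        ep_reward = 0.0
        for _ in range(max_timesteps):
            with torch.no_grad():
                action = policy.act(state, memory)
            state, reward, done, truncated, _ = env.step(action)
            ep_reward += reward
            if done or truncated:
                break
        memory.clear_memory()
        print('Episode {}: reward = {:.1f}'.format(ep, ep_reward))
    env.close()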
The code comes from a tutorial downloaded from Bilibili. If you need the source code, feel free to follow me or leave a comment, and I will reply and send it. You are welcome to learn and exchange ideas.