# A3C for Kung Fu
# Part 0 - Installing the required packages and importing the libraries
# Installing Gymnasium
# !pip install gymnasium
# !pip install "gymnasium[atari, accept-rom-license]"
# !apt-get install -y swig
# !pip install gymnasium[box2d]
# Importing the libraries
import cv2
import math
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.multiprocessing as mp
import torch.distributions as distributions
from torch.distributions import Categorical
import gymnasium as gym
from gymnasium import ObservationWrapper
from gymnasium.spaces import Box
# Part 1 - Building the AI
# Creating the architecture of the Neural Network
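# The network is a small shared convolutional trunk with two heads:
# three 3x3, stride-2 convolutions read a stack of 4 grayscale 42x42 frames,
# a fully connected layer builds a 128-dimensional feature vector, and then
# fc2a (the actor head) outputs one logit per action while fc2s (the critic
# head) outputs a single scalar estimate of the state value V(s).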
class Network(nn.Module):

    def __init__(self, action_size):
        super(Network, self).__init__()
        self.conv1 = torch.nn.Conv2d(in_channels = 4, out_channels = 32, kernel_size = (3,3), stride = 2)
        self.conv2 = torch.nn.Conv2d(in_channels = 32, out_channels = 32, kernel_size = (3,3), stride = 2)
        self.conv3 = torch.nn.Conv2d(in_channels = 32, out_channels = 32, kernel_size = (3,3), stride = 2)
        self.flatten = torch.nn.Flatten()
        self.fc1 = torch.nn.Linear(512, 128)
        self.fc2a = torch.nn.Linear(128, action_size)
        self.fc2s = torch.nn.Linear(128, 1)
    def forward(self, state):
        x = self.conv1(state)
        x = F.relu(x)
        x = self.conv2(x)
        x = F.relu(x)
        x = self.conv3(x)
        x = F.relu(x)
        x = self.flatten(x)
        x = self.fc1(x)
        x = F.relu(x)
        action_values = self.fc2a(x)
        # squeeze(-1) turns the (batch, 1) critic output into a (batch,) vector so
        # every sample in the batch gets its own state value (indexing [0] would
        # keep only the first sample's value).
        state_value = self.fc2s(x).squeeze(-1)
        return action_values, state_value
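# Quick shape check (a sketch only, not part of training): with no padding, each
# 3x3 / stride-2 convolution maps a side of length n to floor((n - 3) / 2) + 1,
# so 42 -> 20 -> 9 -> 4, and the flattened feature vector has 32 * 4 * 4 = 512
# elements, exactly the input size expected by fc1. For example (assuming the
# 14-action Kung Fu Master action set; the exact count is printed further down):
#   probe = torch.zeros(1, 4, 42, 42)
#   logits, value = Network(action_size = 14)(probe)   # logits: (1, 14), value: (1,)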
# Part 2 - Training the AI
# Setting up the environment
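# PreprocessAtari is an ObservationWrapper that turns raw Atari frames into the
# (4, 42, 42) float tensors the network expects: each frame is cropped, resized
# to 42x42, converted to grayscale, scaled to [0, 1], and pushed into a rolling
# buffer of the last 4 frames (np.roll shifts the buffer and the newest frame
# overwrites the last slot). Stacking frames gives the agent a sense of motion
# that a single still image cannot provide.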
class PreprocessAtari(ObservationWrapper):

    def __init__(self, env, height = 42, width = 42, crop = lambda img: img, dim_order = 'pytorch', color = False, n_frames = 4):
        super(PreprocessAtari, self).__init__(env)
        self.img_size = (height, width)
        self.crop = crop
        self.dim_order = dim_order
        self.color = color
        self.frame_stack = n_frames
        n_channels = 3 * n_frames if color else n_frames
        obs_shape = {'tensorflow': (height, width, n_channels), 'pytorch': (n_channels, height, width)}[dim_order]
        self.observation_space = Box(0.0, 1.0, obs_shape)
        self.frames = np.zeros(obs_shape, dtype = np.float32)

    def reset(self):
        self.frames = np.zeros_like(self.frames)
        obs, info = self.env.reset()
        self.update_buffer(obs)
        return self.frames, info

    def observation(self, img):
        img = self.crop(img)
        img = cv2.resize(img, self.img_size)
        if not self.color:
            if len(img.shape) == 3 and img.shape[2] == 3:
                img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = img.astype('float32') / 255.
        if self.color:
            self.frames = np.roll(self.frames, shift = -3, axis = 0)
        else:
            self.frames = np.roll(self.frames, shift = -1, axis = 0)
        if self.color:
            self.frames[-3:] = img
        else:
            self.frames[-1] = img
        return self.frames

    def update_buffer(self, obs):
        self.frames = self.observation(obs)
def make_env():
    env = gym.make("KungFuMasterDeterministic-v0", render_mode = 'rgb_array')
    env = PreprocessAtari(env, height = 42, width = 42, crop = lambda img: img, dim_order = 'pytorch', color = False, n_frames = 4)
    return env
env = make_env()
state_shape = env.observation_space.shape
number_actions = env.action_space.n
print("State shape:", state_shape)
print("Number actions:", number_actions)
print("Action names:", env.env.env.get_action_meanings())
# Initializing the hyperparameters
learning_rate = 1e-4
discount_factor = 0.99
number_environments = 10
# Implementing the A3C class
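# The agent optimizes the standard advantage actor-critic objective:
#   advantage:   A(s, a) = r + gamma * V(s') * (1 - done) - V(s)
#   actor loss:  -log pi(a|s) * A(s, a), with A detached so only the actor head is
#                pushed, minus a small entropy bonus (0.001) that keeps the policy
#                from collapsing too early,
#   critic loss: MSE between V(s) and the bootstrapped target r + gamma * V(s').
# Both losses are summed and minimized with a single Adam step.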
class Agent():

    def __init__(self, action_size):
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.action_size = action_size
        self.network = Network(action_size).to(self.device)
        self.optimizer = torch.optim.Adam(self.network.parameters(), lr = learning_rate)

    def act(self, state):
        if state.ndim == 3:
            state = [state]
        state = torch.tensor(state, dtype = torch.float32, device = self.device)
        action_values, _ = self.network(state)
        policy = F.softmax(action_values, dim = -1)
        return np.array([np.random.choice(len(p), p = p) for p in policy.detach().cpu().numpy()])

    def step(self, state, action, reward, next_state, done):
        batch_size = state.shape[0]
        state = torch.tensor(state, dtype = torch.float32, device = self.device)
        next_state = torch.tensor(next_state, dtype = torch.float32, device = self.device)
        reward = torch.tensor(reward, dtype = torch.float32, device = self.device)
        done = torch.tensor(done, dtype = torch.bool, device = self.device).to(dtype = torch.float32)
        action_values, state_value = self.network(state)
        _, next_state_value = self.network(next_state)
        target_state_value = reward + discount_factor * next_state_value * (1 - done)
        advantage = target_state_value - state_value
        probs = F.softmax(action_values, dim = -1)
        logprobs = F.log_softmax(action_values, dim = -1)
        entropy = -torch.sum(probs * logprobs, axis = -1)
        batch_idx = np.arange(batch_size)
        logp_actions = logprobs[batch_idx, action]
        actor_loss = -(logp_actions * advantage.detach()).mean() - 0.001 * entropy.mean()
        critic_loss = F.mse_loss(target_state_value.detach(), state_value)
        total_loss = actor_loss + critic_loss
        self.optimizer.zero_grad()
        total_loss.backward()
        self.optimizer.step()
# Initializing the A3C agent
agent = Agent(number_actions)
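# Usage sketch (shapes only, assuming the 10 parallel environments used below):
#   states  = np.zeros((10, 4, 42, 42), dtype = np.float32)
#   actions = agent.act(states)          # np.ndarray of 10 sampled action indices
#   agent.step(states, actions, rewards, next_states, dones)   # one gradient update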
# Evaluating our A3C agent on a certain number of episodes
def evaluate(agent, env, n_episodes = 1):
    episodes_rewards = []
    for _ in range(n_episodes):
        state, _ = env.reset()
        total_reward = 0
        while True:
            action = agent.act(state)
            # gymnasium's step returns (obs, reward, terminated, truncated, info)
            state, reward, terminated, truncated, _ = env.step(action[0])
            total_reward += reward
            if terminated or truncated:
                break
        episodes_rewards.append(total_reward)
    return episodes_rewards
# Managing multiple environments simultaneously
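# Note: rather than spawning truly asynchronous worker processes (the "asynchronous"
# in A3C), this implementation steps a batch of environments in lockstep and makes
# one shared update per step, which is closer to the synchronous A2C variant of the
# same algorithm. EnvBatch simply keeps a list of environments, steps each one with
# its own action, and transparently resets any environment whose episode has ended
# so data collection never stalls.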
class EnvBatch:

    def __init__(self, n_envs = 10):
        self.envs = [make_env() for _ in range(n_envs)]

    def reset(self):
        _states = []
        for env in self.envs:
            _states.append(env.reset()[0])
        return np.array(_states)

    def step(self, actions):
        next_states, rewards, terminateds, truncateds, infos = map(np.array, zip(*[env.step(a) for env, a in zip(self.envs, actions)]))
        # An episode counts as finished when it either terminates or is truncated.
        dones = terminateds | truncateds
        for i in range(len(self.envs)):
            if dones[i]:
                next_states[i] = self.envs[i].reset()[0]
        return next_states, rewards, dones, infos
# Training the A3C agent
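# Each of the 3001 iterations collects exactly one transition from every environment,
# scales the rewards by 0.01 (raw Kung Fu Master scores arrive in large increments,
# and shrinking them keeps the value targets and gradients in a comfortable range),
# performs one update, and every 1000 iterations reports the average score over
# 10 evaluation episodes on the single test environment.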
import tqdm
env_batch = EnvBatch(number_environments)
batch_states = env_batch.reset()
with tqdm.trange(0, 3001) as progress_bar:
    for i in progress_bar:
        batch_actions = agent.act(batch_states)
        batch_next_states, batch_rewards, batch_dones, _ = env_batch.step(batch_actions)
        batch_rewards *= 0.01
        agent.step(batch_states, batch_actions, batch_rewards, batch_next_states, batch_dones)
        batch_states = batch_next_states
        if i % 1000 == 0:
            print("Average agent reward: ", np.mean(evaluate(agent, env, n_episodes = 10)))
# Part 3 - Visualizing the results
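# The environment was created with render_mode = 'rgb_array', so env.render()
# returns each frame as a NumPy image instead of opening a window. The frames
# collected during one episode are written to video.mp4 with imageio, and
# show_video then base64-encodes that file and embeds it as an HTML <video> tag.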
import glob
import io
import base64
import imageio
from IPython.display import HTML, display
from gymnasium.wrappers.monitoring.video_recorder import VideoRecorder
def show_video_of_model(agent, env):
    state, _ = env.reset()
    done = False
    frames = []
    while not done:
        frame = env.render()
        frames.append(frame)
        action = agent.act(state)
        state, reward, terminated, truncated, _ = env.step(action[0])
        done = terminated or truncated
    env.close()
    imageio.mimsave('video.mp4', frames, fps=30)
show_video_of_model(agent, env)
def show_video():
    mp4list = glob.glob('*.mp4')
    if len(mp4list) > 0:
        mp4 = mp4list[0]
        video = io.open(mp4, 'r+b').read()
        encoded = base64.b64encode(video)
        display(HTML(data='''<video alt="test" autoplay
            loop controls style="height: 400px;">
            <source src="data:video/mp4;base64,{0}" type="video/mp4" />
            </video>'''.format(encoded.decode('ascii'))))
    else:
        print("Could not find video")
show_video()