run.py
import ray
import numpy as np
import random
import gym
from collections import deque
import torch
import time
from torch.utils.tensorboard import SummaryWriter
import argparse
import json
from scripts.training import train, test
parser = argparse.ArgumentParser(description="")
parser.add_argument("--env", type=str,default="Pendulum-v0", help="Environment name, default = Pendulum-v0")
parser.add_argument("--nstep", type=int, default=1, help ="Nstep bootstrapping, default 1")
parser.add_argument("--per", type=int, default=0, choices=[0,1], help="Adding Priorizied Experience Replay to the agent if set to 1, default = 0")
parser.add_argument("--munchausen", type=int, default=0, choices=[0,1], help="Adding Munchausen RL to the agent if set to 1, default = 0")
parser.add_argument("--iqn", type=int, choices=[0,1], default=0, help="Use distributional IQN Critic if set to 1, default = 1")
parser.add_argument("--noise", type=str, choices=["ou", "gauss"], default="OU", help="Choose noise type: ou = OU-Noise, gauss = Gaussian noise, default ou")
parser.add_argument("--info", type=str, help="Information or name of the run")
parser.add_argument("--device", type=str, default="cpu", help="Training device, default= cpu")
parser.add_argument("--d2rl", type=int, choices=[0,1], default=0, help="Uses Deep Actor and Deep Critic Networks if set to 1 as described in the D2RL Paper: https://arxiv.org/pdf/2010.09163.pdf, default=0")
parser.add_argument("--frames", type=int, default=1000000, help="The amount of training interactions with the environment, default is 1000000")
parser.add_argument("--training_steps", type=int, default=3000, help="Numnber of backprop steps, default=10000")
parser.add_argument("--seed", type=int, default=0, help="Seed for the env and torch network weights, default is 0")
parser.add_argument("--lr_a", type=float, default=5e-4, help="Actor learning rate of adapting the network weights, default is 5e-4")
parser.add_argument("--lr_c", type=float, default=5e-4, help="Critic learning rate of adapting the network weights, default is 5e-4")
parser.add_argument("--layer_size", type=int, default=256, help="Number of nodes per neural network layer, default is 256")
parser.add_argument("-repm", "--replay_memory", type=int, default=int(1e5), help="Size of the Replay memory, default is 1e5")
parser.add_argument("-bs", "--batch_size", type=int, default=256, help="Batch size (!! will be multiplied by the number of workers!!), default is 256")
parser.add_argument("-t", "--tau", type=float, default=1e-2, help="Softupdate factor tau, default is 1e-3") #for per 1e-2 for regular 1e-3 -> Pendulum!
parser.add_argument("-g", "--gamma", type=float, default=0.99, help="discount factor gamma, default is 0.99")
parser.add_argument("--test", type=str, default=None, help="Load a saved model to perform a test run!")
parser.add_argument("--worker_number", type=int, default=1, help="Number of parallel Worker to gather experience, default = 4")
parser.add_argument("--checkpoint_interval", type=int, default=10, help="Number of Network Updates befor next Evaluation run, default 10")
args = parser.parse_args()
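# Example invocation (a sketch only; the flag values below are illustrative,
# not tuned settings, and the run name "d4pg_pendulum" is made up here):
#   python run.py --env Pendulum-v0 --frames 100000 --per 1 --iqn 1 \
#                 --noise gauss --worker_number 4 --info d4pg_pendulum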
def timer(start, end):
    """ Helper to print training time """
    hours, rem = divmod(end-start, 3600)
    minutes, seconds = divmod(rem, 60)
    print("\nTraining Time: {:0>2}:{:0>2}:{:05.2f}".format(int(hours), int(minutes), seconds))
if __name__ == "__main__":
    # if training
    if args.test is None:
        ray.init()
        writer = SummaryWriter("runs/"+args.info)
        t0 = time.time()
        trained_model = train(args, writer)
        t1 = time.time()
        time.sleep(1.5)
        timer(t0, t1)
        # save model
        torch.save(trained_model, args.info+".pth")
    else:
        test(args, args.test)
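# Example test run (a sketch; assumes a checkpoint produced by a previous
# training run, e.g. the hypothetical d4pg_pendulum.pth saved by the
# torch.save call above):
#   python run.py --test d4pg_pendulum.pth --env Pendulum-v0 --info eval_run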