-
Notifications
You must be signed in to change notification settings - Fork 1
/
arguments.py
82 lines (73 loc) · 4.57 KB
/
arguments.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
import argparse
import torch
def get_args(argv=None):
    """Parse command-line arguments for RL training (a2c / ppo / acktr).

    Args:
        argv: optional list of argument strings to parse. Defaults to
            ``None``, in which case argparse reads ``sys.argv[1:]`` —
            identical to the original no-argument behavior.

    Returns:
        argparse.Namespace with two derived fields added after parsing:
            cuda: True iff ``--no-cuda`` was not given AND CUDA is available.
            vis:  True iff visdom visualization is enabled (``not no_vis``).
    """
    parser = argparse.ArgumentParser(description='RL')
    parser.add_argument('--algo', default='a2c',
                        help='algorithm to use: a2c | ppo | acktr')
    parser.add_argument('--lr', type=float, default=1e-4,
                        help='learning rate (default: 1e-4)')
    parser.add_argument('--eps', type=float, default=1e-5,
                        help='RMSprop optimizer epsilon (default: 1e-5)')
    parser.add_argument('--alpha', type=float, default=0.99,
                        help='RMSprop optimizer alpha (default: 0.99)')
    parser.add_argument('--gamma', type=float, default=0.99,
                        help='discount factor for rewards (default: 0.99)')
    parser.add_argument('--use-gae', action='store_true', default=False,
                        help='use generalized advantage estimation')
    parser.add_argument('--tau', type=float, default=0.95,
                        help='gae parameter (default: 0.95)')
    parser.add_argument('--entropy-coef', type=float, default=0.01,
                        help='entropy term coefficient (default: 0.01)')
    parser.add_argument('--value-loss-coef', type=float, default=0.5,
                        help='value loss coefficient (default: 0.5)')
    parser.add_argument('--max-grad-norm', type=float, default=0.5,
                        help='max norm of gradients (default: 0.5)')
    parser.add_argument('--seed', type=int, default=1,
                        help='random seed (default: 1)')
    parser.add_argument('--num-processes', type=int, default=8,
                        help='how many training CPU processes to use (default: 8)')
    parser.add_argument('--num-steps', type=int, default=5,
                        help='number of forward steps in A2C (default: 5)')
    parser.add_argument('--ppo-epoch', type=int, default=4,
                        help='number of ppo epochs (default: 4)')
    parser.add_argument('--num-mini-batch', type=int, default=32,
                        help='number of batches for ppo (default: 32)')
    parser.add_argument('--clip-param', type=float, default=0.2,
                        help='ppo clip parameter (default: 0.2)')
    parser.add_argument('--num-stack', type=int, default=4,
                        help='number of frames to stack (default: 4)')
    parser.add_argument('--log-interval', type=int, default=10,
                        help='log interval, one log per n updates (default: 10)')
    # Fixed help text: the actual default is 100*3 = 300, not 10.
    parser.add_argument('--save-interval', type=int, default=100 * 3,
                        help='save interval, one save per n updates (default: 300)')
    parser.add_argument('--vis-interval', type=int, default=100,
                        help='vis interval, one log per n updates (default: 100)')
    # Default made an int (argparse does not convert defaults via type=);
    # value unchanged: 100*1e6 == 1e8. Help text corrected from "10e6".
    parser.add_argument('--num-frames', type=int, default=int(100e6),
                        help='number of frames to train (default: 1e8)')
    parser.add_argument('--env-name', nargs='*', default=['PongNoFrameskip-v4'],
                        help='environment to train on (default: PongNoFrameskip-v4)')
    parser.add_argument('--log-dir', default='./tmp/gym/',
                        help='directory to save agent logs (default: /tmp/gym)')
    parser.add_argument('--save-dir', default='./trained_models/',
                        help='directory to save agent logs (default: ./trained_models/)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--recurrent-policy', action='store_true', default=False,
                        help='use a recurrent policy')
    # 'None' (the string) is the sentinel for "no attention policy".
    parser.add_argument('--att', help="Attention policy",
                        choices=['spatial', 'temporal', 'None'], default='None')
    # NOTE(review): default=True means the flag itself is a no-op and vis is
    # always disabled — looks intentional here, but confirm with callers.
    parser.add_argument('--no-vis', action='store_true', default=True,
                        help='disables visdom visualization')
    parser.add_argument('--load_model', default=None,
                        help='directory to save agent logs (default: None)')
    parser.add_argument('--transfer', action='store_true', default=False,
                        help='Transfer policy')
    args = parser.parse_args(argv)
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    args.vis = not args.no_vis
    # BUGFIX: the original tested `if(args.att):`, which is always true
    # because the default is the truthy string 'None' — the warning printed
    # on every run. Only warn when an attention policy was actually chosen.
    if args.att != 'None':
        print("###########################################################################\n")
        print("REMEMBER TO SET LEARNING RATE TO 1e-4 SINCE YOU ARE USING ATTENTION NETWORK\n")
        print("###########################################################################\n")
        print(args.lr," <-------> LEARNING RATE ")
    return args