Critic.py
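"""Critic (value) model built with Keras.

Maps (state, action) pairs to Q-values and exposes the gradient of Q
with respect to the action, which a companion actor model can use for
its policy update in an actor-critic agent such as DDPG.
"""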
from keras import layers, models, optimizers
from keras import backend as K


class Critic:
    """Critic (Value) Model."""

    def __init__(self, state_size, action_size):
        """Initialize parameters and build model.

        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
        """
        self.state_size = state_size
        self.action_size = action_size

        # Initialize any other variables here

        self.build_model()

    def build_model(self):
        """Build a critic (value) network that maps (state, action) pairs -> Q-values."""
        # Define input layers
        states = layers.Input(shape=(self.state_size,), name='states')
        actions = layers.Input(shape=(self.action_size,), name='actions')

        # Add hidden layer(s) for state pathway
        net_states = layers.Dense(units=32, activation='relu')(states)
        net_states = layers.Dense(units=64, activation='relu')(net_states)

        # Add hidden layer(s) for action pathway
        net_actions = layers.Dense(units=32, activation='relu')(actions)
        net_actions = layers.Dense(units=64, activation='relu')(net_actions)

        # Try different layer sizes and activations; add batch normalization, regularizers, etc.

        # Combine state and action pathways
        net = layers.Add()([net_states, net_actions])
        net = layers.Activation('relu')(net)

        # Add more layers to the combined network if needed

        # Add final output layer to produce action values (Q-values)
        Q_values = layers.Dense(units=1, name='q_values')(net)

        # Create Keras model
        self.model = models.Model(inputs=[states, actions], outputs=Q_values)

        # Define optimizer and compile model for training with built-in loss function
        optimizer = optimizers.Adam()
        self.model.compile(optimizer=optimizer, loss='mse')

        # Compute action gradients (derivative of Q-values w.r.t. actions)
        action_gradients = K.gradients(Q_values, actions)

        # Define an additional function to fetch action gradients (to be used by the actor model)
        self.get_action_gradients = K.function(
            inputs=[*self.model.input, K.learning_phase()],
            outputs=action_gradients)
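

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original class). Sizes, batch data,
# and Q-value targets below are arbitrary placeholders; in a full DDPG agent
# the targets would come from a target critic via the Bellman update, and the
# action gradients would be fed into the actor's custom training function.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import numpy as np

    critic = Critic(state_size=12, action_size=4)

    states = np.random.rand(8, 12)     # batch of 8 states
    actions = np.random.rand(8, 4)     # batch of 8 actions
    q_targets = np.random.rand(8, 1)   # placeholder Q-value targets

    # Supervised update toward the (placeholder) Q-value targets.
    loss = critic.model.train_on_batch(x=[states, actions], y=q_targets)

    # dQ/da for the batch; the trailing 0 sets the Keras learning phase to
    # inference mode. Returns a list holding one array of shape (8, 4).
    grads = critic.get_action_gradients([states, actions, 0])
    print(loss, grads[0].shape)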