Commit

Ch18 (rasbt#26)
* Added ch18 code: gridworld example

* Added ch18 code: cartpole example
vmirly authored Nov 2, 2019
1 parent 463a164 commit 3d4d034
Showing 4 changed files with 488 additions and 0 deletions.
154 changes: 154 additions & 0 deletions ch18/cartpole/main.py
@@ -0,0 +1,154 @@
import gym
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import random
from collections import namedtuple
from collections import deque

np.random.seed(1)
tf.random.set_seed(1)

Transition = namedtuple(
'Transition', ('state', 'action', 'reward',
'next_state', 'done'))


class DQNAgent:
def __init__(
self, env, discount_factor=0.95,
epsilon_greedy=1.0, epsilon_min=0.01,
epsilon_decay=0.995, learning_rate=1e-3,
max_memory_size=2000):
        self.env = env
self.state_size = env.observation_space.shape[0]
self.action_size = env.action_space.n

self.memory = deque(maxlen=max_memory_size)

self.gamma = discount_factor
self.epsilon = epsilon_greedy
self.epsilon_min = epsilon_min
self.epsilon_decay = epsilon_decay
self.lr = learning_rate
self._build_nn_model()

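    ## The network below maps a state vector to one Q-value per action;
    ## training minimizes the MSE between predicted and target Q-values.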
def _build_nn_model(self, n_layers=3):
self.model = tf.keras.Sequential()

        ## Hidden layers (one Dense layer per hidden layer)
        for n in range(n_layers - 1):
            self.model.add(tf.keras.layers.Dense(
                units=32, activation='relu'))

## Last layer
self.model.add(tf.keras.layers.Dense(
units=self.action_size))

## Build & compile model
self.model.build(input_shape=(None, self.state_size))
self.model.compile(
loss='mse',
            optimizer=tf.keras.optimizers.Adam(learning_rate=self.lr))

def remember(self, transition):
self.memory.append(transition)

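    ## Epsilon-greedy action selection: explore with probability epsilon,
    ## otherwise act greedily w.r.t. the predicted Q-values.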
def choose_action(self, state):
if np.random.rand() <= self.epsilon:
return random.randrange(self.action_size)
q_values = self.model.predict(state)[0]
return np.argmax(q_values) # returns action

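    ## Q-learning target for each sampled transition:
    ##   target = r                                   if the episode ended
    ##   target = r + gamma * max_a' Q(next_s, a')    otherwise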
def _learn(self, batch_samples):
batch_states, batch_targets = [], []
for transition in batch_samples:
s, a, r, next_s, done = transition
if done:
target = r
else:
target = (r +
self.gamma * np.amax(
self.model.predict(next_s)[0]
)
)
target_all = self.model.predict(s)[0]
target_all[a] = target
batch_states.append(s.flatten())
batch_targets.append(target_all)
self._adjust_epsilon()
return self.model.fit(x=np.array(batch_states),
y=np.array(batch_targets),
epochs=1,
verbose=0)

def _adjust_epsilon(self):
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay

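    ## Experience replay: train on a random mini-batch drawn from the
    ## replay memory to break the correlation between consecutive samples.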
def replay(self, batch_size):
samples = random.sample(self.memory, batch_size)
history = self._learn(samples)
return history.history['loss'][0]

def plot_learning_history(history):
fig = plt.figure(1, figsize=(14, 5))
ax = fig.add_subplot(1, 1, 1)
episodes = np.arange(len(history[0])) + 1
plt.plot(episodes, history[0], lw=4,
marker='o', markersize=10)
ax.tick_params(axis='both', which='major', labelsize=15)
plt.xlabel('Episodes', size=20)
plt.ylabel('# Total Rewards', size=20)
plt.show()


## General settings
EPISODES = 200
batch_size = 32
init_replay_memory_size = 500

if __name__ == '__main__':
env = gym.make('CartPole-v1')
agent = DQNAgent(env)
state = env.reset()
state = np.reshape(state, [1, agent.state_size])

## Filling up the replay-memory
for i in range(init_replay_memory_size):
action = agent.choose_action(state)
next_state, reward, done, _ = env.step(action)
next_state = np.reshape(next_state, [1, agent.state_size])
agent.remember(Transition(state, action, reward,
next_state, done))
if done:
state = env.reset()
state = np.reshape(state, [1, agent.state_size])
else:
state = next_state

total_rewards, losses = [], []
for e in range(EPISODES):
state = env.reset()
if e % 10 == 0:
env.render()
state = np.reshape(state, [1, agent.state_size])
for i in range(500):
action = agent.choose_action(state)
next_state, reward, done, _ = env.step(action)
next_state = np.reshape(next_state,
[1, agent.state_size])
agent.remember(Transition(state, action, reward,
next_state, done))
state = next_state
if e % 10 == 0:
env.render()
if done:
total_rewards.append(i)
print('Episode: %d/%d, Total reward: %d'
% (e, EPISODES, i))
break
loss = agent.replay(batch_size)
losses.append(loss)
plot_learning_history((total_rewards, losses))
52 changes: 52 additions & 0 deletions ch18/gridworld/agent.py
@@ -0,0 +1,52 @@
## Script: agent.py

from collections import defaultdict
import numpy as np


class Agent(object):
def __init__(
self, env,
learning_rate=0.01,
discount_factor=0.9,
epsilon_greedy=0.9,
epsilon_min=0.1,
epsilon_decay=0.95):
self.env = env
self.lr = learning_rate
self.gamma = discount_factor
self.epsilon = epsilon_greedy
self.epsilon_min = epsilon_min
self.epsilon_decay = epsilon_decay

## Define the q_table
self.q_table = defaultdict(lambda: np.zeros(self.env.nA))

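    ## Epsilon-greedy policy; ties among equal Q-values are broken at random
    ## by permuting the actions before taking the argmax.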
def choose_action(self, state):
if np.random.uniform() < self.epsilon:
action = np.random.choice(self.env.nA)
else:
q_vals = self.q_table[state]
perm_actions = np.random.permutation(self.env.nA)
q_vals = [q_vals[a] for a in perm_actions]
perm_q_argmax = np.argmax(q_vals)
action = perm_actions[perm_q_argmax]
return action

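    ## Tabular Q-learning update:
    ##   Q(s, a) <- Q(s, a) + lr * (r + gamma * max_a' Q(s', a') - Q(s, a))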
def _learn(self, transition):
s, a, r, next_s, done = transition
q_val = self.q_table[s][a]
if done:
q_target = r
else:
q_target = r + self.gamma*np.max(self.q_table[next_s])

## Update the q_table
self.q_table[s][a] += self.lr * (q_target - q_val)

        ## Adjust the epsilon
self._adjust_epsilon()

def _adjust_epsilon(self):
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
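
A hypothetical usage sketch (not part of this commit): the gridworld environment and training script are not shown above, so the loop below only illustrates how this Agent could be driven, assuming an environment object that provides `nA`, `reset()`, and `step(action)` returning `(next_state, reward, done, info)` with hashable states.

## Hypothetical driver for the Agent class above; the `env` object is assumed.
from agent import Agent

def run_q_learning(agent, env, num_episodes=50):
    for episode in range(num_episodes):
        state = env.reset()
        done = False
        while not done:
            action = agent.choose_action(state)
            next_state, reward, done, _ = env.step(action)
            ## Agent._learn expects a (s, a, r, next_s, done) transition
            agent._learn((state, action, reward, next_state, done))
            state = next_state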