forked from martius-lab/depRL
-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
added more example files for pure dep exploration
- Loading branch information
1 parent
2ba76ff
commit 62f04b3
Showing
3 changed files
with
75 additions
and
1 deletion.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,25 @@ | ||
"""Example: run pure DEP exploration on the MyoSuite leg-walking task.

Creates the ``myoLegWalk-v0`` environment, drives it with a DEP controller
fed by the current muscle lengths, and renders each step.
"""
import gym
import myosuite  # noqa: F401 -- importing registers the myo* envs with gym
import time
from deprl import env_wrappers
from deprl.dep_controller import DEP

env = env_wrappers.GymWrapper(gym.make('myoLegWalk-v0'))

# For SCONE environments, use SconeWrapper instead:
# env = env_wrappers.SconeWrapper(env)
dep = DEP()
# DEP needs the observation/action spaces to size its internal matrices.
dep.initialize(env.observation_space, env.action_space)

env.reset()
for _ in range(1000):
    # DEP maps the current muscle lengths to a single action row.
    muscle_state = env.muscle_lengths()
    action = dep.step(muscle_state)[0, :]
    print(action.shape)
    next_state, reward, done, _ = env.step(action)
    # Slow the loop down so the rendered motion is watchable.
    time.sleep(0.01)
    env.mj_render()
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,49 @@ | ||
"""Example: run pure DEP exploration in a sconegym walking environment.

Creates the ``sconewalk_h2190-v1`` environment, drives it with a DEP
controller fed by the current muscle lengths, and periodically stores
episode results to disk via the SconeWrapper.
"""
import gym
import sconegym  # noqa: F401 -- importing registers the scone* envs with gym
from deprl import env_wrappers
from deprl.dep_controller import DEP

# Tunable run parameters (defaults reproduce the original behaviour).
NUM_EPISODES = 5
STORE_EVERY = 1        # store results of every Nth episode; 1 = every episode
MAX_EPISODE_STEPS = 1000

# create the sconegym env
env = gym.make('sconewalk_h2190-v1')

# apply wrapper to environment
env = env_wrappers.SconeWrapper(env)

# create DEP, parameters are loaded from default path
dep = DEP()

# give DEP obs and action space to create right dimensions
dep.initialize(env.observation_space, env.action_space)

env.seed(0)

for ep in range(NUM_EPISODES):
    if ep % STORE_EVERY == 0:
        env.store_next_episode()  # Store results of every Nth episode

    ep_steps = 0
    ep_tot_reward = 0
    state = env.reset()

    while True:
        # DEP deterministically maps current muscle lengths to an action
        # (the original "samples random action" comment was inaccurate).
        action = dep.step(env.muscle_lengths())[0, :]
        # applies action and advances environment by one step
        state, reward, done, info = env.step(action)

        ep_steps += 1
        ep_tot_reward += reward

        # end the episode on termination or once the step budget is spent
        if done or (ep_steps >= MAX_EPISODE_STEPS):
            print(
                f"Episode {ep} ending; steps={ep_steps}; reward={ep_tot_reward:0.3f}; \
com={env.model.com_pos()}"
            )
            env.write_now()
            env.reset()
            break

env.close()