# test_robot_states_flatcache.py
import torch as th
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.action_primitives.starter_semantic_action_primitives import StarterSemanticActionPrimitives
from omnigibson.macros import gm
from omnigibson.robots import *
from omnigibson.sensors import VisionSensor
from omnigibson.utils.transform_utils import mat2pose, pose2mat, quaternions_close, relative_pose_transform
from omnigibson.utils.usd_utils import PoseAPI


def setup_environment(flatcache):
    """
    Sets up the environment with or without flatcache based on the flatcache parameter.
    """
    if og.sim is None:
        # Set global flags
        gm.ENABLE_OBJECT_STATES = True
        gm.USE_GPU_DYNAMICS = True
        gm.ENABLE_FLATCACHE = flatcache  # Set based on function parameter
        gm.ENABLE_TRANSITION_RULES = False
    else:
        # Make sure sim is stopped
        og.sim.stop()

    # Define the environment configuration
    config = {
        "scene": {
            "type": "Scene",
        },
        "robots": [
            {
                "type": "Fetch",
                "obs_modalities": ["rgb", "seg_semantic", "seg_instance"],
                "position": [150, 150, 100],
                "orientation": [0, 0, 0, 1],
            }
        ],
    }

    env = og.Environment(configs=config)
    return env
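
# A minimal usage sketch (illustrative only, not part of the test suite): the
# helper above is what each test calls first; assuming a working OmniGibson
# install, an environment could be spun up and torn down like so:
#
#   env = setup_environment(flatcache=True)
#   env.reset()
#   og.clear()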


def camera_pose_test(flatcache):
    """
    Checks that a robot-mounted VisionSensor's world and local poses stay
    consistent as the robot and the sensor are moved, with flatcache on or off.
    """
    env = setup_environment(flatcache)
    robot = env.robots[0]
    env.reset()
    og.sim.step()

    sensors = [s for s in robot.sensors.values() if isinstance(s, VisionSensor)]
    assert len(sensors) > 0
    vision_sensor = sensors[0]

    # Get vision sensor world pose via directly calling get_position_orientation
    robot_world_pos, robot_world_ori = robot.get_position_orientation()
    sensor_world_pos, sensor_world_ori = vision_sensor.get_position_orientation()

    robot_to_sensor_mat = pose2mat(
        relative_pose_transform(sensor_world_pos, sensor_world_ori, robot_world_pos, robot_world_ori)
    )

    sensor_world_pos_gt = th.tensor([150.1620, 149.9999, 101.2193])
    sensor_world_ori_gt = th.tensor([-0.2952, 0.2959, 0.6427, -0.6421])

    assert th.allclose(sensor_world_pos, sensor_world_pos_gt, atol=1e-3)
    assert quaternions_close(sensor_world_ori, sensor_world_ori_gt, atol=1e-3)
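
    # Note: quaternions_close is used instead of a plain th.allclose because q
    # and -q encode the same rotation. A minimal sketch of such a check (an
    # assumed shape, not the actual omnigibson.utils.transform_utils code):
    #
    #   def _quats_close(q1, q2, atol=1e-3):
    #       return th.allclose(q1, q2, atol=atol) or th.allclose(q1, -q2, atol=atol)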

    # Now, we want to move the robot and check if the sensor pose has been updated
    old_camera_local_pose = vision_sensor.get_position_orientation(frame="parent")

    robot.set_position_orientation(position=[100, 100, 100])
    new_camera_local_pose = vision_sensor.get_position_orientation(frame="parent")
    new_camera_world_pose = vision_sensor.get_position_orientation()
    robot_pose_mat = pose2mat(robot.get_position_orientation())
    expected_camera_world_pos, expected_camera_world_ori = mat2pose(robot_pose_mat @ robot_to_sensor_mat)
    assert th.allclose(old_camera_local_pose[0], new_camera_local_pose[0], atol=1e-3)
    assert th.allclose(new_camera_world_pose[0], expected_camera_world_pos, atol=1e-3)
    assert quaternions_close(new_camera_world_pose[1], expected_camera_world_ori, atol=1e-3)
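
    # The expected pose above is plain rigid-transform composition: with T_wr
    # the robot's new world pose and T_rs the robot-to-sensor transform captured
    # before the move, the sensor's new world pose is T_ws = T_wr @ T_rs.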

    # Then, we want to move the local pose of the camera and check
    # 1) if the world pose is updated 2) if the robot stays in the same position
    old_camera_local_pose = vision_sensor.get_position_orientation(frame="parent")
    vision_sensor.set_position_orientation(position=[10, 10, 10], orientation=[0, 0, 0, 1], frame="parent")
    new_camera_world_pose = vision_sensor.get_position_orientation()
    camera_parent_prim = lazy.omni.isaac.core.utils.prims.get_prim_parent(vision_sensor.prim)
    camera_parent_path = str(camera_parent_prim.GetPath())
    camera_parent_world_transform = PoseAPI.get_world_pose_with_scale(camera_parent_path)
    expected_new_camera_world_pos, expected_new_camera_world_ori = mat2pose(
        camera_parent_world_transform
        @ pose2mat((th.tensor([10, 10, 10], dtype=th.float32), th.tensor([0, 0, 0, 1], dtype=th.float32)))
    )
    assert th.allclose(new_camera_world_pose[0], expected_new_camera_world_pos, atol=1e-3)
    assert quaternions_close(new_camera_world_pose[1], expected_new_camera_world_ori, atol=1e-3)
    assert th.allclose(robot.get_position_orientation()[0], th.tensor([100, 100, 100], dtype=th.float32), atol=1e-3)

    # Finally, we want to move the world pose of the camera and check
    # 1) if the local pose is updated 2) if the robot stays in the same position
    robot.set_position_orientation(position=[150, 150, 100])
    old_camera_local_pose = vision_sensor.get_position_orientation(frame="parent")
    vision_sensor.set_position_orientation(
        position=[150, 150, 101.36912537], orientation=[-0.29444987, 0.29444981, 0.64288363, -0.64288352]
    )
    new_camera_local_pose = vision_sensor.get_position_orientation(frame="parent")
    assert not th.allclose(old_camera_local_pose[0], new_camera_local_pose[0], atol=1e-3)
    assert not quaternions_close(old_camera_local_pose[1], new_camera_local_pose[1], atol=1e-3)
    assert th.allclose(robot.get_position_orientation()[0], th.tensor([150, 150, 100], dtype=th.float32), atol=1e-3)

    # Another test we want to try is setting the camera's parent scale and check if the world pose is updated
    camera_parent_prim.GetAttribute("xformOp:scale").Set(lazy.pxr.Gf.Vec3d([2.0, 2.0, 2.0]))
    camera_parent_world_transform = PoseAPI.get_world_pose_with_scale(camera_parent_path)
    camera_local_pose = vision_sensor.get_position_orientation(frame="parent")
    expected_mat = camera_parent_world_transform @ pose2mat(camera_local_pose)
    # The parent transform now carries a scale of 2, so the upper-left 3x3 block is
    # no longer a pure rotation; normalize its columns before extracting the pose
    expected_mat[:3, :3] = expected_mat[:3, :3] / th.norm(expected_mat[:3, :3], dim=0, keepdim=True)
    expected_new_camera_world_pos, _ = mat2pose(expected_mat)
    new_camera_world_pose = vision_sensor.get_position_orientation()
    assert th.allclose(new_camera_world_pose[0], expected_new_camera_world_pos, atol=1e-3)

    og.clear()


def test_camera_pose_flatcache_on():
    camera_pose_test(True)
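

# A flatcache-off counterpart would presumably just call camera_pose_test(False);
# it is not defined in this file, so this is only a usage note, not part of the suite:
#
#   def test_camera_pose_flatcache_off():
#       camera_pose_test(False)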


def test_robot_load_drive():
    if og.sim is None:
        # Set global flags
        gm.ENABLE_OBJECT_STATES = True
        gm.USE_GPU_DYNAMICS = True
        gm.ENABLE_TRANSITION_RULES = False
    else:
        # Make sure sim is stopped
        og.sim.stop()

    config = {
        "scene": {
            "type": "Scene",
        },
    }

    env = og.Environment(configs=config)
    og.sim.stop()

    # Iterate over all robots and test their motion
    for robot_name, robot_cls in REGISTERED_ROBOTS.items():
        if robot_name in ["FrankaMounted", "Stretch"]:
            # TODO: skipping FrankaMounted and Stretch for now because CI doesn't have the required assets
            continue

        if robot_name in ["Husky", "BehaviorRobot"]:
            # Husky base motion is a little messed up because of the 4-wheel drive; skipping for now
            # BehaviorRobot does not work with the primitive actions at the moment
            continue

        robot = robot_cls(
            name=robot_name,
            obs_modalities=[],
        )
        env.scene.add_object(robot)

        # At least one step is always needed while sim is playing for any imported object to be fully initialized
        og.sim.play()

        # Reset robot and make sure it's not moving
        robot.reset()
        robot.keep_still()
        og.sim.step()

        # Set viewer in front facing robot
        og.sim.viewer_camera.set_position_orientation(
            position=[2.69918369, -3.63686664, 4.57894564],
            orientation=[0.39592411, 0.1348514, 0.29286304, 0.85982],
        )

        # If this is a manipulation robot, we want to test moving the arm
        if isinstance(robot, ManipulationRobot):
            # Load IK controller
            controller_config = {
                f"arm_{robot.default_arm}": {"name": "InverseKinematicsController", "mode": "pose_absolute_ori"}
            }
            robot.reload_controllers(controller_config=controller_config)
            env.scene.update_initial_state()

            action_primitives = StarterSemanticActionPrimitives(env)
            eef_pos = env.robots[0].get_eef_position()
            eef_orn = env.robots[0].get_eef_orientation()
            if isinstance(robot, Stretch):  # Stretch arm faces the y-axis
                target_eef_pos = th.tensor([eef_pos[0], eef_pos[1] - 0.1, eef_pos[2]], dtype=th.float32)
            else:
                target_eef_pos = th.tensor([eef_pos[0] + 0.1, eef_pos[1], eef_pos[2]], dtype=th.float32)
            target_eef_orn = eef_orn
            for action in action_primitives._move_hand_direct_ik((target_eef_pos, target_eef_orn)):
                env.step(action)
            assert th.norm(robot.get_eef_position() - target_eef_pos) < 0.05
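
            # Note: _move_hand_direct_ik is a private action primitive that
            # yields low-level actions until the end effector reaches the target
            # pose; the loop above simply feeds each action into env.step.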

        # If this is a locomotion robot, we want to test driving
        if isinstance(robot, LocomotionRobot):
            action_primitives = StarterSemanticActionPrimitives(env)
            # 2D navigation goal: (x, y, yaw)
            goal_location = th.tensor([0, 1, 0], dtype=th.float32)
            for action in action_primitives._navigate_to_pose_direct(goal_location):
                env.step(action)
            assert th.norm(robot.get_position()[:2] - goal_location[:2]) < 0.1

        # Stop the simulator and remove the robot
        og.sim.stop()
        env.scene.remove_object(obj=robot)

    og.clear()
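

# REGISTERED_ROBOTS, imported via the omnigibson.robots wildcard import above,
# maps robot class names to robot classes; that mapping is what lets the loop in
# test_robot_load_drive instantiate each robot as robot_cls(name=robot_name, ...).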


def test_grasping_mode():
    if og.sim is None:
        # Set global flags
        gm.ENABLE_FLATCACHE = True
    else:
        # Make sure sim is stopped
        og.sim.stop()

    scene_cfg = dict(type="Scene")
    objects_cfg = []
    objects_cfg.append(
        dict(
            type="DatasetObject",
            name="table",
            category="breakfast_table",
            model="lcsizg",
            bounding_box=[0.5, 0.5, 0.8],
            fixed_base=True,
            position=[0.7, -0.1, 0.6],
        )
    )
    objects_cfg.append(
        dict(
            type="PrimitiveObject",
            name="box",
            primitive_type="Cube",
            rgba=[1.0, 0, 0, 1.0],
            size=0.05,
            position=[0.53, 0.0, 0.87],
        )
    )
    cfg = dict(scene=scene_cfg, objects=objects_cfg)

    env = og.Environment(configs=cfg)
    og.sim.viewer_camera.set_position_orientation(
        position=[1.0170, 0.5663, 1.0554],
        orientation=[0.1734, 0.5006, 0.8015, 0.2776],
    )
    og.sim.stop()

    grasping_modes = dict(
        sticky="Sticky Mitten - Objects are magnetized when they touch the fingers and a CLOSE command is given",
        assisted="Assisted Grasping - Objects are magnetized when they touch the fingers, are within the hand, and a CLOSE command is given",
        physical="Physical Grasping - No additional grasping assistance applied",
    )

    def object_is_in_hand(robot, obj):
        eef_position = robot.get_eef_position()
        obj_position = obj.get_position_orientation()[0]
        return th.norm(eef_position - obj_position) < 0.05
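
    # Note: object_is_in_hand is a proximity heuristic, not a contact or
    # attachment query; it treats "within 5 cm of the end effector" as being
    # held, which is enough to distinguish a grasp from a release in this test.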

    for grasping_mode in grasping_modes:
        robot = Fetch(
            name="Fetch",
            obs_modalities=[],
            controller_config={"arm_0": {"name": "InverseKinematicsController", "mode": "pose_absolute_ori"}},
            grasping_mode=grasping_mode,
        )
        env.scene.add_object(robot)

        # At least one step is always needed while sim is playing for any imported object to be fully initialized
        og.sim.play()
        env.scene.reset()

        # Reset robot and make sure it's not moving
        robot.reset()
        robot.keep_still()

        # Let the box settle
        for _ in range(10):
            og.sim.step()

        action_primitives = StarterSemanticActionPrimitives(env)

        box_object = env.scene.object_registry("name", "box")
        target_eef_pos = box_object.get_position_orientation()[0]
        target_eef_orn = robot.get_eef_orientation()

        # Move eef to the box
        for action in action_primitives._move_hand_direct_ik((target_eef_pos, target_eef_orn), pos_thresh=0.01):
            env.step(action)

        # Grasp the box
        for action in action_primitives._execute_grasp():
            env.step(action)
        assert object_is_in_hand(robot, box_object), f"Grasping mode {grasping_mode} failed to grasp the object"

        # Move eef
        eef_offset = th.tensor([0.0, 0.2, 0.2])
        for action in action_primitives._move_hand_direct_ik((target_eef_pos + eef_offset, target_eef_orn)):
            env.step(action)
        assert object_is_in_hand(robot, box_object), f"Grasping mode {grasping_mode} failed to keep the object in hand"

        # Release the box
        for action in action_primitives._execute_release():
            env.step(action)
        for _ in range(10):
            og.sim.step()
        assert not object_is_in_hand(robot, box_object), f"Grasping mode {grasping_mode} failed to release the object"

        # Stop the simulator and remove the robot
        og.sim.stop()
        env.scene.remove_object(obj=robot)

    og.clear()
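

# Hypothetical convenience entry point, not part of the original test suite:
# lets the file be run directly (assuming a working OmniGibson install),
# mirroring what pytest would otherwise collect and run.
if __name__ == "__main__":
    test_camera_pose_flatcache_on()
    test_robot_load_drive()
    test_grasping_mode()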