forked from StanfordVL/OmniGibson
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtest_robot_states_no_flatcache.py
44 lines (37 loc) · 1.58 KB
/
test_robot_states_no_flatcache.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
import torch as th
from test_robot_states_flatcache import camera_pose_test, setup_environment
import omnigibson as og
from omnigibson.object_states import ObjectsInFOVOfRobot
from omnigibson.sensors import VisionSensor
from omnigibson.utils.constants import semantic_class_name_to_id
def test_camera_pose_flatcache_off():
    """Run the shared camera-pose checks with flatcache disabled (False)."""
    camera_pose_test(False)
def test_camera_semantic_segmentation():
    """The robot camera's semantic segmentation should contain exactly the
    "agent" and "background" classes — both per-pixel and in the info dict."""
    env = setup_environment(False)
    robot = env.robots[0]
    env.reset()

    # Grab the robot's first vision sensor; fail loudly if there is none.
    cam = next((s for s in robot.sensors.values() if isinstance(s, VisionSensor)), None)
    assert cam is not None

    env.reset()
    obs, info = cam.get_obs()
    seg = obs["seg_semantic"]
    seg_info = info["seg_semantic"]

    name_to_id = semantic_class_name_to_id()
    expected_ids = {name_to_id["agent"], name_to_id["background"]}

    # Every pixel must be labeled as either the agent or the background...
    allowed = th.tensor(sorted(expected_ids), device=seg.device)
    assert th.all(th.isin(seg, allowed))
    # ...and the segmentation info must cover exactly those two class ids.
    assert set(seg_info.keys()) == expected_ids
    og.clear()
def test_object_in_FOV_of_robot():
    """The robot should be the only object in its own field of view, both
    initially and after its camera is teleported far from everything else."""
    env = setup_environment(False)
    robot = env.robots[0]
    env.reset()
    assert robot.states[ObjectsInFOVOfRobot].get_value() == [robot]

    # The robot must expose at least one vision sensor.
    vision_sensors = [s for s in robot.sensors.values() if isinstance(s, VisionSensor)]
    assert len(vision_sensors) > 0
    cam = vision_sensors[0]

    # Move the camera far away; after the sim settles, the robot should
    # still be the only object it sees.
    cam.set_position_orientation(position=[100, 150, 100])
    for _ in range(2):
        og.sim.step()
    assert robot.states[ObjectsInFOVOfRobot].get_value() == [robot]
    og.clear()