Fix/playback scale #1119

Merged 16 commits on Feb 22, 2025
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -22,4 +22,4 @@ repos:
omnigibson/utils/numpy_utils\.py| # Utilities specifically for numpy operations and dtype
tests/test_transform_utils\.py # This test file uses Scipy and Numpy
)$
stages: [commit]
stages: [pre-commit]
24 changes: 18 additions & 6 deletions omnigibson/action_primitives/curobo.py
@@ -90,14 +90,11 @@ def __init__(
MotionGenConfig.load_from_robot_config(...)
batch_size (int): Size of batches for computing trajectories. This must be FIXED
use_cuda_graph (bool): Whether to use CUDA graph for motion generation or not
collision_activation_distance (float): Distance threshold at which a collision is detected.
Increasing this value will make the motion planner more conservative in its planning with respect
to the underlying sphere representation of the robot
debug (bool): Whether to debug generation or not, setting this True will set use_cuda_graph to False implicitly
use_default_embodiment_only (bool): Whether to use only the default embodiment for the robot or not
collision_activation_distance (float): Activation distance for collision checking; this affects
1) how close the computed trajectories can get to obstacles and
2) how close the robot can get to obstacles during collision checks
collision_activation_distance (float): Distance threshold at which a collision with the world is detected.
Increasing this value will make the motion planner more conservative in its planning with respect
to the underlying sphere representation of the robot. Note that this does not affect self-collision detection.
"""
# Only support one scene for now -- verify that this is the case
assert len(og.sim.scenes) == 1
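
To make the documented arguments concrete, here is a hedged usage sketch. The class name CuRoboMotionGenerator and the robot keyword are assumptions about this module; the remaining keyword arguments are taken from the docstring above.

# Hedged sketch: assumes this module exposes CuRoboMotionGenerator with the documented arguments.
from omnigibson.action_primitives.curobo import CuRoboMotionGenerator

# A larger collision_activation_distance keeps planned trajectories farther from world
# obstacles (more conservative planning); per the docstring, it does not affect
# self-collision detection.
planner = CuRoboMotionGenerator(
    robot=robot,                          # an already-loaded OmniGibson robot, assumed in scope
    batch_size=2,                         # fixed batch size used for trajectory computation
    use_cuda_graph=True,
    collision_activation_distance=0.025,  # meters; hypothetical value
)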
@@ -132,13 +129,28 @@ def __init__(
self.ee_link = dict()
self.additional_links = dict()
self.base_link = dict()

# Grab mapping from robot joint name to index
reset_qpos = self.robot.reset_joint_pos
joint_idx_mapping = {joint.joint_name: i for i, joint in enumerate(self.robot.joints.values())}
for emb_sel, robot_cfg_path in robot_cfg_path_dict.items():
content_path = lazy.curobo.types.file_path.ContentPath(
robot_config_absolute_path=robot_cfg_path, robot_usd_absolute_path=robot_usd_path
)
robot_cfg_dict = lazy.curobo.cuda_robot_model.util.load_robot_yaml(content_path)["robot_cfg"]
robot_cfg_dict["kinematics"]["use_usd_kinematics"] = True

# Automatically populate the locked joints and retract config from the robot values
for joint_name, lock_val in robot_cfg_dict["kinematics"]["lock_joints"].items():
if lock_val is None:
joint_idx = joint_idx_mapping[joint_name]
robot_cfg_dict["kinematics"]["lock_joints"][joint_name] = reset_qpos[joint_idx]
if robot_cfg_dict["kinematics"]["cspace"]["retract_config"] is None:
robot_cfg_dict["kinematics"]["cspace"]["retract_config"] = [
reset_qpos[joint_idx_mapping[joint_name]]
for joint_name in robot_cfg_dict["kinematics"]["cspace"]["joint_names"]
]

self.ee_link[emb_sel] = robot_cfg_dict["kinematics"]["ee_link"]
# RobotConfig.from_dict will append ee_link to link_names, so we make a copy here.
self.additional_links[emb_sel] = robot_cfg_dict["kinematics"]["link_names"].copy()
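
The auto-population rule added above can be shown in isolation. This toy sketch uses hypothetical joint names and values; it mirrors the two branches in the diff (filling null lock_joints entries and a null retract_config from the robot's reset qpos).

# Toy illustration of the auto-population logic; all names and values are hypothetical.
reset_qpos = [0.0, 0.3, -1.2, 1.5]
joint_idx_mapping = {"torso_lift_joint": 0, "arm_joint_1": 1, "arm_joint_2": 2, "gripper_joint": 3}

kinematics = {
    "lock_joints": {"gripper_joint": None, "torso_lift_joint": 0.1},
    "cspace": {"joint_names": ["arm_joint_1", "arm_joint_2"], "retract_config": None},
}

# Null lock values are replaced by the robot's reset qpos for that joint
for joint_name, lock_val in kinematics["lock_joints"].items():
    if lock_val is None:
        kinematics["lock_joints"][joint_name] = reset_qpos[joint_idx_mapping[joint_name]]

# A null retract_config is likewise derived from the reset qpos of the cspace joints
if kinematics["cspace"]["retract_config"] is None:
    kinematics["cspace"]["retract_config"] = [
        reset_qpos[joint_idx_mapping[name]] for name in kinematics["cspace"]["joint_names"]
    ]

print(kinematics["lock_joints"])               # {'gripper_joint': 1.5, 'torso_lift_joint': 0.1}
print(kinematics["cspace"]["retract_config"])  # [0.3, -1.2]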
99 changes: 86 additions & 13 deletions omnigibson/envs/data_wrapper.py
@@ -29,11 +29,13 @@ class DataWrapper(EnvironmentWrapper):
An OmniGibson environment wrapper for writing data to an HDF5 file.
"""

def __init__(self, env, output_path, only_successes=True):
def __init__(self, env, output_path, overwrite=True, only_successes=True):
"""
Args:
env (Environment): The environment to wrap
output_path (str): path to store hdf5 data file
overwrite (bool): If set, will overwrite any pre-existing data found at @output_path.
Otherwise, will load the data and append to it
only_successes (bool): Whether to only save successful episodes
"""
# Make sure the wrapped environment inherits correct omnigibson format
@@ -53,13 +55,17 @@ def __init__(self, env, output_path, only_successes=True):

Path(os.path.dirname(output_path)).mkdir(parents=True, exist_ok=True)
log.info(f"\nWriting OmniGibson dataset hdf5 to: {output_path}\n")
self.hdf5_file = h5py.File(output_path, "w")
data_grp = self.hdf5_file.create_group("data")
env.task.write_task_metadata()
scene_file = og.sim.save()[0]
config = deepcopy(env.config)
self.add_metadata(group=data_grp, name="config", data=config)
self.add_metadata(group=data_grp, name="scene_file", data=scene_file)
self.hdf5_file = h5py.File(output_path, "w" if overwrite else "a")
if "data" not in set(self.hdf5_file.keys()):
data_grp = self.hdf5_file.create_group("data")
else:
data_grp = self.hdf5_file["data"]
if overwrite or "config" not in set(data_grp.attrs.keys()):
env.task.write_task_metadata()
scene_file = og.sim.save()[0]
config = deepcopy(env.config)
self.add_metadata(group=data_grp, name="config", data=config)
self.add_metadata(group=data_grp, name="scene_file", data=scene_file)

# Run super
super().__init__(env=env)
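
The overwrite flag maps directly onto the h5py file mode and the presence checks above. A standalone sketch of the same pattern (the file name is illustrative):

# Standalone sketch of the append-vs-overwrite behavior; the file name is illustrative.
import h5py

output_path = "demos.hdf5"
overwrite = False  # False -> open in append mode and reuse any existing "data" group and metadata

f = h5py.File(output_path, "w" if overwrite else "a")
data_grp = f.create_group("data") if "data" not in f else f["data"]
if overwrite or "config" not in data_grp.attrs:
    data_grp.attrs["config"] = "..."  # the wrapper writes the env config and scene file here
f.close()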
@@ -73,7 +79,6 @@ def step(self, action, n_render_iterations=1):
n_render_iterations (int): Number of rendering iterations to use before returning observations

Returns:
5-tuple:
5-tuple:
- dict: state, i.e. next observation
- float: reward, i.e. reward at this current timestep
@@ -277,22 +282,40 @@ class DataCollectionWrapper(DataWrapper):
"""

def __init__(
self, env, output_path, viewport_camera_path="/World/viewer_camera", only_successes=True, use_vr=False
self,
env,
output_path,
viewport_camera_path="/World/viewer_camera",
overwrite=True,
only_successes=True,
use_vr=False,
obj_attr_keys=None,
):
"""
Args:
env (Environment): The environment to wrap
output_path (str): path to store hdf5 data file
viewport_camera_path (str): prim path to the camera to use when rendering the main viewport during
data collection
overwrite (bool): If set, will overwrite any pre-existing data found at @output_path.
Otherwise, will load the data and append to it
only_successes (bool): Whether to only save successful episodes
use_vr (bool): Whether to use VR headset for data collection
obj_attr_keys (None or list of str): If set, a list of object attributes that should be
cached at the beginning of every episode, e.g.: "scale", "visible", etc. This is useful
for domain randomization settings where specific object attributes not directly tied to
the object's runtime kinematic state are being modified once at the beginning of every episode,
while the simulation is stopped.
"""
# Store additional variables needed for optimized data collection

# Denotes the maximum serialized state size for the current episode
self.max_state_size = 0

# Dict capturing serialized per-episode initial information (e.g.: scales / visibilities) about every object
self.obj_attr_keys = [] if obj_attr_keys is None else obj_attr_keys
self.init_metadata = dict()

# Maps episode step ID to dictionary of systems and objects that should be added / removed to the simulator at
# the given simulator step. See add_transition_info() for more info
self.current_transitions = dict()
@@ -315,7 +338,12 @@ def __init__(
)

# Run super
super().__init__(env=env, output_path=output_path, only_successes=only_successes)
super().__init__(
env=env,
output_path=output_path,
overwrite=overwrite,
only_successes=only_successes,
)

# Configure the simulator to optimize for data collection
self._optimize_sim_for_data_collection(viewport_camera_path=viewport_camera_path)
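
As a usage sketch (the environment construction is elided, and env is assumed to be an already-built OmniGibson Environment), a collection run that caches per-episode scale and visibility for domain randomization might look like:

# Hedged usage sketch: `env` is assumed to be an already-constructed OmniGibson Environment.
from omnigibson.envs.data_wrapper import DataCollectionWrapper

wrapped_env = DataCollectionWrapper(
    env=env,
    output_path="randomized_demos.hdf5",  # illustrative path
    overwrite=False,                      # append to an existing dataset rather than clobbering it
    obj_attr_keys=["scale", "visible"],   # cached once per episode while the sim is stopped
)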
@@ -390,6 +418,17 @@ def reset(self):
# Update max state size
self.max_state_size = max(self.max_state_size, len(state))

# Also store initial metadata that is not captured in the serialized per-step state;
# it is written out separately for each episode
metadata = {key: [] for key in self.obj_attr_keys}
for obj in self.scene.objects:
for key in self.obj_attr_keys:
metadata[key].append(getattr(obj, key))
self.init_metadata = {
key: th.stack(vals, dim=0) if isinstance(vals[0], th.Tensor) else th.tensor(vals, dtype=type(vals[0]))
for key, vals in metadata.items()
}

return init_obs, init_info

def _parse_step_data(self, action, obs, reward, terminated, truncated, info):
@@ -422,6 +461,11 @@ def process_traj_to_hdf5(self, traj_data, traj_grp_name, nested_keys=("obs",)):
# Add in transition info
self.add_metadata(group=traj_grp, name="transitions", data=self.current_transitions)

# Add initial metadata information
metadata_grp = traj_grp.create_group("init_metadata")
for name, data in self.init_metadata.items():
metadata_grp.create_dataset(name, data=data)

return traj_grp

def flush_current_traj(self):
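
Downstream consumers can read the cached attributes straight out of each trajectory group. A minimal read-back sketch follows; the file name and episode group name are assumptions about the on-disk layout, not fixed by this PR.

# Minimal read-back sketch; "demos.hdf5" and "demo_0" are assumed names.
import h5py

with h5py.File("demos.hdf5", "r") as f:
    init_metadata = f["data"]["demo_0"]["init_metadata"]
    for attr_name, dataset in init_metadata.items():
        print(attr_name, dataset[:])  # one entry per object in the scene, e.g. per-object scale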
@@ -476,6 +520,7 @@ def create_from_hdf5(
include_sensor_names=None,
exclude_sensor_names=None,
n_render_iterations=5,
overwrite=True,
only_successes=False,
include_env_wrapper=False,
additional_wrapper_configs=None,
@@ -509,6 +554,8 @@ def create_from_hdf5(
underlying physical state by a few frames, and additionally produces transient visual artifacts when
the physical state changes. Increasing this number will improve the rendered quality at the expense of
speed.
overwrite (bool): If set, will overwrite any pre-existing data found at @output_path.
Otherwise, will load the data and append to it
only_successes (bool): Whether to only save successful episodes
include_env_wrapper (bool): Whether to include environment wrapper stored in the underlying env config
additional_wrapper_configs (None or list of dict): If specified, list of wrapper config(s) specifying
@@ -568,17 +615,28 @@ def create_from_hdf5(
input_path=input_path,
output_path=output_path,
n_render_iterations=n_render_iterations,
overwrite=overwrite,
only_successes=only_successes,
)

def __init__(self, env, input_path, output_path, n_render_iterations=5, only_successes=False):
def __init__(
self,
env,
input_path,
output_path,
n_render_iterations=5,
overwrite=True,
only_successes=False,
):
"""
Args:
env (Environment): The environment to wrap
input_path (str): path to input hdf5 collected data file
output_path (str): path to store output hdf5 data file
n_render_iterations (int): Number of rendering iterations to use when loading each stored frame from the
recorded data
overwrite (bool): If set, will overwrite any pre-existing data found at @output_path.
Otherwise, will load the data and append to it
only_successes (bool): Whether to only save successful episodes
"""
# Make sure transition rules are DISABLED for playback since we manually propagate transitions
@@ -592,7 +650,12 @@ def __init__(self, env, input_path, output_path, n_render_iterations=5, only_suc
self.n_render_iterations = n_render_iterations

# Run super
super().__init__(env=env, output_path=output_path, only_successes=only_successes)
super().__init__(
env=env,
output_path=output_path,
overwrite=overwrite,
only_successes=only_successes,
)

def _process_obs(self, obs, info):
"""
@@ -634,6 +697,7 @@ def playback_episode(self, episode_id, record_data=True, video_writer=None, vide
# Grab episode data
transitions = json.loads(traj_grp.attrs["transitions"])
traj_grp = h5py_group_to_torch(traj_grp)
init_metadata = traj_grp["init_metadata"]
action = traj_grp["action"]
state = traj_grp["state"]
state_size = traj_grp["state_size"]
@@ -643,6 +707,15 @@ def playback_episode(self, episode_id, record_data=True, video_writer=None, vide

# Reset environment
og.sim.restore(scene_files=[self.scene_file])

# Reset object attributes from the stored metadata
with og.sim.stopped():
for attr, vals in init_metadata.items():
assert len(vals) == self.scene.n_objects
for i, obj in enumerate(self.scene.objects):
for attr, vals in init_metadata.items():
val = vals[i]
setattr(obj, attr, val.item() if val.ndim == 0 else val)
self.reset()

# Restore to initial state
54 changes: 19 additions & 35 deletions omnigibson/examples/robots/import_custom_robot.py
@@ -128,7 +128,8 @@
lock_joints: {} # (dict) Maps joint name to "locked" joint configuration. Any joints
# specified here will not be considered active when motion planning
# NOTE: All gripper joints and non-controllable holonomic joints
# will automatically be added here
# will automatically be added here. Null means that the value will be
# dynamically computed at runtime based on the robot's reset qpos
self_collision_ignore: # (dict) Maps link name to list of other ignore links to ignore collisions
# with. Note that bi-directional specification is not necessary,
# e.g.: "torso_link1" does not need to be specified in
@@ -317,27 +318,6 @@
"radius": 0.005
- "center": [0.02, 0.0, -0.007]
"radius": 0.003
default_qpos: # (list of float): Default joint configuration
- 0.0
- 0.0
- 0.0
- 0.0
- 0.0
- 0.0
- 1.906
- 1.906
- -0.991
- -0.991
- 1.571
- 1.571
- 0.915
- 0.915
- -1.571
- -1.571
- 0.03
- 0.03
- 0.03
- 0.03

"""

@@ -755,17 +735,13 @@ def get_joint_upper_limit(root_prim, joint_name):

joint_prims = find_all_joints(robot_prim, only_articulated=True)
all_joint_names = [joint_prim.GetName() for joint_prim in joint_prims]
retract_cfg = curobo_cfg.default_qpos
lock_joints = curobo_cfg.lock_joints.to_dict() if curobo_cfg.lock_joints else {}
if is_holonomic:
# Move the final six joints to the beginning, since the holonomic joints are added at the end
all_joint_names = list(reversed(all_joint_names[-6:])) + all_joint_names[:-6]
retract_cfg = [0] * 6 + retract_cfg
lock_joints["base_footprint_z_joint"] = 0.0
lock_joints["base_footprint_rx_joint"] = 0.0
lock_joints["base_footprint_ry_joint"] = 0.0

joint_to_default_q = {jnt_name: q for jnt_name, q in zip(all_joint_names, retract_cfg)}
lock_joints["base_footprint_z_joint"] = None
lock_joints["base_footprint_rx_joint"] = None
lock_joints["base_footprint_ry_joint"] = None

default_generated_cfg = {
"usd_robot_root": f"/{robot_prim.GetName()}",
Expand All @@ -781,13 +757,13 @@ def get_joint_upper_limit(root_prim, joint_name):
"collision_sphere_buffer": 0.002,
"extra_collision_spheres": {},
"self_collision_ignore": curobo_cfg.self_collision_ignore.to_dict(),
"self_collision_buffer": {root_link: 0.02},
"self_collision_buffer": curobo_cfg.self_collision_buffer.to_dict(),
"use_global_cumul": True,
"mesh_link_names": deepcopy(all_collision_link_names),
"external_asset_path": None,
"cspace": {
"joint_names": all_joint_names,
"retract_config": retract_cfg,
"retract_config": None,
"null_space_weight": [1] * len(all_joint_names),
"cspace_distance_weight": [1] * len(all_joint_names),
"max_jerk": 500.0,
Expand All @@ -798,7 +774,7 @@ def get_joint_upper_limit(root_prim, joint_name):
for eef_link_name, gripper_info in curobo_cfg.eef_to_gripper_info.items():
attached_obj_link_name = f"attached_object_{eef_link_name}"
for jnt_name in gripper_info["joints"]:
default_generated_cfg["lock_joints"][jnt_name] = get_joint_upper_limit(robot_prim, jnt_name)
default_generated_cfg["lock_joints"][jnt_name] = None
default_generated_cfg["extra_links"][attached_obj_link_name] = {
"parent_link_name": eef_link_name,
"link_name": attached_obj_link_name,
@@ -827,22 +803,30 @@ def get_joint_upper_limit(root_prim, joint_name):
base_only_cfg = deepcopy(default_generated_cfg)
base_only_cfg["ee_link"] = root_link
base_only_cfg["link_names"] = []
for jnt_name, default_q in joint_to_default_q.items():
for jnt_name in all_joint_names:
if jnt_name not in base_only_cfg["lock_joints"] and "base_footprint" not in jnt_name:
# Lock this joint
base_only_cfg["lock_joints"][jnt_name] = default_q
base_only_cfg["lock_joints"][jnt_name] = None
save_base_fpath = f"{save_dir}/{robot_name}_description_curobo_base.yaml"
with open(save_base_fpath, "w+") as f:
yaml.dump({"robot_cfg": {"kinematics": base_only_cfg}}, f)

# Create arm only config
arm_only_cfg = deepcopy(default_generated_cfg)
for jnt_name in {"base_footprint_x_joint", "base_footprint_y_joint", "base_footprint_rz_joint"}:
arm_only_cfg["lock_joints"][jnt_name] = 0.0
arm_only_cfg["lock_joints"][jnt_name] = None
save_arm_fpath = f"{save_dir}/{robot_name}_description_curobo_arm.yaml"
with open(save_arm_fpath, "w+") as f:
yaml.dump({"robot_cfg": {"kinematics": arm_only_cfg}}, f)

# Create arm only no torso config
arm_only_no_torso_cfg = deepcopy(arm_only_cfg)
for jnt_name in curobo_cfg.torso_joints:
arm_only_no_torso_cfg["lock_joints"][jnt_name] = None
save_arm_no_torso_fpath = f"{save_dir}/{robot_name}_description_curobo_arm_no_torso.yaml"
with open(save_arm_no_torso_fpath, "w+") as f:
yaml.dump({"robot_cfg": {"kinematics": arm_only_no_torso_cfg}}, f)


@click.command(help=_DOCSTRING)
@click.option(