Skip to content

Commit

Permalink
Merge branch 'og-develop' into fixes/curobo-particle
Browse files Browse the repository at this point in the history
  • Loading branch information
ChengshuLi authored Feb 22, 2025
2 parents 539ef82 + 2e85a5a commit 43e82a7
Show file tree
Hide file tree
Showing 34 changed files with 569 additions and 141 deletions.
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -22,4 +22,4 @@ repos:
omnigibson/utils/numpy_utils\.py| # Utilities specifically for numpy operations and dtype
tests/test_transform_utils\.py # This test file uses Scipy and Numpy
)$
stages: [commit]
stages: [pre-commit]
24 changes: 18 additions & 6 deletions omnigibson/action_primitives/curobo.py
Original file line number Diff line number Diff line change
Expand Up @@ -91,14 +91,11 @@ def __init__(
MotionGenConfig.load_from_robot_config(...)
batch_size (int): Size of batches for computing trajectories. This must be FIXED
use_cuda_graph (bool): Whether to use CUDA graph for motion generation or not
collision_activation_distance (float): Distance threshold at which a collision is detected.
Increasing this value will make the motion planner more conservative in its planning with respect
to the underlying sphere representation of the robot
debug (bool): Whether to debug generation or not, setting this True will set use_cuda_graph to False implicitly
use_default_embodiment_only (bool): Whether to use only the default embodiment for the robot or not
collision_activation_distance (float): Activation distance for collision checking; this affects
1) how close the computed trajectories can get to obstacles and
2) how close the robot can get to obstacles during collision checks
collision_activation_distance (float): Distance threshold at which a collision with the world is detected.
Increasing this value will make the motion planner more conservative in its planning with respect
                to the underlying sphere representation of the robot. Note that this does not affect self-collision detection.
"""
# Only support one scene for now -- verify that this is the case
assert len(og.sim.scenes) == 1
Expand Down Expand Up @@ -133,13 +130,28 @@ def __init__(
self.ee_link = dict()
self.additional_links = dict()
self.base_link = dict()

# Grab mapping from robot joint name to index
reset_qpos = self.robot.reset_joint_pos
joint_idx_mapping = {joint.joint_name: i for i, joint in enumerate(self.robot.joints.values())}
for emb_sel, robot_cfg_path in robot_cfg_path_dict.items():
content_path = lazy.curobo.types.file_path.ContentPath(
robot_config_absolute_path=robot_cfg_path, robot_usd_absolute_path=robot_usd_path
)
robot_cfg_dict = lazy.curobo.cuda_robot_model.util.load_robot_yaml(content_path)["robot_cfg"]
robot_cfg_dict["kinematics"]["use_usd_kinematics"] = True

# Automatically populate the locked joints and retract config from the robot values
for joint_name, lock_val in robot_cfg_dict["kinematics"]["lock_joints"].items():
if lock_val is None:
joint_idx = joint_idx_mapping[joint_name]
robot_cfg_dict["kinematics"]["lock_joints"][joint_name] = reset_qpos[joint_idx]
if robot_cfg_dict["kinematics"]["cspace"]["retract_config"] is None:
robot_cfg_dict["kinematics"]["cspace"]["retract_config"] = [
reset_qpos[joint_idx_mapping[joint_name]]
for joint_name in robot_cfg_dict["kinematics"]["cspace"]["joint_names"]
]

self.ee_link[emb_sel] = robot_cfg_dict["kinematics"]["ee_link"]
# RobotConfig.from_dict will append ee_link to link_names, so we make a copy here.
self.additional_links[emb_sel] = robot_cfg_dict["kinematics"]["link_names"].copy()
Expand Down
4 changes: 2 additions & 2 deletions omnigibson/controllers/joint_controller.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
from omnigibson.macros import create_module_macros
from omnigibson.utils.backend_utils import _compute_backend as cb
from omnigibson.utils.backend_utils import add_compute_function
from omnigibson.utils.python_utils import assert_valid_key
from omnigibson.utils.python_utils import assert_valid_key, torch_compile
from omnigibson.utils.ui_utils import create_module_logger

# Create module logger
Expand Down Expand Up @@ -307,7 +307,7 @@ def command_dim(self):
return len(self.dof_idx)


@th.compile
@torch_compile
def _compute_joint_torques_torch(
u: th.Tensor,
mm: th.Tensor,
Expand Down
99 changes: 86 additions & 13 deletions omnigibson/envs/data_wrapper.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,11 +29,13 @@ class DataWrapper(EnvironmentWrapper):
An OmniGibson environment wrapper for writing data to an HDF5 file.
"""

def __init__(self, env, output_path, only_successes=True):
def __init__(self, env, output_path, overwrite=True, only_successes=True):
"""
Args:
env (Environment): The environment to wrap
output_path (str): path to store hdf5 data file
overwrite (bool): If set, will overwrite any pre-existing data found at @output_path.
Otherwise, will load the data and append to it
only_successes (bool): Whether to only save successful episodes
"""
# Make sure the wrapped environment inherits correct omnigibson format
Expand All @@ -53,13 +55,17 @@ def __init__(self, env, output_path, only_successes=True):

Path(os.path.dirname(output_path)).mkdir(parents=True, exist_ok=True)
log.info(f"\nWriting OmniGibson dataset hdf5 to: {output_path}\n")
self.hdf5_file = h5py.File(output_path, "w")
data_grp = self.hdf5_file.create_group("data")
env.task.write_task_metadata()
scene_file = og.sim.save()[0]
config = deepcopy(env.config)
self.add_metadata(group=data_grp, name="config", data=config)
self.add_metadata(group=data_grp, name="scene_file", data=scene_file)
self.hdf5_file = h5py.File(output_path, "w" if overwrite else "a")
if "data" not in set(self.hdf5_file.keys()):
data_grp = self.hdf5_file.create_group("data")
else:
data_grp = self.hdf5_file["data"]
if overwrite or "config" not in set(data_grp.attrs.keys()):
env.task.write_task_metadata()
scene_file = og.sim.save()[0]
config = deepcopy(env.config)
self.add_metadata(group=data_grp, name="config", data=config)
self.add_metadata(group=data_grp, name="scene_file", data=scene_file)

# Run super
super().__init__(env=env)
Expand All @@ -73,7 +79,6 @@ def step(self, action, n_render_iterations=1):
n_render_iterations (int): Number of rendering iterations to use before returning observations
Returns:
5-tuple:
5-tuple:
- dict: state, i.e. next observation
- float: reward, i.e. reward at this current timestep
Expand Down Expand Up @@ -277,22 +282,40 @@ class DataCollectionWrapper(DataWrapper):
"""

def __init__(
self, env, output_path, viewport_camera_path="/World/viewer_camera", only_successes=True, use_vr=False
self,
env,
output_path,
viewport_camera_path="/World/viewer_camera",
overwrite=True,
only_successes=True,
use_vr=False,
obj_attr_keys=None,
):
"""
Args:
env (Environment): The environment to wrap
output_path (str): path to store hdf5 data file
viewport_camera_path (str): prim path to the camera to use when rendering the main viewport during
data collection
overwrite (bool): If set, will overwrite any pre-existing data found at @output_path.
Otherwise, will load the data and append to it
only_successes (bool): Whether to only save successful episodes
use_vr (bool): Whether to use VR headset for data collection
obj_attr_keys (None or list of str): If set, a list of object attributes that should be
cached at the beginning of every episode, e.g.: "scale", "visible", etc. This is useful
for domain randomization settings where specific object attributes not directly tied to
the object's runtime kinematic state are being modified once at the beginning of every episode,
while the simulation is stopped.
"""
# Store additional variables needed for optimized data collection

# Denotes the maximum serialized state size for the current episode
self.max_state_size = 0

# Dict capturing serialized per-episode initial information (e.g.: scales / visibilities) about every object
self.obj_attr_keys = [] if obj_attr_keys is None else obj_attr_keys
self.init_metadata = dict()

# Maps episode step ID to dictionary of systems and objects that should be added / removed to the simulator at
# the given simulator step. See add_transition_info() for more info
self.current_transitions = dict()
Expand All @@ -315,7 +338,12 @@ def __init__(
)

# Run super
super().__init__(env=env, output_path=output_path, only_successes=only_successes)
super().__init__(
env=env,
output_path=output_path,
overwrite=overwrite,
only_successes=only_successes,
)

# Configure the simulator to optimize for data collection
self._optimize_sim_for_data_collection(viewport_camera_path=viewport_camera_path)
Expand Down Expand Up @@ -390,6 +418,17 @@ def reset(self):
# Update max state size
self.max_state_size = max(self.max_state_size, len(state))

# Also store initial metadata not recorded in serialized state
# This is simply serialized
metadata = {key: [] for key in self.obj_attr_keys}
for obj in self.scene.objects:
for key in self.obj_attr_keys:
metadata[key].append(getattr(obj, key))
self.init_metadata = {
key: th.stack(vals, dim=0) if isinstance(vals[0], th.Tensor) else th.tensor(vals, dtype=type(vals[0]))
for key, vals in metadata.items()
}

return init_obs, init_info

def _parse_step_data(self, action, obs, reward, terminated, truncated, info):
Expand Down Expand Up @@ -422,6 +461,11 @@ def process_traj_to_hdf5(self, traj_data, traj_grp_name, nested_keys=("obs",)):
# Add in transition info
self.add_metadata(group=traj_grp, name="transitions", data=self.current_transitions)

# Add initial metadata information
metadata_grp = traj_grp.create_group("init_metadata")
for name, data in self.init_metadata.items():
metadata_grp.create_dataset(name, data=data)

return traj_grp

def flush_current_traj(self):
Expand Down Expand Up @@ -476,6 +520,7 @@ def create_from_hdf5(
include_sensor_names=None,
exclude_sensor_names=None,
n_render_iterations=5,
overwrite=True,
only_successes=False,
include_env_wrapper=False,
additional_wrapper_configs=None,
Expand Down Expand Up @@ -509,6 +554,8 @@ def create_from_hdf5(
underlying physical state by a few frames, and additionally produces transient visual artifacts when
the physical state changes. Increasing this number will improve the rendered quality at the expense of
speed.
overwrite (bool): If set, will overwrite any pre-existing data found at @output_path.
Otherwise, will load the data and append to it
only_successes (bool): Whether to only save successful episodes
include_env_wrapper (bool): Whether to include environment wrapper stored in the underlying env config
additional_wrapper_configs (None or list of dict): If specified, list of wrapper config(s) specifying
Expand Down Expand Up @@ -568,17 +615,28 @@ def create_from_hdf5(
input_path=input_path,
output_path=output_path,
n_render_iterations=n_render_iterations,
overwrite=overwrite,
only_successes=only_successes,
)

def __init__(self, env, input_path, output_path, n_render_iterations=5, only_successes=False):
def __init__(
self,
env,
input_path,
output_path,
n_render_iterations=5,
overwrite=True,
only_successes=False,
):
"""
Args:
env (Environment): The environment to wrap
input_path (str): path to input hdf5 collected data file
output_path (str): path to store output hdf5 data file
n_render_iterations (int): Number of rendering iterations to use when loading each stored frame from the
recorded data
overwrite (bool): If set, will overwrite any pre-existing data found at @output_path.
Otherwise, will load the data and append to it
only_successes (bool): Whether to only save successful episodes
"""
# Make sure transition rules are DISABLED for playback since we manually propagate transitions
Expand All @@ -592,7 +650,12 @@ def __init__(self, env, input_path, output_path, n_render_iterations=5, only_suc
self.n_render_iterations = n_render_iterations

# Run super
super().__init__(env=env, output_path=output_path, only_successes=only_successes)
super().__init__(
env=env,
output_path=output_path,
overwrite=overwrite,
only_successes=only_successes,
)

def _process_obs(self, obs, info):
"""
Expand Down Expand Up @@ -634,6 +697,7 @@ def playback_episode(self, episode_id, record_data=True, video_writer=None, vide
# Grab episode data
transitions = json.loads(traj_grp.attrs["transitions"])
traj_grp = h5py_group_to_torch(traj_grp)
init_metadata = traj_grp["init_metadata"]
action = traj_grp["action"]
state = traj_grp["state"]
state_size = traj_grp["state_size"]
Expand All @@ -643,6 +707,15 @@ def playback_episode(self, episode_id, record_data=True, video_writer=None, vide

# Reset environment
og.sim.restore(scene_files=[self.scene_file])

# Reset object attributes from the stored metadata
with og.sim.stopped():
for attr, vals in init_metadata.items():
assert len(vals) == self.scene.n_objects
for i, obj in enumerate(self.scene.objects):
for attr, vals in init_metadata.items():
val = vals[i]
setattr(obj, attr, val.item() if val.ndim == 0 else val)
self.reset()

# Restore to initial state
Expand Down
Loading

0 comments on commit 43e82a7

Please sign in to comment.