robot_base

BaseRobot

Bases: USDObject, ControllableObject, GymObservable

Base class for USD-based robot agents.

This class handles object loading and provides method interfaces that should be implemented by subclassed robots.
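
As an illustration, here is a minimal (hypothetical) subclass sketch: "MyBot" and its asset path are placeholders, and a real robot must also satisfy the rest of the ControllableObject interface (controllers, action space, etc.), but this shows the two members most subclasses start with.

from omnigibson.robots.robot_base import BaseRobot

class MyBot(BaseRobot):
    @property
    def usd_path(self):
        # usd_path is abstract on BaseRobot and must point at the robot's USD asset
        return "/path/to/assets/mybot/mybot.usd"  # placeholder path

    @property
    def default_proprio_obs(self):
        # Keys must be valid entries of self._get_proprioception_dict()
        return ["joint_qpos", "joint_qvel", "robot_pos"]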

Source code in omnigibson/robots/robot_base.py
class BaseRobot(USDObject, ControllableObject, GymObservable):
    """
    Base class for USD-based robot agents.

    This class handles object loading, and provides method interfaces that should be
    implemented by subclassed robots.
    """
    def __init__(
        self,
        # Shared kwargs in hierarchy
        name,
        prim_path=None,
        class_id=None,
        uuid=None,
        scale=None,
        visible=True,
        fixed_base=False,
        visual_only=False,
        self_collisions=False,
        load_config=None,

        # Unique to USDObject hierarchy
        abilities=None,

        # Unique to ControllableObject hierarchy
        control_freq=None,
        controller_config=None,
        action_type="continuous",
        action_normalize=True,
        reset_joint_pos=None,

        # Unique to this class
        obs_modalities="all",
        proprio_obs="default",

        **kwargs,
    ):
        """
        Args:
            name (str): Name for the object. Names need to be unique per scene
            prim_path (None or str): global path in the stage to this object. If not specified, will automatically be
                created at /World/<name>
            class_id (None or int): What class ID the object should be assigned in semantic segmentation rendering mode.
                If None, the ID will be inferred from this object's category.
            uuid (None or int): Unique unsigned-integer identifier to assign to this object (max 8-numbers).
                If None is specified, then it will be auto-generated
            scale (None or float or 3-array): if specified, sets either the uniform (float) or x,y,z (3-array) scale
                for this object. A single number corresponds to uniform scaling along the x,y,z axes, whereas a
                3-array specifies per-axis scaling.
            visible (bool): whether to render this object or not in the stage
            fixed_base (bool): whether to fix the base of this object or not
            visual_only (bool): Whether this object should be visual only (and not collide with any other objects)
            self_collisions (bool): Whether to enable self collisions for this object
            load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
                loading this prim at runtime.
            abilities (None or dict): If specified, manually adds specific object states to this object. It should be
                a dict in the form of {ability: {param: value}} containing object abilities and parameters to pass to
                the object state instance constructor.
            control_freq (float): control frequency (in Hz) at which to control the object. If set to be None,
                simulator.import_object will automatically set the control frequency to be 1 / render_timestep by default.
            controller_config (None or dict): nested dictionary mapping controller name(s) to specific controller
                configurations for this object. This will override any default values specified by this class.
            action_type (str): one of {discrete, continuous} - what type of action space to use
            action_normalize (bool): whether to normalize inputted actions. This will override any default values
                specified by this class.
            reset_joint_pos (None or n-array): if specified, should be the joint positions that the object should
                be set to during a reset. If None (default), self.default_joint_pos will be used instead.
            obs_modalities (str or list of str): Observation modalities to use for this robot. Default is "all", which
                corresponds to all modalities being used.
                Otherwise, valid options should be part of omnigibson.sensors.ALL_SENSOR_MODALITIES.
            proprio_obs (str or list of str): proprioception observation key(s) to use for generating proprioceptive
                observations. If str, should be exactly "default" -- this results in the default proprioception
                observations being used, as defined by self.default_proprio_obs. See self._get_proprioception_dict
                for valid key choices
            kwargs (dict): Additional keyword arguments that are used for other super() calls from subclasses, allowing
                for flexible compositions of various object subclasses (e.g.: Robot is USDObject + ControllableObject).
        """
        # Store inputs
        self._obs_modalities = obs_modalities if obs_modalities == "all" else \
            {obs_modalities} if isinstance(obs_modalities, str) else set(obs_modalities)              # this will get updated later when we fill in our sensors
        self._proprio_obs = self.default_proprio_obs if proprio_obs == "default" else list(proprio_obs)

        # Process abilities
        robot_abilities = {"robot": {}}
        if abilities is not None:
            robot_abilities.update(abilities)
        abilities = robot_abilities  # dict.update() returns None, so never assign its result

        # Initialize internal attributes that will be loaded later
        self._sensors = None                     # e.g.: scan sensor, vision sensor

        # If specified, make sure scale is uniform -- this is because non-uniform scale can result in non-matching
        # collision representations for parts of the robot that were optimized (e.g.: bounding sphere for wheels)
        assert scale is None or isinstance(scale, int) or isinstance(scale, float) or np.all(scale == scale[0]), \
            f"Robot scale must be uniform! Got: {scale}"

        # Run super init
        super().__init__(
            prim_path=prim_path,
            usd_path=self.usd_path,
            name=name,
            category=m.ROBOT_CATEGORY,
            class_id=class_id,
            uuid=uuid,
            scale=scale,
            visible=visible,
            fixed_base=fixed_base,
            visual_only=visual_only,
            self_collisions=self_collisions,
            prim_type=PrimType.RIGID,
            include_default_states=True,
            load_config=load_config,
            abilities=abilities,
            control_freq=control_freq,
            controller_config=controller_config,
            action_type=action_type,
            action_normalize=action_normalize,
            reset_joint_pos=reset_joint_pos,
            **kwargs,
        )

    def _post_load(self):
        # Run super post load first
        super()._post_load()

        # Search for any sensors this robot might have attached to any of its links
        self._sensors = dict()
        obs_modalities = set()
        for link_name, link in self._links.items():
            # Search through all children prims and see if we find any sensor
            for prim in link.prim.GetChildren():
                prim_type = prim.GetPrimTypeInfo().GetTypeName()
                if prim_type in SENSOR_PRIMS_TO_SENSOR_CLS:
                    # Infer what obs modalities to use for this sensor
                    sensor_cls = SENSOR_PRIMS_TO_SENSOR_CLS[prim_type]
                    modalities = sensor_cls.all_modalities if self._obs_modalities == "all" else \
                        sensor_cls.all_modalities.intersection(self._obs_modalities)
                    obs_modalities = obs_modalities.union(modalities)
                    # Create the sensor and store it internally
                    sensor = create_sensor(
                        sensor_type=prim_type,
                        prim_path=str(prim.GetPrimPath()),
                        name=f"{self.name}:{link_name}_{prim_type}_sensor",
                        modalities=modalities,
                    )
                    self._sensors[sensor.name] = sensor

        # Since proprioception isn't an actual sensor, we need to possibly manually add it here as well
        if self._obs_modalities == "all":
            obs_modalities.add("proprio")

        # Update our overall obs modalities
        self._obs_modalities = obs_modalities

    def _initialize(self):
        # Run super first
        super()._initialize()

        # Initialize all sensors
        for sensor in self._sensors.values():
            sensor.initialize()

        # Load the observation space for this robot
        self.load_observation_space()

        # Validate this robot configuration
        self._validate_configuration()

    def _validate_configuration(self):
        """
        Run any needed sanity checks to make sure this robot was created correctly.
        """
        pass

    def can_toggle(self, toggle_position, toggle_distance_threshold):
        """
        Returns True if the part of the robot that can toggle a toggleable is within the given range of a
        point corresponding to a toggle marker. By default, we assume the robot cannot toggle
        toggle markers.

        Args:
            toggle_position (3-array): (x,y,z) cartesian position values as a reference point for evaluating
                whether a toggle can occur
            toggle_distance_threshold (float): distance value below which a toggle is allowed

        Returns:
            bool: True if the part of the robot that can toggle a toggleable is within the given range of a
                point corresponding to a toggle marker. By default, we assume the robot cannot toggle toggle markers
        """
        return False

    def get_obs(self):
        """
        Grabs all observations from the robot. This is keyword-mapped based on each observation modality
            (e.g.: proprio, rgb, etc.)

        Returns:
            dict: Keyword-mapped dictionary mapping observation modality names to
                observations (usually np arrays)
        """
        # Our sensors already know what observation modalities they have, so we simply iterate over all of them
        # and grab their observations, processing them into a flat dict
        obs_dict = dict()
        for sensor_name, sensor in self._sensors.items():
            sensor_obs = sensor.get_obs()
            for obs_modality, obs in sensor_obs.items():
                obs_dict[f"{sensor_name}_{obs_modality}"] = obs

        # Have to handle proprio separately since it's not an actual sensor
        if "proprio" in self._obs_modalities:
            obs_dict["proprio"] = self.get_proprioception()

        return obs_dict

    def get_proprioception(self):
        """
        Returns:
            n-array: numpy array of all robot-specific proprioceptive observations.
        """
        proprio_dict = self._get_proprioception_dict()
        return np.concatenate([proprio_dict[obs] for obs in self._proprio_obs])

    def _get_proprioception_dict(self):
        """
        Returns:
            dict: keyword-mapped proprioception observations available for this robot.
                Can be extended by subclasses
        """
        joint_positions = self.get_joint_positions(normalized=False)
        joint_velocities = self.get_joint_velocities(normalized=False)
        joint_efforts = self.get_joint_efforts(normalized=False)
        pos, ori = self.get_position(), self.get_rpy()
        return dict(
            joint_qpos=joint_positions,
            joint_qpos_sin=np.sin(joint_positions),
            joint_qpos_cos=np.cos(joint_positions),
            joint_qvel=joint_velocities,
            joint_qeffort=joint_efforts,
            robot_pos=pos,
            robot_ori_cos=np.cos(ori),
            robot_ori_sin=np.sin(ori),
            robot_lin_vel=self.get_linear_velocity(),
            robot_ang_vel=self.get_angular_velocity(),
        )

    def _load_observation_space(self):
        # We compile observation spaces from our sensors
        obs_space = dict()

        for sensor_name, sensor in self._sensors.items():
            # Load the sensor observation space
            sensor_obs_space = sensor.load_observation_space()
            for obs_modality, obs_modality_space in sensor_obs_space.items():
                obs_space[f"{sensor_name}_{obs_modality}"] = obs_modality_space

        # Have to handle proprio separately since it's not an actual sensor
        if "proprio" in self._obs_modalities:
            obs_space["proprio"] = self._build_obs_box_space(shape=(self.proprioception_dim,), low=-np.inf, high=np.inf)

        return obs_space

    def add_obs_modality(self, modality):
        """
        Adds observation modality @modality to this robot. Note: Should be one of omnigibson.sensors.ALL_SENSOR_MODALITIES

        Args:
            modality (str): Observation modality to add to this robot
        """
        # Iterate over all sensors we own, and if the requested modality is a part of its possible valid modalities,
        # then we add it
        for sensor in self._sensors.values():
            if modality in sensor.all_modalities:
                sensor.add_modality(modality=modality)

    def remove_obs_modality(self, modality):
        """
        Remove observation modality @modality from this robot. Note: Should be one of
        omnigibson.sensors.ALL_SENSOR_MODALITIES

        Args:
            modality (str): Observation modality to remove from this robot
        """
        # Iterate over all sensors we own, and if the requested modality is a part of its possible valid modalities,
        # then we remove it
        for sensor in self._sensors.values():
            if modality in sensor.all_modalities:
                sensor.remove_modality(modality=modality)

    def visualize_sensors(self):
        """
        Renders this robot's key sensors, visualizing them via matplotlib plots
        """
        frames = dict()
        remaining_obs_modalities = deepcopy(self.obs_modalities)
        for sensor in self.sensors.values():
            obs = sensor.get_obs()
            sensor_frames = []
            if isinstance(sensor, VisionSensor):
                # We check for rgb, depth, normal, seg_instance
                for modality in ["rgb", "depth", "normal", "seg_instance"]:
                    if modality in sensor.modalities:
                        ob = obs[modality]
                        if modality == "rgb":
                            # Ignore alpha channel, map to floats
                            ob = ob[:, :, :3] / 255.0
                        elif modality == "seg_instance":
                            # Map IDs to rgb
                            ob = segmentation_to_rgb(ob, N=256) / 255.0
                        elif modality == "normal":
                            # Re-map to 0 - 1 range
                            ob = (ob + 1.0) / 2.0
                        else:
                            # Depth, nothing to do here
                            pass
                        # Add this observation to our frames and remove the modality
                        sensor_frames.append((modality, ob))
                        remaining_obs_modalities -= {modality}
                    else:
                        # Warn user that we didn't find this modality
                        print(f"Modality {modality} is not active in sensor {sensor.name}, skipping...")
            elif isinstance(sensor, ScanSensor):
                # We check for occupancy_grid
                occupancy_grid = obs.get("occupancy_grid", None)
                if occupancy_grid is not None:
                    sensor_frames.append(("occupancy_grid", occupancy_grid))
                    remaining_obs_modalities -= {"occupancy_grid"}

            # Map the sensor name to the frames for that sensor
            frames[sensor.name] = sensor_frames

        # Warn user that any remaining modalities are not able to be visualized
        if len(remaining_obs_modalities) > 0:
            print(f"Modalities: {remaining_obs_modalities} cannot be visualized, skipping...")

        # Write all the frames to a plot
        for sensor_name, sensor_frames in frames.items():
            n_sensor_frames = len(sensor_frames)
            if n_sensor_frames > 0:
                fig, axes = plt.subplots(nrows=1, ncols=n_sensor_frames)
                if n_sensor_frames == 1:
                    axes = [axes]
                # Dump frames and set each subtitle
                for i, (modality, frame) in enumerate(sensor_frames):
                    axes[i].imshow(frame)
                    axes[i].set_title(modality)
                    axes[i].set_axis_off()
                # Set title
                fig.suptitle(sensor_name)
                plt.show(block=False)

        # One final plot show so all the figures get rendered
        plt.show()

    def remove(self):
        # Remove all sensors
        for sensor in self._sensors.values():
            sensor.remove()

        # Run super
        super().remove()

    @property
    def sensors(self):
        """
        Returns:
            dict: Keyword-mapped dictionary mapping sensor names to BaseSensor instances owned by this robot
        """
        return self._sensors

    @property
    def obs_modalities(self):
        """
        Returns:
            set of str: Observation modalities used for this robot (e.g.: proprio, rgb, etc.)
        """
        assert self._loaded, "Cannot check observation modalities until we load this robot!"
        return self._obs_modalities

    @property
    def proprioception_dim(self):
        """
        Returns:
            int: Size of self.get_proprioception() vector
        """
        return len(self.get_proprioception())

    @property
    def default_proprio_obs(self):
        """
        Returns:
            list of str: Default proprioception observations to use
        """
        return []

    @property
    def model_name(self):
        """
        Returns:
            str: name of this robot model. usually corresponds to the class name of a given robot model
        """
        return self.__class__.__name__

    @property
    @abstractmethod
    def usd_path(self):
        # For all robots, this must be specified a priori, before we actually initialize the USDObject constructor!
        # So we override the parent implementation, and make this an abstract method
        raise NotImplementedError

    @property
    def urdf_path(self):
        """
        Returns:
            str: file path to the robot urdf file.
        """
        raise NotImplementedError

    @classproperty
    def _do_not_register_classes(cls):
        # Don't register this class since it's an abstract template
        classes = super()._do_not_register_classes
        classes.add("BaseRobot")
        return classes

    @classproperty
    def _cls_registry(cls):
        # Global robot registry -- override super registry
        global REGISTERED_ROBOTS
        return REGISTERED_ROBOTS

default_proprio_obs property

Returns:

    list of str: Default proprioception observations to use

model_name property

Returns:

    str: Name of this robot model. Usually corresponds to the class name of a given robot model

obs_modalities property

Returns:

    set of str: Observation modalities used for this robot (e.g.: proprio, rgb, etc.)

proprioception_dim property

Returns:

    int: Size of self.get_proprioception() vector

sensors property

Returns:

    dict: Keyword-mapped dictionary mapping sensor names to BaseSensor instances owned by this robot

urdf_path property

Returns:

    str: File path to the robot URDF file.

__init__(name, prim_path=None, class_id=None, uuid=None, scale=None, visible=True, fixed_base=False, visual_only=False, self_collisions=False, load_config=None, abilities=None, control_freq=None, controller_config=None, action_type='continuous', action_normalize=True, reset_joint_pos=None, obs_modalities='all', proprio_obs='default', **kwargs)

Parameters:

    name (str, required): Name for the object. Names need to be unique per scene.
    prim_path (None or str, default None): Global path in the stage to this object. If not specified, will automatically be created at /World/<name>.
    class_id (None or int, default None): What class ID the object should be assigned in semantic segmentation rendering mode. If None, the ID will be inferred from this object's category.
    uuid (None or int, default None): Unique unsigned-integer identifier to assign to this object (max 8 digits). If None, it will be auto-generated.
    scale (None or float or 3-array, default None): If specified, sets either the uniform (float) or x,y,z (3-array) scale for this object. A single number corresponds to uniform scaling along the x,y,z axes, whereas a 3-array specifies per-axis scaling.
    visible (bool, default True): Whether to render this object in the stage.
    fixed_base (bool, default False): Whether to fix the base of this object.
    visual_only (bool, default False): Whether this object should be visual only (and not collide with any other objects).
    self_collisions (bool, default False): Whether to enable self-collisions for this object.
    load_config (None or dict, default None): If specified, should contain keyword-mapped values that are relevant for loading this prim at runtime.
    abilities (None or dict, default None): If specified, manually adds specific object states to this object. Should be a dict of the form {ability: {param: value}} containing object abilities and parameters to pass to the object state instance constructor.
    control_freq (float, default None): Control frequency (in Hz) at which to control the object. If None, simulator.import_object will automatically set the control frequency to 1 / render_timestep.
    controller_config (None or dict, default None): Nested dictionary mapping controller name(s) to specific controller configurations for this object. This will override any default values specified by this class.
    action_type (str, default 'continuous'): One of {discrete, continuous}; what type of action space to use.
    action_normalize (bool, default True): Whether to normalize input actions. This will override any default values specified by this class.
    reset_joint_pos (None or n-array, default None): If specified, the joint positions the object should be set to during a reset. If None, self.default_joint_pos will be used instead.
    obs_modalities (str or list of str, default 'all'): Observation modalities to use for this robot. "all" corresponds to all modalities being used; otherwise, valid options should be part of omnigibson.sensors.ALL_SENSOR_MODALITIES.
    proprio_obs (str or list of str, default 'default'): Proprioception observation key(s) to use for generating proprioceptive observations. If str, should be exactly "default", which results in the default proprioception observations being used, as defined by self.default_proprio_obs. See self._get_proprioception_dict for valid key choices.
    kwargs (dict, default {}): Additional keyword arguments that are used for other super() calls from subclasses, allowing for flexible composition of various object subclasses (e.g., Robot is USDObject + ControllableObject).
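
For example, a construction sketch; "Turtlebot" stands in for any concrete BaseRobot subclass shipped with OmniGibson, and the keyword arguments are the ones documented above:

from omnigibson.robots import Turtlebot  # assumed concrete subclass

robot = Turtlebot(
    name="agent_0",                   # must be unique per scene
    obs_modalities=["rgb", "depth"],  # subset of ALL_SENSOR_MODALITIES
    proprio_obs="default",            # use self.default_proprio_obs keys
    action_normalize=True,
)
# control_freq is left as None here, so simulator.import_object will set it
# to 1 / render_timestep when the robot is imported.
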
Source code in omnigibson/robots/robot_base.py
def __init__(
    self,
    # Shared kwargs in hierarchy
    name,
    prim_path=None,
    class_id=None,
    uuid=None,
    scale=None,
    visible=True,
    fixed_base=False,
    visual_only=False,
    self_collisions=False,
    load_config=None,

    # Unique to USDObject hierarchy
    abilities=None,

    # Unique to ControllableObject hierarchy
    control_freq=None,
    controller_config=None,
    action_type="continuous",
    action_normalize=True,
    reset_joint_pos=None,

    # Unique to this class
    obs_modalities="all",
    proprio_obs="default",

    **kwargs,
):
    """
    Args:
        name (str): Name for the object. Names need to be unique per scene
        prim_path (None or str): global path in the stage to this object. If not specified, will automatically be
            created at /World/<name>
        class_id (None or int): What class ID the object should be assigned in semantic segmentation rendering mode.
            If None, the ID will be inferred from this object's category.
        uuid (None or int): Unique unsigned-integer identifier to assign to this object (max 8-numbers).
            If None is specified, then it will be auto-generated
        scale (None or float or 3-array): if specified, sets either the uniform (float) or x,y,z (3-array) scale
            for this object. A single number corresponds to uniform scaling along the x,y,z axes, whereas a
            3-array specifies per-axis scaling.
        visible (bool): whether to render this object or not in the stage
        fixed_base (bool): whether to fix the base of this object or not
        visual_only (bool): Whether this object should be visual only (and not collide with any other objects)
        self_collisions (bool): Whether to enable self collisions for this object
        load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
            loading this prim at runtime.
        abilities (None or dict): If specified, manually adds specific object states to this object. It should be
            a dict in the form of {ability: {param: value}} containing object abilities and parameters to pass to
            the object state instance constructor.
        control_freq (float): control frequency (in Hz) at which to control the object. If set to be None,
            simulator.import_object will automatically set the control frequency to be 1 / render_timestep by default.
        controller_config (None or dict): nested dictionary mapping controller name(s) to specific controller
            configurations for this object. This will override any default values specified by this class.
        action_type (str): one of {discrete, continuous} - what type of action space to use
        action_normalize (bool): whether to normalize inputted actions. This will override any default values
            specified by this class.
        reset_joint_pos (None or n-array): if specified, should be the joint positions that the object should
            be set to during a reset. If None (default), self.default_joint_pos will be used instead.
        obs_modalities (str or list of str): Observation modalities to use for this robot. Default is "all", which
            corresponds to all modalities being used.
            Otherwise, valid options should be part of omnigibson.sensors.ALL_SENSOR_MODALITIES.
        proprio_obs (str or list of str): proprioception observation key(s) to use for generating proprioceptive
            observations. If str, should be exactly "default" -- this results in the default proprioception
            observations being used, as defined by self.default_proprio_obs. See self._get_proprioception_dict
            for valid key choices
        kwargs (dict): Additional keyword arguments that are used for other super() calls from subclasses, allowing
            for flexible compositions of various object subclasses (e.g.: Robot is USDObject + ControllableObject).
    """
    # Store inputs
    self._obs_modalities = obs_modalities if obs_modalities == "all" else \
        {obs_modalities} if isinstance(obs_modalities, str) else set(obs_modalities)              # this will get updated later when we fill in our sensors
    self._proprio_obs = self.default_proprio_obs if proprio_obs == "default" else list(proprio_obs)

    # Process abilities
    robot_abilities = {"robot": {}}
    if abilities is not None:
        robot_abilities.update(abilities)
    abilities = robot_abilities  # dict.update() returns None, so never assign its result

    # Initialize internal attributes that will be loaded later
    self._sensors = None                     # e.g.: scan sensor, vision sensor

    # If specified, make sure scale is uniform -- this is because non-uniform scale can result in non-matching
    # collision representations for parts of the robot that were optimized (e.g.: bounding sphere for wheels)
    assert scale is None or isinstance(scale, int) or isinstance(scale, float) or np.all(scale == scale[0]), \
        f"Robot scale must be uniform! Got: {scale}"

    # Run super init
    super().__init__(
        prim_path=prim_path,
        usd_path=self.usd_path,
        name=name,
        category=m.ROBOT_CATEGORY,
        class_id=class_id,
        uuid=uuid,
        scale=scale,
        visible=visible,
        fixed_base=fixed_base,
        visual_only=visual_only,
        self_collisions=self_collisions,
        prim_type=PrimType.RIGID,
        include_default_states=True,
        load_config=load_config,
        abilities=abilities,
        control_freq=control_freq,
        controller_config=controller_config,
        action_type=action_type,
        action_normalize=action_normalize,
        reset_joint_pos=reset_joint_pos,
        **kwargs,
    )

add_obs_modality(modality)

Adds observation modality @modality to this robot. Note: Should be one of omnigibson.sensors.ALL_SENSOR_MODALITIES

Parameters:

    modality (str, required): Observation modality to add to this robot
Source code in omnigibson/robots/robot_base.py
def add_obs_modality(self, modality):
    """
    Adds observation modality @modality to this robot. Note: Should be one of omnigibson.sensors.ALL_SENSOR_MODALITIES

    Args:
        modality (str): Observation modality to add to this robot
    """
    # Iterate over all sensors we own, and if the requested modality is a part of its possible valid modalities,
    # then we add it
    for sensor in self._sensors.values():
        if modality in sensor.all_modalities:
            sensor.add_modality(modality=modality)

can_toggle(toggle_position, toggle_distance_threshold)

Returns True if the part of the robot that can toggle a toggleable is within the given range of a point corresponding to a toggle marker. By default, we assume the robot cannot toggle toggle markers.

Parameters:

    toggle_position (3-array, required): (x,y,z) cartesian position values as a reference point for evaluating whether a toggle can occur
    toggle_distance_threshold (float, required): distance value below which a toggle is allowed

Returns:

    bool: True if the part of the robot that can toggle a toggleable is within the given range of a point corresponding to a toggle marker. By default, we assume the robot cannot toggle toggle markers.

Source code in omnigibson/robots/robot_base.py
def can_toggle(self, toggle_position, toggle_distance_threshold):
    """
    Returns True if the part of the robot that can toggle a toggleable is within the given range of a
    point corresponding to a toggle marker. By default, we assume the robot cannot toggle
    toggle markers.

    Args:
        toggle_position (3-array): (x,y,z) cartesian position values as a reference point for evaluating
            whether a toggle can occur
        toggle_distance_threshold (float): distance value below which a toggle is allowed

    Returns:
        bool: True if the part of the robot that can toggle a toggleable is within the given range of a
            point corresponding to a toggle marker. By default, we assume the robot cannot toggle toggle markers
    """
    return False
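
Subclasses that can actually toggle objects are expected to override this. A hedged sketch, assuming the robot exposes its links dict via self.links and designates a hypothetical "finger_link" as the toggling part:

import numpy as np

class MyToggleBot(MyBot):  # hypothetical subclass from the sketch above
    def can_toggle(self, toggle_position, toggle_distance_threshold):
        # Compare a designated link's world position to the toggle marker
        finger_pos = self.links["finger_link"].get_position()  # "finger_link" is a placeholder
        return np.linalg.norm(finger_pos - np.array(toggle_position)) < toggle_distance_threshold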

get_obs()

Grabs all observations from the robot. This is keyword-mapped based on each observation modality (e.g.: proprio, rgb, etc.)

Returns:

    dict: Keyword-mapped dictionary mapping observation modality names to observations (usually np arrays)

Source code in omnigibson/robots/robot_base.py
def get_obs(self):
    """
    Grabs all observations from the robot. This is keyword-mapped based on each observation modality
        (e.g.: proprio, rgb, etc.)

    Returns:
        dict: Keyword-mapped dictionary mapping observation modality names to
            observations (usually np arrays)
    """
    # Our sensors already know what observation modalities they have, so we simply iterate over all of them
    # and grab their observations, processing them into a flat dict
    obs_dict = dict()
    for sensor_name, sensor in self._sensors.items():
        sensor_obs = sensor.get_obs()
        for obs_modality, obs in sensor_obs.items():
            obs_dict[f"{sensor_name}_{obs_modality}"] = obs

    # Have to handle proprio separately since it's not an actual sensor
    if "proprio" in self._obs_modalities:
        obs_dict["proprio"] = self.get_proprioception()

    return obs_dict
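
Note the flat key convention: each sensor reading is stored under f"{sensor_name}_{obs_modality}", and proprioception (when active) under the plain key "proprio". A usage sketch, assuming an initialized robot with a vision sensor:

obs = robot.get_obs()
# Sensor names follow "<robot_name>:<link_name>_<prim_type>_sensor" (see _post_load),
# so e.g. all RGB frames can be collected like this:
rgb_frames = {k: v for k, v in obs.items() if k.endswith("_rgb")}
proprio = obs.get("proprio")  # None unless "proprio" is an active modality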

get_proprioception()

Returns:

    n-array: numpy array of all robot-specific proprioceptive observations

Source code in omnigibson/robots/robot_base.py
def get_proprioception(self):
    """
    Returns:
        n-array: numpy array of all robot-specific proprioceptive observations.
    """
    proprio_dict = self._get_proprioception_dict()
    return np.concatenate([proprio_dict[obs] for obs in self._proprio_obs])
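
The returned vector is simply the concatenation, in order, of the keys requested via proprio_obs from _get_proprioception_dict(). A sketch of requesting a custom subset (key names taken from _get_proprioception_dict in the class source above):

robot = MyBot(  # hypothetical subclass from the earlier sketch
    name="agent_0",
    proprio_obs=["joint_qpos", "joint_qvel", "robot_pos"],
)
# After initialization: len(robot.get_proprioception()) == robot.proprioception_dim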

remove_obs_modality(modality)

Remove observation modality @modality from this robot. Note: Should be one of omnigibson.sensors.ALL_SENSOR_MODALITIES

Parameters:

    modality (str, required): Observation modality to remove from this robot
Source code in omnigibson/robots/robot_base.py
def remove_obs_modality(self, modality):
    """
    Remove observation modality @modality from this robot. Note: Should be one of
    omnigibson.sensors.ALL_SENSOR_MODALITIES

    Args:
        modality (str): Observation modality to remove from this robot
    """
    # Iterate over all sensors we own, and if the requested modality is a part of its possible valid modalities,
    # then we remove it
    for sensor in self._sensors.values():
        if modality in sensor.all_modalities:
            sensor.remove_modality(modality=modality)
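
Paired with add_obs_modality, this allows toggling modalities at runtime; both calls are silent no-ops for sensors whose all_modalities set does not contain the key. A usage sketch:

robot.add_obs_modality("depth")   # enable depth on every sensor that supports it
robot.remove_obs_modality("rgb")  # disable rgb wherever it is active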

visualize_sensors()

Renders this robot's key sensors, visualizing them via matplotlib plots

Source code in omnigibson/robots/robot_base.py
def visualize_sensors(self):
    """
    Renders this robot's key sensors, visualizing them via matplotlib plots
    """
    frames = dict()
    remaining_obs_modalities = deepcopy(self.obs_modalities)
    for sensor in self.sensors.values():
        obs = sensor.get_obs()
        sensor_frames = []
        if isinstance(sensor, VisionSensor):
            # We check for rgb, depth, normal, seg_instance
            for modality in ["rgb", "depth", "normal", "seg_instance"]:
                if modality in sensor.modalities:
                    ob = obs[modality]
                    if modality == "rgb":
                        # Ignore alpha channel, map to floats
                        ob = ob[:, :, :3] / 255.0
                    elif modality == "seg_instance":
                        # Map IDs to rgb
                        ob = segmentation_to_rgb(ob, N=256) / 255.0
                    elif modality == "normal":
                        # Re-map to 0 - 1 range
                        ob = (ob + 1.0) / 2.0
                    else:
                        # Depth, nothing to do here
                        pass
                    # Add this observation to our frames and remove the modality
                    sensor_frames.append((modality, ob))
                    remaining_obs_modalities -= {modality}
                else:
                    # Warn user that we didn't find this modality
                    print(f"Modality {modality} is not active in sensor {sensor.name}, skipping...")
        elif isinstance(sensor, ScanSensor):
            # We check for occupancy_grid
            occupancy_grid = obs.get("occupancy_grid", None)
            if occupancy_grid is not None:
                sensor_frames.append(("occupancy_grid", occupancy_grid))
                remaining_obs_modalities -= {"occupancy_grid"}

        # Map the sensor name to the frames for that sensor
        frames[sensor.name] = sensor_frames

    # Warn user that any remaining modalities are not able to be visualized
    if len(remaining_obs_modalities) > 0:
        print(f"Modalities: {remaining_obs_modalities} cannot be visualized, skipping...")

    # Write all the frames to a plot
    for sensor_name, sensor_frames in frames.items():
        n_sensor_frames = len(sensor_frames)
        if n_sensor_frames > 0:
            fig, axes = plt.subplots(nrows=1, ncols=n_sensor_frames)
            if n_sensor_frames == 1:
                axes = [axes]
            # Dump frames and set each subtitle
            for i, (modality, frame) in enumerate(sensor_frames):
                axes[i].imshow(frame)
                axes[i].set_title(modality)
                axes[i].set_axis_off()
            # Set title
            fig.suptitle(sensor_name)
            plt.show(block=False)

    # One final plot show so all the figures get rendered
    plt.show()
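
A usage sketch; og.sim.step() is assumed to be the simulator stepping call, run first so the sensors have fresh frames to plot:

import omnigibson as og  # assumed entry point

og.sim.step()              # render at least one frame so sensor buffers are populated
robot.visualize_sensors()  # one matplotlib figure per sensor with visualizable frames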