
dataset_object

DatasetObject

Bases: USDObject

DatasetObjects are instantiated from a USD file. They are assumed to come from an iG-supported dataset and should contain additional metadata, including aggregate statistics across the object's category, e.g., average dimensions, bounding boxes, masses, etc.
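
A minimal usage sketch, assuming a locally installed OmniGibson dataset (the category/model pair below is illustrative, and the og.sim.import_object call reflects the usual simulator import flow rather than a guaranteed API):

import omnigibson as og
from omnigibson.objects.dataset_object import DatasetObject

# Instantiate by category + model; the USD path is inferred as
# {gm.DATASET_PATH}/objects/apple/00_0/usd/00_0.usd
apple = DatasetObject(
    name="apple_0",
    category="apple",  # dataset category
    model="00_0",      # hypothetical model id; must exist in your dataset
)
og.sim.import_object(apple)  # assumption: standard OmniGibson simulator import flow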

Source code in omnigibson/objects/dataset_object.py
class DatasetObject(USDObject):
    """
    DatasetObjects are instantiated from a USD file. They are assumed to come from an iG-supported
    dataset and should contain additional metadata, including aggregate statistics across the
    object's category, e.g., average dimensions, bounding boxes, masses, etc.
    """

    def __init__(
        self,
        name,
        usd_path=None,
        prim_path=None,
        category="object",
        model=None,
        class_id=None,
        uuid=None,
        scale=None,
        visible=True,
        fixed_base=False,
        visual_only=False,
        self_collisions=False,
        prim_type=PrimType.RIGID,
        load_config=None,
        abilities=None,
        include_default_states=True,
        bounding_box=None,
        fit_avg_dim_volume=False,
        in_rooms=None,
        bddl_object_scope=None,
        **kwargs,
    ):
        """
        Args:
            name (str): Name for the object. Names need to be unique per scene
            usd_path (None or str): If specified, global path to the USD file to load. Note that this will override
                @category + @model!
            prim_path (None or str): global path in the stage to this object. If not specified, will automatically be
                created at /World/<name>
            category (str): Category for the object. Defaults to "object".
            model (None or str): if @usd_path is not specified, then this must be specified in conjunction with
                @category to infer the usd filepath to load for this object, which evaluates to the following:

                    {og_dataset_path}/objects/{category}/{model}/usd/{model}.usd

            class_id (None or int): What class ID the object should be assigned in semantic segmentation rendering mode.
                If None, the ID will be inferred from this object's category.
            uuid (None or int): Unique unsigned-integer identifier to assign to this object (max 8 digits).
                If None is specified, then it will be auto-generated
            scale (None or float or 3-array): if specified, sets either the uniform (float) or x,y,z (3-array) scale
                for this object. A single number corresponds to uniform scaling along the x,y,z axes, whereas a
                3-array specifies per-axis scaling.
            visible (bool): whether to render this object or not in the stage
            fixed_base (bool): whether to fix the base of this object or not
            visual_only (bool): Whether this object should be visual only (and not collide with any other objects)
            self_collisions (bool): Whether to enable self collisions for this object
            prim_type (PrimType): Which type of prim the object is. Valid options are: {PrimType.RIGID, PrimType.CLOTH}
            load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
                loading this prim at runtime.
            abilities (None or dict): If specified, manually adds specific object states to this object. It should be
                a dict in the form of {ability: {param: value}} containing object abilities and parameters to pass to
                the object state instance constructor.
            include_default_states (bool): whether to include the default object states from @get_default_states
            bounding_box (None or 3-array): If specified, will scale this object such that it fits in the desired
                (x,y,z) object-aligned bounding box. Note that EITHER @bounding_box or @scale may be specified
                -- not both!
            fit_avg_dim_volume (bool): whether to fit the object to have the same volume as the average dimension
                while keeping the aspect ratio. Note that if this is set, it will override both @scale and @bounding_box
            in_rooms (None or list): If specified, sets the rooms that this object should belong to
            bddl_object_scope (None or str): If specified, should set the BDDL object scope name, e.g. chip.n.04_2
            kwargs (dict): Additional keyword arguments that are used for other super() calls from subclasses, allowing
                for flexible compositions of various object subclasses (e.g.: Robot is USDObject + ControllableObject).
        """
        # Store variables
        self._in_rooms = in_rooms
        self._bddl_object_scope = bddl_object_scope

        # Info that will be filled in at runtime
        self.supporting_surfaces = None             # Dictionary mapping link names to surfaces represented by links

        # Make sure only one of bounding_box and scale are specified
        if bounding_box is not None and scale is not None:
            raise Exception("You cannot define both scale and bounding box size for a DatasetObject")

        # Add info to load config
        load_config = dict() if load_config is None else load_config
        load_config["bounding_box"] = bounding_box
        load_config["fit_avg_dim_volume"] = fit_avg_dim_volume

        # Infer the correct usd path to use
        if usd_path is None:
            assert model is not None, "Either usd_path or model and category must be specified in order to create a " \
                                      "DatasetObject!"
            usd_path = f"{gm.DATASET_PATH}/objects/{category}/{model}/usd/{model}.usd"

        # Post-process the usd path if we're generating a cloth object
        if prim_type == PrimType.CLOTH:
            assert usd_path.endswith(".usd"), f"usd_path [{usd_path}] is invalid."
            usd_path = usd_path[:-4] + "_cloth.usd"

        # Run super init
        super().__init__(
            prim_path=prim_path,
            usd_path=usd_path,
            name=name,
            category=category,
            class_id=class_id,
            uuid=uuid,
            scale=scale,
            visible=visible,
            fixed_base=fixed_base,
            visual_only=visual_only,
            self_collisions=self_collisions,
            prim_type=prim_type,
            include_default_states=include_default_states,
            load_config=load_config,
            abilities=abilities,
            **kwargs,
        )

    def load_supporting_surfaces(self):
        # Initialize dict of supporting surface info
        self.supporting_surfaces = {}

        # See if we have any height info -- if not, we can immediately return
        heights_info = self.heights_per_link
        if heights_info is None:
            return

        # TODO: Integrate images directly into usd file?
        # We loop over all the predicates and corresponding supported links in our heights info
        usd_dir = os.path.dirname(self._usd_path)
        for predicate, links in heights_info.items():
            height_maps = {}
            for link_name, heights in links.items():
                height_maps[link_name] = []
                for i, z_value in enumerate(heights):
                    # Get boolean birds-eye view xy-mask image for this surface
                    img_fname = os.path.join(usd_dir, "../misc", "height_maps_per_link", predicate, link_name, f"{i}.png")
                    xy_map = cv2.imread(img_fname, 0)
                    # Add this map to the supporting surfaces for this link and predicate combination
                    height_maps[link_name].append((z_value, xy_map))
            # Add this heights map to the overall supporting surfaces
            self.supporting_surfaces[predicate] = height_maps

    def sample_orientation(self):
        """
        Samples an orientation in quaternion (x,y,z,w) form

        Returns:
            4-array: (x,y,z,w) sampled quaternion orientation for this object, based on self.orientations
        """
        if self.orientations is None:
            raise ValueError("No orientation probabilities set")
        if len(self.orientations) == 0:
            # Set default value
            chosen_orientation = np.array([0, 0, 0, 1.0])
        else:
            probabilities = [o["prob"] for o in self.orientations.values()]
            probabilities = np.array(probabilities) / np.sum(probabilities)
            chosen_orientation = np.array(np.random.choice(list(self.orientations.values()), p=probabilities)["rotation"])

        # Randomize yaw from -pi to pi
        rot_num = np.random.uniform(-1, 1)
        rot_matrix = np.array(
            [
                [math.cos(math.pi * rot_num), -math.sin(math.pi * rot_num), 0.0],
                [math.sin(math.pi * rot_num), math.cos(math.pi * rot_num), 0.0],
                [0.0, 0.0, 1.0],
            ]
        )
        rotated_quat = T.mat2quat(rot_matrix @ T.quat2mat(chosen_orientation))
        return rotated_quat

    def _initialize(self):
        # Run super method first
        super()._initialize()

        # Apply any forced light intensity updates.
        if gm.FORCE_LIGHT_INTENSITY is not None:
            def recursive_light_update(child_prim):
                if "Light" in child_prim.GetPrimTypeInfo().GetTypeName():
                    child_prim.GetAttribute("intensity").Set(gm.FORCE_LIGHT_INTENSITY)

                for child_child_prim in child_prim.GetChildren():
                    recursive_light_update(child_child_prim)

            recursive_light_update(self._prim)

        # Apply any forced roughness updates
        for material in self.materials:
            material.reflection_roughness_texture_influence = 0.0
            material.reflection_roughness_constant = gm.FORCE_ROUGHNESS

        # Set the joint frictions based on category
        friction = SPECIAL_JOINT_FRICTIONS.get(self.category, DEFAULT_JOINT_FRICTION)
        for joint in self._joints.values():
            if joint.joint_type != JointType.JOINT_FIXED:
                joint.friction = friction

    def _load(self):
        if gm.USE_ENCRYPTED_ASSETS:
            # Create a temporary file to store the decrypted asset, load it, and then delete it.
            original_usd_path = self._usd_path
            encrypted_filename = original_usd_path.replace(".usd", ".encrypted.usd")
            decrypted_fd, decrypted_filename = tempfile.mkstemp(os.path.basename(original_usd_path), dir=og.tempdir)
            decrypt_file(encrypted_filename, decrypted_filename)
            self._usd_path = decrypted_filename
            prim = super()._load()
            os.close(decrypted_fd)
            # On Windows, Isaac Sim won't let go of the file until the prim is removed, so we can't delete it.
            if os.name == "posix":
                os.remove(decrypted_filename)
            self._usd_path = original_usd_path
            return prim
        else:
            return super()._load()

    def _post_load(self):
        # We run this post loading first before any others because we're modifying the load config that will be used
        # downstream
        # Set the scale of this prim according to its bounding box
        if self._load_config["fit_avg_dim_volume"]:
            # By default, we assume the scale does not change if no average object specs are given; otherwise, scale accordingly
            scale = np.ones(3)
            if self.avg_obj_dims is not None and self.avg_obj_dims["size"] is not None:
                # Find the average volume, and scale accordingly relative to the native volume based on the bbox
                volume_ratio = np.product(self.avg_obj_dims["size"]) / np.product(self.native_bbox)
                size_ratio = np.cbrt(volume_ratio)
                scale *= size_ratio
        # Otherwise, if manual bounding box is specified, scale based on ratio between that and the native bbox
        elif self._load_config["bounding_box"] is not None:
            scale = self._load_config["bounding_box"] / self.native_bbox
        else:
            scale = np.ones(3) if self._load_config["scale"] is None else self._load_config["scale"]

        # Set this scale in the load config -- it will automatically scale the object during self.initialize()
        self._load_config["scale"] = scale

        # Load any supporting surfaces belonging to this object
        self.load_supporting_surfaces()

        # Run super last
        super()._post_load()

        if gm.USE_ENCRYPTED_ASSETS:
            # The loaded USD is from an already-deleted temporary file, so the asset paths for texture maps are wrong.
            # We explicitly provide the root_path to update all the asset paths: the asset paths are relative to the
            # original USD folder, i.e. <category>/<model>/usd.
            root_path = os.path.dirname(self._usd_path)
            for material in self.materials:
                material.shader_update_asset_paths_with_root_path(root_path)

        # Assign realistic density and mass based on average object category spec
        if self.avg_obj_dims is not None and self.avg_obj_dims["size"] is not None and self.avg_obj_dims["mass"] is not None:
            # Assume each link has the same density
            v_ratio = (np.product(self.native_bbox) * np.product(self.scale)) / np.product(self.avg_obj_dims["size"])
            mass = self.avg_obj_dims["mass"] * v_ratio
            if self._prim_type == PrimType.RIGID:
                density = mass / self.volume
                for link in self._links.values():
                    # For non-meta (virtual) links, zero out the mass (so it is ignored) and set density from avg_obj_dims
                    if link.has_collision_meshes:
                        link.mass = 0.0
                        link.density = density

            elif self._prim_type == PrimType.CLOTH:
                # Cloth cannot set density; internally, omni evenly distributes the mass across particles
                self._links["base_link"].mass = mass

    def _update_texture_change(self, object_state):
        """
        Update the texture based on the given object_state. E.g. if object_state is Frozen, update the diffuse color
        to match the frozen state. If object_state is None, update the diffuse color to the default value. It attempts
        to load the cached texture map named DIFFUSE/albedo_[STATE_NAME].png. If the cached texture map does not exist,
        it modifies the current albedo map by adding and scaling the values. See @self._update_albedo_value for details.

        Args:
            object_state (BooleanState or None): the object state that the diffuse color should match to
        """
        # TODO: uncomment these once our dataset has the object state-conditioned texture maps
        # DEFAULT_ALBEDO_MAP_SUFFIX = frozenset({"DIFFUSE", "COMBINED", "albedo"})
        # state_name = object_state.__class__.__name__ if object_state is not None else None
        for material in self.materials:
            # texture_path = material.diffuse_texture
            # assert texture_path is not None, f"DatasetObject [{self.prim_path}] has invalid diffuse texture map."
            #
            # # Get updated texture file path for state.
            # texture_path_split = texture_path.split("/")
            # filedir, filename = "/".join(texture_path_split[:-1]), texture_path_split[-1]
            # assert filename[-4:] == ".png", f"Texture file {filename} does not end with .png"
            #
            # filename_split = filename[:-4].split("_")
            # # Check all three file names for backward compatibility.
            # if len(filename_split) > 0 and filename_split[-1] not in DEFAULT_ALBEDO_MAP_SUFFIX:
            #     filename_split.pop()
            # target_texture_path = f"{filedir}/{'_'.join(filename_split)}"
            # target_texture_path += f"_{state_name}.png" if state_name is not None else ".png"
            #
            # if os.path.exists(target_texture_path):
            #     # Since we are loading a pre-cached texture map, we need to reset the albedo value to the default
            #     self._update_albedo_value(None, material)
            #     if material.diffuse_texture != target_texture_path:
            #         material.diffuse_texture = target_texture_path
            # else:
            #     print(f"Warning: DatasetObject [{self.prim_path}] does not have texture map: "
            #           f"[{target_texture_path}]. Falling back to directly updating albedo value.")
            self._update_albedo_value(object_state, material)

    def set_bbox_center_position_orientation(self, position=None, orientation=None):
        """
        Sets the center of the object's bounding box with respect to the world's frame.

        Args:
            position (None or 3-array): The desired global (x,y,z) position. None means it will not be changed
            orientation (None or 4-array): The desired global (x,y,z,w) quaternion orientation.
                None means it will not be changed
        """
        if orientation is None:
            orientation = self.get_orientation()
        if position is not None:
            rotated_offset = T.pose_transform([0, 0, 0], orientation,
                                              self.scaled_bbox_center_in_base_frame, [0, 0, 0, 1])[0]
            position = position + rotated_offset
        self.set_position_orientation(position, orientation)

    @property
    def in_rooms(self):
        """
        Returns:
            None or list of str: If specified, room(s) that this object should belong to
        """
        return self._in_rooms

    @in_rooms.setter
    def in_rooms(self, rooms):
        """
        Sets which room(s) this object should belong to. If no rooms, then should set to None

        Args:
            rooms (None or list of str): If specified, the room(s) this object should belong to
        """
        # Store the value to the internal variable and also update the init kwargs accordingly
        self._init_info["args"]["in_rooms"] = rooms
        self._in_rooms = rooms

    @property
    def bddl_object_scope(self):
        """
        Returns:
            None or str: If specified, BDDL object scope name (e.g. chip.n.04_2) to assign to this object
        """
        return self._bddl_object_scope

    @bddl_object_scope.setter
    def bddl_object_scope(self, name):
        """
        Sets which BDDL object scope name for this object. If no name, then should set to None

        Args:
            name (None or str): If specified, BDDL object scope name (e.g. chip.n.04_2) to assign to this object
        """
        # Store the value to the internal variable and also update the init kwargs accordingly
        self._init_info["args"]["bddl_object_scope"] = name
        self._bddl_object_scope = name

    @property
    def native_bbox(self):
        """
        Get this object's native bounding box

        Returns:
            3-array: (x,y,z) bounding box
        """
        assert "ig:nativeBB" in self.property_names, \
            f"This dataset object '{self.name}' is expected to have native_bbox specified, but found none!"
        return np.array(self.get_attribute(attr="ig:nativeBB"))

    @property
    def base_link_offset(self):
        """
        Get this object's native base link offset

        Returns:
            3-array: (x,y,z) base link offset if it exists
        """
        return np.array(self.get_attribute(attr="ig:offsetBaseLink"))

    @property
    def metadata(self):
        """
        Gets this object's metadata, if it exists

        Returns:
            None or dict: Nested dictionary of object's metadata if it exists, else None
        """
        return self.get_custom_data().get("metadata", None)

    @property
    def heights_per_link(self):
        """
        Gets this object's heights per link information, if it exists

        Returns:
            None or dict: Nested dictionary of object's height per link information if it exists, else None
        """
        return self.get_custom_data().get("heights_per_link", None)

    @property
    def orientations(self):
        """
        Returns:
            None or dict: Possible orientation information for this object, if it exists. Otherwise, returns None
        """
        metadata = self.metadata
        return None if metadata is None else metadata.get("orientations", None)

    @property
    def scaled_bbox_center_in_base_frame(self):
        """
        Where the base_link origin is w.r.t. the bounding box center. This allows us to place the model correctly
        since the joint transformations given in the scene USD are wrt. the bounding box center.
        We need to scale this offset as well.

        Returns:
            3-array: (x,y,z) location of the bounding box center, with respect to the base link's coordinate frame
        """
        return -self.scale * self.base_link_offset

    @property
    def native_link_bboxes(self):
        """
        Returns:
            None or dict: Keyword-mapped native bounding boxes for each link of this object, if metadata exists; else None
        """
        return None if self.metadata is None else self.metadata.get("link_bounding_boxes", None)

    @property
    def scales_in_link_frame(self):
        """
        Returns:
            dict: Keyword-mapped relative scales for each link of this object
        """
        scales = {self.root_link.body_name: self.scale}

        # We iterate through all links in this object, and check for any joint prims that exist
        # We traverse manually this way instead of accessing the self._joints dictionary, because
        # the dictionary only includes articulated joints and not fixed joints!
        for link in self._links.values():
            for prim in link.prim.GetChildren():
                if "joint" in prim.GetTypeName().lower():
                    # Grab relevant joint information
                    parent_name = prim.GetProperty("physics:body0").GetTargets()[0].pathString.split("/")[-1]
                    child_name = prim.GetProperty("physics:body1").GetTargets()[0].pathString.split("/")[-1]
                    if parent_name in scales and child_name not in scales:
                        scale_in_parent_lf = scales[parent_name]
                        # The location of the joint frame is scaled using the scale in the parent frame
                        quat0 = gf_quat_to_np_array(prim.GetAttribute("physics:localRot0").Get())[[1, 2, 3, 0]]
                        quat1 = gf_quat_to_np_array(prim.GetAttribute("physics:localRot1").Get())[[1, 2, 3, 0]]
                        # Invert the child link relationship, and multiply the two rotations together to get the final rotation
                        local_ori = T.quat_multiply(quaternion1=T.quat_inverse(quat1), quaternion0=quat0)
                        jnt_frame_rot = T.quat2mat(local_ori)
                        scale_in_child_lf = np.absolute(jnt_frame_rot.T @ np.array(scale_in_parent_lf))
                        scales[child_name] = scale_in_child_lf

        return scales

    def get_base_aligned_bbox(self, link_name=None, visual=False, xy_aligned=False, fallback_to_aabb=False, link_bbox_type="axis_aligned"):
        """
        Get a bounding box for this object that's axis-aligned in the object's base frame.

        Args:
            link_name (None or str): If specified, only get the bbox for the given link
            visual (bool): Whether to aggregate the bounding boxes from the visual meshes. Otherwise, will use
                collision meshes
            xy_aligned (bool): Whether to align the bounding box to the global XY-plane
            fallback_to_aabb (bool): If set and a link's info is not found, the (global-frame) AABB will be
                dynamically computed directly from omniverse
            link_bbox_type (str): Which type of link bbox to use, "axis_aligned" means the bounding box is axis-aligned
                to the link frame, "oriented" means the bounding box has the minimum volume

        Returns:
            4-tuple:
                - 3-array: (x,y,z) bbox center position in world frame
                - 3-array: (x,y,z,w) bbox quaternion orientation in world frame
                - 3-array: (x,y,z) bbox extent in desired frame
                - 3-array: (x,y,z) bbox center in desired frame
        """
        assert self.prim_type == PrimType.RIGID, "get_base_aligned_bbox is only supported for rigid objects."
        bbox_type = "visual" if visual else "collision"

        # Get the base position transform.
        pos, orn = self.get_position_orientation()
        base_frame_to_world = T.pose2mat((pos, orn))

        # Compute the world-to-base frame transform.
        world_to_base_frame = trimesh.transformations.inverse_matrix(base_frame_to_world)

        # Grab the corners of all the different links' bounding boxes. We will later fit a bounding box to
        # this set of points to get our final, base-frame bounding box.
        points = []

        links = {link_name: self._links[link_name]} if link_name is not None else self._links
        for link_name, link in links.items():
            # If the link has no visual or collision meshes, we skip over it (based on the @visual flag)
            meshes = link.visual_meshes if visual else link.collision_meshes
            if len(meshes) == 0:
                continue

            # If the link has a bounding box annotation.
            if self.native_link_bboxes is not None and link_name in self.native_link_bboxes:
                # If a visual bounding box does not exist in the dictionary, try switching to collision.
                # We expect that every link has its collision bb annotated (or set to None if none exists).
                if bbox_type == "visual" and "visual" not in self.native_link_bboxes[link_name]:
                    log.debug(
                        "Falling back to collision bbox for object %s link %s since no visual bbox exists.",
                        self.name,
                        link_name,
                    )
                    bbox_type = "collision"

                # Check if the annotation is still missing.
                if bbox_type not in self.native_link_bboxes[link_name]:
                    raise ValueError(
                        "Could not find %s bounding box for object %s link %s" % (bbox_type, self.name, link_name)
                    )

                # Check if a mesh exists for this link. If None, the link is meshless, so we continue to the next link.
                # TODO: Because of encoding, may need to be UsdTokens.none, not None
                if self.native_link_bboxes[link_name][bbox_type] is None:
                    continue

                # Get the extent and transform.
                bb_data = self.native_link_bboxes[link_name][bbox_type][link_bbox_type]
                extent_in_bbox_frame = np.array(bb_data["extent"])
                bbox_to_link_origin = np.array(bb_data["transform"])

                # Get the link's pose in the base frame.
                link_frame_to_world = T.pose2mat(link.get_position_orientation())
                link_frame_to_base_frame = world_to_base_frame @ link_frame_to_world

                # Scale the bounding box in link origin frame. Here we create a transform that first puts the bounding
                # box's vertices into the link frame, and then scales them to match the scale applied to this object.
                # Note that once scaled, the vertices of the bounding box do not necessarily form a cuboid anymore but
                # instead a parallelepiped. This is not a problem because we later fit a bounding box to the points,
                # this time in the object's base link frame.
                scale_in_link_frame = np.diag(np.concatenate([self.scales_in_link_frame[link_name], [1]]))
                bbox_to_scaled_link_origin = np.dot(scale_in_link_frame, bbox_to_link_origin)

                # Compute the bounding box vertices in the base frame.
                # bbox_to_link_com = np.dot(link_origin_to_link_com, bbox_to_scaled_link_origin)
                bbox_center_in_base_frame = np.dot(link_frame_to_base_frame, bbox_to_scaled_link_origin)
                vertices_in_base_frame = np.array(list(itertools.product((1, -1), repeat=3))) * (extent_in_bbox_frame / 2)

                # Add the points to our collection of points.
                points.extend(trimesh.transformations.transform_points(vertices_in_base_frame, bbox_center_in_base_frame))
            elif fallback_to_aabb:
                # If no BB annotation is available, get the AABB for this link.
                aabb_center, aabb_extent = BoundingBoxAPI.compute_center_extent(prim_path=link.prim_path)
                aabb_vertices_in_world = aabb_center + np.array(list(itertools.product((1, -1), repeat=3))) * (
                        aabb_extent / 2
                )
                aabb_vertices_in_base_frame = trimesh.transformations.transform_points(
                    aabb_vertices_in_world, world_to_base_frame
                )
                points.extend(aabb_vertices_in_base_frame)
            else:
                raise ValueError(
                    "Bounding box annotation missing for link: %s. Use fallback_to_aabb=True if you're okay with using "
                    "AABB as fallback." % link_name
                )

        if xy_aligned:
            # If the user requested an XY-plane aligned bbox, convert everything to that frame.
            # The desired frame is same as the base_com frame with its X/Y rotations removed.
            translate = trimesh.transformations.translation_from_matrix(base_frame_to_world)

            # To find the rotation that this transform does around the Z axis, we rotate the [1, 0, 0] vector by it
            # and then take the arctangent of its projection onto the XY plane.
            rotated_X_axis = base_frame_to_world[:3, 0]
            rotation_around_Z_axis = np.arctan2(rotated_X_axis[1], rotated_X_axis[0])
            xy_aligned_base_com_to_world = trimesh.transformations.compose_matrix(
                translate=translate, angles=[0, 0, rotation_around_Z_axis]
            )

            # We want to move our points to this frame as well.
            world_to_xy_aligned_base_com = trimesh.transformations.inverse_matrix(xy_aligned_base_com_to_world)
            base_com_to_xy_aligned_base_com = np.dot(world_to_xy_aligned_base_com, base_frame_to_world)
            points = trimesh.transformations.transform_points(points, base_com_to_xy_aligned_base_com)

            # Finally update our desired frame.
            desired_frame_to_world = xy_aligned_base_com_to_world
        else:
            # Default desired frame is base CoM frame.
            desired_frame_to_world = base_frame_to_world

        # TODO: Implement logic to allow tight bounding boxes that don't necessarily have to match the base frame.
        # All points are now in the desired frame: either the base CoM or the xy-plane-aligned base CoM.
        # Now fit a bounding box to all the points by taking the minimum/maximum in the desired frame.
        aabb_min_in_desired_frame = np.amin(points, axis=0)
        aabb_max_in_desired_frame = np.amax(points, axis=0)
        bbox_center_in_desired_frame = (aabb_min_in_desired_frame + aabb_max_in_desired_frame) / 2
        bbox_extent_in_desired_frame = aabb_max_in_desired_frame - aabb_min_in_desired_frame

        # Transform the center to the world frame.
        bbox_center_in_world = trimesh.transformations.transform_points(
            [bbox_center_in_desired_frame], desired_frame_to_world
        )[0]
        bbox_orn_in_world = Rotation.from_matrix(desired_frame_to_world[:3, :3]).as_quat()

        return bbox_center_in_world, bbox_orn_in_world, bbox_extent_in_desired_frame, bbox_center_in_desired_frame

    @property
    def avg_obj_dims(self):
        """
        Get the average object dimensions for this object, based on its category

        Returns:
            None or dict: Average object information based on its category
        """
        return AVERAGE_CATEGORY_SPECS.get(self.category, None)

    def _create_prim_with_same_kwargs(self, prim_path, name, load_config):
        # Add additional kwargs (fit_avg_dim_volume and bounding_box are already captured in load_config)
        return self.__class__(
            prim_path=prim_path,
            usd_path=self._usd_path,
            name=name,
            category=self.category,
            class_id=self.class_id,
            scale=self.scale,
            visible=self.visible,
            fixed_base=self.fixed_base,
            visual_only=self._visual_only,
            prim_type=self._prim_type,
            load_config=load_config,
            abilities=self._abilities,
            in_rooms=self.in_rooms,
            bddl_object_scope=self.bddl_object_scope,
        )

avg_obj_dims property

Get the average object dimensions for this object, based on its category

Returns:

Type Description

None or dict: Average object information based on its category
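
For reference, a simplified sketch of how these average dims drive the fit_avg_dim_volume scaling performed in _post_load (the numbers are illustrative):

import numpy as np

def fit_avg_dim_scale(native_bbox, avg_size):
    # Uniform scale so the scaled bbox volume matches the category-average volume
    volume_ratio = np.prod(avg_size) / np.prod(native_bbox)
    return np.ones(3) * np.cbrt(volume_ratio)

# A 0.2 x 0.2 x 0.4 m object vs. a 0.1 x 0.1 x 0.2 m category average -> uniform scale of 0.5
print(fit_avg_dim_scale(np.array([0.2, 0.2, 0.4]), np.array([0.1, 0.1, 0.2])))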

base_link_offset property

Get this object's native base link offset

Returns:

Type Description

3-array: (x,y,z) base link offset if it exists

bddl_object_scope property writable

Returns:

Type Description

None or str: If specified, BDDL object scope name (e.g. chip.n.04_2) to assign to this object

heights_per_link property

Gets this object's heights per link information, if it exists

Returns:

Type Description

None or dict: Nested dictionary of object's height per link information if it exists, else None

in_rooms property writable

Returns:

Type Description

None or list of str: If specified, room(s) that this object should belong to

metadata property

Gets this object's metadata, if it exists

Returns:

Type Description

None or dict: Nested dictionary of object's metadata if it exists, else None

native_bbox property

Get this object's native bounding box

Returns:

Type Description

3-array: (x,y,z) bounding box

native_link_bboxes property

Returns:

Type Description

None or dict: Keyword-mapped native bounding boxes for each link of this object

orientations property

Returns:

Type Description

None or dict: Possible orientation information for this object, if it exists. Otherwise, returns None

scaled_bbox_center_in_base_frame property

Where the base_link origin is w.r.t. the bounding box center. This allows us to place the model correctly since the joint transformations given in the scene USD are w.r.t. the bounding box center. We need to scale this offset as well.

Returns:

Type Description

3-array: (x,y,z) location of the bounding box center, with respect to the base link's coordinate frame
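
Since the value is just the negated, scaled base-link offset, the computation reduces to the following sketch (values are illustrative):

import numpy as np

# scaled_bbox_center_in_base_frame == -scale * base_link_offset
scale = np.array([1.0, 1.0, 2.0])               # per-axis object scale
base_link_offset = np.array([0.0, 0.0, -0.05])  # illustrative ig:offsetBaseLink value
bbox_center_in_base_frame = -scale * base_link_offset  # -> [0.  0.  0.1]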

scales_in_link_frame property

Returns:

Type Description

dict: Keyword-mapped relative scales for each link of this object

__init__(name, usd_path=None, prim_path=None, category='object', model=None, class_id=None, uuid=None, scale=None, visible=True, fixed_base=False, visual_only=False, self_collisions=False, prim_type=PrimType.RIGID, load_config=None, abilities=None, include_default_states=True, bounding_box=None, fit_avg_dim_volume=False, in_rooms=None, bddl_object_scope=None, **kwargs)

Parameters:

Name Type Description Default
name str

Name for the object. Names need to be unique per scene

required
usd_path None or str

If specified, global path to the USD file to load. Note that this will override @category + @model!

None
prim_path None or str

global path in the stage to this object. If not specified, will automatically be created at /World/<name>

None
category str

Category for the object. Defaults to "object".

'object'
model None or str

if @usd_path is not specified, then this must be specified in conjunction with @category to infer the usd filepath to load for this object, which evaluates to the following:

{og_dataset_path}/objects/{category}/{model}/usd/{model}.usd
None
class_id None or int

What class ID the object should be assigned in semantic segmentation rendering mode. If None, the ID will be inferred from this object's category.

None
uuid None or int

Unique unsigned-integer identifier to assign to this object (max 8 digits). If None is specified, then it will be auto-generated

None
scale None or float or 3-array

if specified, sets either the uniform (float) or x,y,z (3-array) scale for this object. A single number corresponds to uniform scaling along the x,y,z axes, whereas a 3-array specifies per-axis scaling.

None
visible bool

whether to render this object or not in the stage

True
fixed_base bool

whether to fix the base of this object or not

False
visual_only bool

Whether this object should be visual only (and not collide with any other objects)

False
self_collisions bool

Whether to enable self collisions for this object

False
prim_type PrimType

Which type of prim the object is. Valid options are: {PrimType.RIGID, PrimType.CLOTH}

PrimType.RIGID
load_config None or dict

If specified, should contain keyword-mapped values that are relevant for loading this prim at runtime.

None
abilities None or dict

If specified, manually adds specific object states to this object. It should be a dict in the form of {ability: {param: value}} containing object abilities and parameters to pass to the object state instance constructor.

None
include_default_states bool

whether to include the default object states from @get_default_states

True
bounding_box None or 3-array

If specified, will scale this object such that it fits in the desired (x,y,z) object-aligned bounding box. Note that EITHER @bounding_box or @scale may be specified -- not both!

None
fit_avg_dim_volume bool

whether to fit the object to have the same volume as the average dimension while keeping the aspect ratio. Note that if this is set, it will override both @scale and @bounding_box

False
in_rooms None or list

If specified, sets the rooms that this object should belong to

None
bddl_object_scope None or str

If specified, should set the BDDL object scope name, e.g. chip.n.04_2

None
kwargs dict

Additional keyword arguments that are used for other super() calls from subclasses, allowing for flexible compositions of various object subclasses (e.g.: Robot is USDObject + ControllableObject).

{}
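
For illustration, a hedged sketch of the sizing arguments (only one of @scale and @bounding_box may be given, and @fit_avg_dim_volume overrides both):

import numpy as np
from omnigibson.objects.dataset_object import DatasetObject

# Scale the object to fit an 8 x 8 x 30 cm object-aligned bounding box.
bottle = DatasetObject(
    name="bottle_0",
    category="bottle",
    model="a1b2c3",  # hypothetical model id; must exist in your dataset
    bounding_box=np.array([0.08, 0.08, 0.30]),
)

# Passing both raises, since scale and bounding_box are mutually exclusive:
# DatasetObject(name="bad", category="bottle", model="a1b2c3",
#               scale=2.0, bounding_box=np.array([0.1, 0.1, 0.1]))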

get_base_aligned_bbox(link_name=None, visual=False, xy_aligned=False, fallback_to_aabb=False, link_bbox_type='axis_aligned')

Get a bounding box for this object that's axis-aligned in the object's base frame.

Parameters:

Name Type Description Default
link_name None or str

If specified, only get the bbox for the given link

None
visual bool

Whether to aggregate the bounding boxes from the visual meshes. Otherwise, will use collision meshes

False
xy_aligned bool

Whether to align the bounding box to the global XY-plane

False
fallback_to_aabb bool

If set and a link's info is not found, the (global-frame) AABB will be dynamically computed directly from omniverse

False
link_bbox_type str

Which type of link bbox to use, "axis_aligned" means the bounding box is axis-aligned to the link frame, "oriented" means the bounding box has the minimum volume

'axis_aligned'

Returns:

Type Description

4-tuple:

- 3-array: (x,y,z) bbox center position in world frame
- 3-array: (x,y,z,w) bbox quaternion orientation in world frame
- 3-array: (x,y,z) bbox extent in desired frame
- 3-array: (x,y,z) bbox center in desired frame

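A hedged usage sketch, continuing with the apple object from the class-level example (assumes it is a rigid object already imported into a running simulator):

# Fit an XY-plane-aligned bounding box; fallback_to_aabb avoids raising for links
# that lack precomputed bbox annotations.
center, orn, extent, center_in_frame = apple.get_base_aligned_bbox(
    xy_aligned=True,
    fallback_to_aabb=True,
)
print("bbox center (world frame):", center)
print("bbox extent (desired frame):", extent)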

sample_orientation()

Samples an orientation in quaternion (x,y,z,w) form

Returns:

Type Description

4-array: (x,y,z,w) sampled quaternion orientation for this object, based on self.orientations

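A short usage sketch (assumes the object was loaded from a dataset entry that includes orientation metadata; otherwise this raises ValueError):

# Sample a category-preferred orientation (a random yaw is applied on top) and
# use it when placing the object.
quat = apple.sample_orientation()
apple.set_position_orientation(position=[0.5, 0.0, 0.8], orientation=quat)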

set_bbox_center_position_orientation(position=None, orientation=None)

Sets the center of the object's bounding box with respect to the world's frame.

Parameters:

Name Type Description Default
position None or 3-array

The desired global (x,y,z) position. None means it will not be changed

None
orientation None or 4-array

The desired global (x,y,z,w) quaternion orientation. None means it will not be changed

None
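
A minimal sketch (note that the target point is interpreted as the desired bounding-box center, not the base link origin):

import numpy as np

# Place the object's bbox center at (0, 0, 1) while keeping its current orientation;
# internally the target is offset by the rotated scaled_bbox_center_in_base_frame.
apple.set_bbox_center_position_orientation(position=np.array([0.0, 0.0, 1.0]))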