Generates a BEHAVIOR Task environment from a pre-defined configuration file.
It steps the environment 100 times with random actions sampled from the action space,
using the Gym interface, resetting it 10 times.
Source code: `examples/environments/behavior_env_demo.py`
def main(random_selection=False, headless=False, short_exec=False):
    """
    Generates a BEHAVIOR Task environment from a pre-defined configuration file.
    It steps the environment 100 times with random actions sampled from the action space,
    using the Gym interface, resetting it 10 times.

    Args:
        random_selection (bool): If True, the online-sampling prompt is answered
            automatically (passed through to ``choose_from_options``).
        headless (bool): Accepted for interface compatibility; not used in this demo.
        short_exec (bool): If True, run a single reset iteration instead of 10.
    """
    logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)

    # Ask the user whether they want online object sampling or not
    sampling_options = {
        False: "Use a pre-sampled cached BEHAVIOR activity scene",
        True: "Sample the BEHAVIOR activity in an online fashion",
    }
    should_sample = choose_from_options(
        options=sampling_options, name="online object sampling", random_selection=random_selection
    )

    # Load the pre-selected configuration and set the online_sampling flag.
    # Use a context manager so the config file handle is closed deterministically
    # (the original passed a bare open() into yaml.load and leaked the handle).
    config_filename = os.path.join(og.example_config_path, "fetch_behavior.yaml")
    with open(config_filename, "r") as f:
        cfg = yaml.load(f, Loader=yaml.FullLoader)
    cfg["task"]["online_object_sampling"] = should_sample

    # If we're online sampling, make sure global contacts are enabled so we can
    # accurately detect kinematic changes
    if should_sample:
        gm.ENABLE_GLOBAL_CONTACT_REPORTING = True

    # Load the environment
    env = og.Environment(configs=cfg)

    # Allow user to move camera more easily
    og.sim.enable_viewer_camera_teleoperation()

    # Run a simple loop and reset periodically
    max_iterations = 10 if not short_exec else 1
    for j in range(max_iterations):
        logging.info("Resetting environment")
        env.reset()
        for i in range(100):
            action = env.action_space.sample()
            # Older Gym 4-tuple step API: (observation, reward, done, info)
            state, reward, done, info = env.step(action)
            if done:
                logging.info("Episode finished after {} timesteps".format(i + 1))
                break

    # Always close the environment at the end
    env.close()