
Commit 14c7936

[Newton] Add initial design for Visualizers (#3979)
# Description

Initial design to support multiple visualizers with Isaac Lab 3.0. The visualizers:

- OV Visualizer
- Newton OpenGL Visualizer
- Newton Rerun Visualizer

Each visualizer comes with a config class. The CLI `newton_visualizer` argument is removed:

- Discussion on how we can best enable a visualizer via a CLI arg is ongoing.
- Currently, visualizers can be selected in code via `visualizer_cfgs` in `simulation_cfg`, and the `--headless` arg disables any selected visualizer.

There is also a new abstraction, the scene data provider, which manages per-step data flow to the visualizers. This is a fairly thin abstraction right now and is subject to change with the renderers design and the OV SDK.

---------

Signed-off-by: matthewtrepte <mtrepte@nvidia.com>
Co-authored-by: Antoine Richard <antoiner@nvidia.com>
1 parent c969b59 commit 14c7936

30 files changed: +1819 -428 lines changed
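As a hedged sketch of what selecting visualizers in code might look like: the `visualizer_cfgs` field on the simulation config and the `--headless` behavior are taken from the description above, while the import path and config class names (`NewtonOpenGLVisualizerCfg`, `NewtonRerunVisualizerCfg`) are illustrative assumptions, not the exact names introduced by this commit.

```python
# Hedged sketch: selecting visualizers in code, per the commit description.
# `visualizer_cfgs` and the --headless behavior come from the description;
# the import path and class names below are assumptions for illustration.
from isaaclab.sim import SimulationCfg

# hypothetical per-visualizer config classes (one config class per visualizer)
from isaaclab.visualizers import NewtonOpenGLVisualizerCfg, NewtonRerunVisualizerCfg

sim_cfg = SimulationCfg(
    device="cuda:0",
    # one or more visualizers can be selected here; launching with
    # --headless disables any visualizer selected in this list
    visualizer_cfgs=[NewtonOpenGLVisualizerCfg(), NewtonRerunVisualizerCfg()],
)
```

With selection moved into the config, the per-script `--newton_visualizer` flag and the `env_cfg.sim.enable_newton_rendering` override become redundant, which is what the diffs below remove.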

scripts/environments/random_agent.py

Lines changed: 1 addition & 2 deletions
@@ -18,7 +18,6 @@
 )
 parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
 parser.add_argument("--task", type=str, default=None, help="Name of the task.")
-parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")
 # append AppLauncher cli args
 AppLauncher.add_app_launcher_args(parser)
 # parse the arguments
@@ -52,8 +51,8 @@ def main():
         device=args_cli.device,
         num_envs=args_cli.num_envs,
         use_fabric=not args_cli.disable_fabric,
-        newton_visualizer=args_cli.newton_visualizer,
     )
+
     # create environment
     env = gym.make(args_cli.task, cfg=env_cfg)

scripts/environments/zero_agent.py

Lines changed: 1 addition & 2 deletions
@@ -18,7 +18,6 @@
 )
 parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
 parser.add_argument("--task", type=str, default=None, help="Name of the task.")
-parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")
 # append AppLauncher cli args
 AppLauncher.add_app_launcher_args(parser)
 # parse the arguments
@@ -52,8 +51,8 @@ def main():
         device=args_cli.device,
         num_envs=args_cli.num_envs,
         use_fabric=not args_cli.disable_fabric,
-        newton_visualizer=args_cli.newton_visualizer,
     )
+
     # create environment
     env = gym.make(args_cli.task, cfg=env_cfg)

scripts/reinforcement_learning/rl_games/play.py

Lines changed: 2 additions & 6 deletions
@@ -32,7 +32,6 @@
     help="When no checkpoint provided, use the last saved model. Otherwise use the best saved model.",
 )
 parser.add_argument("--real-time", action="store_true", default=False, help="Run in real-time, if possible.")
-parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")
 # append AppLauncher cli args
 AppLauncher.add_app_launcher_args(parser)
 # parse the arguments
@@ -80,12 +79,9 @@ def main():
     task_name = args_cli.task.split(":")[-1]
     # parse env configuration
     env_cfg = parse_env_cfg(
-        args_cli.task,
-        device=args_cli.device,
-        num_envs=args_cli.num_envs,
-        use_fabric=not args_cli.disable_fabric,
-        newton_visualizer=args_cli.newton_visualizer,
+        args_cli.task, device=args_cli.device, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
     )
+
     agent_cfg = load_cfg_from_registry(args_cli.task, "rl_games_cfg_entry_point")

     # specify directory for logging experiments

scripts/reinforcement_learning/rl_games/train.py

Lines changed: 0 additions & 2 deletions
@@ -38,7 +38,6 @@
     const=True,
     help="if toggled, this experiment will be tracked with Weights and Biases",
 )
-parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")
 # append AppLauncher cli args
 AppLauncher.add_app_launcher_args(parser)
 # parse the arguments
@@ -90,7 +89,6 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg, agent_cfg: dict):
     # override configurations with non-hydra CLI arguments
     env_cfg.scene.num_envs = args_cli.num_envs if args_cli.num_envs is not None else env_cfg.scene.num_envs
     env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device
-    env_cfg.sim.enable_newton_rendering = args_cli.newton_visualizer

     # randomly sample a seed if seed = -1
     if args_cli.seed == -1:

scripts/reinforcement_learning/rsl_rl/play.py

Lines changed: 0 additions & 15 deletions
@@ -34,7 +34,6 @@
     help="Use the pre-trained checkpoint from Nucleus.",
 )
 parser.add_argument("--real-time", action="store_true", default=False, help="Run in real-time, if possible.")
-parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")
 # append RSL-RL cli arguments
 cli_args.add_rsl_rl_args(parser)
 # append AppLauncher cli args
@@ -96,7 +95,6 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg, agent_cfg: RslRlBaseRun
     # note: certain randomizations occur in the environment initialization so we set the seed here
     env_cfg.seed = agent_cfg.seed
     env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device
-    env_cfg.sim.enable_newton_rendering = args_cli.newton_visualizer

     # specify directory for logging experiments
     log_root_path = os.path.join("logs", "rsl_rl", agent_cfg.experiment_name)
@@ -117,19 +115,6 @@
     # set the log directory for the environment (works for all environment types)
     env_cfg.log_dir = log_dir

-    # Set play mode for Newton viewer if using Newton visualizer
-    if args_cli.newton_visualizer:
-        # Set visualizer to play mode in Newton config
-        if hasattr(env_cfg.sim, "newton_cfg"):
-            env_cfg.sim.newton_cfg.visualizer_train_mode = False
-        else:
-            # Create newton_cfg if it doesn't exist
-            from isaaclab.sim._impl.newton_manager_cfg import NewtonCfg
-
-            newton_cfg = NewtonCfg()
-            newton_cfg.visualizer_train_mode = False
-            env_cfg.sim.newton_cfg = newton_cfg
-
     # create isaac environment
     env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None)
scripts/reinforcement_learning/rsl_rl/train.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,6 @@
3232
"--distributed", action="store_true", default=False, help="Run training with multiple GPUs or nodes."
3333
)
3434
parser.add_argument("--export_io_descriptors", action="store_true", default=False, help="Export IO descriptors.")
35-
parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")
3635
# append RSL-RL cli arguments
3736
cli_args.add_rsl_rl_args(parser)
3837
# append AppLauncher cli args
@@ -122,7 +121,6 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg, agent_cfg: RslRlBaseRun
122121
# note: certain randomizations occur in the environment initialization so we set the seed here
123122
env_cfg.seed = agent_cfg.seed
124123
env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device
125-
env_cfg.sim.enable_newton_rendering = args_cli.newton_visualizer
126124

127125
# multi-gpu training configuration
128126
if args_cli.distributed:

scripts/reinforcement_learning/sb3/play.py

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,6 @@
3939
default=False,
4040
help="Use a slower SB3 wrapper but keep all the extra training info.",
4141
)
42-
parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")
4342
# append AppLauncher cli args
4443
AppLauncher.add_app_launcher_args(parser)
4544
# parse the arguments
@@ -83,11 +82,7 @@ def main():
8382
"""Play with stable-baselines agent."""
8483
# parse configuration
8584
env_cfg = parse_env_cfg(
86-
args_cli.task,
87-
device=args_cli.device,
88-
num_envs=args_cli.num_envs,
89-
use_fabric=not args_cli.disable_fabric,
90-
newton_visualizer=args_cli.newton_visualizer,
85+
args_cli.task, device=args_cli.device, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
9186
)
9287

9388
task_name = args_cli.task.split(":")[-1]

scripts/reinforcement_learning/sb3/train.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,6 @@
3232
default=False,
3333
help="Use a slower SB3 wrapper but keep all the extra training info.",
3434
)
35-
parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")
3635
# append AppLauncher cli args
3736
AppLauncher.add_app_launcher_args(parser)
3837
# parse the arguments
@@ -113,7 +112,6 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg, agent_cfg: dict):
113112
# note: certain randomizations occur in the environment initialization so we set the seed here
114113
env_cfg.seed = agent_cfg["seed"]
115114
env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device
116-
env_cfg.sim.enable_newton_rendering = args_cli.newton_visualizer
117115

118116
# directory for logging into
119117
run_info = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")

scripts/reinforcement_learning/skrl/play.py

Lines changed: 1 addition & 6 deletions
@@ -46,7 +46,6 @@
     help="The RL algorithm used for training the skrl agent.",
 )
 parser.add_argument("--real-time", action="store_true", default=False, help="Run in real-time, if possible.")
-parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")

 # append AppLauncher cli args
 AppLauncher.add_app_launcher_args(parser)
@@ -112,11 +111,7 @@ def main():

     # parse configuration
     env_cfg = parse_env_cfg(
-        args_cli.task,
-        device=args_cli.device,
-        num_envs=args_cli.num_envs,
-        use_fabric=not args_cli.disable_fabric,
-        newton_visualizer=args_cli.newton_visualizer,
+        args_cli.task, device=args_cli.device, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
     )
     try:
         experiment_cfg = load_cfg_from_registry(task_name, f"skrl_{algorithm}_cfg_entry_point")

scripts/reinforcement_learning/skrl/train.py

Lines changed: 0 additions & 2 deletions
@@ -44,7 +44,6 @@
     choices=["AMP", "PPO", "IPPO", "MAPPO"],
     help="The RL algorithm used for training the skrl agent.",
 )
-parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")

 # append AppLauncher cli args
 AppLauncher.add_app_launcher_args(parser)
@@ -113,7 +112,6 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg, agent_cfg: dict):
     # override configurations with non-hydra CLI arguments
     env_cfg.scene.num_envs = args_cli.num_envs if args_cli.num_envs is not None else env_cfg.scene.num_envs
     env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device
-    env_cfg.sim.enable_newton_rendering = args_cli.newton_visualizer

     # multi-gpu training config
     if args_cli.distributed:
