diff --git a/.jenkins/get_files_to_run.py b/.jenkins/get_files_to_run.py
index bdf4562a82..e928228a5e 100644
--- a/.jenkins/get_files_to_run.py
+++ b/.jenkins/get_files_to_run.py
@@ -41,7 +41,7 @@ def add_to_shard(i, filename):
 
     all_other_files = all_files.copy()
     needs_multigpu = list(
-        filter(lambda x: get_needs_machine(x) == "linux.16xlarge.nvidia.gpu", all_files,)
+        filter(lambda x: get_needs_machine(x) == "4-gpu", all_files,)
     )
     needs_a10g = list(
         filter(lambda x: get_needs_machine(x) == "linux.g5.4xlarge.nvidia.gpu", all_files,)
diff --git a/.jenkins/metadata.json b/.jenkins/metadata.json
index 44c16ccedb..7e886d6346 100644
--- a/.jenkins/metadata.json
+++ b/.jenkins/metadata.json
@@ -21,13 +21,13 @@
     },
     "intermediate_source/pipeline_tutorial.py": {
         "duration": 320,
-        "needs": "linux.16xlarge.nvidia.gpu"
+        "needs": "4-gpu"
     },
     "beginner_source/blitz/data_parallel_tutorial.py": {
-        "needs": "linux.16xlarge.nvidia.gpu"
+        "needs": "4-gpu"
     },
     "intermediate_source/model_parallel_tutorial.py": {
-        "needs": "linux.16xlarge.nvidia.gpu"
+        "needs": "4-gpu"
     },
     "intermediate_source/torchrec_intro_tutorial.py": {
         "needs": "linux.g5.4xlarge.nvidia.gpu"
@@ -75,6 +75,6 @@
         "needs": "linux.g5.4xlarge.nvidia.gpu"
     },
     "beginner_source/distributed_training_with_ray_tutorial.py": {
-        "needs": "linux.16xlarge.nvidia.gpu"
+        "needs": "4-gpu"
     }
 }
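
Note (not part of the patch): `get_needs_machine` is the helper in `.jenkins/get_files_to_run.py` that resolves a tutorial's `needs` entry from `metadata.json`; only its call sites appear in this diff. The sketch below is an assumption about how such a lookup typically works, not the repository's actual implementation.

    import json
    from typing import Optional

    # Hypothetical sketch of the metadata lookup assumed by the diff above.
    # The real helper's body is not shown in this change.
    with open(".jenkins/metadata.json") as f:
        metadata = json.load(f)

    def get_needs_machine(filename: str) -> Optional[str]:
        # Return the runner label for a tutorial (e.g. "4-gpu" or
        # "linux.g5.4xlarge.nvidia.gpu"), or None if it has no "needs" entry.
        return metadata.get(filename, {}).get("needs", None)

Under this reading, the label is compared as an opaque string, so renaming "linux.16xlarge.nvidia.gpu" to "4-gpu" only needs to be applied consistently in both files, which is exactly what the patch does.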