diffsynth/core/vram/layers.py (5 additions, 1 deletion)
@@ -64,7 +64,11 @@ def cast_to(self, weight, dtype, device):

     def check_free_vram(self):
         device = self.computation_device if self.computation_device != "npu" else "npu:0"
-        gpu_mem_state = getattr(torch, self.computation_device_type).mem_get_info(device)
+        device_module = getattr(torch, self.computation_device_type, None)
+        # Only CUDA and NPU have mem_get_info, for MPS/CPU assume enough memory
+        if device_module is None or not hasattr(device_module, "mem_get_info"):
+            return True
Comment on lines +69 to +70 (Contributor, severity: medium)
The condition to check for mem_get_info can be simplified. hasattr works correctly even if device_module is None (it returns False). This makes the device_module is None check redundant.

Suggested change:
-        if device_module is None or not hasattr(device_module, "mem_get_info"):
-            return True
+        if not hasattr(device_module, "mem_get_info"):
+            return True
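For reference, a minimal interpreter sketch of the behavior the suggestion relies on (the attribute name not_a_real_backend is only illustrative): hasattr swallows the AttributeError internally, so it already returns False when the object is None.

import torch

# hasattr(obj, name) returns False rather than raising when obj is None,
# so the explicit "device_module is None" guard adds nothing.
device_module = getattr(torch, "not_a_real_backend", None)  # -> None
print(hasattr(device_module, "mem_get_info"))               # False
print(hasattr(torch.cuda, "mem_get_info"))                  # True on recent PyTorch builds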

+        gpu_mem_state = device_module.mem_get_info(device)
         used_memory = (gpu_mem_state[1] - gpu_mem_state[0]) / (1024**3)
         return used_memory < self.vram_limit

diffsynth/diffusion/base_pipeline.py (4 additions, 1 deletion)
@@ -155,7 +155,10 @@ def load_models_to_device(self, model_names):
                 for module in model.modules():
                     if hasattr(module, "offload"):
                         module.offload()
-        getattr(torch, self.device_type).empty_cache()
+        # Clear cache if available (only CUDA has empty_cache)
+        device_module = getattr(torch, self.device_type, None)
+        if device_module is not None and hasattr(device_module, "empty_cache"):
+            device_module.empty_cache()
Comment on lines +160 to +161 (Contributor, severity: medium)
The condition to check for empty_cache can be simplified. hasattr returns False if the object is None, so the device_module is not None check is redundant.

Suggested change:
-        if device_module is not None and hasattr(device_module, "empty_cache"):
-            device_module.empty_cache()
+        if hasattr(device_module, "empty_cache"):
+            device_module.empty_cache()
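For context, a standalone sketch of what the simplified guard amounts to; the helper name clear_backend_cache is hypothetical and not part of this PR:

import torch

def clear_backend_cache(device_type: str) -> None:
    # Resolve the backend module (torch.cuda, or torch.npu when torch_npu is installed).
    device_module = getattr(torch, device_type, None)
    # hasattr is False for None, so no separate None check is needed.
    if hasattr(device_module, "empty_cache"):
        device_module.empty_cache()

clear_backend_cache("cuda")          # no-op if CUDA was never initialized
clear_backend_cache("no_such_dev")   # getattr falls back to None; nothing happens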

         # onload models
         for name, model in self.named_children():
             if name in model_names:
diffsynth/models/dinov3_image_encoder.py (4 additions, 1 deletion)
@@ -70,7 +70,10 @@ def __init__(self):
             }
         )

-    def forward(self, image, torch_dtype=torch.bfloat16, device="cuda"):
+    def forward(self, image, torch_dtype=torch.bfloat16, device=None):
+        # Use model's device if not specified
+        if device is None:
+            device = next(self.parameters()).device
         inputs = self.processor(images=image, return_tensors="pt")
         pixel_values = inputs["pixel_values"].to(dtype=torch_dtype, device=device)
         bool_masked_pos = None
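The device=None default leans on a common PyTorch idiom: infer the target device from the module's own weights. A minimal sketch on a toy module (ToyEncoder is illustrative, not a class from this repository):

import torch
import torch.nn as nn

class ToyEncoder(nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = nn.Linear(4, 4)

    def forward(self, x, device=None):
        # Fall back to wherever the module's weights currently live.
        if device is None:
            device = next(self.parameters()).device
        return self.proj(x.to(device))

model = ToyEncoder()          # parameters start on CPU
y = model(torch.randn(2, 4))  # no device argument needed; the input follows the weights
# After model.to("cuda") (when available), the same call runs on the GPU.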
diffsynth/models/siglip2_image_encoder.py (4 additions, 1 deletion)
@@ -47,7 +47,10 @@ def __init__(self):
             }
         )

-    def forward(self, image, torch_dtype=torch.bfloat16, device="cuda"):
+    def forward(self, image, torch_dtype=torch.bfloat16, device=None):
+        # Use model's device if not specified
+        if device is None:
+            device = next(self.parameters()).device
         pixel_values = self.processor(images=[image], return_tensors="pt")["pixel_values"]
         pixel_values = pixel_values.to(device=device, dtype=torch_dtype)
         output_attentions = False
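One caveat with this idiom, likely irrelevant for these encoders since they always carry weights: next(self.parameters()) raises StopIteration on a parameter-free module. A more defensive variant could look like the sketch below (module_device is a hypothetical helper, not code from this PR):

import torch

def module_device(module: torch.nn.Module, default: str = "cpu") -> torch.device:
    # next(..., None) avoids StopIteration for modules without parameters;
    # buffers are checked as a second hint before falling back to the default.
    param = next(module.parameters(), None)
    if param is not None:
        return param.device
    buf = next(module.buffers(), None)
    return buf.device if buf is not None else torch.device(default)

print(module_device(torch.nn.Linear(4, 4)))  # cpu (until the module is moved)
print(module_device(torch.nn.ReLU()))        # cpu (falls back to the default)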