1 parent 0530113 commit d5edb93
autoparallel/export_module.py
@@ -134,13 +134,8 @@ def flattened_joint(*args):
     output_gradients = []
     for a, grad in zip(args, gradients):
         if isinstance(a, torch.Tensor) and a.requires_grad:
-            assert (
-                grad is not None
-            ), """\
-Found a parameter that did not receive a gradient.
-"This is most likely a bug, but if this needs to be supported please comment on this Github issue:
-https://github.com/pytorch/pytorch/issues/101192
-"""
+            if grad is None:
+                grad = torch.zeros_like(a)
             output_gradients.append(grad)
         else:
             assert grad is None
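
In effect, a trainable tensor input that did not receive a gradient is now given an explicit zero gradient instead of tripping an assertion that pointed to pytorch/pytorch#101192. The sketch below illustrates that pattern in isolation; it is not the actual autoparallel code, and the helper name collect_output_gradients is hypothetical.

    import torch

    def collect_output_gradients(args, gradients):
        """Pair each input with its gradient, substituting zeros when a
        tensor that requires grad received no gradient at all."""
        output_gradients = []
        for a, grad in zip(args, gradients):
            if isinstance(a, torch.Tensor) and a.requires_grad:
                if grad is None:
                    # No gradient flowed to this parameter; return zeros so the
                    # joint graph still produces a tensor for every trainable input.
                    grad = torch.zeros_like(a)
                output_gradients.append(grad)
            else:
                # Non-tensor args and tensors that don't require grad
                # should not have gradients attached to them.
                assert grad is None
        return output_gradients

    # Usage example with assumed inputs: y received no gradient, so its
    # entry comes back as zeros rather than raising an AssertionError.
    x = torch.randn(2, requires_grad=True)
    y = torch.randn(2, requires_grad=True)
    out = collect_output_gradients([x, y, 3], [torch.ones(2), None, None])
    print(out[1])  # tensor([0., 0.])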