|
33 | 33 | from ._builder import build_model_with_cfg |
34 | 34 | from ._features_fx import register_notrace_function |
35 | 35 | from ._manipulate import named_apply |
36 | | -from ._registry import register_model |
| 36 | +from ._registry import register_model, generate_default_cfgs |
37 | 37 |
38 | 38 | __all__ = ['GlobalContextVit'] |
39 | 39 |
40 | 40 |
41 | | -def _cfg(url='', **kwargs): |
42 | | - return { |
43 | | - 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), |
44 | | - 'crop_pct': 0.875, 'interpolation': 'bicubic', |
45 | | - 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, |
46 | | - 'first_conv': 'stem.conv1', 'classifier': 'head.fc', |
47 | | - 'fixed_input_size': True, |
48 | | - **kwargs |
49 | | - } |
50 | | - |
51 | | - |
52 | | -default_cfgs = { |
53 | | - 'gcvit_xxtiny': _cfg( |
54 | | - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_xxtiny_224_nvidia-d1d86009.pth'), |
55 | | - 'gcvit_xtiny': _cfg( |
56 | | - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_xtiny_224_nvidia-274b92b7.pth'), |
57 | | - 'gcvit_tiny': _cfg( |
58 | | - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_tiny_224_nvidia-ac783954.pth'), |
59 | | - 'gcvit_small': _cfg( |
60 | | - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_small_224_nvidia-4e98afa2.pth'), |
61 | | - 'gcvit_base': _cfg( |
62 | | - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_base_224_nvidia-f009139b.pth'), |
63 | | -} |
64 | | - |
65 | | - |
66 | 41 | class MbConvBlock(nn.Module): |
67 | 42 | """ A depthwise separable / fused mbconv style residual block with SE, `no norm. |
68 | 43 | """ |
@@ -541,6 +516,31 @@ def _create_gcvit(variant, pretrained=False, **kwargs): |
541 | 516 | return model |
542 | 517 |
543 | 518 |
| 519 | +def _cfg(url='', **kwargs): |
| 520 | + return { |
| 521 | + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), |
| 522 | + 'crop_pct': 0.875, 'interpolation': 'bicubic', |
| 523 | + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, |
| 524 | + 'first_conv': 'stem.conv1', 'classifier': 'head.fc', |
| 525 | + 'fixed_input_size': True, |
| 526 | + **kwargs |
| 527 | + } |
| 528 | + |
| 529 | + |
| 530 | +default_cfgs = generate_default_cfgs({ |
| 531 | + 'gcvit_xxtiny.in1k': _cfg( |
| 532 | + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_xxtiny_224_nvidia-d1d86009.pth'), |
| 533 | + 'gcvit_xtiny.in1k': _cfg( |
| 534 | + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_xtiny_224_nvidia-274b92b7.pth'), |
| 535 | + 'gcvit_tiny.in1k': _cfg( |
| 536 | + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_tiny_224_nvidia-ac783954.pth'), |
| 537 | + 'gcvit_small.in1k': _cfg( |
| 538 | + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_small_224_nvidia-4e98afa2.pth'), |
| 539 | + 'gcvit_base.in1k': _cfg( |
| 540 | + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_base_224_nvidia-f009139b.pth'), |
| 541 | +}) |
| 542 | + |
| 543 | + |
544 | 544 | @register_model |
545 | 545 | def gcvit_xxtiny(pretrained=False, **kwargs) -> GlobalContextVit: |
546 | 546 | model_kwargs = dict( |
|
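Usage note (not part of the diff): the snippet below is a minimal sketch of how the '.in1k' tags registered through generate_default_cfgs above are typically consumed, assuming a timm release with pretrained-tag support; the pretrained_cfg attribute is how recent timm versions expose the resolved config and may differ in older releases.

import timm

# The explicit '.in1k' tag selects the weights registered above; plain
# 'gcvit_tiny' resolves to the same default tag for this model.
model = timm.create_model('gcvit_tiny.in1k', pretrained=True)

# Inspect the resolved config (url, input_size, mean/std, crop_pct, ...)
# that timm attaches from the _cfg() fields.
print(model.pretrained_cfg)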