@@ -12,7 +12,7 @@ class Mlp(nn.Module):
1212 def __init__ (
1313 self ,
1414 in_channels : int ,
15- mlp_ratio : int = 4 ,
15+ mlp_ratio : int = 2 ,
1616 activation : str = "star_relu" ,
1717 dropout : float = 0.0 ,
1818 bias : bool = False ,
@@ -31,7 +31,7 @@ def __init__(
3131 ----------
3232 in_channels : int
3333 Number of input features.
34- mlp_ratio : int, default=4
34+ mlp_ratio : int, default=2
3535 Scaling factor to get the number of hidden features from `in_channels`.
3636 activation : str, default="star_relu"
3737 The name of the activation function.
@@ -69,7 +69,7 @@ class MlpBlock(nn.Module):
6969 def __init__ (
7070 self ,
7171 in_channels : int ,
72- mlp_ratio : int = 4 ,
72+ mlp_ratio : int = 2 ,
7373 activation : str = "star_relu" ,
7474 activation_kwargs : Dict [str , Any ] = None ,
7575 dropout : float = 0.0 ,
@@ -85,7 +85,7 @@ def __init__(
8585 ----------
8686 in_channels : int
8787 Number of input features.
88- mlp_ratio : int, default=4
88+ mlp_ratio : int, default=2
8989 Scaling factor to get the number of hidden features from `in_channels`.
9090 activation : str, default="star_relu"
9191 The name of the activation function.
0 commit comments