@@ -76,7 +76,7 @@ def comma_separated_string_to_integer_list(s):
7676
def saturating_sigmoid(x):
  """Saturating sigmoid: 1.2 * sigmoid(x) - 0.1 cut to [0, 1].

  Args:
    x: A `Tensor` of logits.

  Returns:
    A `Tensor` of the same shape as `x`, with every element clipped
    to the closed interval [0, 1].
  """
  with tf.name_scope("saturating_sigmoid", values=[x]):
    sig = tf.sigmoid(x)
    # Stretch the sigmoid so it actually reaches 0 and 1, then clip.
    stretched = 1.2 * sig - 0.1
    return tf.minimum(1.0, tf.maximum(0.0, stretched))
8282
@@ -173,7 +173,7 @@ def shakeshake(xs, equal_grad=False):
173173
174174def convert_rgb_to_real (x ):
175175 """Conversion of pixel values to real numbers."""
176- with tf .name_scope ("rgb_to_real" , [x ]):
176+ with tf .name_scope ("rgb_to_real" , values = [x ]):
177177 x = tf .to_float (x )
178178 # Use the formula (value/128) - 1 to convert each channel value into a
179179 # real number in the range -1 to 1.
@@ -794,7 +794,7 @@ def subseparable_conv_block(inputs, filters, dilation_rates_and_kernel_sizes,
794794
795795def pool (inputs , window_size , pooling_type , padding , strides = (1 , 1 )):
796796 """Pooling (supports "LEFT")."""
797- with tf .name_scope ("pool" , [inputs ]):
797+ with tf .name_scope ("pool" , values = [inputs ]):
798798 static_shape = inputs .get_shape ()
799799 if not static_shape or len (static_shape ) != 4 :
800800 raise ValueError ("Inputs to conv must have statically known rank 4." )
@@ -949,7 +949,7 @@ def simple_attention(target, source, bias=None):
949949 Returns:
950950 a `Tensor` with same shape as `target`
951951 """
952- with tf .name_scope ("simple_attention" , [target , source ]):
952+ with tf .name_scope ("simple_attention" , values = [target , source ]):
953953 target_shape = shape_list (target )
954954 source_shape = shape_list (source )
955955 target = tf .reshape (
@@ -1515,7 +1515,7 @@ def pad_to_same_length(x, y, final_length_divisible_by=1, axis=1):
15151515 """Pad tensors x and y on axis 1 so that they have the same length."""
15161516 if axis not in [1 , 2 ]:
15171517 raise ValueError ("Only axis=1 and axis=2 supported for now." )
1518- with tf .name_scope ("pad_to_same_length" , [x , y ]):
1518+ with tf .name_scope ("pad_to_same_length" , values = [x , y ]):
15191519 x_length = shape_list (x )[axis ]
15201520 y_length = shape_list (y )[axis ]
15211521 max_length = tf .maximum (x_length , y_length )
@@ -1550,7 +1550,7 @@ def padding_list(length_diff, arg):
15501550
15511551def pad_with_zeros (logits , labels ):
15521552 """Pad labels on the length dimension to match logits length."""
1553- with tf .name_scope ("pad_with_zeros" , [logits , labels ]):
1553+ with tf .name_scope ("pad_with_zeros" , values = [logits , labels ]):
15541554 logits , labels = pad_to_same_length (logits , labels )
15551555 if len (labels .shape .as_list ()) == 3 : # 2-d labels.
15561556 logits , labels = pad_to_same_length (logits , labels , axis = 2 )
@@ -1644,7 +1644,7 @@ def padded_cross_entropy(logits,
16441644 reduce_sum = reduce_sum )
16451645 confidence = 1.0 - label_smoothing
16461646 vocab_size = shape_list (logits )[- 1 ]
1647- with tf .name_scope ("padded_cross_entropy" , [logits , labels ]):
1647+ with tf .name_scope ("padded_cross_entropy" , values = [logits , labels ]):
16481648 if len (logits .get_shape ().as_list ()) == 2 :
16491649 # Deal with the case where we did not insert extra dimensions due to
16501650 # TPU issues. No pad-to-same-length happens in this case.
@@ -1678,7 +1678,7 @@ def smoothing_cross_entropy(logits,
16781678 Returns:
16791679
16801680 """
1681- with tf .name_scope ("smoothing_cross_entropy" , [logits , labels ]):
1681+ with tf .name_scope ("smoothing_cross_entropy" , values = [logits , labels ]):
16821682 # Low confidence is given to all non-true labels, uniformly.
16831683 low_confidence = (1.0 - confidence ) / tf .to_float (vocab_size - 1 )
16841684 # Normalizing constant is the best cross-entropy value with soft targets.
@@ -1725,7 +1725,7 @@ def global_pool_1d(inputs, pooling_type="MAX", mask=None):
17251725 output: A tensor of dimensions batch_size x input_dims
17261726 dimension containing the sequences of transformed vectors.
17271727 """
1728- with tf .name_scope ("global_pool" , [inputs ]):
1728+ with tf .name_scope ("global_pool" , values = [inputs ]):
17291729 if mask is not None :
17301730 mask = tf .expand_dims (mask , axis = 2 )
17311731 inputs = tf .multiply (inputs , mask )
@@ -1762,7 +1762,7 @@ def running_global_pool_1d(inputs, pooling_type="MAX"):
17621762 dimension containing the running 'totals'.
17631763 """
17641764 del pooling_type
1765- with tf .name_scope ("running_global_pool" , [inputs ]):
1765+ with tf .name_scope ("running_global_pool" , values = [inputs ]):
17661766 scan_fct = tf .maximum
17671767 # Permute inputs so seq_length is first.
17681768 elems = tf .transpose (inputs , [1 , 0 , 2 ])
@@ -2118,7 +2118,7 @@ def padded_cross_entropy_factored(factored_logits,
21182118 a = factored_logits .a
21192119 b = factored_logits .b
21202120 confidence = 1.0 - label_smoothing
2121- with tf .name_scope ("padded_cross_entropy_factored" , [a , b , labels ]):
2121+ with tf .name_scope ("padded_cross_entropy_factored" , values = [a , b , labels ]):
21222122 labels_flat = tf .reshape (labels , [- 1 ])
21232123 a_flat = tf .reshape (a , [- 1 , shape_list (b )[1 ]])
21242124 xent = smoothing_cross_entropy_factored (a_flat , b , labels_flat ,
0 commit comments