From d28ce4e501a346a2a74bb2cf10411f836ad0006a Mon Sep 17 00:00:00 2001 From: Viet-Anh Tran <55154899+CallOn84@users.noreply.github.com> Date: Mon, 17 Jul 2023 23:50:07 +0100 Subject: [PATCH] Update tfprocess.py for TensorFlow 2.4+ While trying to train my own model, I happened to find an error in tfprocess.py that makes train_maia.py fail if you're using a TensorFlow version greater than 2.4.0. The reason is that the tf.keras.mixed_precision.experimental API was removed with the introduction of tf.keras.mixed_precision in TensorFlow 2.4+. To fix this, I changed two lines of code. tf.keras.mixed_precision.experimental.set_policy('mixed_float16'), found on line 123, was changed to tf.keras.mixed_precision.set_global_policy('mixed_float16'). self.optimizer = tf.keras.mixed_precision.experimental.LossScaleOptimizer(self.optimizer, self.loss_scale), found on line 150, was changed to self.optimizer = tf.keras.mixed_precision.LossScaleOptimizer(self.optimizer). 
--- move_prediction/maia_chess_backend/maia/tfprocess.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/move_prediction/maia_chess_backend/maia/tfprocess.py b/move_prediction/maia_chess_backend/maia/tfprocess.py index 1dd0ad13..8dcb9bcf 100644 --- a/move_prediction/maia_chess_backend/maia/tfprocess.py +++ b/move_prediction/maia_chess_backend/maia/tfprocess.py @@ -120,7 +120,7 @@ def __init__(self, cfg, name, collection_name): tf.config.experimental.set_visible_devices(gpus[self.cfg['gpu']], 'GPU') tf.config.experimental.set_memory_growth(gpus[self.cfg['gpu']], True) if self.model_dtype == tf.float16: - tf.keras.mixed_precision.experimental.set_policy('mixed_float16') + tf.keras.mixed_precision.set_global_policy('mixed_float16') self.global_step = tf.Variable(0, name='global_step', trainable=False, dtype=tf.int64) @@ -147,7 +147,7 @@ def init_net_v2(self): self.optimizer = tf.keras.optimizers.SGD(learning_rate=lambda: self.active_lr, momentum=0.9, nesterov=True) self.orig_optimizer = self.optimizer if self.loss_scale != 1: - self.optimizer = tf.keras.mixed_precision.experimental.LossScaleOptimizer(self.optimizer, self.loss_scale) + self.optimizer = tf.keras.mixed_precision.LossScaleOptimizer(self.optimizer) def correct_policy(target, output): output = tf.cast(output, tf.float32) # Calculate loss on policy head