From e228e955c27c70a0f0e19ecee6ccb2bc66387dab Mon Sep 17 00:00:00 2001 From: Andrew Barnes Date: Tue, 3 Mar 2026 13:20:23 -0500 Subject: [PATCH] fix: upgrade deprecated LightGBM API usage in docs and version constraints The core model code (gbdt.py, highfreq_gdbt_model.py, double_ensemble.py) was already updated to use LightGBM's callback-based API in PR #974, but the documentation and version constraints were not updated to match: - Update docs/start/integration.rst to use lgb.early_stopping(), lgb.log_evaluation(), and lgb.record_evaluation() callbacks instead of the deprecated early_stopping_rounds, verbose_eval, and evals_result parameters (removed in LightGBM 4.0) - Set minimum lightgbm>=3.3.0 in pyproject.toml (callback API introduced in 3.3.0) - Update example requirements.txt files from lightgbm==3.1.0 to lightgbm>=3.3.0 to match the callback API usage in the codebase Closes #904 Co-Authored-By: Claude Opus 4.6 --- docs/start/integration.rst | 12 +++++++----- examples/benchmarks/DoubleEnsemble/requirements.txt | 2 +- examples/hyperparameter/LightGBM/requirements.txt | 2 +- pyproject.toml | 5 ++++- 4 files changed, 13 insertions(+), 8 deletions(-) diff --git a/docs/start/integration.rst b/docs/start/integration.rst index 3ec2d2ea608..235ab730350 100644 --- a/docs/start/integration.rst +++ b/docs/start/integration.rst @@ -57,16 +57,17 @@ The Custom models need to inherit `qlib.model.base.Model <../reference/api.html# dtrain = lgb.Dataset(x_train.values, label=y_train) dvalid = lgb.Dataset(x_valid.values, label=y_valid) - # fit the model + # fit the model using callbacks (LightGBM >= 3.3.0) + early_stopping_callback = lgb.early_stopping(early_stopping_rounds) + verbose_eval_callback = lgb.log_evaluation(period=verbose_eval) + evals_result_callback = lgb.record_evaluation(evals_result) self.model = lgb.train( self.params, dtrain, num_boost_round=num_boost_round, valid_sets=[dtrain, dvalid], valid_names=["train", "valid"], - 
early_stopping_rounds=early_stopping_rounds, - verbose_eval=verbose_eval, - evals_result=evals_result, + callbacks=[early_stopping_callback, verbose_eval_callback, evals_result_callback], **kwargs ) @@ -94,6 +94,7 @@ The Custom models need to inherit `qlib.model.base.Model <../reference/api.html# def finetune(self, dataset: DatasetH, num_boost_round=10, verbose_eval=20): # Based on existing model and finetune by train more rounds dtrain, _ = self._prepare_data(dataset) + verbose_eval_callback = lgb.log_evaluation(period=verbose_eval) self.model = lgb.train( self.params, dtrain, @@ -101,7 +103,7 @@ The Custom models need to inherit `qlib.model.base.Model <../reference/api.html# init_model=self.model, valid_sets=[dtrain], valid_names=["train"], - verbose_eval=verbose_eval, + callbacks=[verbose_eval_callback], ) Configuration File diff --git a/examples/benchmarks/DoubleEnsemble/requirements.txt b/examples/benchmarks/DoubleEnsemble/requirements.txt index d25789bd715..93abd45367f 100644 --- a/examples/benchmarks/DoubleEnsemble/requirements.txt +++ b/examples/benchmarks/DoubleEnsemble/requirements.txt @@ -1,3 +1,3 @@ pandas==1.1.2 numpy==1.21.0 -lightgbm==3.1.0 \ No newline at end of file +lightgbm>=3.3.0 \ No newline at end of file diff --git a/examples/hyperparameter/LightGBM/requirements.txt b/examples/hyperparameter/LightGBM/requirements.txt index 83d96011dc5..bb20edde33a 100644 --- a/examples/hyperparameter/LightGBM/requirements.txt +++ b/examples/hyperparameter/LightGBM/requirements.txt @@ -1,5 +1,5 @@ pandas==1.1.2 numpy==1.21.0 -lightgbm==3.1.0 +lightgbm>=3.3.0 optuna==2.7.0 optuna-dashboard==0.4.1 diff --git a/pyproject.toml b/pyproject.toml index 4bfe68727d4..0e7199be28c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -44,7 +44,10 @@ dependencies = [ "tqdm", "pymongo", "loguru", - "lightgbm", + # lightgbm 3.3.0 introduced the log_evaluation() callback used here, + # alongside the earlier early_stopping() and record_evaluation().
+ # The deprecated parameter-based API was removed in lightgbm 4.0. + "lightgbm>=3.3.0", "gym", "cvxpy", "joblib",