diff --git a/.gitignore b/.gitignore
index a81c8ee..a4a3135 100644
--- a/.gitignore
+++ b/.gitignore
@@ -136,3 +136,6 @@ dmypy.json
# Cython debug symbols
cython_debug/
+
+# VSCode
+.history/
\ No newline at end of file
diff --git a/.history/README_20220919203414.md b/.history/README_20220919203414.md
new file mode 100644
index 0000000..0845bb2
--- /dev/null
+++ b/.history/README_20220919203414.md
@@ -0,0 +1,338 @@
+
+[](https://travis-ci.com/finlab-python/finlab_crypto) [](https://badge.fury.io/py/finlab-crypto) [](https://codecov.io/gh/finlab-python/finlab_crypto)
+
+Develop and verify crypto trading strategies at a glance.
+
+## Key Features
+* Pandas vectorized backtesting
+* Talib wrapper to compose strategies easily
+* Backtest visualization and analysis (uses [vectorbt](https://github.com/polakowo/vectorbt/) as backend)
+* Analyze the probability of overfitting ([combinatorially symmetric cross-validation](https://www.davidhbailey.com/dhbpapers/backtest-prob.pdf))
+* Easy to deploy strategies on Google Cloud Functions
+* Colab and Jupyter compatible
+* [10-hour trading bot online course](https://hahow.in/cr/crypto-python)
+
+## Installation
+```
+pip install finlab_crypto
+```
+
+## Colab Example
+ * [basic example for backtesting and optimization  ](https://colab.research.google.com/drive/1l1hylhFY-tzMV1Jca95mv_32hXe0L0M_?usp=sharing)
+
+## Usage
+### Setup Research Environment (Recommended)
+Create a `./history/` directory for saving historical data by calling `finlab_crypto.setup()`. If a Colab notebook is detected, it creates `GoogleDrive/crypto_workspace/history` and links that folder to `./history/`.
+``` python
+import finlab_crypto
+finlab_crypto.setup()
+```
+### Get Historical Price
+``` python
+ohlcv = finlab_crypto.crawler.get_all_binance('BTCUSDT', '4h')
+ohlcv.head()
+```
+
+### Trading Strategy
+``` python
+@finlab_crypto.Strategy(n1=20, n2=60)
+def sma_strategy(ohlcv):
+ n1 = sma_strategy.n1
+ n2 = sma_strategy.n2
+
+ sma1 = ohlcv.close.rolling(int(n1)).mean()
+ sma2 = ohlcv.close.rolling(int(n2)).mean()
+ return (sma1 > sma2), (sma1 < sma2)
+```
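+
+Filters can be defined with the same decorator pattern, following the `Filter` example in `finlab_crypto.strategy`. The sketch below is illustrative only: the 20-period moving-average condition and the names `trend_filter` and `f20` are introduced here, not part of the library.
+``` python
+# a minimal sketch of the Filter decorator pattern from finlab_crypto.strategy
+@finlab_crypto.Filter(timeperiod=20)
+def trend_filter(ohlcv):
+    sma = ohlcv.close.rolling(int(trend_filter.timeperiod)).mean()
+    # the optional second return value holds figures to draw below the price chart
+    return ohlcv.close > sma, {'figures': {'sma': sma}}
+
+f20 = trend_filter.create({'timeperiod': 20})
+```
+According to the changelog, created filters can be supplied through an optional `filters` argument (for example on `TradingMethod`); the exact wiring is not shown in this README, so treat that part as an assumption.
+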
+### Backtest
+``` python
+# default fee and slippage are 0.1% and 0.1%
+
+vars = {'n1': 20, 'n2': 60}
+portfolio = sma_strategy.backtest(ohlcv, vars, freq='4h', plot=True)
+```
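+
+The returned `portfolio` is a vectorbt portfolio object, so the usual vectorbt inspection methods can be applied to it. A minimal sketch, assuming the API of your installed vectorbt version:
+``` python
+# a minimal sketch, assuming `portfolio` is a vectorbt Portfolio object
+print(portfolio.total_return())  # overall return of the backtest
+print(portfolio.stats())         # summary statistics such as Sharpe ratio and max drawdown
+```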
+
+
+### Optimization
+``` python
+import numpy as np
+vars = {
+ 'n1': np.arange(10, 100, 5),
+ 'n2': np.arange(10, 100, 5)
+}
+portfolio = sma_strategy.backtest(ohlcv, vars, freq='4h', plot=True)
+```
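+
+With a parameter grid, each (n1, n2) combination becomes one column of the resulting portfolio, so the best pair can be read off the per-column returns. A minimal sketch, again assuming the API of your installed vectorbt version:
+``` python
+# a minimal sketch: total_return() yields one value per (n1, n2) combination
+returns = portfolio.total_return()
+print(returns.idxmax(), returns.max())  # best-performing parameter pair and its return
+```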
+
+
+
+
+### Live Trading
+
+To live-trade a strategy, the following three steps should be executed whenever a candle completes.
+
+#### 1. Create TradingMethods
+First, we need to encapsulate the strategy in a `TradingMethod`:
+```py
+from finlab_crypto.online import TradingMethod, TradingPortfolio, render_html
+
+# create TradingMethod for live trading
+tm_sma = TradingMethod(
+    name='live-strategy-sma',
+ symbols=['ADAUSDT', 'DOTBTC', 'ETHBTC'], freq='4h', lookback=1200,
+ strategy=sma_strategy,
+ variables=dict(n1 = 35, n2 = 105,),
+ weight=5000,
+ weight_unit='USDT',
+ execution_price='close' # trade at close or open price
+)
+```
+
+#### 2. register TradingMethods to TradingPortfolio
+A `TradingPortfolio` syncs the virtual portfolio to your Binance trading account. It contains many `TradingMethod`s, which should be executed whenever a new candle is (about to be) closed. You decide when to rebalance the portfolio by passing `execute_before_candle_complete` when creating the `TradingPortfolio`:
+* `execute_before_candle_complete=True`: rebalance right *before* a candle closes (e.g. at xx:59 for a 1h strategy), so you can execute orders faster than others. However, signal hazards may occur because the candle is still incomplete.
+* `execute_before_candle_complete=False` (default): rebalance right *after* a candle closes (e.g. at xx:00 for a 1h strategy)
+
+This setting tells `TradingPortfolio` whether to drop incomplete candles when generating trading signals. However, `TradingPortfolio` will *not* run periodically by itself, so you should set up a crontab or cloud function to execute it (see the scheduling sketch at the end of this section).
+We recommend running the code manually before setting up the crontab or cloud function.
+
+```py
+# set up portfolio
+BINANCE_KEY = '' # Enter your key and secret here!
+BINANCE_SECRET = ''
+
+tp = TradingPortfolio(BINANCE_KEY, BINANCE_SECRET, execute_before_candle_complete=False)
+tp.register(tm_sma)
+
+# additional trading methods can be registered
+# tp.register(tm1)
+```
+
+#### 3. view and execute orders
+Finally, call `tp.get_ohlcvs()` to fetch historical data for all traded assets, then `tp.get_latest_signals` to calculate the trading signals. The aggregated position information is produced by `tp.calculate_position_size`, and everything can be viewed with `render_html`. (The `order_results` argument in the snippet below is produced by `tp.execute_orders` in the next step.)
+```py
+ohlcvs = tp.get_ohlcvs()
+signals = tp.get_latest_signals(ohlcvs)
+position, position_btc, new_orders = tp.calculate_position_size(signals)
+
+render_html(signals, position, position_btc, new_orders, order_results)
+```
+
+If the result makes sense, use `tp.execute_orders` to sync the positions of your real account. Please open an issue if you find any bugs:
+```py
+# order mode can be 'TEST', 'MARKET', or 'LIMIT'
+# TEST mode will show orders without real executions.
+order_results = tp.execute_orders(new_orders, mode='TEST')
+```
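+
+As noted above, `TradingPortfolio` does not run periodically by itself; a crontab or cloud function is the robust way to schedule it. For local experiments, the following is a minimal scheduling sketch. The `rebalance` wrapper and `FREQ_HOURS` constant are illustrative, not part of the library, and the snippet assumes `tp` has already been created and its trading methods registered as shown above.
+```py
+import time
+from datetime import datetime, timezone
+
+FREQ_HOURS = 4  # should match the strategy frequency ('4h')
+
+def rebalance():
+    # hypothetical wrapper around steps 1-3 above
+    ohlcvs = tp.get_ohlcvs()
+    signals = tp.get_latest_signals(ohlcvs)
+    position, position_btc, new_orders = tp.calculate_position_size(signals)
+    order_results = tp.execute_orders(new_orders, mode='TEST')
+    render_html(signals, position, position_btc, new_orders, order_results)
+
+def wait_for_next_candle():
+    # sleep until the next candle boundary (UTC)
+    now = datetime.now(timezone.utc)
+    seconds_into_period = (now.hour % FREQ_HOURS) * 3600 + now.minute * 60 + now.second
+    time.sleep(FREQ_HOURS * 3600 - seconds_into_period)
+
+while True:
+    wait_for_next_candle()
+    rebalance()
+```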
+
+### Testing
+
+The following script runs all test cases in your local environment. [Creating an isolated Python environment](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#creating-an-environment-with-commands) is recommended. To test the crawler functions, provide a Binance API key and secret via the environment variables `BINANCE_KEY` and `BINANCE_SECRET`, respectively.
+
+``` bash
+git clone https://github.com/finlab-python/finlab_crypto.git
+cd finlab_crypto
+pip install -r requirements.txt
+pip install coverage
+BINANCE_KEY=<> BINANCE_SECRET=<> coverage run -m unittest discover --pattern *_test.py
+```
+
+## Updates
+Version 0.2.22
+
+* add shorting capability
+* change init cash to $1,000,000 (vectorbt's default may be infinity)
+* changed default fees to 0.15%, since the standard taker fee is 0.075% per executed trade
+
+Version 0.2.21
+
+* fix pyecharts compatibility
+* set the default argument `client` of get_nbars_binance
+
+Version 0.2.20
+
+* fix get_all_binance last candle not updated
+
+Version 0.2.19
+
+* fix bar color
+
+Version 0.2.18
+
+* fix stop loss and take profit and add them into tests
+
+Version 0.2.17
+* update vectorbt version
+
+Version 0.2.16
+* update pandas version
+
+Version 0.2.15
+* fix tp.portfolio_backtest
+
+Version 0.2.14
+* add `execute_before_candle_complete`
+* add `weight` and `weight_unit` for `TradingMethod`
+
+Version 0.2.12
+* fix numba version
+
+Version 0.2.11
+Version 0.2.10
+* fix numpy version
+
+Version 0.2.8
+* merge transactions to reduce fees
+
+Version 0.2.7
+* fix test error (request binance api too fast)
+* add USDC as base stable coin (tp.set_default_stable_coin('USDC'))
+
+Version 0.2.6
+* fix version of pandas==1.1.5, since pandas==1.2.0 is not compatible with vectorbt
+* fix show_parameters function in Strategy and Filter
+
+Version 0.2.5
+* fix weight_btc error
+* fix strategy mutable input
+
+Version 0.2.4
+* fix entry price in online.py
+
+Version 0.2.3
+* fix execution price issue
+
+Version 0.2.2: not stable
+* improve syntax
+* add execution price for the strategy
+
+Version 0.2.1
+* fix vectorbt version
+
+Version 0.2.0
+* update vectorbt to 0.14.4
+
+Version 0.1.19
+* refactor(strategy.py): refactor strategy
+* refactor(cscv.py): refactor cscv
+* add cscv_nbins and cscv_objective to strategy.backtest
+* add bitmex support
+
+Version 0.1.18
+* fix(crawler): get_n_bars
+* fix(TradingPortfolio): get_ohlcv
+* fix(TradingPortfolio): portfolio_backtest
+
+Version 0.1.17
+* fix error for latest_signal asset_btc_value
+* add unittest for latest_signal
+
+Version 0.1.16
+* fix web page error
+* fix error for zero orders
+
+Version 0.1.15
+* fix web page error
+
+Version 0.1.14
+* refine render_html function
+
+Version 0.1.13
+* refine display html for TradingPortfolio
+
+Version 0.1.12
+* add delay when portfolio backtesting
+* fix colab compatibility
+* improve interface of TradingPortfolio
+
+Version 0.1.11
+* fix portfolio backtest error
+* add last date equity for backtest
+
+Version 0.1.10
+* add portfolio backtest
+* rename online.py functions
+* refactor error tolerance of different position in online.py functions
+* set USDT as an excluded asset when calculating position size
+
+Version 0.1.9
+* set 'filters' as an optional argument on TradingMethod
+* set plot range dynamically
+* portfolio backtest
+
+Version 0.1.8
+* fix talib parameter type incompatibility issue
+
+Version 0.1.7
+* fix talib parameter type incompatibility issue
+
+Version 0.1.6
+* fix talib-binary compatibility issue when using talib_strategy or talib_filter
+
+Version 0.1.5
+* add filters to online.py
+* add lambda argument options to talib_filter
+* move talib_filter to finlab_crypto package
+
+Version 0.1.4
+* fix talib filter and strategy pandas import error
+* fix talib import error in indicators, talib_strategy, and talib_filter
+
+Version 0.1.3
+* remove progress bar when only single strategy is backtested
+* adjust online portfolio to support leverage
+* new theme for overfitting plots
+* fix online order with zero order amount
+* fix SD2 for overfitting plots
+
+Version 0.1.2
+* fix strategy variables
+
+Version 0.1.1
+* fix talib error
+* add filters folder
+* add excluded assets when sync portfolio
+* add filter folder to setup
+* fix variable eval failure
+
+Version 0.1.0
+* add filter interface
+* add talib strategy wrapper
+* add talib filter wrapper
+
+Version 0.0.9.dev1
+* vectorbt heatmap redesign
+* improve optimization plots
+* redesign strategy interface
+* add new function setup, to replace setup_colab
+
+Version 0.0.8.dev1
+* fix transaction duplicate bug
+
+Version 0.0.7.dev1
+* fix bugs of zero transaction
+
+Version 0.0.6.dev1
+* fix latest signal
+* rename strategy.recent_signal
+* restructure rebalance function in online.py
+
+Version 0.0.5.dev1
+* add init module
+* add colab setup function
+* set vectorbt default
+* fix crawler duplicated index
+
+Version 0.0.4.dev1
+* add seaborn to dependencies
+* remove talib-binary from dependencies
+* fix padding style
+
+Version 0.0.3.dev1
+* remove logs when calculating portfolio
+* add render html to show final portfolio changes
+* add button in html to place real trade with google cloud function
+
+Version 0.0.2.dev1
+* skip heatmap if it is broken
+* add portfolio strategies
+* add talib dependency
diff --git a/.history/README_20220919205110.md b/.history/README_20220919205110.md
new file mode 100644
index 0000000..6437448
--- /dev/null
+++ b/.history/README_20220919205110.md
@@ -0,0 +1,339 @@
+
+[](https://travis-ci.com/finlab-python/finlab_crypto) [](https://badge.fury.io/py/finlab-crypto) [](https://codecov.io/gh/finlab-python/finlab_crypto)
+
+Develop and verify crypto trading strategies at a glance.
+
+## Key Features
+* Pandas vectorized backtesting
+* Talib wrapper to compose strategies easily
+* Backtest visualization and analysis (uses [vectorbt](https://github.com/polakowo/vectorbt/) as backend)
+* Analyze the probability of overfitting ([combinatorially symmetric cross-validation](https://www.davidhbailey.com/dhbpapers/backtest-prob.pdf))
+* Easy to deploy strategies on Google Cloud Functions
+* Colab and Jupyter compatible
+* [10-hour trading bot online course](https://hahow.in/cr/crypto-python)
+
+## Installation
+```
+pip install finlab_crypto
+```
+
+## Colab Example
+ * [basic example for backtesting and optimization  ](https://colab.research.google.com/drive/1l1hylhFY-tzMV1Jca95mv_32hXe0L0M_?usp=sharing)
+
+## Usage
+### Setup Research Environment (Recommended)
+Create a `./history/` directory for saving historical data by calling `finlab_crypto.setup()`. If a Colab notebook is detected, it creates `GoogleDrive/crypto_workspace/history` and links that folder to `./history/`.
+``` python
+import finlab_crypto
+finlab_crypto.setup()
+```
+### Get Historical Price
+``` python
+ohlcv = finlab_crypto.crawler.get_all_binance('BTCUSDT', '4h')
+ohlcv.head()
+```
+
+### Trading Strategy
+``` python
+@finlab_crypto.Strategy(n1=20, n2=60)
+def sma_strategy(ohlcv):
+ n1 = sma_strategy.n1
+ n2 = sma_strategy.n2
+
+ sma1 = ohlcv.close.rolling(int(n1)).mean()
+ sma2 = ohlcv.close.rolling(int(n2)).mean()
+ return (sma1 > sma2), (sma1 < sma2)
+```
+### Backtest
+``` python
+# default fee and slippage are 0.1% and 0.1%
+
+vars = {'n1': 20, 'n2': 60}
+portfolio = sma_strategy.backtest(ohlcv, vars, freq='4h', plot=True)
+```
+
+
+### Optimization
+``` python
+import numpy as np
+vars = {
+ 'n1': np.arange(10, 100, 5),
+ 'n2': np.arange(10, 100, 5)
+}
+portfolio = sma_strategy.backtest(ohlcv, vars, freq='4h', plot=True)
+```
+
+
+
+
+### Live Trading
+
+To live-trade a strategy, the following three steps should be executed whenever a candle completes.
+
+#### 1. Create TradingMethods
+First, we need to encapsulate the strategy in a `TradingMethod`:
+```py
+from finlab_crypto.online import TradingMethod, TradingPortfolio, render_html
+
+# create TradingMethod for live trading
+tm_sma = TradingMethod(
+    name='live-strategy-sma',
+ symbols=['ADAUSDT', 'DOTBTC', 'ETHBTC'], freq='4h', lookback=1200,
+ strategy=sma_strategy,
+ variables=dict(n1 = 35, n2 = 105,),
+ weight=5000,
+ weight_unit='USDT',
+ execution_price='close' # trade at close or open price
+)
+```
+
+#### 2. register TradingMethods to TradingPortfolio
+A `TradingPortfolio` syncs the virtual portfolio to your Binance trading account. It contains many `TradingMethod`s, which should be executed whenever a new candle is (about to be) closed. You decide when to rebalance the portfolio by passing `execute_before_candle_complete` when creating the `TradingPortfolio`:
+* `execute_before_candle_complete=True`: rebalance right *before* a candle closes (e.g. at xx:59 for a 1h strategy), so you can execute orders faster than others. However, signal hazards may occur because the candle is still incomplete.
+* `execute_before_candle_complete=False` (default): rebalance right *after* a candle closes (e.g. at xx:00 for a 1h strategy)
+
+This setting tells `TradingPortfolio` whether to drop incomplete candles when generating trading signals. However, `TradingPortfolio` will *not* run periodically by itself, so you should set up a crontab or cloud function to execute it.
+We recommend running the code manually before setting up the crontab or cloud function.
+
+```py
+# set up portfolio
+BINANCE_KEY = '' # Enter your key and secret here!
+BINANCE_SECRET = ''
+
+tp = TradingPortfolio(BINANCE_KEY, BINANCE_SECRET, execute_before_candle_complete=False)
+tp.register(tm_sma)
+
+# additional trading methods can be registered
+# tp.register(tm1)
+```
+
+#### 3. view and execute orders
+Finally, call `tp.get_ohlcvs()` to fetch historical data for all traded assets, then `tp.get_latest_signals` to calculate the trading signals. The aggregated position information is produced by `tp.calculate_position_size`, and everything can be viewed with `render_html`. (The `order_results` argument in the snippet below is produced by `tp.execute_orders` in the next step.)
+```py
+ohlcvs = tp.get_ohlcvs()
+signals = tp.get_latest_signals(ohlcvs)
+position, position_btc, new_orders = tp.calculate_position_size(signals)
+
+render_html(signals, position, position_btc, new_orders, order_results)
+```
+
+If the result makes sense, use `tp.execute_orders` to sync the positions of your real account. Please open an issue if you find any bugs:
+```py
+# order mode can be 'TEST', 'MARKET', or 'LIMIT'
+# TEST mode will show orders without real executions.
+order_results = tp.execute_orders(new_orders, mode='TEST')
+```
+
+### Testing
+
+The following script runs all test cases in your local environment. [Creating an isolated Python environment](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#creating-an-environment-with-commands) is recommended. To test the crawler functions, provide a Binance API key and secret via the environment variables `BINANCE_KEY` and `BINANCE_SECRET`, respectively.
+
+``` bash
+git clone https://github.com/finlab-python/finlab_crypto.git
+cd finlab_crypto
+pip install -r requirements.txt
+pip install coverage
+BINANCE_KEY=<> BINANCE_SECRET=<> coverage run -m unittest discover --pattern *_test.py
+```
+
+## Updates
+Version 0.2.22
+
+* add shorting capability
+* change init cash to $1,000,000 (vectorbt's default may be infinity)
+* changed default fees to 0.15%, since the standard taker fee is 0.075% per executed trade
+* updated SMA + BB demo strategies
+
+Version 0.2.21
+
+* fix pyecharts compatibility
+* set the default argument `client` of get_nbars_binance
+
+Version 0.2.20
+
+* fix get_all_binance last candle not updated
+
+Version 0.2.19
+
+* fix bar color
+
+Version 0.2.18
+
+* fix stop loss and take profit and add them into tests
+
+Version 0.2.17
+* update vectorbt version
+
+Version 0.2.16
+* update pandas version
+
+Version 0.2.15
+* fix tp.portfolio_backtest
+
+Version 0.2.14
+* add `execute_before_candle_complete`
+* add `weight` and `weight_unit` for `TradingMethod`
+
+Version 0.2.12
+* fix numba version
+
+Version 0.2.11
+Version 0.2.10
+* fix numpy version
+
+Version 0.2.8
+* merge transactions to reduce fees
+
+Version 0.2.7
+* fix test error (request binance api too fast)
+* add USDC as base stable coin (tp.set_default_stable_coin('USDC'))
+
+Version 0.2.6
+* fix version of pandas==1.1.5, since pandas==1.2.0 is not compatible with vectorbt
+* fix show_parameters function in Strategy and Filter
+
+Version 0.2.5
+* fix weight_btc error
+* fix strategy mutable input
+
+Version 0.2.4
+* fix entry price in online.py
+
+Version 0.2.3
+* fix execution price issue
+
+Version 0.2.2: not stable
+* improve syntax
+* add execution price for the strategy
+
+Version 0.2.1
+* fix vectorbt version
+
+Version 0.2.0
+* update vectorbt to 0.14.4
+
+Version 0.1.19
+* refactor(strategy.py): refactor strategy
+* refactor(cscv.py): refactor cscv
+* add cscv_nbins and cscv_objective to strategy.backtest
+* add bitmex support
+
+Version 0.1.18
+* fix(crawler): get_n_bars
+* fix(TradingPortfolio): get_ohlcv
+* fix(TradingPortfolio): portfolio_backtest
+
+Version 0.1.17
+* fix error for latest_signal asset_btc_value
+* add unittest for latest_signal
+
+Version 0.1.16
+* fix web page error
+* fix error for zero orders
+
+Version 0.1.15
+* fix web page error
+
+Version 0.1.14
+* refine render_html function
+
+Version 0.1.13
+* refine display html for TradingPortfolio
+
+Version 0.1.12
+* add delay when portfolio backtesting
+* fix colab compatibility
+* improve interface of TradingPortfolio
+
+Version 0.1.11
+* fix portfolio backtest error
+* add last date equity for backtest
+
+Version 0.1.10
+* add portfolio backtest
+* rename online.py functions
+* refactor error tolerance of different position in online.py functions
+* set USDT as an excluded asset when calculating position size
+
+Version 0.1.9
+* set 'filters' as an optional argument on TradingMethod
+* set plot range dynamically
+* portfolio backtest
+
+Version 0.1.8
+* fix talib parameter type incompatibility issue
+
+Version 0.1.7
+* fix talib parameter type incompatibility issue
+
+Version 0.1.6
+* fix talib-binary compatibility issue when using talib_strategy or talib_filter
+
+Version 0.1.5
+* add filters to online.py
+* add lambda argument options to talib_filter
+* move talib_filter to finlab_crypto package
+
+Version 0.1.4
+* fix talib filter and strategy pandas import error
+* fix talib import error in indicators, talib_strategy, and talib_filter
+
+Version 0.1.3
+* remove progress bar when only single strategy is backtested
+* adjust online portfolio to support leverage
+* new theme for overfitting plots
+* fix online order with zero order amount
+* fix SD2 for overfitting plots
+
+Version 0.1.2
+* fix strategy variables
+
+Version 0.1.1
+* fix talib error
+* add filters folder
+* add excluded assets when sync portfolio
+* add filter folder to setup
+* fix variable eval failure
+
+Version 0.1.0
+* add filter interface
+* add talib strategy wrapper
+* add talib filter wrapper
+
+Version 0.0.9.dev1
+* vectorbt heatmap redesign
+* improve optimization plots
+* redesign strategy interface
+* add new function setup, to replace setup_colab
+
+Version 0.0.8.dev1
+* fix transaction duplicate bug
+
+Version 0.0.7.dev1
+* fix bugs of zero transaction
+
+Version 0.0.6.dev1
+* fix latest signal
+* rename strategy.recent_signal
+* restructure rebalance function in online.py
+
+Version 0.0.5.dev1
+* add init module
+* add colab setup function
+* set vectorbt default
+* fix crawler duplicated index
+
+Version 0.0.4.dev1
+* add seaborn to dependencies
+* remove talib-binary from dependencies
+* fix padding style
+
+Version 0.0.3.dev1
+* remove logs when calculating portfolio
+* add render html to show final portfolio changes
+* add button in html to place real trade with google cloud function
+
+Version 0.0.2.dev1
+* skip heatmap if it is broken
+* add portfolio strategies
+* add talib dependency
diff --git a/.history/README_20220919205245.md b/.history/README_20220919205245.md
new file mode 100644
index 0000000..15fa404
--- /dev/null
+++ b/.history/README_20220919205245.md
@@ -0,0 +1,339 @@
+
+[](https://travis-ci.com/finlab-python/finlab_crypto) [](https://badge.fury.io/py/finlab-crypto) [](https://codecov.io/gh/finlab-python/finlab_crypto)
+
+Develop and verify crypto trading strategies at a glance.
+
+## Key Features
+* Pandas vectorized backtesting
+* Talib wrapper to compose strategies easily
+* Backtest visualization and analysis (uses [vectorbt](https://github.com/polakowo/vectorbt/) as backend)
+* Analyze the probability of overfitting ([combinatorially symmetric cross-validation](https://www.davidhbailey.com/dhbpapers/backtest-prob.pdf))
+* Easy to deploy strategies on Google Cloud Functions
+* Colab and Jupyter compatible
+* [10-hour trading bot online course](https://hahow.in/cr/crypto-python)
+
+## Installation
+```
+pip install finlab_crypto
+```
+
+## Colab Example
+ * [basic example for backtesting and optimization  ](https://colab.research.google.com/drive/1l1hylhFY-tzMV1Jca95mv_32hXe0L0M_?usp=sharing)
+
+## Usage
+### Setup Research Environment (Recommended)
+Create a `./history/` directory for saving historical data by calling `finlab_crypto.setup()`. If a Colab notebook is detected, it creates `GoogleDrive/crypto_workspace/history` and links that folder to `./history/`.
+``` python
+import finlab_crypto
+finlab_crypto.setup()
+```
+### Get Historical Price
+``` python
+ohlcv = finlab_crypto.crawler.get_all_binance('BTCUSDT', '4h')
+ohlcv.head()
+```
+
+### Trading Strategy
+``` python
+@finlab_crypto.Strategy(n1=20, n2=60)
+def sma_strategy(ohlcv):
+ n1 = sma_strategy.n1
+ n2 = sma_strategy.n2
+
+ sma1 = ohlcv.close.rolling(int(n1)).mean()
+ sma2 = ohlcv.close.rolling(int(n2)).mean()
+ return (sma1 > sma2), (sma1 < sma2)
+```
+### Backtest
+``` python
+# default fee and slippage are 0.1% and 0.1%
+
+vars = {'n1': 20, 'n2': 60}
+portfolio = sma_strategy.backtest(ohlcv, vars, freq='4h', plot=True)
+```
+
+
+### Optimization
+``` python
+import numpy as np
+vars = {
+ 'n1': np.arange(10, 100, 5),
+ 'n2': np.arange(10, 100, 5)
+}
+portfolio = sma_strategy.backtest(ohlcv, vars, freq='4h', plot=True)
+```
+
+
+
+
+### Live Trading
+
+To live-trade a strategy, the following three steps should be executed whenever a candle completes.
+
+#### 1. Create TradingMethods
+First, we need to encapsulate the strategy in a `TradingMethod`:
+```py
+from finlab_crypto.online import TradingMethod, TradingPortfolio, render_html
+
+# create TradingMethod for live trading
+tm_sma = TradingMethod(
+    name='live-strategy-sma',
+ symbols=['ADAUSDT', 'DOTBTC', 'ETHBTC'], freq='4h', lookback=1200,
+ strategy=sma_strategy,
+ variables=dict(n1 = 35, n2 = 105,),
+ weight=5000,
+ weight_unit='USDT',
+ execution_price='close' # trade at close or open price
+)
+```
+
+#### 2. register TradingMethods to TradingPortfolio
+A `TradingPortfolio` syncs the virtual portfolio to your Binance trading account. It contains many `TradingMethod`s, which should be executed whenever a new candle is (about to be) closed. You decide when to rebalance the portfolio by passing `execute_before_candle_complete` when creating the `TradingPortfolio`:
+* `execute_before_candle_complete=True`: rebalance right *before* a candle closes (e.g. at xx:59 for a 1h strategy), so you can execute orders faster than others. However, signal hazards may occur because the candle is still incomplete.
+* `execute_before_candle_complete=False` (default): rebalance right *after* a candle closes (e.g. at xx:00 for a 1h strategy)
+
+This setting tells `TradingPortfolio` whether to drop incomplete candles when generating trading signals. However, `TradingPortfolio` will *not* run periodically by itself, so you should set up a crontab or cloud function to execute it.
+We recommend running the code manually before setting up the crontab or cloud function.
+
+```py
+# set up portfolio
+BINANCE_KEY = '' # Enter your key and secret here!
+BINANCE_SECRET = ''
+
+tp = TradingPortfolio(BINANCE_KEY, BINANCE_SECRET, execute_before_candle_complete=False)
+tp.register(tm_sma)
+
+# additional trading methods can be registered
+# tp.register(tm1)
+```
+
+#### 3. view and execute orders
+Finally, call `tp.get_ohlcvs()` to fetch historical data for all traded assets, then `tp.get_latest_signals` to calculate the trading signals. The aggregated position information is produced by `tp.calculate_position_size`, and everything can be viewed with `render_html`. (The `order_results` argument in the snippet below is produced by `tp.execute_orders` in the next step.)
+```py
+ohlcvs = tp.get_ohlcvs()
+signals = tp.get_latest_signals(ohlcvs)
+position, position_btc, new_orders = tp.calculate_position_size(signals)
+
+render_html(signals, position, position_btc, new_orders, order_results)
+```
+
+If the result makes sense, use `tp.execute_orders` to sync the positions of your real account. Please open an issue if you find any bugs:
+```py
+# order mode can be 'TEST', 'MARKET', or 'LIMIT'
+# TEST mode will show orders without real executions.
+order_results = tp.execute_orders(new_orders, mode='TEST')
+```
+
+### Testing
+
+The following script runs all test cases in your local environment. [Creating an isolated Python environment](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#creating-an-environment-with-commands) is recommended. To test the crawler functions, provide a Binance API key and secret via the environment variables `BINANCE_KEY` and `BINANCE_SECRET`, respectively.
+
+``` bash
+git clone https://github.com/finlab-python/finlab_crypto.git
+cd finlab_crypto
+pip install -r requirements.txt
+pip install coverage
+BINANCE_KEY=<> BINANCE_SECRET=<> coverage run -m unittest discover --pattern *_test.py
+```
+
+## Updates
+Version 0.2.22 - [@kodiakcrypto](https://github.com/kodiakcrypto)
+
+* added shorting capability
+* change init cash to $1,000,000 (vectorbt's default may be infinity)
+* changed default fees to 0.15%, since the standard taker fee is 0.075% per executed trade
+* updated SMA + BB demo strategy examples
+
+Version 0.2.21
+
+* fix pyecharts compatibility
+* set the default argument `client` of get_nbars_binance
+
+Version 0.2.20
+
+* fix get_all_binance last candle not updated
+
+Version 0.2.19
+
+* fix bar color
+
+Version 0.2.18
+
+* fix stop loss and take profit and add them into tests
+
+Version 0.2.17
+* update vectorbt version
+
+Version 0.2.16
+* update pandas version
+
+Version 0.2.15
+* fix tp.portfolio_backtest
+
+Version 0.2.14
+* add `execute_before_candle_complete`
+* add `weight` and `weight_unit` for `TradingMethod`
+
+Version 0.2.12
+* fix numba version
+
+Version 0.2.11
+Version 0.2.10
+* fix numpy version
+
+Version 0.2.8
+* merge transactions to reduce fees
+
+Version 0.2.7
+* fix test error (request binance api too fast)
+* add USDC as base stable coin (tp.set_default_stable_coin('USDC'))
+
+Version 0.2.6
+* fix version of pandas==1.1.5, since pandas==1.2.0 is not compatible with vectorbt
+* fix show_parameters function in Strategy and Filter
+
+Version 0.2.5
+* fix weight_btc error
+* fix strategy mutable input
+
+Version 0.2.4
+* fix entry price in online.py
+
+Version 0.2.3
+* fix execution price issue
+
+Version 0.2.2: not stable
+* improve syntax
+* add execution price for the strategy
+
+Version 0.2.1
+* fix vectorbt version
+
+Version 0.2.0
+* update vectorbt to 0.14.4
+
+Version 0.1.19
+* refactor(strategy.py): refactor strategy
+* refactor(cscv.py): refactor cscv
+* add cscv_nbins and cscv_objective to strategy.backtest
+* add bitmex support
+
+Version 0.1.18
+* fix(crawler): get_n_bars
+* fix(TradingPortfolio): get_ohlcv
+* fix(TradingPortfolio): portfolio_backtest
+
+Version 0.1.17
+* fix error for latest_signal asset_btc_value
+* add unittest for latest_signal
+
+Version 0.1.16
+* fix web page error
+* fix error for zero orders
+
+Version 0.1.15
+* fix web page error
+
+Version 0.1.14
+* refine render_html function
+
+Version 0.1.13
+* refine display html for TradingPortfolio
+
+Version 0.1.12
+* add delay when portfolio backtesting
+* fix colab compatibility
+* improve interface of TradingPortfolio
+
+Version 0.1.11
+* fix portfolio backtest error
+* add last date equity for backtest
+
+Version 0.1.10
+* add portfolio backtest
+* rename online.py functions
+* refactor error tolerance of different position in online.py functions
+* set USDT as an excluded asset when calculating position size
+
+Version 0.1.9
+* set 'filters' as an optional argument on TradingMethod
+* set plot range dynamically
+* portfolio backtest
+
+Version 0.1.8
+* fix talib parameter type incompatibility issue
+
+Version 0.1.7
+* fix talib parameter type incompatibility issue
+
+Version 0.1.6
+* fix talib-binary compatibility issue when using talib_strategy or talib_filter
+
+Version 0.1.5
+* add filters to online.py
+* add lambda argument options to talib_filter
+* move talib_filter to finlab_crypto package
+
+Version 0.1.4
+* fix talib filter and strategy pandas import error
+* fix talib import error in indicators, talib_strategy, and talib_filter
+
+Version 0.1.3
+* remove progress bar when only single strategy is backtested
+* adjust online portfolio to support leverage
+* new theme for overfitting plots
+* fix online order with zero order amount
+* fix SD2 for overfitting plots
+
+Version 0.1.2
+* fix strategy variables
+
+Version 0.1.1
+* fix talib error
+* add filters folder
+* add excluded assets when sync portfolio
+* add filter folder to setup
+* fix variable eval failure
+
+Version 0.1.0
+* add filter interface
+* add talib strategy wrapper
+* add talib filter wrapper
+
+Version 0.0.9.dev1
+* vectorbt heatmap redesign
+* improve optimization plots
+* redesign strategy interface
+* add new function setup, to replace setup_colab
+
+Version 0.0.8.dev1
+* fix transaction duplicate bug
+
+Version 0.0.7.dev1
+* fix bugs of zero transaction
+
+Version 0.0.6.dev1
+* fix latest signal
+* rename strategy.recent_signal
+* restructure rebalance function in online.py
+
+Version 0.0.5.dev1
+* add init module
+* add colab setup function
+* set vectorbt default
+* fix crawler duplicated index
+
+Version 0.0.4.dev1
+* add seaborn to dependencies
+* remove talib-binary from dependencies
+* fix padding style
+
+Version 0.0.3.dev1
+* remove logs when calculating portfolio
+* add render html to show final portfolio changes
+* add button in html to place real trade with google cloud function
+
+Version 0.0.2.dev1
+* skip heatmap if it is broken
+* add portfolio strategies
+* add talib dependency
diff --git a/.history/finlab_crypto/__init___20220919203020.py b/.history/finlab_crypto/__init___20220919203020.py
new file mode 100644
index 0000000..b6eb576
--- /dev/null
+++ b/.history/finlab_crypto/__init___20220919203020.py
@@ -0,0 +1,58 @@
+import warnings
+warnings.simplefilter(action='ignore', category=FutureWarning)
+from . import crawler
+from .strategy import Strategy
+from .strategy import Filter
+
+import vectorbt as vbt
+import sys
+import os
+
+__version__ = '0.2.21'
+
+
+# set default fees and slippage
+vbt.settings.portfolio['init_cash'] = 1000000.0 # in $
+vbt.settings.portfolio['fees'] = 0.15 # in %
+vbt.settings.portfolio['slippage'] = 0.0 # in %
+
+# create the workspace directory if it does not exist
+def check_and_create_dir(dname):
+ has_dir = os.path.isdir(dname)
+ if not has_dir:
+ os.mkdir(dname)
+
+def setup_colab():
+ google_drive_connected = os.path.isdir('/content/drive/MyDrive')
+
+ if not google_drive_connected:
+ print('|------------------------------')
+ print('| Google Drive not connected! ')
+ print('|------------------------------')
+ print('|')
+ print('| Please connect google drive:')
+ from google.colab import drive
+ drive.mount('/content/drive')
+
+    # symlink (ln -s) the drive folder into the working directory
+ def ln_dir(path):
+ dir = path.split('/')[-1]
+ if not os.path.isdir(dir):
+ os.symlink(path, dir)
+
+ check_and_create_dir('/content/drive/MyDrive/crypto_workspace')
+ # check_and_create_dir('/content/drive/MyDrive/crypto_workspace/strategies')
+ check_and_create_dir('/content/drive/MyDrive/crypto_workspace/history')
+ # check_and_create_dir('/content/drive/MyDrive/crypto_workspace/filters')
+ # ln_dir("/content/drive/MyDrive/crypto_workspace/strategies")
+ # ln_dir("/content/drive/MyDrive/crypto_workspace/filters")
+ ln_dir("/content/drive/MyDrive/crypto_workspace/history")
+
+def setup():
+ IN_COLAB = 'google.colab' in sys.modules
+ if IN_COLAB:
+ setup_colab()
+ else:
+ # check_and_create_dir('strategies')
+ # check_and_create_dir('filters')
+ check_and_create_dir('history')
diff --git a/.history/finlab_crypto/__init___20220919203027.py b/.history/finlab_crypto/__init___20220919203027.py
new file mode 100644
index 0000000..caf6236
--- /dev/null
+++ b/.history/finlab_crypto/__init___20220919203027.py
@@ -0,0 +1,58 @@
+import warnings
+warnings.simplefilter(action='ignore', category=FutureWarning)
+from . import crawler
+from .strategy import Strategy
+from .strategy import Filter
+
+import vectorbt as vbt
+import sys
+import os
+
+__version__ = '0.2.22'
+
+
+# set default fees and slippage
+vbt.settings.portfolio['init_cash'] = 1000000.0 # in $
+vbt.settings.portfolio['fees'] = 0.15 # in %
+vbt.settings.portfolio['slippage'] = 0.0 # in %
+
+# create the workspace directory if it does not exist
+def check_and_create_dir(dname):
+ has_dir = os.path.isdir(dname)
+ if not has_dir:
+ os.mkdir(dname)
+
+def setup_colab():
+ google_drive_connected = os.path.isdir('/content/drive/MyDrive')
+
+ if not google_drive_connected:
+ print('|------------------------------')
+ print('| Google Drive not connected! ')
+ print('|------------------------------')
+ print('|')
+ print('| Please connect google drive:')
+ from google.colab import drive
+ drive.mount('/content/drive')
+
+    # symlink (ln -s) the drive folder into the working directory
+ def ln_dir(path):
+ dir = path.split('/')[-1]
+ if not os.path.isdir(dir):
+ os.symlink(path, dir)
+
+ check_and_create_dir('/content/drive/MyDrive/crypto_workspace')
+ # check_and_create_dir('/content/drive/MyDrive/crypto_workspace/strategies')
+ check_and_create_dir('/content/drive/MyDrive/crypto_workspace/history')
+ # check_and_create_dir('/content/drive/MyDrive/crypto_workspace/filters')
+ # ln_dir("/content/drive/MyDrive/crypto_workspace/strategies")
+ # ln_dir("/content/drive/MyDrive/crypto_workspace/filters")
+ ln_dir("/content/drive/MyDrive/crypto_workspace/history")
+
+def setup():
+ IN_COLAB = 'google.colab' in sys.modules
+ if IN_COLAB:
+ setup_colab()
+ else:
+ # check_and_create_dir('strategies')
+ # check_and_create_dir('filters')
+ check_and_create_dir('history')
diff --git a/.history/finlab_crypto/chart_20220819145848.py b/.history/finlab_crypto/chart_20220819145848.py
new file mode 100644
index 0000000..da8fc37
--- /dev/null
+++ b/.history/finlab_crypto/chart_20220819145848.py
@@ -0,0 +1,299 @@
+from pyecharts.globals import CurrentConfig, NotebookType
+from pyecharts.charts import Kline, Line, Grid, Bar
+CurrentConfig.NOTEBOOK_TYPE = NotebookType.JUPYTER_LAB
+
+import pyecharts.options as opts
+import numpy as np
+import pandas as pd
+from pyecharts.charts import Candlestick
+
+def chart(dfstock, overlaps=dict(), figures=dict(), markers=dict(), markerlines=[], start_date=None, end_date=None, k_colors='world'):
+    """Backtesting analysis and optimizer dashboard chart.
+
+    Uses the pyecharts module to generate a variety of interactive charts.
+
+    Args:
+        dfstock: A dataframe of trading target data.
+        overlaps: A dict of overlay indicator lines to draw on the main figure.
+        figures: A dict of sub-figure data to draw below the main chart.
+        markers: An iterable of (label, timestamp) pairs indicating which dfstock rows to mark.
+        markerlines: A list of (name, x, y) tuples used to draw lines connecting entry and exit points.
+        start_date: A datetime value of the start of dfstock.
+        end_date: A datetime value of the end of dfstock.
+        k_colors: A string ('world' or 'taiwan') selecting the kline color scheme, or a dict like {'increasing_line': '#111111', 'decreasing_line': '#000000'}.
+
+    Returns:
+        grid_chart: The assembled pyecharts Grid chart.
+        chart_size: A dict of the chart's height and width values.
+
+ """
+ title = 60
+ title_margin_top = 30
+ main_chart_height = 300
+ margin_left = 50
+ vol_chart_height = 50
+ sub_figure_height = 60
+ width = 800
+
+ dfstock = dfstock.loc[start_date:end_date]
+
+ mark_data = []
+ for mark in markers:
+
+ if mark[1] not in dfstock.index:
+ continue
+
+ x = np.where(dfstock.index == mark[1])[0][0]
+ y = dfstock.high.loc[mark[1]]
+ color = '#1d6ff2'
+ o = opts.MarkPointItem(coord=[float(x), y], value=mark[0], itemstyle_opts=opts.ItemStyleOpts(color=color))
+ mark_data.append(o)
+
+ modified_marklines = []
+ for markline in markerlines:
+ name, x, y = markline
+ if x[0] not in dfstock.index or x[1] not in dfstock.index:
+ continue
+ xx0 = np.where(dfstock.index == x[0])[0][0]
+ xx1 = np.where(dfstock.index == x[1])[0][0]
+ x = [float(xx0), float(xx1)]
+ modified_marklines.append([
+ {
+ 'name': name,
+ 'coord': [x[0], y[0]],
+ 'itemStyle': {'color': '#216dc4'}
+ },
+ {
+ 'coord': [x[1], y[1]]
+ }
+ ])
+
+ #for m in modified_marklines:
+ # print(m.opts)
+ # print('------')
+
+ # mark_data += [
+ # opts.MarkPointItem(type_="max", name="最大值", symbol='rect', symbol_size=[50, 20],
+ # itemstyle_opts=opts.ItemStyleOpts(color='rgba(0,0,0,0.3)')
+ # ),
+ # opts.MarkPointItem(type_="min", name="最小值", symbol='rect', symbol_size=[50, 20],
+ # itemstyle_opts=opts.ItemStyleOpts(color='rgba(0,0,0,0.3)')
+ # )
+ # ]
+ if isinstance(k_colors, str):
+ k_colors_set = {'taiwan': {'increasing_line': '#ff6183', 'decreasing_line': '#58d6ac'},
+ 'world': {'increasing_line': '#58d6ac', 'decreasing_line': '#ff6183'}
+ }[k_colors]
+ if isinstance(k_colors, dict):
+ k_colors_set = {'increasing_line': k_colors.get('increasing_line', '#58d6ac'), 'decreasing_line': k_colors.get('decreasing_line', '#ff6183')}
+
+ kline = (
+ Kline()
+ .add_xaxis(xaxis_data=dfstock.index.astype(str).to_list())
+ .add_yaxis(
+ series_name="klines",
+ y_axis=dfstock[['open', 'close', 'low', 'high']].values.tolist(),
+ markpoint_opts=opts.MarkPointOpts(
+ data=mark_data
+ ),
+ markline_opts=opts.MarkLineOpts(
+ data=modified_marklines,
+ label_opts={'position':'insideMiddleTop', 'show': False}
+ ),
+ itemstyle_opts=opts.ItemStyleOpts(
+ color=k_colors_set['increasing_line'],
+ color0=k_colors_set['decreasing_line'],
+ border_color=k_colors_set['increasing_line'],
+ border_color0=k_colors_set['decreasing_line'],
+ ),
+ )
+ .set_series_opts()
+ )
+
+ #################
+ # overlap chart
+ #################
+
+ overlap_chart = (
+ Line()
+ .add_xaxis(xaxis_data=dfstock.index.astype(str).to_list())
+ )
+ for name, o in overlaps.items():
+ overlap_chart.add_yaxis(
+ series_name=name,
+ y_axis=o.loc[start_date:end_date].to_list(),
+ is_smooth=True,
+ is_hover_animation=False,
+ linestyle_opts=opts.LineStyleOpts(opacity=0.5),
+ label_opts=opts.LabelOpts(is_show=False),
+ )
+
+ # Bar-1
+ bar_1 = (
+ Bar()
+ .add_xaxis(xaxis_data=dfstock.index.astype(str).to_list())
+ .add_yaxis(
+ series_name="volume",
+ y_axis=dfstock.volume.loc[start_date:end_date].to_list(),
+ xaxis_index=1,
+ yaxis_index=1,
+ label_opts=opts.LabelOpts(is_show=False),
+            # after the improvement, this becomes the following once add_js_funcs is applied to the grid
+ itemstyle_opts=opts.ItemStyleOpts(
+ color='rgba(0,0,0,0.2)',
+ ),
+ )
+ .set_global_opts(
+ xaxis_opts=opts.AxisOpts(
+ type_="category",
+ grid_index=1,
+ axislabel_opts=opts.LabelOpts(is_show=False),
+ ),
+ yaxis_opts=opts.AxisOpts(
+ axislabel_opts=opts.LabelOpts(is_show=False),
+ ),
+ legend_opts=opts.LegendOpts(is_show=False),
+ )
+ )
+
+
+ #################
+ # indicators
+ #################
+
+ def is_item(item):
+ return isinstance(item, pd.Series) or isinstance(item, tuple)
+
+ def item_to_chart(name, item):
+
+ if isinstance(item, pd.Series):
+ item_type = 'line'
+ series = item.loc[start_date:end_date]
+ elif isinstance(item, tuple):
+ item_type = item[1]
+ series = item[0].loc[start_date:end_date]
+ else:
+            raise TypeError('Object type not accepted (only pd.Series or tuple)')
+
+ values = series.to_list()
+ index = series.index.astype(str).to_list()
+
+ chart = None
+ if item_type == 'line':
+ chart = Line()
+ chart.add_xaxis(xaxis_data=index)
+ chart.add_yaxis(series_name=name,
+ y_axis=values,
+ is_hover_animation=False,
+ #linestyle_opts=opts.LineStyleOpts(width=3, opacity=0.5),
+ label_opts=opts.LabelOpts(is_show=False),
+ )
+ elif item_type == 'bar':
+ chart = Bar()
+ chart.add_xaxis(xaxis_data=index)
+ chart.add_yaxis(
+ series_name=name,
+ y_axis=values,
+ #xaxis_index=1,
+ #yaxis_index=1,
+ label_opts=opts.LabelOpts(is_show=False),
+ )
+
+ return chart
+
+ example_charts = []
+ for name, graph in figures.items():
+ if is_item(graph):
+ example_charts.append(item_to_chart(name, graph))
+ elif isinstance(graph, dict) or isinstance(graph, pd.DataFrame):
+ ys = [item_to_chart(name, subgraph) for name, subgraph in graph.items()]
+ for y in ys[1:]:
+ ys[0].overlap(y)
+ example_charts.append(ys[0])
+ else:
+ raise Exception('cannot support subfigure type')
+
+ if len(dfstock) <= 500:
+ range_start = 0
+ else:
+ range_start = 95#100 - int(10000/len(dfstock))
+
+ kline.set_global_opts(
+ legend_opts=opts.LegendOpts(pos_top='0px', pos_left=str(margin_left)),
+ xaxis_opts=opts.AxisOpts(is_scale=True),
+ yaxis_opts=opts.AxisOpts(
+ is_scale=True,
+ splitarea_opts=opts.SplitAreaOpts(
+ is_show=True, areastyle_opts=opts.AreaStyleOpts(opacity=0.3)
+ ),
+ #grid_index=1,
+ #split_number=3,
+ axisline_opts=opts.AxisLineOpts(is_on_zero=False),
+ #axistick_opts=opts.AxisTickOpts(is_show=False),
+ splitline_opts=opts.SplitLineOpts(is_show=False),
+ axislabel_opts=opts.LabelOpts(is_show=True),
+ ),
+ datazoom_opts=[
+ opts.DataZoomOpts(
+ is_show=False,
+ type_="inside",
+ xaxis_index=list(range(len(example_charts)+2)),
+ range_start=range_start,
+ range_end=100,
+ ),
+ opts.DataZoomOpts(
+ is_show=True,
+ xaxis_index=list(range(len(example_charts)+2)),
+ type_="slider",
+ pos_top="85%",
+ range_start=range_start,
+ range_end=100,
+ ),
+ ],
+ #title_opts=opts.TitleOpts(title="Kline-DataZoom-inside"),
+ )
+
+ # Kline And Line
+ overlap_kline_line = kline.overlap(overlap_chart)
+
+ total_height = title + main_chart_height + len(example_charts) * (sub_figure_height + title) + 200
+
+ # Grid Overlap + Bar
+ grid_chart = Grid(
+ init_opts=opts.InitOpts(
+ width=str(width) + 'px',
+ height=str(total_height) + 'px',
+ animation_opts=opts.AnimationOpts(animation=False),
+ )
+ )
+ grid_chart.add(
+ overlap_kline_line,
+ grid_opts=opts.GridOpts(pos_top=str(title) + 'px',
+ height=str(main_chart_height) + 'px',
+ pos_left=str(margin_left)+'px', pos_right='0'),
+ )
+
+ grid_chart.add(
+ bar_1,
+ grid_opts=opts.GridOpts(pos_top=str(title+main_chart_height-vol_chart_height) + 'px',
+ height=str(vol_chart_height) + 'px',
+ pos_left=str(margin_left)+'px', pos_right='0'),
+ )
+
+ for i, chart in enumerate(example_charts):
+ title_pos_top = title + main_chart_height + i * (sub_figure_height + title)
+ chart.set_global_opts(
+ #title_opts=opts.TitleOpts(name, pos_top=str(title_pos_top+title_margin_top) + 'px'),
+ legend_opts=opts.LegendOpts(pos_left=str(margin_left), pos_top=str(title_pos_top+title_margin_top) + 'px'),
+ )
+ chart_pos_top = title_pos_top + title
+ grid_chart.add(
+ chart,
+ grid_opts=opts.GridOpts(pos_top=str(chart_pos_top) + 'px',
+ height=str(sub_figure_height) + 'px',
+ pos_left=str(margin_left)+'px', pos_right='0'
+ ),
+ )
+ chart_size = {'height': total_height, 'width': width}
+ return grid_chart, chart_size
diff --git a/.history/finlab_crypto/chart_20220919204222.py b/.history/finlab_crypto/chart_20220919204222.py
new file mode 100644
index 0000000..b1c82fc
--- /dev/null
+++ b/.history/finlab_crypto/chart_20220919204222.py
@@ -0,0 +1,298 @@
+from pyecharts.globals import CurrentConfig, NotebookType
+from pyecharts.charts import Kline, Line, Grid, Bar
+CurrentConfig.NOTEBOOK_TYPE = NotebookType.JUPYTER_LAB
+
+import pyecharts.options as opts
+import numpy as np
+import pandas as pd
+from pyecharts.charts import Candlestick
+
+def chart(dfstock, overlaps=dict(), figures=dict(), markers=dict(), markerlines=[], start_date=None, end_date=None, k_colors='world'):
+    """Backtesting analysis and optimizer dashboard chart.
+
+    Uses the pyecharts module to generate a variety of interactive charts.
+
+    Args:
+        dfstock: A dataframe of trading target data.
+        overlaps: A dict of overlay indicator lines to draw on the main figure.
+        figures: A dict of sub-figure data to draw below the main chart.
+        markers: An iterable of (label, timestamp) pairs indicating which dfstock rows to mark.
+        markerlines: A list of (name, x, y) tuples used to draw lines connecting entry and exit points.
+        start_date: A datetime value of the start of dfstock.
+        end_date: A datetime value of the end of dfstock.
+        k_colors: A string ('world' or 'taiwan') selecting the kline color scheme, or a dict like {'increasing_line': '#111111', 'decreasing_line': '#000000'}.
+
+    Returns:
+        grid_chart: The assembled pyecharts Grid chart.
+        chart_size: A dict of the chart's height and width values.
+
+ """
+ title = 60
+ title_margin_top = 30
+ main_chart_height = 300
+ margin_left = 50
+ vol_chart_height = 50
+ sub_figure_height = 60
+ width = 800
+
+ dfstock = dfstock.loc[start_date:end_date]
+
+ mark_data = []
+ for mark in markers:
+
+ if mark[1] not in dfstock.index:
+ continue
+
+ x = np.where(dfstock.index == mark[1])[0][0]
+ y = dfstock.high.loc[mark[1]]
+ color = '#1d6ff2'
+ o = opts.MarkPointItem(coord=[float(x), y], value=mark[0], itemstyle_opts=opts.ItemStyleOpts(color=color))
+ mark_data.append(o)
+
+ modified_marklines = []
+ for markline in markerlines:
+ name, x, y = markline
+ if x[0] not in dfstock.index or x[1] not in dfstock.index:
+ continue
+ xx0 = np.where(dfstock.index == x[0])[0][0]
+ xx1 = np.where(dfstock.index == x[1])[0][0]
+ x = [float(xx0), float(xx1)]
+ modified_marklines.append([
+ {
+ 'name': name,
+ 'coord': [x[0], y[0]],
+ 'itemStyle': {'color': '#216dc4'}
+ },
+ {
+ 'coord': [x[1], y[1]]
+ }
+ ])
+
+ #for m in modified_marklines:
+ # print(m.opts)
+ # print('------')
+
+ # mark_data += [
+ # opts.MarkPointItem(type_="max", name="最大值", symbol='rect', symbol_size=[50, 20],
+ # itemstyle_opts=opts.ItemStyleOpts(color='rgba(0,0,0,0.3)')
+ # ),
+ # opts.MarkPointItem(type_="min", name="最小值", symbol='rect', symbol_size=[50, 20],
+ # itemstyle_opts=opts.ItemStyleOpts(color='rgba(0,0,0,0.3)')
+ # )
+ # ]
+ if isinstance(k_colors, str):
+ k_colors_set = {'taiwan': {'increasing_line': '#ff6183', 'decreasing_line': '#58d6ac'},
+ 'world': {'increasing_line': '#58d6ac', 'decreasing_line': '#ff6183'}
+ }[k_colors]
+ if isinstance(k_colors, dict):
+ k_colors_set = {'increasing_line': k_colors.get('increasing_line', '#58d6ac'), 'decreasing_line': k_colors.get('decreasing_line', '#ff6183')}
+
+ kline = (
+ Kline()
+ .add_xaxis(xaxis_data=dfstock.index.astype(str).to_list())
+ .add_yaxis(
+ series_name="klines",
+ y_axis=dfstock[['open', 'close', 'low', 'high']].values.tolist(),
+ markpoint_opts=opts.MarkPointOpts(
+ data=mark_data
+ ),
+ markline_opts=opts.MarkLineOpts(
+ data=modified_marklines,
+ label_opts={'position':'insideMiddleTop', 'show': False}
+ ),
+ itemstyle_opts=opts.ItemStyleOpts(
+ color=k_colors_set['increasing_line'],
+ color0=k_colors_set['decreasing_line'],
+ border_color=k_colors_set['increasing_line'],
+ border_color0=k_colors_set['decreasing_line'],
+ ),
+ )
+ .set_series_opts()
+ )
+
+ #################
+ # overlap chart
+ #################
+
+ overlap_chart = (
+ Line().add_xaxis(xaxis_data=dfstock.index.astype(str).to_list())
+ )
+ for name, o in overlaps.items():
+ overlap_chart.add_yaxis(
+ series_name=name,
+ y_axis=o.loc[start_date:end_date].to_list(),
+ is_smooth=True,
+ is_hover_animation=False,
+ linestyle_opts=opts.LineStyleOpts(opacity=0.5),
+ label_opts=opts.LabelOpts(is_show=False),
+ )
+
+ # Bar-1
+ bar_1 = (
+ Bar()
+ .add_xaxis(xaxis_data=dfstock.index.astype(str).to_list())
+ .add_yaxis(
+ series_name="volume",
+ y_axis=dfstock.volume.loc[start_date:end_date].to_list(),
+ xaxis_index=1,
+ yaxis_index=1,
+ label_opts=opts.LabelOpts(is_show=False),
+            # after the improvement, this becomes the following once add_js_funcs is applied to the grid
+ itemstyle_opts=opts.ItemStyleOpts(
+ color='rgba(0,0,0,0.2)',
+ ),
+ )
+ .set_global_opts(
+ xaxis_opts=opts.AxisOpts(
+ type_="category",
+ grid_index=1,
+ axislabel_opts=opts.LabelOpts(is_show=False),
+ ),
+ yaxis_opts=opts.AxisOpts(
+ axislabel_opts=opts.LabelOpts(is_show=False),
+ ),
+ legend_opts=opts.LegendOpts(is_show=False),
+ )
+ )
+
+
+ #################
+ # indicators
+ #################
+
+ def is_item(item):
+ return isinstance(item, pd.Series) or isinstance(item, tuple)
+
+ def item_to_chart(name, item):
+
+ if isinstance(item, pd.Series):
+ item_type = 'line'
+ series = item.loc[start_date:end_date]
+ elif isinstance(item, tuple):
+ item_type = item[1]
+ series = item[0].loc[start_date:end_date]
+ else:
+            raise TypeError('Object type not accepted (only pd.Series or tuple)')
+
+ values = series.to_list()
+ index = series.index.astype(str).to_list()
+
+ chart = None
+ if item_type == 'line':
+ chart = Line()
+ chart.add_xaxis(xaxis_data=index)
+ chart.add_yaxis(series_name=name,
+ y_axis=values,
+ is_hover_animation=False,
+ #linestyle_opts=opts.LineStyleOpts(width=3, opacity=0.5),
+ label_opts=opts.LabelOpts(is_show=False),
+ )
+ elif item_type == 'bar':
+ chart = Bar()
+ chart.add_xaxis(xaxis_data=index)
+ chart.add_yaxis(
+ series_name=name,
+ y_axis=values,
+ #xaxis_index=1,
+ #yaxis_index=1,
+ label_opts=opts.LabelOpts(is_show=False),
+ )
+
+ return chart
+
+ example_charts = []
+ for name, graph in figures.items():
+ if is_item(graph):
+ example_charts.append(item_to_chart(name, graph))
+ elif isinstance(graph, dict) or isinstance(graph, pd.DataFrame):
+ ys = [item_to_chart(name, subgraph) for name, subgraph in graph.items()]
+ for y in ys[1:]:
+ ys[0].overlap(y)
+ example_charts.append(ys[0])
+ else:
+ raise Exception('cannot support subfigure type')
+
+ if len(dfstock) <= 500:
+ range_start = 0
+ else:
+ range_start = 95#100 - int(10000/len(dfstock))
+
+ kline.set_global_opts(
+ legend_opts=opts.LegendOpts(pos_top='0px', pos_left=str(margin_left)),
+ xaxis_opts=opts.AxisOpts(is_scale=True),
+ yaxis_opts=opts.AxisOpts(
+ is_scale=True,
+ splitarea_opts=opts.SplitAreaOpts(
+ is_show=True, areastyle_opts=opts.AreaStyleOpts(opacity=0.3)
+ ),
+ #grid_index=1,
+ #split_number=3,
+ axisline_opts=opts.AxisLineOpts(is_on_zero=False),
+ #axistick_opts=opts.AxisTickOpts(is_show=False),
+ splitline_opts=opts.SplitLineOpts(is_show=False),
+ axislabel_opts=opts.LabelOpts(is_show=True),
+ ),
+ datazoom_opts=[
+ opts.DataZoomOpts(
+ is_show=False,
+ type_="inside",
+ xaxis_index=list(range(len(example_charts)+2)),
+ range_start=range_start,
+ range_end=100,
+ ),
+ opts.DataZoomOpts(
+ is_show=True,
+ xaxis_index=list(range(len(example_charts)+2)),
+ type_="slider",
+ pos_top="85%",
+ range_start=range_start,
+ range_end=100,
+ ),
+ ],
+ #title_opts=opts.TitleOpts(title="Kline-DataZoom-inside"),
+ )
+
+ # Kline And Line
+ overlap_kline_line = kline.overlap(overlap_chart)
+
+ total_height = title + main_chart_height + len(example_charts) * (sub_figure_height + title) + 200
+
+ # Grid Overlap + Bar
+ grid_chart = Grid(
+ init_opts=opts.InitOpts(
+ width=str(width) + 'px',
+ height=str(total_height) + 'px',
+ animation_opts=opts.AnimationOpts(animation=False),
+ )
+ )
+ grid_chart.add(
+ overlap_kline_line,
+ grid_opts=opts.GridOpts(pos_top=str(title) + 'px',
+ height=str(main_chart_height) + 'px',
+ pos_left=str(margin_left)+'px', pos_right='0'),
+ )
+
+ grid_chart.add(
+ bar_1,
+ grid_opts=opts.GridOpts(pos_top=str(title+main_chart_height-vol_chart_height) + 'px',
+ height=str(vol_chart_height) + 'px',
+ pos_left=str(margin_left)+'px', pos_right='0'),
+ )
+
+ for i, chart in enumerate(example_charts):
+ title_pos_top = title + main_chart_height + i * (sub_figure_height + title)
+ chart.set_global_opts(
+ #title_opts=opts.TitleOpts(name, pos_top=str(title_pos_top+title_margin_top) + 'px'),
+ legend_opts=opts.LegendOpts(pos_left=str(margin_left), pos_top=str(title_pos_top+title_margin_top) + 'px'),
+ )
+ chart_pos_top = title_pos_top + title
+ grid_chart.add(
+ chart,
+ grid_opts=opts.GridOpts(pos_top=str(chart_pos_top) + 'px',
+ height=str(sub_figure_height) + 'px',
+ pos_left=str(margin_left)+'px', pos_right='0'
+ ),
+ )
+ chart_size = {'height': total_height, 'width': width}
+ return grid_chart, chart_size
diff --git a/.history/finlab_crypto/strategy_20220919202431.py b/.history/finlab_crypto/strategy_20220919202431.py
new file mode 100644
index 0000000..e556ea5
--- /dev/null
+++ b/.history/finlab_crypto/strategy_20220919202431.py
@@ -0,0 +1,401 @@
+"""Strategy function plug-in.
+
+You can use Filter and Strategy as decorators. They make it easy
+to construct strategies with filter layers and to apply common
+strategy analysis methods, such as backtesting, parameter tuning,
+and analysis charts.
+
+ Typical usage example:
+```
+ @Filter(timeperiod=20)
+ def your_filter(ohlcv):
+ your filter logic...
+ return filter > filter_value, figures
+ f60 = your_filter.create({'timeperiod': 60})
+```
+ -------------------------------
+```
+ @Strategy()
+ def your_strategy(ohlcv):
+ your strategy logic...
+ return entries, exits, figures
+ portfolio = your_strategy.backtest(ohlcv, freq='4h', plot=True)
+```
+"""
+
+from finlab_crypto.utility import (enumerate_variables, enumerate_signal,
+ stop_early, plot_combination, plot_strategy,
+ variable_visualization, remove_pd_object
+ )
+from finlab_crypto.overfitting import CSCV
+import copy
+import vectorbt as vbt
+import pandas as pd
+import matplotlib.pyplot as plt
+from collections.abc import Iterable
+
+
+class Filter(object):
+ """Filter package features plug-in.
+
+    Offers an easy way to create filters for use with the Strategy class.
+
+ Attributes:
+ default_parameters: customized filter attributes.
+ """
+
+ def __init__(self, **default_parameters):
+ """inits filter."""
+ self.func = None
+ self.filters = {}
+ self._default_parameters = default_parameters
+ self.set_parameters(default_parameters)
+
+ def __call__(self, func):
+ """decorator function
+
+ Args
+ func: A function of the customized filter.
+ """
+ self.func = func
+ return self
+
+ def set_parameters(self, variables):
+ """set your customized filter parameters.
+
+        Lets the Filter class set its attributes from a variables dict.
+
+ Args:
+ variables: a dict of your customized filter attributes.
+
+ """
+ if self._default_parameters:
+ for key, val in self._default_parameters.items():
+ setattr(self, key, val)
+
+ if variables:
+ for key, val in variables.items():
+ setattr(self, key, val)
+
+
+ def show_parameters(self):
+ parameters = {}
+ for key, val in self._default_parameters.items():
+ parameters[key] = getattr(self, key)
+ print(parameters)
+
+ def create(self, variables=None):
+ """generate filter signals, fig_data.
+
+ offer easy way to create filter signals, fig_data
+
+ Args:
+ variables: a dict of your customized filter attributes.
+ Returns:
+ signals: a dataframe of filter signals.
+ fig_data: a dict of required data for figure display.
+ """
+
+ def ret_f(ohlcv):
+
+ variable_enumerate = enumerate_variables(variables)
+ if len(variable_enumerate) == 0:
+ variable_enumerate.append(self._default_parameters)
+
+ signals = {}
+ fig_data = {}
+ for v in variable_enumerate:
+
+ self.set_parameters(v)
+ results = self.func(ohlcv)
+
+ v = remove_pd_object(v)
+
+ if isinstance(results, Iterable):
+ signals[str(v)], fig_data = results
+ else:
+ signals[str(v)] = results
+
+ signals = pd.DataFrame(signals)
+ signals.columns.name = 'filter'
+
+ param_names = list(eval(signals.columns[0]).keys())
+ arrays = ([signals.columns.map(lambda s: eval(s)[p]) for p in param_names])
+ tuples = list(zip(*arrays))
+ columns = pd.MultiIndex.from_tuples(tuples, names=param_names)
+ signals.columns = columns
+
+ return signals, fig_data
+
+ return ret_f
+
+class Strategy(object):
+ """strategy features plug-in.
+
+ offer common strategy detection methods, such as back-testing,
+ parameter tuning and analysis charts.
+
+ Attributes:
+ default_parameters: customized strategy attributes.
+
+ """
+
+ def __init__(self, **default_parameters):
+ """inits strategy."""
+ self.filters = {}
+ self._default_parameters = default_parameters
+ self.set_parameters(default_parameters)
+
+ def __call__(self, func):
+ """decorator function
+
+ Args
+ func: A function of customized strategy.
+ """
+ self.func = func
+ return self
+
+ def set_parameters(self, variables):
+ """set your customized strategy parameters.
+
+ let strategy class use variables dict to set method.
+
+ Args:
+ variables: a dict of your customized strategy attributes.
+
+ """
+
+ # remove stop vars
+ stop_vars = ['sl_stop', 'tp_stop', 'ts_stop']
+ for svar in stop_vars:
+ if hasattr(self, svar):
+ delattr(self, svar)
+
+        # set default variables
+ if self._default_parameters:
+ for key, val in self._default_parameters.items():
+ setattr(self, key, val)
+
+ # set custom variables
+ if variables:
+ for key, val in variables.items():
+ setattr(self, key, val)
+
+ def show_parameters(self):
+ parameters = {}
+ for key, val in self._default_parameters.items():
+ parameters[key] = getattr(self, key)
+ print(parameters)
+
+ @staticmethod
+ def _enumerate_filters(ohlcv, filters):
+ """enumerate filters data.
+
+ process filter dictionary data to prepare for adding filter signals.
+
+ Args:
+ ohlcv: a dataframe of your trading target.
+ filters: a dict of your customized filter attributes.
+
+ Returns:
+ a dict that generate tuple with filter signal dataframe and figures data.
+ for example:
+
+ {'mmi': (timeperiod 20
+ timestamp
+ 2020-11-25 02:00:00+00:00 true
+ 2020-11-25 03:00:00+00:00 true
+ 2020-11-25 04:00:00+00:00 true
+
+ [3 rows x 1 columns], {'figures': {'mmi_index': timestamp
+ 2020-11-25 02:00:00+00:00 0.7
+ 2020-11-25 03:00:00+00:00 0.7
+ 2020-11-25 04:00:00+00:00 0.7
+ name: close, length: 28597, dtype: float64}})}
+
+ """
+ ret = {}
+ for fname, f in filters.items():
+ # get filter signals and figures
+ filter_df, filter_figures = f(ohlcv)
+ ret[fname] = (filter_df, filter_figures)
+ return ret
+
+ @staticmethod
+ def _add_filters(entries, exits, fig_data, filters):
+ """add filters in strategy.
+
+ generate entries, exits, fig_data after add filters.
+
+ Args:
+ entries: A dataframe of entries point time series.
+ exits: A dataframe of exits point time series.
+ fig_data: A dict of your customized figure Attributes.
+ filters: A dict of _enumerate_filters function return.
+
+ Returns:
+ entries: A dataframe of entries point time series after add filter function.
+ exits: A dataframe of exits point time series after add filter function.
+ fig_data: A dict of tuple with filter signal dataframe and figures data.
+
+ """
+ for fname, (filter_df, filter_figures) in filters.items():
+ filter_df.columns = filter_df.columns.set_names([fname + '_' + n for n in filter_df.columns.names])
+ entries = filter_df.vbt.tile(entries.shape[1]).vbt & entries.vbt.repeat(filter_df.shape[1]).vbt
+ exits = exits.vbt.repeat(filter_df.shape[1])
+ exits.columns = entries.columns
+
+ # merge figures
+ if filter_figures is not None:
+ if 'figures' in filter_figures:
+ if 'figures' not in fig_data:
+ fig_data['figures'] = {}
+ for name, fig in filter_figures['figures'].items():
+ fig_data['figures'][fname + '_' + name] = fig
+ if 'overlaps' in filter_figures:
+ if 'overlaps' not in fig_data:
+ fig_data['overlaps'] = {}
+ for name, fig in filter_figures['overlaps'].items():
+ fig_data['overlaps'][fname + '_' + name] = fig
+
+ return entries, exits, fig_data
+
+ @staticmethod
+ def _add_stops(ohlcv, entries, exits, variables):
+ """Add early trading stop condition in strategy.
+
+ Args:
+ ohlcv: A dataframe of your trading target.
+ entries: A dataframe of entry point time series.
+ exits: A dataframe of exits point time series.
+ variables: A dict of your customized strategy Attributes.
+
+ Returns:
+ entries: A dataframe of entries point time series after add stop_early function.
+ exits: A dataframe of exits point time series after add stop_early function.
+
+ """
+ entries, exits = stop_early(ohlcv, entries, exits, variables)
+ entries = entries.squeeze()
+ exits = exits.squeeze()
+ return entries, exits
+
+ def backtest(self, ohlcv, variables=None,
+ filters=None, lookback=None, plot=False,
+ signals=False, side='long', cscv_nbins=10,
+ cscv_objective=lambda r: r.mean(), html=None, compounded=True, execution_price='close',
+ k_colors='world', **args):
+
+ """Backtest analysis tool set.
+        Uses vectorbt as the backend for vectorized numerical operations,
+        and seaborn and pyecharts as the backends for analysis charts.
+
+ Args:
+ ohlcv: A dataframe of your trading target.
+ variables: A dict of your customized strategy Attributes.
+ Default is None.
+ filters: A dict of your customized filter Attributes.
+ Default is None.
+            lookback: An int; number of most recent ohlcv rows to backtest on.
+                Default is None (use all rows).
+            plot: A bool that controls plot display.
+                Default is False.
+            signals: A bool; if True, return entries, exits, fig_data instead of a portfolio.
+                Default is False.
+            side: A str of the transaction direction, 'long' or 'short'.
+                Default is 'long'.
+            cscv_nbins: An int of the CSCV algorithm bin size, which controls the overfitting calculation.
+                Default is 10.
+            cscv_objective: A function applied to in-sample (IS) and out-of-sample (OOS) returns as the performance objective.
+                Default is lambda r: r.mean().
+            html: A str path of an html file to render the plot into.
+                Default is None.
+            compounded: A bool; use compounded returns as the backtest result.
+                Default is True.
+            execution_price: The price used for trade execution ('open' or 'close').
+                Default is 'close'.
+            k_colors: A str ('world' or 'taiwan') selecting the k-line color scheme for up/down candles, or a dict
+                such as {'increasing_line': '#111111', 'decreasing_line': '#000000'}. Default is 'world'.
+ **args:
+ Other parameters.
+
+ Returns:
+            A vectorbt.Portfolio object created by Portfolio.from_signals,
+            plus plot output when plotting is enabled.
+
+        Raises:
+            "side should be 'long' or 'short'": if side is neither 'long' nor 'short'.
+
+ """
+ variables = variables or dict()
+ filters = filters or dict()
+
+ variables_without_stop = copy.copy(variables)
+
+ # sl_trail: patch for vbt updates
+ exit_vars = ['sl_stop', 'ts_stop', 'tp_stop', 'sl_trail']
+ stop_vars = {}
+ for e in exit_vars:
+ if e in variables_without_stop:
+ stop_vars[e] = variables[e]
+ variables_without_stop.pop(e)
+
+ ohlcv_lookback = ohlcv.iloc[-lookback:] if lookback else ohlcv
+
+ variable_enumerate = enumerate_variables(variables_without_stop)
+
+ if not variable_enumerate:
+ variable_enumerate = [self._default_parameters]
+
+ entries, exits, fig_data = enumerate_signal(ohlcv_lookback, self, variable_enumerate)
+
+ if filters:
+ filter_signals = self._enumerate_filters(ohlcv_lookback, filters)
+ entries, exits, fig_data = self._add_filters(entries, exits, fig_data, filter_signals)
+
+ entries, exits = self._add_stops(ohlcv_lookback, entries, exits, stop_vars)
+
+ if signals:
+ return entries, exits, fig_data
+
+ if side == 'long':
+
+ if not compounded:
+ args['size'] = vbt.settings.portfolio['init_cash'] / ohlcv_lookback.close[0]
+
+ assert execution_price == 'close' or execution_price == 'open'
+ price = ohlcv_lookback[execution_price] if execution_price == 'close' else ohlcv_lookback[execution_price].shift(-1).bfill()
+
+ portfolio = vbt.Portfolio.from_signals(
+                price, entries.fillna(False), exits.fillna(False), **args)
+
+ elif side == 'short':
+ if not compounded:
+ args['size'] = vbt.settings.portfolio['init_cash'] / ohlcv_lookback.close[0]
+
+ assert execution_price == 'close' or execution_price == 'open'
+ price = ohlcv_lookback[execution_price] if execution_price == 'close' else ohlcv_lookback[execution_price].shift(-1).bfill()
+
+ portfolio = vbt.Portfolio.from_signals(
+                price, short_entries=entries.fillna(False), short_exits=exits.fillna(False), **args)
+
+ else:
+ raise Exception("side should be 'long' or 'short'")
+
+ if (plot or html is not None) and isinstance(entries, pd.Series):
+ plot_strategy(ohlcv_lookback, entries, exits, portfolio, fig_data, html=html, k_colors=k_colors)
+
+ elif plot and isinstance(entries, pd.DataFrame):
+
+ # perform CSCV algorithm
+ cscv = CSCV(n_bins=cscv_nbins, objective=cscv_objective)
+ cscv.add_daily_returns(portfolio.daily_returns())
+ cscv_result = cscv.estimate_overfitting(plot=False)
+
+ # plot results
+ plot_combination(portfolio, cscv_result)
+ plt.show()
+ variable_visualization(portfolio)
+
+ return portfolio
diff --git a/.history/finlab_crypto/strategy_20220919202800.py b/.history/finlab_crypto/strategy_20220919202800.py
new file mode 100644
index 0000000..5ed479f
--- /dev/null
+++ b/.history/finlab_crypto/strategy_20220919202800.py
@@ -0,0 +1,400 @@
+"""Strategy function plug-in.
+
+Use the Filter and Strategy classes as decorators to compose
+strategies from reusable filter layers and to access common
+strategy tooling, such as backtesting, parameter tuning,
+and analysis charts.
+
+ Typical usage example:
+```
+ @Filter(timeperiod=20)
+ def your_filter(ohlcv):
+ your filter logic...
+ return filter > filter_value, figures
+ f60 = your_filter.create({'timeperiod': 60})
+```
+ -------------------------------
+```
+ @Strategy()
+ def your_strategy(ohlcv):
+ your strategy logic...
+ return entries, exits, figures
+ portfolio = your_strategy.backtest(ohlcv, freq='4h', plot=True)
+```
+"""
+
+from finlab_crypto.utility import (enumerate_variables, enumerate_signal,
+ stop_early, plot_combination, plot_strategy,
+ variable_visualization, remove_pd_object
+ )
+from finlab_crypto.overfitting import CSCV
+import copy
+import vectorbt as vbt
+import pandas as pd
+import matplotlib.pyplot as plt
+from collections.abc import Iterable
+
+
+class Filter(object):
+ """Filter package features plug-in.
+
+ Offer easy way to create filter to use in class Strategy.
+
+ Attributes:
+ default_parameters: customized filter attributes.
+ """
+
+ def __init__(self, **default_parameters):
+ """inits filter."""
+ self.func = None
+ self.filters = {}
+ self._default_parameters = default_parameters
+ self.set_parameters(default_parameters)
+
+ def __call__(self, func):
+ """decorator function
+
+ Args
+ func: A function of the customized filter.
+ """
+ self.func = func
+ return self
+
+ def set_parameters(self, variables):
+ """set your customized filter parameters.
+
+ let filter class use variables dict to set method
+
+ Args:
+ variables: a dict of your customized filter attributes.
+
+ """
+ if self._default_parameters:
+ for key, val in self._default_parameters.items():
+ setattr(self, key, val)
+
+ if variables:
+ for key, val in variables.items():
+ setattr(self, key, val)
+
+
+ def show_parameters(self):
+ parameters = {}
+ for key, val in self._default_parameters.items():
+ parameters[key] = getattr(self, key)
+ print(parameters)
+
+ def create(self, variables=None):
+ """generate filter signals, fig_data.
+
+ offer easy way to create filter signals, fig_data
+
+ Args:
+ variables: a dict of your customized filter attributes.
+ Returns:
+ signals: a dataframe of filter signals.
+ fig_data: a dict of required data for figure display.
+ """
+
+ def ret_f(ohlcv):
+
+ variable_enumerate = enumerate_variables(variables)
+ if len(variable_enumerate) == 0:
+ variable_enumerate.append(self._default_parameters)
+
+ signals = {}
+ fig_data = {}
+ for v in variable_enumerate:
+
+ self.set_parameters(v)
+ results = self.func(ohlcv)
+
+ v = remove_pd_object(v)
+
+ if isinstance(results, Iterable):
+ signals[str(v)], fig_data = results
+ else:
+ signals[str(v)] = results
+
+ signals = pd.DataFrame(signals)
+ signals.columns.name = 'filter'
+
+ param_names = list(eval(signals.columns[0]).keys())
+ arrays = ([signals.columns.map(lambda s: eval(s)[p]) for p in param_names])
+ tuples = list(zip(*arrays))
+ columns = pd.MultiIndex.from_tuples(tuples, names=param_names)
+ signals.columns = columns
+
+ return signals, fig_data
+
+ return ret_f
+
+class Strategy(object):
+ """strategy features plug-in.
+
+ offer common strategy detection methods, such as back-testing,
+ parameter tuning and analysis charts.
+
+ Attributes:
+ default_parameters: customized strategy attributes.
+
+ """
+
+ def __init__(self, **default_parameters):
+ """inits strategy."""
+ self.filters = {}
+ self._default_parameters = default_parameters
+ self.set_parameters(default_parameters)
+
+ def __call__(self, func):
+ """decorator function
+
+ Args
+ func: A function of customized strategy.
+ """
+ self.func = func
+ return self
+
+ def set_parameters(self, variables):
+ """set your customized strategy parameters.
+
+ let strategy class use variables dict to set method.
+
+ Args:
+ variables: a dict of your customized strategy attributes.
+
+ """
+
+ # remove stop vars
+ stop_vars = ['sl_stop', 'tp_stop', 'ts_stop']
+ for svar in stop_vars:
+ if hasattr(self, svar):
+ delattr(self, svar)
+
+        # set default variables
+ if self._default_parameters:
+ for key, val in self._default_parameters.items():
+ setattr(self, key, val)
+
+ # set custom variables
+ if variables:
+ for key, val in variables.items():
+ setattr(self, key, val)
+
+ def show_parameters(self):
+ parameters = {}
+ for key, val in self._default_parameters.items():
+ parameters[key] = getattr(self, key)
+ print(parameters)
+
+ @staticmethod
+ def _enumerate_filters(ohlcv, filters):
+ """enumerate filters data.
+
+ process filter dictionary data to prepare for adding filter signals.
+
+ Args:
+ ohlcv: a dataframe of your trading target.
+ filters: a dict of your customized filter attributes.
+
+ Returns:
+ a dict that generate tuple with filter signal dataframe and figures data.
+ for example:
+
+ {'mmi': (timeperiod 20
+ timestamp
+ 2020-11-25 02:00:00+00:00 true
+ 2020-11-25 03:00:00+00:00 true
+ 2020-11-25 04:00:00+00:00 true
+
+ [3 rows x 1 columns], {'figures': {'mmi_index': timestamp
+ 2020-11-25 02:00:00+00:00 0.7
+ 2020-11-25 03:00:00+00:00 0.7
+ 2020-11-25 04:00:00+00:00 0.7
+ name: close, length: 28597, dtype: float64}})}
+
+ """
+ ret = {}
+ for fname, f in filters.items():
+ # get filter signals and figures
+ filter_df, filter_figures = f(ohlcv)
+ ret[fname] = (filter_df, filter_figures)
+ return ret
+
+ @staticmethod
+ def _add_filters(entries, exits, fig_data, filters):
+ """add filters in strategy.
+
+ generate entries, exits, fig_data after add filters.
+
+ Args:
+ entries: A dataframe of entries point time series.
+ exits: A dataframe of exits point time series.
+ fig_data: A dict of your customized figure Attributes.
+ filters: A dict of _enumerate_filters function return.
+
+ Returns:
+ entries: A dataframe of entries point time series after add filter function.
+ exits: A dataframe of exits point time series after add filter function.
+ fig_data: A dict of tuple with filter signal dataframe and figures data.
+
+ """
+ for fname, (filter_df, filter_figures) in filters.items():
+ filter_df.columns = filter_df.columns.set_names([fname + '_' + n for n in filter_df.columns.names])
+ entries = filter_df.vbt.tile(entries.shape[1]).vbt & entries.vbt.repeat(filter_df.shape[1]).vbt
+ exits = exits.vbt.repeat(filter_df.shape[1])
+ exits.columns = entries.columns
+
+ # merge figures
+ if filter_figures is not None:
+ if 'figures' in filter_figures:
+ if 'figures' not in fig_data:
+ fig_data['figures'] = {}
+ for name, fig in filter_figures['figures'].items():
+ fig_data['figures'][fname + '_' + name] = fig
+ if 'overlaps' in filter_figures:
+ if 'overlaps' not in fig_data:
+ fig_data['overlaps'] = {}
+ for name, fig in filter_figures['overlaps'].items():
+ fig_data['overlaps'][fname + '_' + name] = fig
+
+ return entries, exits, fig_data
+
+ @staticmethod
+ def _add_stops(ohlcv, entries, exits, variables):
+ """Add early trading stop condition in strategy.
+
+ Args:
+ ohlcv: A dataframe of your trading target.
+ entries: A dataframe of entry point time series.
+ exits: A dataframe of exits point time series.
+ variables: A dict of your customized strategy Attributes.
+
+ Returns:
+ entries: A dataframe of entries point time series after add stop_early function.
+ exits: A dataframe of exits point time series after add stop_early function.
+
+ """
+ entries, exits = stop_early(ohlcv, entries, exits, variables)
+ entries = entries.squeeze()
+ exits = exits.squeeze()
+ return entries, exits
+
+ def backtest(self, ohlcv, variables=None,
+ filters=None, lookback=None, plot=False,
+ signals=False, side='long', cscv_nbins=10,
+ cscv_objective=lambda r: r.mean(), html=None, compounded=True, execution_price='close',
+ k_colors='world', **args):
+
+ """Backtest analysis tool set.
+        Uses vectorbt as the backend for vectorized numerical operations,
+        and seaborn and pyecharts as the backends for analysis charts.
+
+ Args:
+ ohlcv: A dataframe of your trading target.
+ variables: A dict of your customized strategy Attributes.
+ Default is None.
+ filters: A dict of your customized filter Attributes.
+ Default is None.
+            lookback: An int; number of most recent ohlcv rows to backtest on.
+                Default is None (use all rows).
+            plot: A bool that controls plot display.
+                Default is False.
+            signals: A bool; if True, return entries, exits, fig_data instead of a portfolio.
+                Default is False.
+            side: A str of the transaction direction, 'long' or 'short'.
+                Default is 'long'.
+            cscv_nbins: An int of the CSCV algorithm bin size, which controls the overfitting calculation.
+                Default is 10.
+            cscv_objective: A function applied to in-sample (IS) and out-of-sample (OOS) returns as the performance objective.
+                Default is lambda r: r.mean().
+            html: A str path of an html file to render the plot into.
+                Default is None.
+            compounded: A bool; use compounded returns as the backtest result.
+                Default is True.
+            execution_price: The price used for trade execution ('open' or 'close').
+                Default is 'close'.
+            k_colors: A str ('world' or 'taiwan') selecting the k-line color scheme for up/down candles, or a dict
+                such as {'increasing_line': '#111111', 'decreasing_line': '#000000'}. Default is 'world'.
+ **args:
+ Other parameters.
+
+ Returns:
+            A vectorbt.Portfolio object created by Portfolio.from_signals,
+            plus plot output when plotting is enabled.
+
+ Raises:
+            "side should be 'long' or 'short'": if side is neither 'long' nor 'short'.
+ """
+
+ variables = variables or dict()
+ filters = filters or dict()
+
+ variables_without_stop = copy.copy(variables)
+
+ # sl_trail: patch for vbt updates
+ exit_vars = ['sl_stop', 'ts_stop', 'tp_stop', 'sl_trail']
+ stop_vars = {}
+ for e in exit_vars:
+ if e in variables_without_stop:
+ stop_vars[e] = variables[e]
+ variables_without_stop.pop(e)
+
+ ohlcv_lookback = ohlcv.iloc[-lookback:] if lookback else ohlcv
+
+ variable_enumerate = enumerate_variables(variables_without_stop)
+
+ if not variable_enumerate:
+ variable_enumerate = [self._default_parameters]
+
+ entries, exits, fig_data = enumerate_signal(ohlcv_lookback, self, variable_enumerate)
+
+ if filters:
+ filter_signals = self._enumerate_filters(ohlcv_lookback, filters)
+ entries, exits, fig_data = self._add_filters(entries, exits, fig_data, filter_signals)
+
+ entries, exits = self._add_stops(ohlcv_lookback, entries, exits, stop_vars)
+
+ if signals:
+ return entries, exits, fig_data
+
+ if side == 'long':
+
+ if not compounded:
+ args['size'] = vbt.settings.portfolio['init_cash'] / ohlcv_lookback.close[0]
+
+ assert execution_price == 'close' or execution_price == 'open'
+ price = ohlcv_lookback[execution_price] if execution_price == 'close' else ohlcv_lookback[execution_price].shift(-1).bfill()
+
+ portfolio = vbt.Portfolio.from_signals(
+ price, entries.fillna(False), exits.fillna(False), **args)
+
+ elif side == 'short':
+ if not compounded:
+ args['size'] = vbt.settings.portfolio['init_cash'] / ohlcv_lookback.close[0]
+
+ assert execution_price == 'close' or execution_price == 'open'
+ price = ohlcv_lookback[execution_price] if execution_price == 'close' else ohlcv_lookback[execution_price].shift(-1).bfill()
+
+ portfolio = vbt.Portfolio.from_signals(
+ price, short_entries=entries.fillna(False), short_exits=exits.fillna(False), **args)
+
+ else:
+ raise Exception("side should be 'long' or 'short'")
+
+ if (plot or html is not None) and isinstance(entries, pd.Series):
+ plot_strategy(ohlcv_lookback, entries, exits, portfolio, fig_data, html=html, k_colors=k_colors)
+
+ elif plot and isinstance(entries, pd.DataFrame):
+
+ # perform CSCV algorithm
+ cscv = CSCV(n_bins=cscv_nbins, objective=cscv_objective)
+ cscv.add_daily_returns(portfolio.daily_returns())
+ cscv_result = cscv.estimate_overfitting(plot=False)
+
+ # plot results
+ plot_combination(portfolio, cscv_result)
+ plt.show()
+ variable_visualization(portfolio)
+
+ return portfolio
diff --git a/.history/finlab_crypto/strategy_20220919205345.py b/.history/finlab_crypto/strategy_20220919205345.py
new file mode 100644
index 0000000..0a2c26a
--- /dev/null
+++ b/.history/finlab_crypto/strategy_20220919205345.py
@@ -0,0 +1,400 @@
+"""Strategy function plug-in.
+
+Use the Filter and Strategy classes as decorators to compose
+strategies from reusable filter layers and to access common
+strategy tooling, such as backtesting, parameter tuning,
+and analysis charts.
+
+ Typical usage example:
+```
+ @Filter(timeperiod=20)
+ def your_filter(ohlcv):
+ your filter logic...
+ return filter > filter_value, figures
+ f60 = your_filter.create({'timeperiod': 60})
+```
+ -------------------------------
+```
+ @Strategy()
+ def your_strategy(ohlcv):
+ your strategy logic...
+ return entries, exits, figures
+ portfolio = your_strategy.backtest(ohlcv, freq='4h', plot=True)
+```
+"""
+
+from finlab_crypto.utility import (enumerate_variables, enumerate_signal,
+ stop_early, plot_combination, plot_strategy,
+ variable_visualization, remove_pd_object
+ )
+from finlab_crypto.overfitting import CSCV
+import copy
+import vectorbt as vbt
+import pandas as pd
+import matplotlib.pyplot as plt
+from collections.abc import Iterable
+
+
+class Filter(object):
+ """Filter package features plug-in.
+
+ Offer easy way to create filter to use in class Strategy.
+
+ Attributes:
+ default_parameters: customized filter attributes.
+ """
+
+ def __init__(self, **default_parameters):
+ """inits filter."""
+ self.func = None
+ self.filters = {}
+ self._default_parameters = default_parameters
+ self.set_parameters(default_parameters)
+
+ def __call__(self, func):
+ """decorator function
+
+ Args
+ func: A function of the customized filter.
+ """
+ self.func = func
+ return self
+
+ def set_parameters(self, variables):
+ """set your customized filter parameters.
+
+ let filter class use variables dict to set method
+
+ Args:
+ variables: a dict of your customized filter attributes.
+
+ """
+ if self._default_parameters:
+ for key, val in self._default_parameters.items():
+ setattr(self, key, val)
+
+ if variables:
+ for key, val in variables.items():
+ setattr(self, key, val)
+
+
+ def show_parameters(self):
+ parameters = {}
+ for key, val in self._default_parameters.items():
+ parameters[key] = getattr(self, key)
+ print(parameters)
+
+ def create(self, variables=None):
+ """generate filter signals, fig_data.
+
+ offer easy way to create filter signals, fig_data
+
+ Args:
+ variables: a dict of your customized filter attributes.
+ Returns:
+ signals: a dataframe of filter signals.
+ fig_data: a dict of required data for figure display.
+ """
+
+ def ret_f(ohlcv):
+
+ variable_enumerate = enumerate_variables(variables)
+ if len(variable_enumerate) == 0:
+ variable_enumerate.append(self._default_parameters)
+
+ signals = {}
+ fig_data = {}
+ for v in variable_enumerate:
+
+ self.set_parameters(v)
+ results = self.func(ohlcv)
+
+ v = remove_pd_object(v)
+
+ if isinstance(results, Iterable):
+ signals[str(v)], fig_data = results
+ else:
+ signals[str(v)] = results
+
+ signals = pd.DataFrame(signals)
+ signals.columns.name = 'filter'
+
+ param_names = list(eval(signals.columns[0]).keys())
+ arrays = ([signals.columns.map(lambda s: eval(s)[p]) for p in param_names])
+ tuples = list(zip(*arrays))
+ columns = pd.MultiIndex.from_tuples(tuples, names=param_names)
+ signals.columns = columns
+
+ return signals, fig_data
+
+ return ret_f
+
+class Strategy(object):
+ """strategy features plug-in.
+
+ offer common strategy detection methods, such as back-testing,
+ parameter tuning and analysis charts.
+
+ Attributes:
+ default_parameters: customized strategy attributes.
+
+ """
+
+ def __init__(self, **default_parameters):
+ """inits strategy."""
+ self.filters = {}
+ self._default_parameters = default_parameters
+ self.set_parameters(default_parameters)
+
+ def __call__(self, func):
+ """decorator function
+
+ Args
+ func: A function of customized strategy.
+ """
+ self.func = func
+ return self
+
+ def set_parameters(self, variables):
+ """set your customized strategy parameters.
+
+ let strategy class use variables dict to set method.
+
+ Args:
+ variables: a dict of your customized strategy attributes.
+
+ """
+
+ # remove stop vars
+ stop_vars = ['sl_stop', 'tp_stop', 'ts_stop']
+ for svar in stop_vars:
+ if hasattr(self, svar):
+ delattr(self, svar)
+
+        # set default variables
+ if self._default_parameters:
+ for key, val in self._default_parameters.items():
+ setattr(self, key, val)
+
+ # set custom variables
+ if variables:
+ for key, val in variables.items():
+ setattr(self, key, val)
+
+ def show_parameters(self):
+ parameters = {}
+ for key, val in self._default_parameters.items():
+ parameters[key] = getattr(self, key)
+ print(parameters)
+
+ @staticmethod
+ def _enumerate_filters(ohlcv, filters):
+ """enumerate filters data.
+
+ process filter dictionary data to prepare for adding filter signals.
+
+ Args:
+ ohlcv: a dataframe of your trading target.
+ filters: a dict of your customized filter attributes.
+
+ Returns:
+ a dict that generate tuple with filter signal dataframe and figures data.
+ for example:
+
+ {'mmi': (timeperiod 20
+ timestamp
+ 2020-11-25 02:00:00+00:00 true
+ 2020-11-25 03:00:00+00:00 true
+ 2020-11-25 04:00:00+00:00 true
+
+ [3 rows x 1 columns], {'figures': {'mmi_index': timestamp
+ 2020-11-25 02:00:00+00:00 0.7
+ 2020-11-25 03:00:00+00:00 0.7
+ 2020-11-25 04:00:00+00:00 0.7
+ name: close, length: 28597, dtype: float64}})}
+
+ """
+ ret = {}
+ for fname, f in filters.items():
+ # get filter signals and figures
+ filter_df, filter_figures = f(ohlcv)
+ ret[fname] = (filter_df, filter_figures)
+ return ret
+
+ @staticmethod
+ def _add_filters(entries, exits, fig_data, filters):
+ """add filters in strategy.
+
+ generate entries, exits, fig_data after add filters.
+
+ Args:
+ entries: A dataframe of entries point time series.
+ exits: A dataframe of exits point time series.
+ fig_data: A dict of your customized figure Attributes.
+ filters: A dict of _enumerate_filters function return.
+
+ Returns:
+ entries: A dataframe of entries point time series after add filter function.
+ exits: A dataframe of exits point time series after add filter function.
+ fig_data: A dict of tuple with filter signal dataframe and figures data.
+
+ """
+ for fname, (filter_df, filter_figures) in filters.items():
+ filter_df.columns = filter_df.columns.set_names([fname + '_' + n for n in filter_df.columns.names])
+ entries = filter_df.vbt.tile(entries.shape[1]).vbt & entries.vbt.repeat(filter_df.shape[1]).vbt
+ exits = exits.vbt.repeat(filter_df.shape[1])
+ exits.columns = entries.columns
+
+ # merge figures
+ if filter_figures is not None:
+ if 'figures' in filter_figures:
+ if 'figures' not in fig_data:
+ fig_data['figures'] = {}
+ for name, fig in filter_figures['figures'].items():
+ fig_data['figures'][fname + '_' + name] = fig
+ if 'overlaps' in filter_figures:
+ if 'overlaps' not in fig_data:
+ fig_data['overlaps'] = {}
+ for name, fig in filter_figures['overlaps'].items():
+ fig_data['overlaps'][fname + '_' + name] = fig
+
+ return entries, exits, fig_data
+
+ @staticmethod
+ def _add_stops(ohlcv, entries, exits, variables):
+ """Add early trading stop condition in strategy.
+
+ Args:
+ ohlcv: A dataframe of your trading target.
+ entries: A dataframe of entry point time series.
+ exits: A dataframe of exits point time series.
+ variables: A dict of your customized strategy Attributes.
+
+ Returns:
+ entries: A dataframe of entries point time series after add stop_early function.
+ exits: A dataframe of exits point time series after add stop_early function.
+
+ """
+ entries, exits = stop_early(ohlcv, entries, exits, variables)
+ entries = entries.squeeze()
+ exits = exits.squeeze()
+ return entries, exits
+
+ def backtest(self, ohlcv, variables=None,
+ filters=None, lookback=None, plot=False,
+ signals=False, side='long', cscv_nbins=10,
+ cscv_objective=lambda r: r.mean(), html=None, compounded=True, execution_price='close',
+ k_colors='world', **args):
+
+ """Backtest analysis tool set.
+        Uses vectorbt as the backend for vectorized numerical operations,
+        and seaborn and pyecharts as the backends for analysis charts.
+
+ Args:
+ ohlcv: A dataframe of your trading target.
+ variables: A dict of your customized strategy Attributes.
+ Default is None.
+ filters: A dict of your customized filter Attributes.
+ Default is None.
+            lookback: An int; number of most recent ohlcv rows to backtest on.
+                Default is None (use all rows).
+            plot: A bool that controls plot display.
+                Default is False.
+            signals: A bool; if True, return entries, exits, fig_data instead of a portfolio.
+                Default is False.
+            side: A str of the transaction direction, 'long' or 'short'.
+                Default is 'long'.
+            cscv_nbins: An int of the CSCV algorithm bin size, which controls the overfitting calculation.
+                Default is 10.
+            cscv_objective: A function applied to in-sample (IS) and out-of-sample (OOS) returns as the performance objective.
+                Default is lambda r: r.mean().
+            html: A str path of an html file to render the plot into.
+                Default is None.
+            compounded: A bool; use compounded returns as the backtest result.
+                Default is True.
+            execution_price: The price used for trade execution ('open' or 'close').
+                Default is 'close'.
+            k_colors: A str ('world' or 'taiwan') selecting the k-line color scheme for up/down candles, or a dict
+                such as {'increasing_line': '#111111', 'decreasing_line': '#000000'}. Default is 'world'.
+ **args:
+ Other parameters.
+
+ Returns:
+            A vectorbt.Portfolio object created by Portfolio.from_signals,
+            plus plot output when plotting is enabled.
+
+ Raises:
+            "side should be 'long' or 'short'": if side is neither 'long' nor 'short'.
+ """
+
+ variables = variables or dict()
+ filters = filters or dict()
+
+ variables_without_stop = copy.copy(variables)
+
+ # sl_trail: patch for vbt updates
+ exit_vars = ['sl_stop', 'ts_stop', 'tp_stop', 'sl_trail']
+ stop_vars = {}
+ for e in exit_vars:
+ if e in variables_without_stop:
+ stop_vars[e] = variables[e]
+ variables_without_stop.pop(e)
+
+ ohlcv_lookback = ohlcv.iloc[-lookback:] if lookback else ohlcv
+
+ variable_enumerate = enumerate_variables(variables_without_stop)
+
+ if not variable_enumerate:
+ variable_enumerate = [self._default_parameters]
+
+ entries, exits, fig_data = enumerate_signal(ohlcv_lookback, self, variable_enumerate)
+
+ if filters:
+ filter_signals = self._enumerate_filters(ohlcv_lookback, filters)
+ entries, exits, fig_data = self._add_filters(entries, exits, fig_data, filter_signals)
+
+ entries, exits = self._add_stops(ohlcv_lookback, entries, exits, stop_vars)
+
+ if signals:
+ return entries, exits, fig_data
+
+ if side == 'long':
+
+ if not compounded:
+ args['size'] = vbt.settings.portfolio['init_cash'] / ohlcv_lookback.close[0]
+
+ assert execution_price == 'close' or execution_price == 'open'
+ price = ohlcv_lookback[execution_price] if execution_price == 'close' else ohlcv_lookback[execution_price].shift(-1).bfill()
+
+ portfolio = vbt.Portfolio.from_signals(
+ price, entries.fillna(False), exits.fillna(False), **args)
+            # TESTING SHORTING ADDITION CAPABILITIES
+ elif side == 'short':
+ if not compounded:
+ args['size'] = vbt.settings.portfolio['init_cash'] / ohlcv_lookback.close[0]
+
+ assert execution_price == 'close' or execution_price == 'open'
+ price = ohlcv_lookback[execution_price] if execution_price == 'close' else ohlcv_lookback[execution_price].shift(-1).bfill()
+
+ portfolio = vbt.Portfolio.from_signals(
+ price, short_entries=entries.fillna(False), short_exits=exits.fillna(False), **args)
+
+ else:
+ raise Exception("side should be 'long' or 'short'")
+
+ if (plot or html is not None) and isinstance(entries, pd.Series):
+ plot_strategy(ohlcv_lookback, entries, exits, portfolio, fig_data, html=html, k_colors=k_colors)
+
+ elif plot and isinstance(entries, pd.DataFrame):
+
+ # perform CSCV algorithm
+ cscv = CSCV(n_bins=cscv_nbins, objective=cscv_objective)
+ cscv.add_daily_returns(portfolio.daily_returns())
+ cscv_result = cscv.estimate_overfitting(plot=False)
+
+ # plot results
+ plot_combination(portfolio, cscv_result)
+ plt.show()
+ variable_visualization(portfolio)
+
+ return portfolio
diff --git a/.history/finlab_crypto/strategy_20220919211032.py b/.history/finlab_crypto/strategy_20220919211032.py
new file mode 100644
index 0000000..86c1eb9
--- /dev/null
+++ b/.history/finlab_crypto/strategy_20220919211032.py
@@ -0,0 +1,404 @@
+"""Strategy function plug-in.
+
+Use the Filter and Strategy classes as decorators to compose
+strategies from reusable filter layers and to access common
+strategy tooling, such as backtesting, parameter tuning,
+and analysis charts.
+
+ Typical usage example:
+```
+ @Filter(timeperiod=20)
+ def your_filter(ohlcv):
+ your filter logic...
+ return filter > filter_value, figures
+ f60 = your_filter.create({'timeperiod': 60})
+```
+ -------------------------------
+```
+ @Strategy()
+ def your_strategy(ohlcv):
+ your strategy logic...
+ return entries, exits, figures
+ portfolio = your_strategy.backtest(ohlcv, freq='4h', plot=True)
+```
+"""
+
+from finlab_crypto.utility import (enumerate_variables, enumerate_signal,
+ stop_early, plot_combination, plot_strategy,
+ variable_visualization, remove_pd_object
+ )
+from finlab_crypto.overfitting import CSCV
+import copy
+import vectorbt as vbt
+from vectorbt.portfolio.enums import Direction
+import pandas as pd
+import matplotlib.pyplot as plt
+from collections.abc import Iterable
+
+
+class Filter(object):
+ """Filter package features plug-in.
+
+ Offer easy way to create filter to use in class Strategy.
+
+ Attributes:
+ default_parameters: customized filter attributes.
+ """
+
+ def __init__(self, **default_parameters):
+ """inits filter."""
+ self.func = None
+ self.filters = {}
+ self._default_parameters = default_parameters
+ self.set_parameters(default_parameters)
+
+ def __call__(self, func):
+ """decorator function
+
+ Args
+ func: A function of the customized filter.
+ """
+ self.func = func
+ return self
+
+ def set_parameters(self, variables):
+ """set your customized filter parameters.
+
+ let filter class use variables dict to set method
+
+ Args:
+ variables: a dict of your customized filter attributes.
+
+ """
+ if self._default_parameters:
+ for key, val in self._default_parameters.items():
+ setattr(self, key, val)
+
+ if variables:
+ for key, val in variables.items():
+ setattr(self, key, val)
+
+
+ def show_parameters(self):
+ parameters = {}
+ for key, val in self._default_parameters.items():
+ parameters[key] = getattr(self, key)
+ print(parameters)
+
+ def create(self, variables=None):
+ """generate filter signals, fig_data.
+
+ offer easy way to create filter signals, fig_data
+
+ Args:
+ variables: a dict of your customized filter attributes.
+ Returns:
+ signals: a dataframe of filter signals.
+ fig_data: a dict of required data for figure display.
+ """
+
+ def ret_f(ohlcv):
+
+ variable_enumerate = enumerate_variables(variables)
+ if len(variable_enumerate) == 0:
+ variable_enumerate.append(self._default_parameters)
+
+ signals = {}
+ fig_data = {}
+ for v in variable_enumerate:
+
+ self.set_parameters(v)
+ results = self.func(ohlcv)
+
+ v = remove_pd_object(v)
+
+ if isinstance(results, Iterable):
+ signals[str(v)], fig_data = results
+ else:
+ signals[str(v)] = results
+
+ signals = pd.DataFrame(signals)
+ signals.columns.name = 'filter'
+
+ param_names = list(eval(signals.columns[0]).keys())
+ arrays = ([signals.columns.map(lambda s: eval(s)[p]) for p in param_names])
+ tuples = list(zip(*arrays))
+ columns = pd.MultiIndex.from_tuples(tuples, names=param_names)
+ signals.columns = columns
+
+ return signals, fig_data
+
+ return ret_f
+
+class Strategy(object):
+ """strategy features plug-in.
+
+ offer common strategy detection methods, such as back-testing,
+ parameter tuning and analysis charts.
+
+ Attributes:
+ default_parameters: customized strategy attributes.
+
+ """
+
+ def __init__(self, **default_parameters):
+ """inits strategy."""
+ self.filters = {}
+ self._default_parameters = default_parameters
+ self.set_parameters(default_parameters)
+
+ def __call__(self, func):
+ """decorator function
+
+ Args
+ func: A function of customized strategy.
+ """
+ self.func = func
+ return self
+
+ def set_parameters(self, variables):
+ """set your customized strategy parameters.
+
+ let strategy class use variables dict to set method.
+
+ Args:
+ variables: a dict of your customized strategy attributes.
+
+ """
+
+ # remove stop vars
+ stop_vars = ['sl_stop', 'tp_stop', 'ts_stop']
+ for svar in stop_vars:
+ if hasattr(self, svar):
+ delattr(self, svar)
+
+        # set default variables
+ if self._default_parameters:
+ for key, val in self._default_parameters.items():
+ setattr(self, key, val)
+
+ # set custom variables
+ if variables:
+ for key, val in variables.items():
+ setattr(self, key, val)
+
+ def show_parameters(self):
+ parameters = {}
+ for key, val in self._default_parameters.items():
+ parameters[key] = getattr(self, key)
+ print(parameters)
+
+ @staticmethod
+ def _enumerate_filters(ohlcv, filters):
+ """enumerate filters data.
+
+ process filter dictionary data to prepare for adding filter signals.
+
+ Args:
+ ohlcv: a dataframe of your trading target.
+ filters: a dict of your customized filter attributes.
+
+ Returns:
+ a dict that generate tuple with filter signal dataframe and figures data.
+ for example:
+
+ {'mmi': (timeperiod 20
+ timestamp
+ 2020-11-25 02:00:00+00:00 true
+ 2020-11-25 03:00:00+00:00 true
+ 2020-11-25 04:00:00+00:00 true
+
+ [3 rows x 1 columns], {'figures': {'mmi_index': timestamp
+ 2020-11-25 02:00:00+00:00 0.7
+ 2020-11-25 03:00:00+00:00 0.7
+ 2020-11-25 04:00:00+00:00 0.7
+ name: close, length: 28597, dtype: float64}})}
+
+ """
+ ret = {}
+ for fname, f in filters.items():
+ # get filter signals and figures
+ filter_df, filter_figures = f(ohlcv)
+ ret[fname] = (filter_df, filter_figures)
+ return ret
+
+ @staticmethod
+ def _add_filters(entries, exits, fig_data, filters):
+ """add filters in strategy.
+
+ generate entries, exits, fig_data after add filters.
+
+ Args:
+ entries: A dataframe of entries point time series.
+ exits: A dataframe of exits point time series.
+ fig_data: A dict of your customized figure Attributes.
+ filters: A dict of _enumerate_filters function return.
+
+ Returns:
+ entries: A dataframe of entries point time series after add filter function.
+ exits: A dataframe of exits point time series after add filter function.
+ fig_data: A dict of tuple with filter signal dataframe and figures data.
+
+ """
+ for fname, (filter_df, filter_figures) in filters.items():
+ filter_df.columns = filter_df.columns.set_names([fname + '_' + n for n in filter_df.columns.names])
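+            # combine every filter column with every strategy column: tile the filter
+            # signals across the strategy columns, repeat the strategy signals across
+            # the filter columns, and AND the two signal grids together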
+ entries = filter_df.vbt.tile(entries.shape[1]).vbt & entries.vbt.repeat(filter_df.shape[1]).vbt
+ exits = exits.vbt.repeat(filter_df.shape[1])
+ exits.columns = entries.columns
+
+ # merge figures
+ if filter_figures is not None:
+ if 'figures' in filter_figures:
+ if 'figures' not in fig_data:
+ fig_data['figures'] = {}
+ for name, fig in filter_figures['figures'].items():
+ fig_data['figures'][fname + '_' + name] = fig
+ if 'overlaps' in filter_figures:
+ if 'overlaps' not in fig_data:
+ fig_data['overlaps'] = {}
+ for name, fig in filter_figures['overlaps'].items():
+ fig_data['overlaps'][fname + '_' + name] = fig
+
+ return entries, exits, fig_data
+
+ @staticmethod
+ def _add_stops(ohlcv, entries, exits, variables):
+ """Add early trading stop condition in strategy.
+
+ Args:
+ ohlcv: A dataframe of your trading target.
+ entries: A dataframe of entry point time series.
+ exits: A dataframe of exits point time series.
+ variables: A dict of your customized strategy Attributes.
+
+ Returns:
+ entries: A dataframe of entries point time series after add stop_early function.
+ exits: A dataframe of exits point time series after add stop_early function.
+
+ """
+ entries, exits = stop_early(ohlcv, entries, exits, variables)
+ entries = entries.squeeze()
+ exits = exits.squeeze()
+ return entries, exits
+
+ def backtest(self, ohlcv, variables=None,
+ filters=None, lookback=None, plot=False,
+ signals=False, side='long', cscv_nbins=10,
+ cscv_objective=lambda r: r.mean(), html=None, compounded=True, execution_price='close',
+ k_colors='world', **args):
+
+ """Backtest analysis tool set.
+        Uses vectorbt as the backend for vectorized numerical operations,
+        and seaborn and pyecharts as the backends for analysis charts.
+
+ Args:
+ ohlcv: A dataframe of your trading target.
+ variables: A dict of your customized strategy Attributes.
+ Default is None.
+ filters: A dict of your customized filter Attributes.
+ Default is None.
+            lookback: An int; number of most recent ohlcv rows to backtest on.
+                Default is None (use all rows).
+            plot: A bool that controls plot display.
+                Default is False.
+            signals: A bool; if True, return entries, exits, fig_data instead of a portfolio.
+                Default is False.
+            side: A str of the transaction direction, 'long' or 'short'.
+                Default is 'long'.
+            cscv_nbins: An int of the CSCV algorithm bin size, which controls the overfitting calculation.
+                Default is 10.
+            cscv_objective: A function applied to in-sample (IS) and out-of-sample (OOS) returns as the performance objective.
+                Default is lambda r: r.mean().
+            html: A str path of an html file to render the plot into.
+                Default is None.
+            compounded: A bool; use compounded returns as the backtest result.
+                Default is True.
+            execution_price: The price used for trade execution ('open' or 'close').
+                Default is 'close'.
+            k_colors: A str ('world' or 'taiwan') selecting the k-line color scheme for up/down candles, or a dict
+                such as {'increasing_line': '#111111', 'decreasing_line': '#000000'}. Default is 'world'.
+ **args:
+ Other parameters.
+
+ Returns:
+            A vectorbt.Portfolio object created by Portfolio.from_signals,
+            plus plot output when plotting is enabled.
+
+ Raises:
+            "side should be 'long' or 'short'": if side is neither 'long' nor 'short'.
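+
+        Typical usage example (illustrative values; your_strategy and your_filter
+        are the decorated functions shown in the module-level docstring):
+        ```
+        portfolio = your_strategy.backtest(
+            ohlcv,
+            variables={'n1': range(10, 100, 10), 'n2': 60, 'sl_stop': 0.1},
+            filters={'f60': your_filter.create({'timeperiod': 60})},
+            freq='4h', plot=True)
+        ```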
+ """
+
+ variables = variables or dict()
+ filters = filters or dict()
+
+ variables_without_stop = copy.copy(variables)
+
+ # sl_trail: patch for vbt updates
+ exit_vars = ['sl_stop', 'ts_stop', 'tp_stop', 'sl_trail']
+ stop_vars = {}
+ for e in exit_vars:
+ if e in variables_without_stop:
+ stop_vars[e] = variables[e]
+ variables_without_stop.pop(e)
+
+ ohlcv_lookback = ohlcv.iloc[-lookback:] if lookback else ohlcv
+
+ variable_enumerate = enumerate_variables(variables_without_stop)
+
+ if not variable_enumerate:
+ variable_enumerate = [self._default_parameters]
+
+ entries, exits, fig_data = enumerate_signal(ohlcv_lookback, self, variable_enumerate)
+
+ if filters:
+ filter_signals = self._enumerate_filters(ohlcv_lookback, filters)
+ entries, exits, fig_data = self._add_filters(entries, exits, fig_data, filter_signals)
+
+ entries, exits = self._add_stops(ohlcv_lookback, entries, exits, stop_vars)
+
+ if signals:
+ return entries, exits, fig_data
+
+ if side == 'long':
+
+ if not compounded:
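+                # fixed position size: the number of units the initial cash can buy at
+                # the first close, so the backtest approximates non-compounded returns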
+ args['size'] = vbt.settings.portfolio['init_cash'] / ohlcv_lookback.close[0]
+
+ assert execution_price == 'close' or execution_price == 'open'
+ price = ohlcv_lookback[execution_price] if execution_price == 'close' else ohlcv_lookback[execution_price].shift(-1).bfill()
+
+ portfolio = vbt.Portfolio.from_signals(
+ price, entries.fillna(False), exits.fillna(False), **args)
+            # TESTING SHORTING ADDITION CAPABILITIES
+ elif side == 'short':
+ if not compounded:
+ args['size'] = vbt.settings.portfolio['init_cash'] / ohlcv_lookback.close[0]
+
+ assert execution_price == 'close' or execution_price == 'open'
+ price = ohlcv_lookback[execution_price] if execution_price == 'close' else ohlcv_lookback[execution_price].shift(-1).bfill()
+
+ portfolio = vbt.Portfolio.from_signals(
+ price,
+ direction=Direction.ShortOnly,
+ short_entries=entries.fillna(False),
+ short_exits=exits.fillna(False), **args)
+
+ else:
+ raise Exception("side should be 'long' or 'short'")
+
+ if (plot or html is not None) and isinstance(entries, pd.Series):
+ plot_strategy(ohlcv_lookback, entries, exits, portfolio, fig_data, html=html, k_colors=k_colors)
+
+ elif plot and isinstance(entries, pd.DataFrame):
+
+ # perform CSCV algorithm
+ cscv = CSCV(n_bins=cscv_nbins, objective=cscv_objective)
+ cscv.add_daily_returns(portfolio.daily_returns())
+ cscv_result = cscv.estimate_overfitting(plot=False)
+
+ # plot results
+ plot_combination(portfolio, cscv_result)
+ plt.show()
+ variable_visualization(portfolio)
+
+ return portfolio
diff --git a/.history/finlab_crypto/utility_20220919204154.py b/.history/finlab_crypto/utility_20220919204154.py
new file mode 100644
index 0000000..e6723fd
--- /dev/null
+++ b/.history/finlab_crypto/utility_20220919204154.py
@@ -0,0 +1,326 @@
+from IPython.display import display, HTML, IFrame, clear_output
+from itertools import compress, product
+from collections.abc import Iterable
+import matplotlib.pyplot as plt
+import tqdm.notebook as tqdm
+import ipywidgets as widgets
+import vectorbt as vbt
+import seaborn as sns
+import pandas as pd
+import numpy as np
+import copy
+import os
+
+from . import chart
+from . import overfitting
+
+
+def is_evalable(obj):
+ try:
+ eval(str(obj))
+ return True
+    except Exception:
+ return False
+
+def remove_pd_object(d):
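+    """Keep only dict values that can be reproduced from their string form.
+
+    pandas objects, callables, and other non-evalable values are dropped;
+    plain strings are always kept. Example (illustrative values):
+
+        remove_pd_object({'n1': 20, 'sma': pd.Series([1, 2])})  # -> {'n1': 20}
+    """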
+ ret = {}
+ for n, v in d.items():
+ if ((not isinstance(v, pd.Series) and not isinstance(v, pd.DataFrame) and not callable(v) and is_evalable(v))
+ or isinstance(v, str)):
+ ret[n] = v
+ return ret
+
+def enumerate_variables(variables):
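+    """Expand iterable parameter values into a grid of parameter dicts.
+
+    Strings, pandas objects, and scalar values are treated as constants.
+    Example (illustrative values):
+
+        enumerate_variables({'n1': [10, 20], 'n2': 60})
+        # -> [{'n1': 10, 'n2': 60}, {'n1': 20, 'n2': 60}]
+    """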
+
+ if not variables:
+ return []
+
+ enumeration_name = []
+ enumeration_vars = []
+
+ constant_d = {}
+
+ for name, v in variables.items():
+ if (isinstance(v, Iterable) and not isinstance(v, str)
+ and not isinstance(v, pd.Series)
+ and not isinstance(v, pd.DataFrame)):
+
+ enumeration_name.append(name)
+ enumeration_vars.append(v)
+ else:
+ constant_d[name] = v
+
+ variable_enumerations = [dict(**dict(zip(enumeration_name, ps)), **constant_d)
+ for ps in list(product(*enumeration_vars))]
+
+ return variable_enumerations
+
+
+
+def enumerate_signal(ohlcv, strategy, variables, ):
+ entries = {}
+ exits = {}
+
+ fig = {}
+
+ iteration = tqdm.tqdm(variables) if len(variables) > 1 else variables
+ for v in iteration:
+ strategy.set_parameters(v)
+ results = strategy.func(ohlcv)
+
+ v = remove_pd_object(v)
+
+ entries[str(v)], exits[str(v)] = results[0], results[1]
+ if len(results) >= 3:
+ fig = results[2]
+
+ entries = pd.DataFrame(entries)
+ exits = pd.DataFrame(exits)
+
+ # setup columns
+ param_names = list(eval(entries.columns[0]).keys())
+ arrays = ([entries.columns.map(lambda s: eval(s)[p]) for p in param_names])
+ tuples = list(zip(*arrays))
+ if tuples:
+ columns = pd.MultiIndex.from_tuples(tuples, names=param_names)
+ exits.columns = columns
+ entries.columns = columns
+ return entries, exits, fig
+
+def stop_early(ohlcv, entries, exits, stop_vars, enumeration=True):
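+    """Merge strategy exits with vectorbt OHLCSTX stop exits (stop-loss / take-profit).
+
+    Example (illustrative values): add a 10% stop-loss and a 20% take-profit
+    to existing entry/exit signals:
+
+        entries, exits = stop_early(ohlcv, entries, exits,
+                                    {'sl_stop': [0.1], 'tp_stop': [0.2]})
+    """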
+
+ if not stop_vars:
+ return entries, exits
+
+ # check for stop_vars
+ length = -1
+ stop_vars_set = {'sl_stop', 'ts_stop', 'tp_stop', 'sl_trail'}
+ for s, slist in stop_vars.items():
+ if s not in stop_vars_set:
+            raise Exception(f'variable {s} is not one of the stop variables'
+                            ': sl_stop, ts_stop, tp_stop, or sl_trail')
+ if not isinstance(slist, Iterable):
+ stop_vars[s] = [slist]
+
+ if length == -1:
+ length = len(stop_vars[s])
+
+ if not enumeration and length != -1 and length != len(stop_vars[s]):
+            raise Exception('lengths of the variables are not aligned: '
+ + str([len(stop_vars[s]) for s, slist in stop_vars.items()]))
+
+ if enumeration:
+ stop_vars = enumerate_variables(stop_vars)
+ stop_vars = {key: [stop_vars[i][key] for i in range(len(stop_vars))] for key in stop_vars[0].keys()}
+
+ # vbt patch: change ts_stop to sl_trail
+ if 'ts_stop' in stop_vars:
+ ts_stop = stop_vars.pop('ts_stop')
+ stop_vars['sl_trail'] = ts_stop
+
+ sl_advstex = vbt.OHLCSTX.run(
+ entries,
+ ohlcv['open'],
+ ohlcv['high'],
+ ohlcv['low'],
+ ohlcv['close'],
+ **stop_vars
+ )
+
+ stop_exits = sl_advstex.exits
+
+ nrepeat = int(len(stop_exits.columns) / len(entries.columns))
+ if isinstance(stop_exits, pd.DataFrame):
+ exits = exits.vbt.tile(nrepeat)
+ entries = entries.vbt.tile(nrepeat)
+
+ stop_exits = stop_exits.vbt | exits.values
+ entries.columns = stop_exits.columns
+
+ return entries, stop_exits
+
+def plot_strategy(ohlcv, entries, exits, portfolio, fig_data, html=None, k_colors='world'):
+
+ # format trade data
+ txn = portfolio.positions.records
+ txn['enter_time'] = ohlcv.iloc[txn.entry_idx].index.values
+ txn['exit_time'] = ohlcv.iloc[txn.exit_idx].index.values
+
+ # plot trade data
+ mark_lines = []
+ for name, t in txn.iterrows():
+ x = [str(t.enter_time), str(t.exit_time)]
+ y = [t.entry_price, t.exit_price]
+ name = t.loc[['entry_price', 'exit_price', 'return']].to_string()
+ mark_lines.append((name, x, y))
+
+ # calculate overlap figures
+ overlaps = {}
+ if 'overlaps' in fig_data:
+ overlaps = fig_data['overlaps']
+
+ # calculate sub-figures
+ figures = {}
+ if 'figures' in fig_data:
+ figures = fig_data['figures']
+
+ figures['entries & exits'] = pd.DataFrame(
+ {'entries':entries.squeeze(), 'exits': exits.squeeze()})
+ figures['performance'] = portfolio.cumulative_returns()
+
+ c, info = chart.chart(ohlcv, overlaps=overlaps,
+ figures=figures, markerlines=mark_lines,
+ start_date=ohlcv.index[-min(2000, len(ohlcv))], end_date=ohlcv.index[-1], k_colors=k_colors)
+ c.load_javascript()
+ if html is not None:
+ c.render(html)
+ else:
+ c.render()
+ display(HTML(filename='render.html'))
+
+ return
+
+def plot_combination(portfolio, cscv_result=None, metric='final_value'):
+
+ sns.set()
+ sns.set_style("whitegrid")
+
+ fig, axes = plt.subplots(1, 2, figsize=(15, 4), sharey=False, sharex=False)
+ fig.suptitle('Backtest Results')
+
+ def heat_map(item, name1, name2, ax):
+ if name1 != name2:
+ sns.heatmap(item.reset_index().pivot(name1, name2)[0], cmap='magma_r', ax=ax)
+ else:
+            item.groupby(name1).mean().plot(ax=ax)
+
+ def best_n(portfolio, n):
+ return getattr(portfolio, metric)().sort_values().tail(n).index
+
+ best_10 = best_n(portfolio, 10)
+
+ ax = (portfolio.cumulative_returns()[best_10] * 100).plot(ax=axes[0])
+ ax.set(xlabel='time', ylabel='cumulative return (%)')
+
+ axes[1].title.set_text('Drawdown (%)')
+ for n, c in zip([5, 10, 20, 30], sns.color_palette("GnBu_d")):
+ bests = best_n(portfolio, n)
+ drawdown = portfolio.drawdown()[bests].min(axis=1)
+ ax = drawdown.plot(linewidth=1, ax=axes[1])
+ # ax.fill_between(drawdown.index, 0, drawdown * 100, alpha=0.2, color=c)
+ ax.set(xlabel='time', ylabel='drawdown (%)')
+
+ plt.show()
+
+
+ items = ['final_value', 'sharpe_ratio', 'sortino_ratio']
+ fig, axes = plt.subplots(1, len(items), figsize=(15, 3),
+ sharey=False, sharex=False, constrained_layout=False)
+ fig.subplots_adjust(top=0.75)
+ fig.suptitle('Partial Differentiation')
+
+ final_value = portfolio.final_value()
+ if isinstance(final_value.index, pd.MultiIndex):
+ index_names = final_value.index.names
+ else:
+ index_names = [final_value.index.name]
+
+ for i, item in enumerate(items):
+ results = {}
+ for name in index_names:
+ s = getattr(portfolio, item)()
+ s = s.replace([np.inf, -np.inf], np.nan)
+ results[name] = s.groupby(name).mean()
+ results = pd.DataFrame(results)
+ axes[i].title.set_text(item)
+ results.plot(ax=axes[i])
+
+ if cscv_result is None:
+ return
+
+ results = cscv_result
+
+ fig, axes = plt.subplots(1, 3, figsize=(15, 5),
+ sharey=False, sharex=False, constrained_layout=False)
+ fig.subplots_adjust(bottom=0.5)
+ fig.suptitle('Combinatorially Symmetric Cross-validation')
+
+ pbo_test = round(results['pbo_test'] * 100, 2)
+ axes[0].title.set_text(f'Probability of overfitting: {pbo_test} %')
+ axes[0].hist(x=[l for l in results['logits'] if l > -10000], bins='auto')
+ axes[0].set_xlabel('Logits')
+ axes[0].set_ylabel('Frequency')
+
+ # performance degradation
+ axes[1].title.set_text('Performance degradation')
+ x, y = pd.DataFrame([results['R_n_star'], results['R_bar_n_star']]).dropna(axis=1).values
+    sns.regplot(x=x, y=y, ax=axes[1])
+ #axes[1].set_xlim(min(results['R_n_star']) * 1.2,max(results['R_n_star']) * 1.2)
+ #axes[1].set_ylim(min(results['R_bar_n_star']) * 1.2,max(results['R_bar_n_star']) * 1.2)
+ axes[1].set_xlabel('In-sample Performance')
+ axes[1].set_ylabel('Out-of-sample Performance')
+
+ # first and second Stochastic dominance
+ axes[2].title.set_text('Stochastic dominance')
+ if len(results['dom_df']) != 0: results['dom_df'].plot(ax=axes[2], secondary_y=['SD2'])
+ axes[2].set_xlabel('Performance optimized vs non-optimized')
+ axes[2].set_ylabel('Frequency')
+
+
+def variable_visualization(portfolio):
+
+ param_names = portfolio.cumulative_returns().columns.names
+ dropdown1 = widgets.Dropdown(
+ options=param_names,
+ value=param_names[0],
+ description='axis 1:',
+ disabled=False,
+ )
+ dropdown2 = widgets.Dropdown(
+ options=param_names,
+ value=param_names[0],
+ description='axis 2:',
+ disabled=False,
+ )
+
+ performance_metric = ['final_value',
+ 'calmar_ratio', 'max_drawdown', 'sharpe_ratio',
+ 'downside_risk', 'omega_ratio', 'conditional_value_at_risk']
+
+    performance_dropdown = widgets.Dropdown(
+ options=performance_metric,
+ value=performance_metric[0],
+ description='performance',
+ disabled=False,
+ )
+
+ out = widgets.Output()
+
+ import matplotlib.pyplot as plt
+ def update(v):
+ name1 = dropdown1.value
+ name2 = dropdown2.value
+        performance = performance_dropdown.value
+
+ with out:
+ out.clear_output()
+ if name1 != name2:
+ df = (getattr(portfolio, performance)()
+ .reset_index().groupby([name1, name2]).mean()[performance]
+ .reset_index().pivot(name1, name2)[performance])
+
+ df = df.replace([np.inf, -np.inf], np.nan)
+ sns.heatmap(df)
+ else:
+ getattr(portfolio, performance)().groupby(name1).mean().plot()
+ plt.show()
+
+
+ dropdown1.observe(update, 'value')
+ dropdown2.observe(update, 'value')
+    performance_dropdown.observe(update, 'value')
+    dropdowns = widgets.VBox([performance_dropdown,
+                              widgets.HBox([dropdown1, dropdown2])])
+    display(dropdowns)
+ display(out)
+ update(0)
diff --git a/.history/finlab_crypto/utility_20220919204204.py b/.history/finlab_crypto/utility_20220919204204.py
new file mode 100644
index 0000000..afcd557
--- /dev/null
+++ b/.history/finlab_crypto/utility_20220919204204.py
@@ -0,0 +1,327 @@
+from IPython.display import display, HTML, IFrame, clear_output
+from itertools import compress, product
+from collections.abc import Iterable
+import matplotlib.pyplot as plt
+import tqdm.notebook as tqdm
+import ipywidgets as widgets
+import vectorbt as vbt
+import seaborn as sns
+import pandas as pd
+import numpy as np
+import copy
+import os
+
+from . import chart
+from . import overfitting
+
+
+def is_evalable(obj):
+ try:
+ eval(str(obj))
+ return True
+    except Exception:
+ return False
+
+def remove_pd_object(d):
+ ret = {}
+ for n, v in d.items():
+ if ((not isinstance(v, pd.Series) and not isinstance(v, pd.DataFrame) and not callable(v) and is_evalable(v))
+ or isinstance(v, str)):
+ ret[n] = v
+ return ret
+
+def enumerate_variables(variables):
+
+ if not variables:
+ return []
+
+ enumeration_name = []
+ enumeration_vars = []
+
+ constant_d = {}
+
+ for name, v in variables.items():
+ if (isinstance(v, Iterable) and not isinstance(v, str)
+ and not isinstance(v, pd.Series)
+ and not isinstance(v, pd.DataFrame)):
+
+ enumeration_name.append(name)
+ enumeration_vars.append(v)
+ else:
+ constant_d[name] = v
+
+ variable_enumerations = [dict(**dict(zip(enumeration_name, ps)), **constant_d)
+ for ps in list(product(*enumeration_vars))]
+
+ return variable_enumerations
+
+
+
+def enumerate_signal(ohlcv, strategy, variables, ):
+ entries = {}
+ exits = {}
+
+ fig = {}
+
+ iteration = tqdm.tqdm(variables) if len(variables) > 1 else variables
+ for v in iteration:
+ strategy.set_parameters(v)
+ results = strategy.func(ohlcv)
+
+ v = remove_pd_object(v)
+
+ entries[str(v)], exits[str(v)] = results[0], results[1]
+ if len(results) >= 3:
+ fig = results[2]
+
+ entries = pd.DataFrame(entries)
+ exits = pd.DataFrame(exits)
+
+ # setup columns
+ param_names = list(eval(entries.columns[0]).keys())
+ arrays = ([entries.columns.map(lambda s: eval(s)[p]) for p in param_names])
+ tuples = list(zip(*arrays))
+ if tuples:
+ columns = pd.MultiIndex.from_tuples(tuples, names=param_names)
+ exits.columns = columns
+ entries.columns = columns
+ return entries, exits, fig
+
+def stop_early(ohlcv, entries, exits, stop_vars, enumeration=True):
+
+ if not stop_vars:
+ return entries, exits
+
+ # check for stop_vars
+ length = -1
+ stop_vars_set = {'sl_stop', 'ts_stop', 'tp_stop', 'sl_trail'}
+ for s, slist in stop_vars.items():
+ if s not in stop_vars_set:
+ raise Exception(f'variable {s} is not one of the stop variables'
+ ': sl_stop, ts_stop, tp_stop, or sl_trail')
+ if not isinstance(slist, Iterable):
+ stop_vars[s] = [slist]
+
+ if length == -1:
+ length = len(stop_vars[s])
+
+ if not enumeration and length != -1 and length != len(stop_vars[s]):
+ raise Exception('lengths of the variables are not aligned: '
+ + str([len(stop_vars[s]) for s, slist in stop_vars.items()]))
+
+ if enumeration:
+ stop_vars = enumerate_variables(stop_vars)
+ stop_vars = {key: [stop_vars[i][key] for i in range(len(stop_vars))] for key in stop_vars[0].keys()}
+
+ # vbt patch: change ts_stop to sl_trail
+ if 'ts_stop' in stop_vars:
+ ts_stop = stop_vars.pop('ts_stop')
+ stop_vars['sl_trail'] = ts_stop
+
+ sl_advstex = vbt.OHLCSTX.run(
+ entries,
+ ohlcv['open'],
+ ohlcv['high'],
+ ohlcv['low'],
+ ohlcv['close'],
+ **stop_vars
+ )
+
+ stop_exits = sl_advstex.exits
+
+ nrepeat = int(len(stop_exits.columns) / len(entries.columns))
+ if isinstance(stop_exits, pd.DataFrame):
+ exits = exits.vbt.tile(nrepeat)
+ entries = entries.vbt.tile(nrepeat)
+
+ stop_exits = stop_exits.vbt | exits.values
+ entries.columns = stop_exits.columns
+
+ return entries, stop_exits
+
+def plot_strategy(ohlcv, entries, exits, portfolio, fig_data, html=None, k_colors='world'):
+
+ # format trade data
+ txn = portfolio.positions.records
+ txn['enter_time'] = ohlcv.iloc[txn.entry_idx].index.values
+ txn['exit_time'] = ohlcv.iloc[txn.exit_idx].index.values
+
+ # plot trade data
+ mark_lines = []
+ for name, t in txn.iterrows():
+ x = [str(t.enter_time), str(t.exit_time)]
+ y = [t.entry_price, t.exit_price]
+ name = t.loc[['entry_price', 'exit_price', 'return']].to_string()
+ mark_lines.append((name, x, y))
+
+ # calculate overlap figures
+ overlaps = {}
+ if 'overlaps' in fig_data:
+ overlaps = fig_data['overlaps']
+
+ # calculate sub-figures
+ figures = {}
+ if 'figures' in fig_data:
+ figures = fig_data['figures']
+
+ figures['entries & exits'] = pd.DataFrame(
+ {'entries':entries.squeeze(), 'exits': exits.squeeze()})
+ figures['performance'] = portfolio.cumulative_returns()
+
+ c, info = chart.chart(ohlcv, overlaps=overlaps,
+ figures=figures, markerlines=mark_lines,
+ ## TESTING BIGGER CHARTS
+ start_date=ohlcv.index[-min(2000, len(ohlcv))], end_date=ohlcv.index[-1], k_colors=k_colors)
+ c.load_javascript()
+ if html is not None:
+ c.render(html)
+ else:
+ c.render()
+ display(HTML(filename='render.html'))
+
+ return
+
+def plot_combination(portfolio, cscv_result=None, metric='final_value'):
+
+ sns.set()
+ sns.set_style("whitegrid")
+
+ fig, axes = plt.subplots(1, 2, figsize=(15, 4), sharey=False, sharex=False)
+ fig.suptitle('Backtest Results')
+
+ def heat_map(item, name1, name2, ax):
+ if name1 != name2:
+ sns.heatmap(item.reset_index().pivot(name1, name2)[0], cmap='magma_r', ax=ax)
+ else:
+ getattr(portfolio, item_name).groupby(name1).mean().plot(ax=ax)
+
+ def best_n(portfolio, n):
+ return getattr(portfolio, metric)().sort_values().tail(n).index
+
+ best_10 = best_n(portfolio, 10)
+
+ ax = (portfolio.cumulative_returns()[best_10] * 100).plot(ax=axes[0])
+ ax.set(xlabel='time', ylabel='cumulative return (%)')
+
+ axes[1].title.set_text('Drawdown (%)')
+ for n, c in zip([5, 10, 20, 30], sns.color_palette("GnBu_d")):
+ bests = best_n(portfolio, n)
+ drawdown = portfolio.drawdown()[bests].min(axis=1)
+ ax = drawdown.plot(linewidth=1, ax=axes[1])
+ # ax.fill_between(drawdown.index, 0, drawdown * 100, alpha=0.2, color=c)
+ ax.set(xlabel='time', ylabel='drawdown (%)')
+
+ plt.show()
+
+
+ items = ['final_value', 'sharpe_ratio', 'sortino_ratio']
+ fig, axes = plt.subplots(1, len(items), figsize=(15, 3),
+ sharey=False, sharex=False, constrained_layout=False)
+ fig.subplots_adjust(top=0.75)
+ fig.suptitle('Partial Differentiation')
+
+ final_value = portfolio.final_value()
+ if isinstance(final_value.index, pd.MultiIndex):
+ index_names = final_value.index.names
+ else:
+ index_names = [final_value.index.name]
+
+ for i, item in enumerate(items):
+ results = {}
+ for name in index_names:
+ s = getattr(portfolio, item)()
+ s = s.replace([np.inf, -np.inf], np.nan)
+ results[name] = s.groupby(name).mean()
+ results = pd.DataFrame(results)
+ axes[i].title.set_text(item)
+ results.plot(ax=axes[i])
+
+ if cscv_result is None:
+ return
+
+ results = cscv_result
+
+ fig, axes = plt.subplots(1, 3, figsize=(15, 5),
+ sharey=False, sharex=False, constrained_layout=False)
+ fig.subplots_adjust(bottom=0.5)
+ fig.suptitle('Combinatorially Symmetric Cross-validation')
+
+ pbo_test = round(results['pbo_test'] * 100, 2)
+ axes[0].title.set_text(f'Probability of overfitting: {pbo_test} %')
+ axes[0].hist(x=[l for l in results['logits'] if l > -10000], bins='auto')
+ axes[0].set_xlabel('Logits')
+ axes[0].set_ylabel('Frequency')
+
+ # performance degradation
+ axes[1].title.set_text('Performance degradation')
+ x, y = pd.DataFrame([results['R_n_star'], results['R_bar_n_star']]).dropna(axis=1).values
+ sns.regplot(x, y, ax=axes[1])
+ #axes[1].set_xlim(min(results['R_n_star']) * 1.2,max(results['R_n_star']) * 1.2)
+ #axes[1].set_ylim(min(results['R_bar_n_star']) * 1.2,max(results['R_bar_n_star']) * 1.2)
+ axes[1].set_xlabel('In-sample Performance')
+ axes[1].set_ylabel('Out-of-sample Performance')
+
+ # first and second Stochastic dominance
+ axes[2].title.set_text('Stochastic dominance')
+ if len(results['dom_df']) != 0: results['dom_df'].plot(ax=axes[2], secondary_y=['SD2'])
+ axes[2].set_xlabel('Performance optimized vs non-optimized')
+ axes[2].set_ylabel('Frequency')
+
+
+def variable_visualization(portfolio):
+
+ param_names = portfolio.cumulative_returns().columns.names
+ dropdown1 = widgets.Dropdown(
+ options=param_names,
+ value=param_names[0],
+ description='axis 1:',
+ disabled=False,
+ )
+ dropdown2 = widgets.Dropdown(
+ options=param_names,
+ value=param_names[0],
+ description='axis 2:',
+ disabled=False,
+ )
+
+ performance_metric = ['final_value',
+ 'calmar_ratio', 'max_drawdown', 'sharpe_ratio',
+ 'downside_risk', 'omega_ratio', 'conditional_value_at_risk']
+
+ performance_dropdwon = widgets.Dropdown(
+ options=performance_metric,
+ value=performance_metric[0],
+ description='performance',
+ disabled=False,
+ )
+
+ out = widgets.Output()
+
+ import matplotlib.pyplot as plt
+ def update(v):
+ name1 = dropdown1.value
+ name2 = dropdown2.value
+ performance = performance_dropdwon.value
+
+ with out:
+ out.clear_output()
+ if name1 != name2:
+ df = (getattr(portfolio, performance)()
+ .reset_index().groupby([name1, name2]).mean()[performance]
+ .reset_index().pivot(name1, name2)[performance])
+
+ df = df.replace([np.inf, -np.inf], np.nan)
+ sns.heatmap(df)
+ else:
+ getattr(portfolio, performance)().groupby(name1).mean().plot()
+ plt.show()
+
+
+ dropdown1.observe(update, 'value')
+ dropdown2.observe(update, 'value')
+ performance_dropdwon.observe(update, 'value')
+ drawdowns = widgets.VBox([performance_dropdwon,
+ widgets.HBox([dropdown1, dropdown2])])
+ display(drawdowns)
+ display(out)
+ update(0)
diff --git a/.history/finlab_crypto/utility_20220919210854.py b/.history/finlab_crypto/utility_20220919210854.py
new file mode 100644
index 0000000..09bc1a4
--- /dev/null
+++ b/.history/finlab_crypto/utility_20220919210854.py
@@ -0,0 +1,327 @@
+from IPython.display import display, HTML, IFrame, clear_output
+from itertools import compress, product
+from collections.abc import Iterable
+import matplotlib.pyplot as plt
+import tqdm.notebook as tqdm
+import ipywidgets as widgets
+import vectorbt as vbt
+import seaborn as sns
+import pandas as pd
+import numpy as np
+import copy
+import os
+
+from . import chart
+from . import overfitting
+
+
+def is_evalable(obj):
+ try:
+ eval(str(obj))
+ return True
+ except:
+ return False
+
+def remove_pd_object(d):
+ ret = {}
+ for n, v in d.items():
+ if ((not isinstance(v, pd.Series) and not isinstance(v, pd.DataFrame) and not callable(v) and is_evalable(v))
+ or isinstance(v, str)):
+ ret[n] = v
+ return ret
+
+def enumerate_variables(variables):
+
+ if not variables:
+ return []
+
+ enumeration_name = []
+ enumeration_vars = []
+
+ constant_d = {}
+
+ for name, v in variables.items():
+ if (isinstance(v, Iterable) and not isinstance(v, str)
+ and not isinstance(v, pd.Series)
+ and not isinstance(v, pd.DataFrame)):
+
+ enumeration_name.append(name)
+ enumeration_vars.append(v)
+ else:
+ constant_d[name] = v
+
+ variable_enumerations = [dict(**dict(zip(enumeration_name, ps)), **constant_d)
+ for ps in list(product(*enumeration_vars))]
+
+ return variable_enumerations
+
+
+
+def enumerate_signal(ohlcv, strategy, variables, ):
+ entries = {}
+ exits = {}
+
+ fig = {}
+
+ iteration = tqdm.tqdm(variables) if len(variables) > 1 else variables
+ for v in iteration:
+ strategy.set_parameters(v)
+ results = strategy.func(ohlcv)
+
+ v = remove_pd_object(v)
+
+ entries[str(v)], exits[str(v)] = results[0], results[1]
+ if len(results) >= 3:
+ fig = results[2]
+
+ entries = pd.DataFrame(entries)
+ exits = pd.DataFrame(exits)
+
+ # setup columns
+ param_names = list(eval(entries.columns[0]).keys())
+ arrays = ([entries.columns.map(lambda s: eval(s)[p]) for p in param_names])
+ tuples = list(zip(*arrays))
+ if tuples:
+ columns = pd.MultiIndex.from_tuples(tuples, names=param_names)
+ exits.columns = columns
+ entries.columns = columns
+ return entries, exits, fig
+
+def stop_early(ohlcv, entries, exits, stop_vars, enumeration=True):
+
+ if not stop_vars:
+ return entries, exits
+
+ # check for stop_vars
+ length = -1
+ stop_vars_set = {'sl_stop', 'ts_stop', 'tp_stop', 'sl_trail'}
+ for s, slist in stop_vars.items():
+ if s not in stop_vars_set:
+ raise Exception(f'variable {s} is not one of the stop variables'
+ ': sl_stop, ts_stop, tp_stop, or sl_trail')
+ if not isinstance(slist, Iterable):
+ stop_vars[s] = [slist]
+
+ if length == -1:
+ length = len(stop_vars[s])
+
+ if not enumeration and length != -1 and length != len(stop_vars[s]):
+ raise Exception('lengths of the variables are not aligned: '
+ + str([len(stop_vars[s]) for s, slist in stop_vars.items()]))
+
+ if enumeration:
+ stop_vars = enumerate_variables(stop_vars)
+ stop_vars = {key: [stop_vars[i][key] for i in range(len(stop_vars))] for key in stop_vars[0].keys()}
+
+ # vbt patch: change ts_stop to sl_trail
+ if 'ts_stop' in stop_vars:
+ ts_stop = stop_vars.pop('ts_stop')
+ stop_vars['sl_trail'] = ts_stop
+
+ sl_advstex = vbt.OHLCSTX.run(
+ entries,
+ ohlcv['open'],
+ ohlcv['high'],
+ ohlcv['low'],
+ ohlcv['close'],
+ **stop_vars
+ )
+
+ stop_exits = sl_advstex.exits
+
+ nrepeat = int(len(stop_exits.columns) / len(entries.columns))
+ if isinstance(stop_exits, pd.DataFrame):
+ exits = exits.vbt.tile(nrepeat)
+ entries = entries.vbt.tile(nrepeat)
+
+ stop_exits = stop_exits.vbt | exits.values
+ entries.columns = stop_exits.columns
+
+ return entries, stop_exits
+
+def plot_strategy(ohlcv, entries, exits, portfolio, fig_data, html=None, k_colors='world'):
+
+ # format trade data
+ txn = portfolio.positions.records
+ txn['enter_time'] = ohlcv.iloc[txn.entry_idx].index.values
+ txn['exit_time'] = ohlcv.iloc[txn.exit_idx].index.values
+
+ # plot trade data
+ mark_lines = []
+ for name, t in txn.iterrows():
+ x = [str(t.enter_time), str(t.exit_time)]
+ y = [t.entry_price, t.exit_price]
+ name = t.loc[['entry_price', 'exit_price', 'return']].to_string()
+ mark_lines.append((name, x, y))
+
+ # calculate overlap figures
+ overlaps = {}
+ if 'overlaps' in fig_data:
+ overlaps = fig_data['overlaps']
+
+ # calculate sub-figures
+ figures = {}
+ if 'figures' in fig_data:
+ figures = fig_data['figures']
+
+ figures['entries & exits'] = pd.DataFrame(
+ {'entries':entries.squeeze(), 'exits': exits.squeeze()})
+ figures['performance'] = portfolio.cumulative_returns()
+
+ c, info = chart.chart(ohlcv, overlaps=overlaps,
+ figures=figures, markerlines=mark_lines,
+ ## TESTING BIGGER CHARTS
+ start_date=ohlcv.index[-min(5000, len(ohlcv))], end_date=ohlcv.index[-1], k_colors=k_colors)
+ c.load_javascript()
+ if html is not None:
+ c.render(html)
+ else:
+ c.render()
+ display(HTML(filename='render.html'))
+
+ return
+
+def plot_combination(portfolio, cscv_result=None, metric='final_value'):
+
+ sns.set()
+ sns.set_style("whitegrid")
+
+ fig, axes = plt.subplots(1, 2, figsize=(15, 4), sharey=False, sharex=False)
+ fig.suptitle('Backtest Results')
+
+ def heat_map(item, name1, name2, ax):
+ if name1 != name2:
+ sns.heatmap(item.reset_index().pivot(name1, name2)[0], cmap='magma_r', ax=ax)
+ else:
+ getattr(portfolio, item_name).groupby(name1).mean().plot(ax=ax)
+
+ def best_n(portfolio, n):
+ return getattr(portfolio, metric)().sort_values().tail(n).index
+
+ best_10 = best_n(portfolio, 10)
+
+ ax = (portfolio.cumulative_returns()[best_10] * 100).plot(ax=axes[0])
+ ax.set(xlabel='time', ylabel='cumulative return (%)')
+
+ axes[1].title.set_text('Drawdown (%)')
+ for n, c in zip([5, 10, 20, 30], sns.color_palette("GnBu_d")):
+ bests = best_n(portfolio, n)
+ drawdown = portfolio.drawdown()[bests].min(axis=1)
+ ax = drawdown.plot(linewidth=1, ax=axes[1])
+ # ax.fill_between(drawdown.index, 0, drawdown * 100, alpha=0.2, color=c)
+ ax.set(xlabel='time', ylabel='drawdown (%)')
+
+ plt.show()
+
+
+ items = ['final_value', 'sharpe_ratio', 'sortino_ratio']
+ fig, axes = plt.subplots(1, len(items), figsize=(15, 3),
+ sharey=False, sharex=False, constrained_layout=False)
+ fig.subplots_adjust(top=0.75)
+ fig.suptitle('Partial Differentiation')
+
+ final_value = portfolio.final_value()
+ if isinstance(final_value.index, pd.MultiIndex):
+ index_names = final_value.index.names
+ else:
+ index_names = [final_value.index.name]
+
+ for i, item in enumerate(items):
+ results = {}
+ for name in index_names:
+ s = getattr(portfolio, item)()
+ s = s.replace([np.inf, -np.inf], np.nan)
+ results[name] = s.groupby(name).mean()
+ results = pd.DataFrame(results)
+ axes[i].title.set_text(item)
+ results.plot(ax=axes[i])
+
+ if cscv_result is None:
+ return
+
+ results = cscv_result
+
+ fig, axes = plt.subplots(1, 3, figsize=(15, 5),
+ sharey=False, sharex=False, constrained_layout=False)
+ fig.subplots_adjust(bottom=0.5)
+ fig.suptitle('Combinatorially Symmetric Cross-validation')
+
+ pbo_test = round(results['pbo_test'] * 100, 2)
+ axes[0].title.set_text(f'Probability of overfitting: {pbo_test} %')
+ axes[0].hist(x=[l for l in results['logits'] if l > -10000], bins='auto')
+ axes[0].set_xlabel('Logits')
+ axes[0].set_ylabel('Frequency')
+
+ # performance degradation
+ axes[1].title.set_text('Performance degradation')
+ x, y = pd.DataFrame([results['R_n_star'], results['R_bar_n_star']]).dropna(axis=1).values
+ sns.regplot(x, y, ax=axes[1])
+ #axes[1].set_xlim(min(results['R_n_star']) * 1.2,max(results['R_n_star']) * 1.2)
+ #axes[1].set_ylim(min(results['R_bar_n_star']) * 1.2,max(results['R_bar_n_star']) * 1.2)
+ axes[1].set_xlabel('In-sample Performance')
+ axes[1].set_ylabel('Out-of-sample Performance')
+
+ # first and second Stochastic dominance
+ axes[2].title.set_text('Stochastic dominance')
+ if len(results['dom_df']) != 0: results['dom_df'].plot(ax=axes[2], secondary_y=['SD2'])
+ axes[2].set_xlabel('Performance optimized vs non-optimized')
+ axes[2].set_ylabel('Frequency')
+
+
+def variable_visualization(portfolio):
+
+ param_names = portfolio.cumulative_returns().columns.names
+ dropdown1 = widgets.Dropdown(
+ options=param_names,
+ value=param_names[0],
+ description='axis 1:',
+ disabled=False,
+ )
+ dropdown2 = widgets.Dropdown(
+ options=param_names,
+ value=param_names[0],
+ description='axis 2:',
+ disabled=False,
+ )
+
+ performance_metric = ['final_value',
+ 'calmar_ratio', 'max_drawdown', 'sharpe_ratio',
+ 'downside_risk', 'omega_ratio', 'conditional_value_at_risk']
+
+ performance_dropdwon = widgets.Dropdown(
+ options=performance_metric,
+ value=performance_metric[0],
+ description='performance',
+ disabled=False,
+ )
+
+ out = widgets.Output()
+
+ import matplotlib.pyplot as plt
+ def update(v):
+ name1 = dropdown1.value
+ name2 = dropdown2.value
+ performance = performance_dropdwon.value
+
+ with out:
+ out.clear_output()
+ if name1 != name2:
+ df = (getattr(portfolio, performance)()
+ .reset_index().groupby([name1, name2]).mean()[performance]
+ .reset_index().pivot(name1, name2)[performance])
+
+ df = df.replace([np.inf, -np.inf], np.nan)
+ sns.heatmap(df)
+ else:
+ getattr(portfolio, performance)().groupby(name1).mean().plot()
+ plt.show()
+
+
+ dropdown1.observe(update, 'value')
+ dropdown2.observe(update, 'value')
+ performance_dropdwon.observe(update, 'value')
+ drawdowns = widgets.VBox([performance_dropdwon,
+ widgets.HBox([dropdown1, dropdown2])])
+ display(drawdowns)
+ display(out)
+ update(0)
diff --git a/.history/setup_20220919203503.py b/.history/setup_20220919203503.py
new file mode 100644
index 0000000..2d526bd
--- /dev/null
+++ b/.history/setup_20220919203503.py
@@ -0,0 +1,36 @@
+import setuptools
+
+with open("README.md", "r") as fh:
+ long_description = fh.read()
+setuptools.setup(
+ name='finlab-crypto',
+ version='0.2.22',
+ author="FinLab",
+ author_email="finlabstaff@gmail.com",
+ description="A backtesting framework for crypto currency",
+ long_description=long_description,
+ long_description_content_type="text/markdown",
+ packages=['finlab_crypto'],
+ install_requires=[
+ 'numpy==1.20.0',
+ 'numba==0.53.1',
+ 'pandas>=1.1.5',
+ 'python-binance>=0.7.5',
+ 'pyecharts==1.9.1',
+ 'vectorbt',
+ 'statsmodels>=0.10.2',
+ 'tqdm>=4.41.1',
+ 'seaborn>=0.10.1',
+ ],
+ python_requires='>=3',
+ classifiers=[
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.4",
+ "Programming Language :: Python :: 3.5",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
+ "Operating System :: OS Independent",
+ ],
+ )
diff --git a/.history/strategies/bb_20220919205044.py b/.history/strategies/bb_20220919205044.py
new file mode 100644
index 0000000..9aae4b0
--- /dev/null
+++ b/.history/strategies/bb_20220919205044.py
@@ -0,0 +1,30 @@
+import talib
+import numpy as np
+import pandas as pd
+from finlab_crypto.strategy import Strategy
+
+@Strategy(bb_sma_length=20, std_dev_mult=2)
+def bb_strategy(ohlcv):
+
+ bb_sma_length = bb_strategy.bb_sma_length
+ std_dev_mult = bb_strategy.std_dev_mult
+
+ bb_sma_values = ohlcv.close.rolling(bb_sma_length).mean()
+ std_dev_value = ohlcv.close.rolling(bb_sma_length).std()
+
+ upperBB_values = bb_sma_values + std_dev_mult * std_dev_value
+ lowerBB_values = bb_sma_values - std_dev_mult * std_dev_value
+
+ entries = ohlcv.close >= upperBB_values
+ exits = ohlcv.close <= lowerBB_values
+
+ #make chart
+ figure = {
+ 'overlaps': {
+ 'BB SMA': bb_sma_values,
+ 'Upper BB': upperBB_values,
+ 'Lower BB': lowerBB_values,
+ }
+ }
+
+ return entries, exits, figure
\ No newline at end of file
diff --git a/.history/strategies/sma_20220919204600.py b/.history/strategies/sma_20220919204600.py
new file mode 100644
index 0000000..054e34b
--- /dev/null
+++ b/.history/strategies/sma_20220919204600.py
@@ -0,0 +1,23 @@
+from finlab_crypto.strategy import Strategy
+
+@Strategy(sma1_length=20, sma2_length=50)
+def sma_strategy(ohlcv):
+
+ sma1_length = sma_strategy.sma1_length
+ sma2_length = sma_strategy.sma2_length
+
+ sma1_values = ohlcv.open.rolling(sma1_length).mean()
+ sma2_values = ohlcv.open.rolling(sma2_length).mean()
+
+ # Compare current SMA values to the ones on the previous candle to see if they've crossed
+ entries = (sma1_values > sma2_values) & (sma1_values.shift(1) <= sma2_values.shift(1))
+ exits = (sma1_values < sma2_values) & (sma1_values.shift(1) >= sma2_values.shift(1))
+
+ figure = {
+ 'overlaps': {
+ str(sma1_length) + 'SMA': sma1_values,
+ str(sma2_length) + 'SMA': sma2_values,
+ }
+ }
+
+ return entries, exits, figure
\ No newline at end of file
diff --git a/README.md b/README.md
index 8c8d502..09fdc4c 100644
--- a/README.md
+++ b/README.md
@@ -137,6 +137,16 @@ BINANCE_KEY=<> BINANCE_SECRET=<> coverage
```
## Updates
+
+Version 0.2.28 - [@kodiakcrypto](https://github.com/kodiakcrypto)
+
+* added shorting capability
+* added 'both' as a valid value for the `side` param of `backtest` (see the example below)
+  * an entry opens a long and closes any open short; an exit closes the long and opens a short
+* changed the default fee to 0.075% to match the standard taker fee
+* set the default slippage to 0
+* updated SMA + BB demo strategy examples
+
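+A minimal usage sketch of the new options (assumes `ohlcv` holds candle data and `sma_strategy` is the updated demo strategy from `strategies/sma.py`; the parameter values are illustrative only):
+
+``` python
+variables = {'sma1_length': 20, 'sma2_length': 50}
+
+# side can be 'long', 'short', or 'both'; amount_of_candles sets how many recent
+# candles the interactive chart renders (default 1000)
+portfolio = sma_strategy.backtest(ohlcv, variables, freq='4h', side='both',
+ amount_of_candles=2000, plot=True)
+```
+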
Version 0.2.27
* support new version of pandas 3.8
diff --git a/finlab_crypto/__init__.py b/finlab_crypto/__init__.py
index e675928..0d3f3f6 100644
--- a/finlab_crypto/__init__.py
+++ b/finlab_crypto/__init__.py
@@ -8,13 +8,13 @@
import sys
import os
-__version__ = '0.2.27'
+__version__ = '0.2.28'
# set default fees and slippage
vbt.settings.portfolio['init_cash'] = 100.0 # in $
-vbt.settings.portfolio['fees'] = 0.001 # in %
-vbt.settings.portfolio['slippage'] = 0.001 # in %
+vbt.settings.portfolio['fees'] = 0.00075 # fraction of trade value (0.075%, standard taker fee)
+vbt.settings.portfolio['slippage'] = 0.0 # fraction of trade value (no slippage by default)
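+# these defaults can be overridden after import, e.g. to restore the previous 0.1% values:
+# vbt.settings.portfolio['fees'] = 0.001
+# vbt.settings.portfolio['slippage'] = 0.001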
# has workspace
def check_and_create_dir(dname):
@@ -23,7 +23,7 @@ def check_and_create_dir(dname):
os.mkdir(dname)
def setup_colab():
- google_drive_connected = os.path.isdir('/content/drive/My Drive')
+ google_drive_connected = os.path.isdir('/content/drive/MyDrive')
if not google_drive_connected:
print('|------------------------------')
@@ -40,13 +40,13 @@ def ln_dir(path):
if not os.path.isdir(dir):
os.symlink(path, dir)
- check_and_create_dir('/content/drive/My Drive/crypto_workspace')
- # check_and_create_dir('/content/drive/My Drive/crypto_workspace/strategies')
- check_and_create_dir('/content/drive/My Drive/crypto_workspace/history')
- # check_and_create_dir('/content/drive/My Drive/crypto_workspace/filters')
- # ln_dir("/content/drive/My Drive/crypto_workspace/strategies")
- # ln_dir("/content/drive/My Drive/crypto_workspace/filters")
- ln_dir("/content/drive/My Drive/crypto_workspace/history")
+ check_and_create_dir('/content/drive/MyDrive/crypto_workspace')
+ # check_and_create_dir('/content/drive/MyDrive/crypto_workspace/strategies')
+ check_and_create_dir('/content/drive/MyDrive/crypto_workspace/history')
+ # check_and_create_dir('/content/drive/MyDrive/crypto_workspace/filters')
+ # ln_dir("/content/drive/MyDrive/crypto_workspace/strategies")
+ # ln_dir("/content/drive/MyDrive/crypto_workspace/filters")
+ ln_dir("/content/drive/MyDrive/crypto_workspace/history")
def setup():
IN_COLAB = 'google.colab' in sys.modules
@@ -56,4 +56,3 @@ def setup():
# check_and_create_dir('strategies')
# check_and_create_dir('filters')
check_and_create_dir('history')
-
diff --git a/finlab_crypto/chart.py b/finlab_crypto/chart.py
index da8fc37..b1c82fc 100755
--- a/finlab_crypto/chart.py
+++ b/finlab_crypto/chart.py
@@ -115,8 +115,7 @@ def chart(dfstock, overlaps=dict(), figures=dict(), markers=dict(), markerlines=
#################
overlap_chart = (
- Line()
- .add_xaxis(xaxis_data=dfstock.index.astype(str).to_list())
+ Line().add_xaxis(xaxis_data=dfstock.index.astype(str).to_list())
)
for name, o in overlaps.items():
overlap_chart.add_yaxis(
diff --git a/finlab_crypto/strategy.py b/finlab_crypto/strategy.py
index 0944849..4f98ae6 100755
--- a/finlab_crypto/strategy.py
+++ b/finlab_crypto/strategy.py
@@ -30,6 +30,7 @@ def your_strategy(ohlcv):
from finlab_crypto.overfitting import CSCV
import copy
import vectorbt as vbt
+from vectorbt.portfolio.enums import Direction
import pandas as pd
import matplotlib.pyplot as plt
from collections.abc import Iterable
@@ -282,7 +283,9 @@ def _add_stops(ohlcv, entries, exits, variables):
def backtest(self, ohlcv, variables=None,
filters=None, lookback=None, plot=False,
signals=False, side='long', cscv_nbins=10,
- cscv_objective=lambda r: r.mean(), html=None, compounded=True, execution_price='close',
+ cscv_objective=lambda r: r.mean(), html=None,
+ compounded=True, execution_price='close',
+ amount_of_candles=1000, init_cash=100,
k_colors='world', **args):
"""Backtest analysis tool set.
@@ -323,10 +326,9 @@ def backtest(self, ohlcv, variables=None,
Plot results display.
Raises:
- 'Shorting is not support yet':if side is 'short'.
- "side should be 'long' or 'short'":if side is not 'short' or 'long'.
-
+ "side should be 'long', 'short' or 'both'": if side isnt any of those.
"""
+
variables = variables or dict()
filters = filters or dict()
@@ -359,7 +361,6 @@ def backtest(self, ohlcv, variables=None,
return entries, exits, fig_data
if side == 'long':
-
if not compounded:
args['size'] = vbt.settings.portfolio['init_cash'] / ohlcv_lookback.close[0]
@@ -367,16 +368,42 @@ def backtest(self, ohlcv, variables=None,
price = ohlcv_lookback[execution_price] if execution_price == 'close' else ohlcv_lookback[execution_price].shift(-1).bfill()
portfolio = vbt.Portfolio.from_signals(
- ohlcv_lookback[execution_price], entries.fillna(False), exits.fillna(False), **args)
+ price,
+ direction='longonly',
+ entries=entries.fillna(False),
+ exits=exits.fillna(False), **args)
elif side == 'short':
- raise Exception('Shorting is not support yet')
+ if not compounded:
+ args['size'] = vbt.settings.portfolio['init_cash'] / ohlcv_lookback.close[0]
+ assert execution_price == 'close' or execution_price == 'open'
+ price = ohlcv_lookback[execution_price] if execution_price == 'close' else ohlcv_lookback[execution_price].shift(-1).bfill()
+
+ portfolio = vbt.Portfolio.from_signals(
+ price,
+ direction='shortonly',
+ entries=entries.fillna(False),
+ exits=exits.fillna(False), **args)
+
+ elif side == 'both':
+ if not compounded:
+ args['size'] = vbt.settings.portfolio['init_cash'] / ohlcv_lookback.close[0]
+
+ assert execution_price == 'close' or execution_price == 'open'
+ price = ohlcv_lookback[execution_price] if execution_price == 'close' else ohlcv_lookback[execution_price].shift(-1).bfill()
+
+ portfolio = vbt.Portfolio.from_signals(
+ price,
+ direction='both',
+ entries=entries.fillna(False),
+ exits=exits.fillna(False), **args)
else:
- raise Exception("side should be 'long' or 'short'")
+ raise Exception("side should be 'long' or 'short' or 'both'")
if (plot or html is not None) and isinstance(entries, pd.Series):
- plot_strategy(ohlcv_lookback, entries, exits, portfolio, fig_data, html=html, k_colors=k_colors)
+ plot_strategy(ohlcv_lookback, entries, exits, portfolio, fig_data,
+ html=html, k_colors=k_colors, amount_of_candles=amount_of_candles)
elif plot and isinstance(entries, pd.DataFrame):
@@ -390,4 +417,4 @@ def backtest(self, ohlcv, variables=None,
plt.show()
variable_visualization(portfolio)
- return portfolio
+ return portfolio
\ No newline at end of file
diff --git a/finlab_crypto/utility.py b/finlab_crypto/utility.py
index 2f96609..74fc304 100644
--- a/finlab_crypto/utility.py
+++ b/finlab_crypto/utility.py
@@ -139,7 +139,7 @@ def stop_early(ohlcv, entries, exits, stop_vars, enumeration=True):
return entries, stop_exits
-def plot_strategy(ohlcv, entries, exits, portfolio ,fig_data, html=None, k_colors='world'):
+def plot_strategy(ohlcv, entries, exits, portfolio, fig_data, html=None, k_colors='world', amount_of_candles=1000):
# format trade data
txn = portfolio.positions.records
@@ -170,7 +170,8 @@ def plot_strategy(ohlcv, entries, exits, portfolio ,fig_data, html=None, k_color
c, info = chart.chart(ohlcv, overlaps=overlaps,
figures=figures, markerlines=mark_lines,
- start_date=ohlcv.index[-min(1000, len(ohlcv))], end_date=ohlcv.index[-1], k_colors=k_colors)
+ # show the most recent amount_of_candles candles on the chart
+ start_date=ohlcv.index[-min(amount_of_candles, len(ohlcv))], end_date=ohlcv.index[-1], k_colors=k_colors)
c.load_javascript()
if html is not None:
c.render(html)
diff --git a/setup.py b/setup.py
index e894466..d7d7cab 100644
--- a/setup.py
+++ b/setup.py
@@ -4,10 +4,10 @@
long_description = fh.read()
setuptools.setup(
name='finlab-crypto',
- version='0.2.27',
+ version='0.2.28',
author="FinLab",
author_email="finlabstaff@gmail.com",
- description="A backtesting framework for crytpo currency",
+ description="A backtesting framework for crypto currency",
long_description=long_description,
long_description_content_type="text/markdown",
packages=['finlab_crypto'],
diff --git a/strategies/bb.py b/strategies/bb.py
index 3fb03f4..9aae4b0 100644
--- a/strategies/bb.py
+++ b/strategies/bb.py
@@ -3,25 +3,28 @@
import pandas as pd
from finlab_crypto.strategy import Strategy
-@Strategy(window=14, nstd=2)
+@Strategy(bb_sma_length=20, std_dev_mult=2)
def bb_strategy(ohlcv):
- window = bb_strategy.window
- nstd = bb_strategy.nstd
+ bb_sma_length = bb_strategy.bb_sma_length
+ std_dev_mult = bb_strategy.std_dev_mult
- mean = ohlcv.close.rolling(window).mean()
- std = ohlcv.close.rolling(window).std()
+ bb_sma_values = ohlcv.close.rolling(bb_sma_length).mean()
+ std_dev_value = ohlcv.close.rolling(bb_sma_length).std()
- up = mean + nstd * std
- dn = mean - nstd * std
-
- entries = ohlcv.close > up
- exits = ohlcv.close < dn
+ upperBB_values = bb_sma_values + std_dev_mult * std_dev_value
+ lowerBB_values = bb_sma_values - std_dev_mult * std_dev_value
+ entries = ohlcv.close >= upperBB_values
+ exits = ohlcv.close <= lowerBB_values
+
+ #make chart
figure = {
- 'overlaps': {
- 'up': mean + nstd * std,
- 'dn': mean - nstd * std,
- }
+ 'overlaps': {
+ 'BB SMA': bb_sma_values,
+ 'Upper BB': upperBB_values,
+ 'Lower BB': lowerBB_values,
+ }
}
+
return entries, exits, figure
\ No newline at end of file
diff --git a/strategies/sma.py b/strategies/sma.py
index 2f53048..054e34b 100644
--- a/strategies/sma.py
+++ b/strategies/sma.py
@@ -1,21 +1,22 @@
from finlab_crypto.strategy import Strategy
-@Strategy(sma1=21, sma2=144)
+@Strategy(sma1_length=20, sma2_length=50)
def sma_strategy(ohlcv):
- v1 = sma_strategy.sma1
- v2 = sma_strategy.sma2
+ sma1_length = sma_strategy.sma1_length
+ sma2_length = sma_strategy.sma2_length
- sma1 = ohlcv.open.rolling(v1).mean()
- sma2 = ohlcv.open.rolling(v2).mean()
-
- entries = (sma1 > sma2) & (sma1.shift() < sma2.shift())
- exits = (sma1 < sma2) & (sma1.shift() > sma2.shift())
+ sma1_values = ohlcv.open.rolling(sma1_length).mean()
+ sma2_values = ohlcv.open.rolling(sma2_length).mean()
+
+ # Compare current SMA values to the ones on the previous candle to see if they've crossed
+ entries = (sma1_values > sma2_values) & (sma1_values.shift(1) <= sma2_values.shift(1))
+ exits = (sma1_values < sma2_values) & (sma1_values.shift(1) >= sma2_values.shift(1))
figure = {
'overlaps': {
- str(v1) + 'ma': sma1,
- str(v2) + 'ma': sma2,
+ str(sma1_length) + 'SMA': sma1_values,
+ str(sma2_length) + 'SMA': sma2_values,
}
}