From 8dfa13df768429bb29deed18cf4898ac9530a7a1 Mon Sep 17 00:00:00 2001
From: Diogo Matos Chaves
Date: Fri, 1 Mar 2024 16:27:07 -0300
Subject: [PATCH] Add tests for genetic algorithm optimizer

---
 stratestic/backtesting/_mixin.py               |  13 ++-
 .../backtesting/optimization/_optimization.py  |   6 +-
 .../out/bollinger_bands_trading_costs.py       |   5 +-
 .../out/machine_learning_no_trading_costs.py   |   6 +-
 .../out/momentum_no_trading_costs.py           |   5 +-
 .../moving_average_crossover_trading_costs.py  |   5 +-
 tests/backtesting/iterative/test_iterative.py  | 105 +++++++++++++++---
 .../backtesting/vectorized/test_vectorized.py  |  30 ++++-
 8 files changed, 146 insertions(+), 29 deletions(-)

diff --git a/stratestic/backtesting/_mixin.py b/stratestic/backtesting/_mixin.py
index d8f7d19..8537969 100644
--- a/stratestic/backtesting/_mixin.py
+++ b/stratestic/backtesting/_mixin.py
@@ -280,7 +280,7 @@ def optimize(
         self,
         params,
         optimization_metric="Return",
-        optimizer: Literal["brute", "gen_alg"] = 'brute',
+        optimizer: Literal["brute_force", "gen_alg"] = 'brute_force',
         **kwargs
     ):
         """Optimizes the trading strategy using brute force.
@@ -321,7 +321,7 @@
         opt_params, strategy_params_mapping, optimization_steps = adapt_optimization_input(self.strategy, params)

         self.bar = progressbar.ProgressBar(
-            max_value=optimization_steps if self._optimizer == 'brute' else progressbar.UnknownLength,
+            max_value=optimization_steps if self._optimizer == 'brute_force' else progressbar.UnknownLength,
             redirect_stdout=True
         )
         self.optimization_steps = 0
@@ -340,8 +340,11 @@
         self.bar.finish()
         print()

-        return (get_params_mapping(self.strategy, opt, strategy_params_mapping, params),
-                -self._update_and_run(opt, True, True, strategy_params_mapping, params))
+        optimized_params = get_params_mapping(self.strategy, opt, strategy_params_mapping, params)
+        optimized_result = self._update_and_run(opt, True, True, strategy_params_mapping, params)
+        optimized_result = optimized_result if self._optimizer == 'gen_alg' else -optimized_result
+
+        return optimized_params, optimized_result

     def maximum_leverage(self, margin_threshold=None):
         """
@@ -427,7 +430,7 @@
         self.optimization_metric = optimization_options[optimization_metric]

     def _check_optimizer_input(self, optimizer):
-        optimizer_options = ["brute", "gen_alg"]
+        optimizer_options = ["brute_force", "gen_alg"]

         if optimizer not in optimizer_options:
             raise ValueError(f"The selected optimizer is not supported. "
                              f"Choose one of: {', '.join(optimizer_options)}")
diff --git a/stratestic/backtesting/optimization/_optimization.py b/stratestic/backtesting/optimization/_optimization.py
index f936939..bea5628 100644
--- a/stratestic/backtesting/optimization/_optimization.py
+++ b/stratestic/backtesting/optimization/_optimization.py
@@ -47,7 +47,7 @@ def strategy_optimizer(
     strategy_runner,
     opt_params: tuple,
     runner_args: tuple,
-    optimizer: Literal["brute", "gen_alg"] = 'brute',
+    optimizer: Literal["brute_force", "gen_alg"] = 'brute_force',
     **kwargs
 ):
     """
@@ -63,7 +63,7 @@
         (start2, stop2, step2), ...).
     runner_args : tuple
         Additional arguments required by the `strategy_runner` function.
-    optimizer : Literal["brute", "gen_alg"]
+    optimizer : Literal["brute_force", "gen_alg"]
         Choice of algorithm for the optimization.
     **kwargs : dict
         Additional keyword arguments passed to the `brute` function from scipy.optimize.
@@ -79,7 +79,7 @@
     defined by `opt_params`.
The objective function to be optimized is defined by `strategy_runner`. """ - if optimizer == "brute": + if optimizer == "brute_force": return brute( strategy_runner, diff --git a/tests/backtesting/iterative/out/bollinger_bands_trading_costs.py b/tests/backtesting/iterative/out/bollinger_bands_trading_costs.py index 83c71e0..1da394b 100644 --- a/tests/backtesting/iterative/out/bollinger_bands_trading_costs.py +++ b/tests/backtesting/iterative/out/bollinger_bands_trading_costs.py @@ -3,7 +3,10 @@ expected_performance = 0.985337 expected_outperformance = -0.030928 -expected_optimization_results = ({'ma': 2.0, 'sd': 1.0}, 0) +expected_optimization_results = { + "brute_force": ({'ma': 2.0, 'sd': 1.0}, 0), + "gen_alg": ({'ma': 5.0, 'sd': 5.0}, 0), +} expected_results = [ { diff --git a/tests/backtesting/iterative/out/machine_learning_no_trading_costs.py b/tests/backtesting/iterative/out/machine_learning_no_trading_costs.py index ffe48f9..76d8638 100644 --- a/tests/backtesting/iterative/out/machine_learning_no_trading_costs.py +++ b/tests/backtesting/iterative/out/machine_learning_no_trading_costs.py @@ -1,6 +1,10 @@ +import numpy as np from pandas import Timestamp -expected_optimization_results = ({'nr_lags': 2.0}, 13.175614657220974) +expected_optimization_results = { + "brute_force": ({'nr_lags': 2.0}, 13.175614657220974), + "gen_alg": ({'nr_lags': 5.0}, np.inf), +} expected_results = [ { diff --git a/tests/backtesting/iterative/out/momentum_no_trading_costs.py b/tests/backtesting/iterative/out/momentum_no_trading_costs.py index 7dc05fd..9502597 100644 --- a/tests/backtesting/iterative/out/momentum_no_trading_costs.py +++ b/tests/backtesting/iterative/out/momentum_no_trading_costs.py @@ -3,7 +3,10 @@ expected_performance = 1.009399 expected_outperformance = -0.006867 -expected_optimization_results = ({'window': 5.0}, -0.2131523135281635) +expected_optimization_results = { + "brute_force": ({'window': 5.0}, -0.2131523135281635), + "gen_alg": ({'window': 5.0}, -0.2131523135281635), +} expected_results = [ { diff --git a/tests/backtesting/iterative/out/moving_average_crossover_trading_costs.py b/tests/backtesting/iterative/out/moving_average_crossover_trading_costs.py index 58c8513..40c7b33 100644 --- a/tests/backtesting/iterative/out/moving_average_crossover_trading_costs.py +++ b/tests/backtesting/iterative/out/moving_average_crossover_trading_costs.py @@ -3,7 +3,10 @@ expected_performance = 1.003375 expected_outperformance = -0.003015 -expected_optimization_results = ({'sma_l': 6.0, 'sma_s': 1.0}, 100.0) +expected_optimization_results = { + "brute_force": ({'sma_l': 6.0, 'sma_s': 1.0}, 100.0), + "gen_alg": ({'sma_s': 5.0, 'sma_l': 7.0}, 100.0) +} expected_results = [ { diff --git a/tests/backtesting/iterative/test_iterative.py b/tests/backtesting/iterative/test_iterative.py index e8e24ce..b31d4c6 100644 --- a/tests/backtesting/iterative/test_iterative.py +++ b/tests/backtesting/iterative/test_iterative.py @@ -162,6 +162,21 @@ def test_optimize_parameters_input_validation( backtester.load_data(data=test_data) backtester.optimize(optimization_params) + @pytest.mark.parametrize( + "optimizer,extra_args", + [ + pytest.param( + "brute_force", + {}, + id="BruteForce", + ), + pytest.param( + "gen_alg", + {"pop_size": 4, "max_gen": 1, "random_state": 42}, + id="GenAlg", + ) + ] + ) @pytest.mark.parametrize( "fixture", [ @@ -169,7 +184,7 @@ def test_optimize_parameters_input_validation( for fixture_name, fixture in fixtures.items() ], ) - def test_optimize_parameters(self, fixture, common_fixture): + 
def test_optimize_parameters(self, fixture, optimizer, extra_args, common_fixture): strategy = fixture["in"]["strategy"] params = fixture["in"]["params"] optimization_params = fixture["in"]["optimization_params"] @@ -181,11 +196,25 @@ def test_optimize_parameters(self, fixture, common_fixture): ite = IterativeBacktester(strategy_instance, trading_costs=trading_costs) - optimization_results = ite.optimize(*optimization_params) + optimization_results = ite.optimize(*optimization_params, optimizer=optimizer, **extra_args) - assert optimization_results == fixture["out"]["expected_optimization_results"] + assert optimization_results == fixture["out"]["expected_optimization_results"][optimizer] - @pytest.mark.slow + @pytest.mark.parametrize( + "optimizer,extra_args", + [ + pytest.param( + "brute_force", + {}, + id="BruteForce", + ), + pytest.param( + "gen_alg", + {"pop_size": 4, "max_gen": 1, "random_state": 42}, + id="GenAlg", + ) + ] + ) @pytest.mark.parametrize( "input_params,optimization_params,expected_results", [ @@ -195,7 +224,10 @@ def test_optimize_parameters(self, fixture, common_fixture): "method": "Unanimous" }, [{"window": (2, 4)}, {"ma": (1, 3)}], - [{'window': 3.0}, {'ma': 2.0}], + { + "brute_force": [{'window': 3.0}, {'ma': 2.0}], + "gen_alg": [{'window': 4.0}, {'ma': 3.0}] + }, id='2_strategies-unanimous' ), pytest.param( @@ -204,7 +236,10 @@ def test_optimize_parameters(self, fixture, common_fixture): "method": "Majority" }, [{"window": (2, 4)}, {"ma": (1, 3)}], - [{'window': 3.0}, {'ma': 1.0}], + { + "brute_force": [{'window': 3.0}, {'ma': 1.0}], + "gen_alg": [{'window': 4.0}, {'ma': 3.0}] + }, id='2_strategies-majority' ), pytest.param( @@ -213,7 +248,10 @@ def test_optimize_parameters(self, fixture, common_fixture): "method": "Majority" }, [{"window": (2, 4)}, {"ma": (1, 3)}, {}], - [{'window': 3.0}, {'ma': 2.0}, {}], + { + "brute_force": [{'window': 3.0}, {'ma': 2.0}, {}], + "gen_alg": [{'window': 4.0}, {'ma': 3.0}, {}] + }, id='3_strategies-majority' ), pytest.param( @@ -222,7 +260,10 @@ def test_optimize_parameters(self, fixture, common_fixture): "method": "Unanimous" }, [{"sma_s": (2, 4), "sma_l": (4, 6)}], - [{'sma_s': 3.0, 'sma_l': 4.0}], + { + "brute_force": [{'sma_s': 3.0, 'sma_l': 4.0}], + "gen_alg": [{'sma_l': 6.0, 'sma_s': 4.0}] + }, id='1_strategies-unanimous' ), ], @@ -232,6 +273,8 @@ def test_optimize_parameters_combined_strategies( input_params, optimization_params, expected_results, + optimizer, + extra_args, common_fixture ): test_data = data.set_index("open_time") @@ -240,10 +283,25 @@ def test_optimize_parameters_combined_strategies( ite = IterativeBacktester(strategy_instance) - optimization_results, perf = ite.optimize(optimization_params) + optimization_results, perf = ite.optimize(optimization_params, optimizer=optimizer, **extra_args) - assert optimization_results == expected_results + assert optimization_results == expected_results[optimizer] + @pytest.mark.parametrize( + "optimizer,extra_args", + [ + pytest.param( + "brute_force", + {}, + id="BruteForce", + ), + pytest.param( + "gen_alg", + {"pop_size": 4, "max_gen": 1, "random_state": 42}, + id="GenAlg", + ) + ] + ) @pytest.mark.parametrize( "input_params,optimization_params", [ @@ -269,6 +327,8 @@ def test_load_data_optimize_parameters_combined_strategies( self, input_params, optimization_params, + optimizer, + extra_args, common_fixture ): test_data = data.set_index("open_time") @@ -280,12 +340,27 @@ def test_load_data_optimize_parameters_combined_strategies( ite_2 = 
IterativeBacktester(strategy_instance_2) ite_2.load_data(data=test_data) - optimization_results_1, perf_1 = ite_1.optimize(optimization_params) - optimization_results_2, perf_2 = ite_2.optimize(optimization_params) + optimization_results_1, perf_1 = ite_1.optimize(optimization_params, optimizer=optimizer, **extra_args) + optimization_results_2, perf_2 = ite_2.optimize(optimization_params, optimizer=optimizer, **extra_args) assert optimization_results_1 == optimization_results_2 assert perf_1 == perf_2 + @pytest.mark.parametrize( + "optimizer,extra_args", + [ + pytest.param( + "brute_force", + {}, + id="BruteForce", + ), + pytest.param( + "gen_alg", + {"pop_size": 4, "max_gen": 1, "random_state": 42}, + id="GenAlg", + ) + ] + ) @pytest.mark.parametrize( "input_params,optimization_params", [ @@ -311,6 +386,8 @@ def test_optimize_parameters_combined_strategies_equal_to_vectorized( self, input_params, optimization_params, + optimizer, + extra_args, common_fixture ): test_data = data.set_index("open_time") @@ -321,8 +398,8 @@ def test_optimize_parameters_combined_strategies_equal_to_vectorized( ite = IterativeBacktester(strategy_instance_1) vect = IterativeBacktester(strategy_instance_2) - optimization_results_1, perf_1 = ite.optimize(optimization_params) - optimization_results_2, perf_2 = vect.optimize(optimization_params) + optimization_results_1, perf_1 = ite.optimize(optimization_params, optimizer=optimizer, **extra_args) + optimization_results_2, perf_2 = vect.optimize(optimization_params, optimizer=optimizer, **extra_args) assert optimization_results_1 == optimization_results_2 assert perf_1 == perf_2 diff --git a/tests/backtesting/vectorized/test_vectorized.py b/tests/backtesting/vectorized/test_vectorized.py index 6847a53..03888c7 100644 --- a/tests/backtesting/vectorized/test_vectorized.py +++ b/tests/backtesting/vectorized/test_vectorized.py @@ -57,43 +57,67 @@ def test_run(self, fixture, mocked_plotly_figure_show): ), print(d, key) @pytest.mark.parametrize( - "strategy,optimization_params,exception", + "strategy,optimization_params,optimizer,metric,exception", [ pytest.param( Momentum(2), [dict(window=(2, 4))], + "brute_force", + "Return", OptimizationParametersInvalid, id="wrong-optimization-parameters-single-strategy", ), pytest.param( StrategyCombiner([Momentum(2), MovingAverage(2)]), dict(window=(2, 4)), + "gen_alg", + "Return", OptimizationParametersInvalid, id="wrong-optimization-parameters-multiple-strategy", ), pytest.param( StrategyCombiner([Momentum(2)]), [dict(window=(2, 4)), dict(ma=(2, 4))], + "brute_force", + "Return", OptimizationParametersInvalid, id="too-many-optimization-parameters-multiple-strategy", ), pytest.param( StrategyCombiner([Momentum(2), MovingAverage(2)]), [dict(window=(2, 4))], + "gen_alg", + "Return", OptimizationParametersInvalid, id="too-few-optimization-parameters-multiple-strategy", ), + pytest.param( + MovingAverage(2), + [dict(ma=(2, 4))], + "invalid_optimizer", + "Return", + ValueError, + id="InvalidOptimizer", + ), + pytest.param( + MovingAverage(2), + [dict(ma=(2, 4))], + "gen_alg", + "invalid_metric", + ValueError, + id="InvalidOptimizationMetric", + ), ], ) def test_optimize_parameters_input_validation( - self, strategy, optimization_params, exception + self, strategy, optimization_params, optimizer, metric, exception ): test_data = data.set_index("open_time") with pytest.raises(exception) as excinfo: vect = VectorizedBacktester(strategy) vect.load_data(data=test_data) - vect.optimize(optimization_params) + vect.optimize(optimization_params, 
optimizer=optimizer, optimization_metric=metric) @pytest.mark.parametrize( "fixture",
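
Reviewer note: a minimal usage sketch of the optimizer switch exercised by these
tests. This is an illustration, not part of the patch: the CSV file name and the
window range are made up, the import paths are assumed from the package layout,
and the single-strategy params shape (a plain dict) follows the
"wrong-optimization-parameters-single-strategy" validation case. The pop_size,
max_gen, and random_state keyword arguments mirror the values forwarded to the
genetic algorithm in the parametrized test cases above.

    import pandas as pd

    from stratestic.backtesting import VectorizedBacktester
    from stratestic.strategies import Momentum

    # Hypothetical OHLCV data indexed by open_time, as in the test fixtures.
    df = pd.read_csv("BTCUSDT-1h.csv", index_col="open_time", parse_dates=True)

    vect = VectorizedBacktester(Momentum(2))
    vect.load_data(data=df)

    # Brute-force grid search (the default; renamed from "brute" to "brute_force").
    params_bf, result_bf = vect.optimize(dict(window=(2, 6)), optimizer="brute_force")

    # Genetic algorithm; extra keyword arguments are forwarded to the optimizer.
    params_ga, result_ga = vect.optimize(
        dict(window=(2, 6)),
        optimizer="gen_alg",
        pop_size=4,
        max_gen=1,
        random_state=42,
    )

Both calls return an (optimized_params, optimized_result) tuple. Per the change in
_mixin.py, the brute-force result is negated back to the metric's natural sign
(scipy's brute minimizes the negated metric), while the genetic algorithm already
reports the metric directly.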