diff --git a/epde/__init__.py b/epde/__init__.py index e69de29..2179f5c 100644 --- a/epde/__init__.py +++ b/epde/__init__.py @@ -0,0 +1,6 @@ +from .interface.interface import EpdeSearch +from .interface.logger import Logger +from .interface.equation_translator import translate_equation + +from .interface.prepared_tokens import CustomTokens, CacheStoredTokens, ExternalDerivativesTokens +from .interface.prepared_tokens import GridTokens, TrigonometricTokens, PhasedSine1DTokens \ No newline at end of file diff --git a/epde/evaluators.py b/epde/evaluators.py index fc4460b..e7dc4f7 100644 --- a/epde/evaluators.py +++ b/epde/evaluators.py @@ -134,20 +134,14 @@ def phased_sine(*grids, **kwargs): def phased_sine_1d(*grids, **kwargs): coordwise_elems = kwargs['freq'] * 2*np.pi*(grids[0] + kwargs['phase']/kwargs['freq']) - # for dim in range(len(grids))] return np.power(np.sin(coordwise_elems), kwargs['power']) -# def grid_eval_fun(*grids, **kwargs): -# return np.power(grids[int(kwargs['dim'])], kwargs['power']) - def const_eval_fun(*grids, **kwargs): return np.full_like(a=grids[0], fill_value=kwargs['value']) - def const_grad_fun(*grids, **kwargs): return np.zeros_like(a=grids[0]) - def get_velocity_common(*grids, **kwargs): a = [kwargs['p' + str(idx*3+1)] * grids[0]**2 + kwargs['p' + str(idx*3 + 2)] * grids[0] + kwargs['p' + str(idx*3 + 3)] for idx in range(5)] alpha = np.exp(a[0] * grids[1] + a[1]); beta = a[2] * grids[1]**2 + a[3] * grids[1] + a[4] @@ -157,115 +151,66 @@ def velocity_heating_eval_fun(*grids, **kwargs): ''' Assumption of the velocity field for two-dimensional heat equation with convetion. ''' - # a = [kwargs['p' + str(idx*3+1)] * grids[0]**2 + kwargs['p' + str(idx*3 + 2)] * grids[0] + kwargs['p' + str(idx*3 + 3)] for idx in range(5)] - # alpha = np.exp(a[0] * grids[1] + a[1]); beta = a[2] * grids[1]**2 + a[3] * grids[1] + a[4] alpha, beta = get_velocity_common(*grids, **kwargs) return alpha * beta -# Proof of concept, if works properly, replace with permutations approach to gradient construction - - def vhef_grad_1(*grids, **kwargs): - # a = [kwargs['p' + str(idx*3+1)] * grids[0]**2 + kwargs['p' + str(idx*3 + 2)] * grids[0] + kwargs['p' + str(idx*3 + 3)] for idx in range(5)] - # alpha = np.exp(a[0] * grids[1] + a[1]); beta = a[2] * grids[1]**2 + a[3] * grids[1] + a[4] alpha, beta = get_velocity_common(*grids, **kwargs) return grids[0]**2 * grids[1] * alpha * beta - def vhef_grad_2(*grids, **kwargs): - # a = [kwargs['p' + str(idx*3+1)] * grids[0]**2 + kwargs['p' + str(idx*3 + 2)] * grids[0] + kwargs['p' + str(idx*3 + 3)] for idx in range(5)] - # alpha = np.exp(a[0] * grids[1] + a[1]); beta = a[2] * grids[1]**2 + a[3] * grids[1] + a[4] alpha, beta = get_velocity_common(*grids, **kwargs) return grids[0] * grids[1] * alpha * beta - def vhef_grad_3(*grids, **kwargs): - # a = [kwargs['p' + str(idx*3+1)] * grids[0]**2 + kwargs['p' + str(idx*3 + 2)] * grids[0] + kwargs['p' + str(idx*3 + 3)] for idx in range(5)] - # alpha = np.exp(a[0] * grids[1] + a[1]); beta = a[2] * grids[1]**2 + a[3] * grids[1] + a[4] alpha, beta = get_velocity_common(*grids, **kwargs) return grids[1] * alpha * beta - def vhef_grad_4(*grids, **kwargs): - # a = [kwargs['p' + str(idx*3+1)] * grids[0]**2 + kwargs['p' + str(idx*3 + 2)] * grids[0] + kwargs['p' + str(idx*3 + 3)] for idx in range(5)] - # alpha = np.exp(a[0] * grids[1] + a[1]); beta = a[2] * grids[1]**2 + a[3] * grids[1] + a[4] alpha, beta = get_velocity_common(*grids, **kwargs) return grids[0]**2 * alpha * beta - def vhef_grad_5(*grids, **kwargs): - # a = 
[kwargs['p' + str(idx*3+1)] * grids[0]**2 + kwargs['p' + str(idx*3 + 2)] * grids[0] + kwargs['p' + str(idx*3 + 3)] for idx in range(5)] - # alpha = np.exp(a[0] * grids[1] + a[1]); beta = a[2] * grids[1]**2 + a[3] * grids[1] + a[4] alpha, beta = get_velocity_common(*grids, **kwargs) return grids[0] * alpha * beta - def vhef_grad_6(*grids, **kwargs): - # a = [kwargs['p' + str(idx*3+1)] * grids[0]**2 + kwargs['p' + str(idx*3 + 2)] * grids[0] + kwargs['p' + str(idx*3 + 3)] for idx in range(5)] - # alpha = np.exp(a[0] * grids[1] + a[1]); beta = a[2] * grids[1]**2 + a[3] * grids[1] + a[4] alpha, beta = get_velocity_common(*grids, **kwargs) return alpha * beta - def vhef_grad_7(*grids, **kwargs): - # a = [kwargs['p' + str(idx*3+1)] * grids[0]**2 + kwargs['p' + str(idx*3 + 2)] * grids[0] + kwargs['p' + str(idx*3 + 3)] for idx in range(5)] - # alpha = np.exp(a[0] * grids[1] + a[1]); beta = a[2] * grids[1]**2 + a[3] * grids[1] + a[4] alpha, beta = get_velocity_common(*grids, **kwargs) return grids[0]**2 * grids[1]**2 * alpha - def vhef_grad_8(*grids, **kwargs): - # a = [kwargs['p' + str(idx*3+1)] * grids[0]**2 + kwargs['p' + str(idx*3 + 2)] * grids[0] + kwargs['p' + str(idx*3 + 3)] for idx in range(5)] - # alpha = np.exp(a[0] * grids[1] + a[1]); beta = a[2] * grids[1]**2 + a[3] * grids[1] + a[4] alpha, beta = get_velocity_common(*grids, **kwargs) return grids[0] * grids[1]**2 * alpha - def vhef_grad_9(*grids, **kwargs): - # a = [kwargs['p' + str(idx*3+1)] * grids[0]**2 + kwargs['p' + str(idx*3 + 2)] * grids[0] + kwargs['p' + str(idx*3 + 3)] for idx in range(5)] - # alpha = np.exp(a[0] * grids[1] + a[1]); beta = a[2] * grids[1]**2 + a[3] * grids[1] + a[4] alpha, beta = get_velocity_common(*grids, **kwargs) return grids[1]**2 * alpha - def vhef_grad_10(*grids, **kwargs): - # a = [kwargs['p' + str(idx*3+1)] * grids[0]**2 + kwargs['p' + str(idx*3 + 2)] * grids[0] + kwargs['p' + str(idx*3 + 3)] for idx in range(5)] - # alpha = np.exp(a[0] * grids[1] + a[1]); beta = a[2] * grids[1]**2 + a[3] * grids[1] + a[4] alpha, beta = get_velocity_common(*grids, **kwargs) return grids[0]**2 * grids[1] * alpha - def vhef_grad_11(*grids, **kwargs): - # a = [kwargs['p' + str(idx*3+1)] * grids[0]**2 + kwargs['p' + str(idx*3 + 2)] * grids[0] + kwargs['p' + str(idx*3 + 3)] for idx in range(5)] - # alpha = np.exp(a[0] * grids[1] + a[1]); beta = a[2] * grids[1]**2 + a[3] * grids[1] + a[4] alpha, beta = get_velocity_common(*grids, **kwargs) return grids[0] * grids[1] * alpha - def vhef_grad_12(*grids, **kwargs): - # a = [kwargs['p' + str(idx*3+1)] * grids[0]**2 + kwargs['p' + str(idx*3 + 2)] * grids[0] + kwargs['p' + str(idx*3 + 3)] for idx in range(5)] - # alpha = np.exp(a[0] * grids[1] + a[1]); beta = a[2] * grids[1]**2 + a[3] * grids[1] + a[4] alpha, beta = get_velocity_common(*grids, **kwargs) return grids[1] * alpha - def vhef_grad_13(*grids, **kwargs): - # a = [kwargs['p' + str(idx*3+1)] * grids[0]**2 + kwargs['p' + str(idx*3 + 2)] * grids[0] + kwargs['p' + str(idx*3 + 3)] for idx in range(5)] - # alpha = np.exp(a[0] * grids[1] + a[1]); beta = a[2] * grids[1]**2 + a[3] * grids[1] + a[4] alpha, beta = get_velocity_common(*grids, **kwargs) return grids[0]**2 * alpha - def vhef_grad_14(*grids, **kwargs): - # a = [kwargs['p' + str(idx*3+1)] * grids[0]**2 + kwargs['p' + str(idx*3 + 2)] * grids[0] + kwargs['p' + str(idx*3 + 3)] for idx in range(5)] - # alpha = np.exp(a[0] * grids[1] + a[1]); beta = a[2] * grids[1]**2 + a[3] * grids[1] + a[4] alpha, beta = get_velocity_common(*grids, **kwargs) return grids[0] * alpha - def 
vhef_grad_15(*grids, **kwargs): - # a = [kwargs['p' + str(idx*3+1)] * grids[0]**2 + kwargs['p' + str(idx*3 + 2)] * grids[0] + kwargs['p' + str(idx*3 + 3)] for idx in range(5)] - # alpha = np.exp(a[0] * grids[1] + a[1]); beta = a[2] * grids[1]**2 + a[3] * grids[1] + a[4] alpha, beta = get_velocity_common(*grids, **kwargs) return alpha @@ -287,4 +232,4 @@ def vhef_grad_15(*grids, **kwargs): velocity_evaluator = CustomEvaluator(velocity_heating_eval_fun, ['p' + str(idx+1) for idx in range(15)]) velocity_grad_evaluators = [CustomEvaluator(component, ['p' + str(idx+1) for idx in range(15)]) - for component in vhef_grad] + for component in vhef_grad] \ No newline at end of file diff --git a/epde/interface/interface.py b/epde/interface/interface.py index 604b67b..98a8c6e 100644 --- a/epde/interface/interface.py +++ b/epde/interface/interface.py @@ -5,33 +5,32 @@ @author: mike_ubuntu """ -import time import pickle import numpy as np from typing import Union, Callable from collections import OrderedDict -import warnings import epde.globals as global_var +from epde.optimizers.builder import StrategyBuilder + from epde.optimizers.moeadd.moeadd import * from epde.optimizers.moeadd.supplementary import * +from epde.optimizers.moeadd.strategy import MOEADDDirector +from epde.optimizers.moeadd.strategy_elems import MOEADDSectorProcesser +#from epde.optimizers.moeadd.population_constr import SystemsPopulationConstructor as MOEADDSystemPopConstr + +from epde.optimizers.single_criterion.optimizer import EvolutionaryStrategy, SimpleOptimizer +# from epde.optimizers.single_criterion.population_constr import SystemsPopulationConstructor as SOSystemPopConstr +from epde.optimizers.single_criterion.strategy import BaselineDirector from epde.optimizers.single_criterion.supplementary import simple_sorting # from epde.optimizers.moeadd.strategy_elems import SectorProcesserBuilder from epde.preprocessing.domain_pruning import DomainPruner from epde.operators.utils.default_parameter_loader import EvolutionaryParams -from epde.optimizers.builder import StrategyBuilder -from epde.optimizers.single_criterion.optimizer import EvolutionaryStrategy, SimpleOptimizer -from epde.optimizers.moeadd.strategy_elems import MOEADDSectorProcesser from epde.decorators import BoundaryExclusion -from epde.optimizers.single_criterion.population_constr import SystemsPopulationConstructor as SOSystemPopConstr -from epde.optimizers.moeadd.population_constr import SystemsPopulationConstructor as MOEADDSystemPopConstr -from epde.optimizers.single_criterion.strategy import BaselineDirector -from epde.optimizers.moeadd.strategy import MOEADDDirector - from epde.evaluators import simple_function_evaluator, trigonometric_evaluator from epde.supplementary import define_derivatives from epde.cache.cache import upload_simple_tokens, upload_grids, prepare_var_tensor #, np_ndarray_section @@ -97,12 +96,8 @@ def use_global_cache(self): """ Method for add calculated derivatives in the cache """ - # print(f'self.data_tensor: {self.data_tensor.shape}') - # print(f'self.derivatives: {self.derivatives.shape}') derivs_stacked = prepare_var_tensor(self.data_tensor, self.derivatives, time_axis=global_var.time_axis) - # print(f'derivs_stacked: {derivs_stacked.shape}') - # raise Exception('ZUL LUL') try: upload_simple_tokens(self.names, global_var.tensor_cache, derivs_stacked) upload_simple_tokens([self.var_name,], global_var.initial_data_cache, [self.data_tensor,]) @@ -521,6 +516,31 @@ def create_pool(self, data: Union[np.ndarray, list, tuple], 
variable_names=['u',
         self.set_preprocessor()
 
         data_tokens = []
+
+        def latex_form(label, **params):
+            '''
+            Parameters
+            ----------
+            label : str
+                label of the token, for which we construct the latex form.
+            **params : dict
+                dictionary with parameter labels as keys and tuple of parameter values
+                and their output text forms as values.
+
+            Returns
+            -------
+            form : str
+                LaTeX-styled text form of token.
+            '''
+            if '/' in label:
+                label = label[:label.find('x')+1] + '_' + label[label.find('x')+1:]
+                label = label.replace('d', r'\partial ').replace('/', r'}{')
+                label = r'\frac{' + label + r'}'
+
+            if params['power'][0] > 1:
+                label = r'\left(' + label + r'\right)^{{{0}}}'.format(params["power"][1])
+            return label
+
         for data_elem_idx, data_tensor in enumerate(data):
             assert isinstance(data_tensor, np.ndarray), 'Input data must be in format of numpy ndarrays or iterable (list or tuple) of numpy arrays'
             entry = InputDataEntry(var_name=variable_names[data_elem_idx],
@@ -530,9 +550,11 @@ def create_pool(self, data: Union[np.ndarray, list, tuple], variable_names=['u',
                                    grid=global_var.grid_cache.get_all()[1],
                                    max_order=max_deriv_order)
             entry.use_global_cache()
-            self.set_derivatives(variable=variable_names[data_elem_idx], deriv=entry.derivatives)
-
+            self.set_derivatives(variable=variable_names[data_elem_idx], deriv=entry.derivatives)
+
+            entry_token_family = TokenFamily(entry.var_name, family_of_derivs=True)
+            entry_token_family.set_latex_form_constructor(latex_form)
             entry_token_family.set_status(demands_equation=True, unique_specific_token=False,
                                           unique_token_type=False, s_and_d_merged=False,
                                           meaningful=True)
@@ -826,3 +848,9 @@ def predict(self, system : SoEq, boundary_conditions : BoundaryConditions, grid
         solution_model = adapter.solve_epde_system(system = system, grids = grid, data = data,
                                                    boundary_conditions = boundary_conditions, strategy = strategy)
         return solution_model(adapter.convert_grid(grid)).detach().numpy()
+
+    def visualize_solutions(self, dimensions: list = [0, 1], **visualizer_kwargs):
+        if self.multiobjective_mode:
+            self.optimizer.plot_pareto(dimensions=dimensions, **visualizer_kwargs)
+        else:
+            raise NotImplementedError('Solution visualization is implemented only for multiobjective mode.')
\ No newline at end of file
diff --git a/epde/interface/prepared_tokens.py b/epde/interface/prepared_tokens.py
index 2fb0ee5..9ca622d 100644
--- a/epde/interface/prepared_tokens.py
+++ b/epde/interface/prepared_tokens.py
@@ -58,7 +58,27 @@ def __init__(self, freq: tuple = (np.pi/2., 2*np.pi), dimensionality=1):
         self._token_family = TokenFamily(token_type='trigonometric')
         self._token_family.set_status(unique_specific_token=True, unique_token_type=True,
                                       meaningful=False)
-
+
+        def latex_form(label, **params):
+            '''
+            Parameters
+            ----------
+            label : str
+                label of the token, for which we construct the latex form.
+            **params : dict
+                dictionary with parameter labels as keys and tuple of parameter values
+                and their output text forms as values.
+
+            Returns
+            -------
+            form : str
+                LaTeX-styled text form of token.
+ ''' + form = label + r'^{{{0}}}'.format(params["power"][1]) + \ + r'(' + params["freq"][1] + r' x_{' + params["dim"][1] + r'})' + return form + + self._token_family.set_latex_form_constructor(latex_form) trig_token_params = OrderedDict([('power', (1, 1)), ('freq', freq), ('dim', (0, dimensionality))]) @@ -70,37 +90,11 @@ def __init__(self, freq: tuple = (np.pi/2., 2*np.pi), dimensionality=1): self._token_family.set_evaluator(trigonometric_evaluator, []) -# class PhasedSineTokens(PreparedTokens): -# def __init__(self, freq: tuple = ((np.pi/2., 2*np.pi),), dimensionality = 1): -# assert len(freq) == dimensionality or len(freq) == 1, 'Incorrect params' -# self._token_family = TokenFamily(token_type='phased_sine') -# self._token_family.set_status(unique_specific_token=True, unique_token_type=True, -# meaningful=False) - -# if len(freq) == 1: # dimensionality > 1 and -# freqs_matched = tuple([freq[0] for idx in range(dimensionality)]) - -# sine_token_params = OrderedDict([('power', (1, 1)),#tuple([(1, 1) for idx in range(dimensionality)])), -# ('freq', freqs_matched), -# ('phase', tuple([(0, 1) for idx in range(dimensionality)]))]) - -# freq_equality_fraction = 0.05 # fraction of allowed frequency interval, that is considered as the same - -# freqs_equality = [(freq[idx][1] - freq[idx][0]) / freq_equality_fraction for idx in range(dimensionality)] -# sine_equal_params = {'power': 0, 'freq': freqs_equality, -# 'phase': 0.05} -# self._token_family.set_params(['sine',], sine_token_params, sine_equal_params) -# self._token_family.set_evaluator(phased_sine_evaluator, []) - class PhasedSine1DTokens(PreparedTokens): def __init__(self, freq: tuple = (np.pi/2., 2*np.pi)): - # assert len(freq) == dimensionality or len(freq) == 1, 'Incorrect params' self._token_family = TokenFamily(token_type='phased_sine_1d') self._token_family.set_status(unique_specific_token=True, unique_token_type=True, meaningful=False) - - # if len(freq) == 1: # dimensionality > 1 and - # freqs_matched = tuple([freq[0] for idx in range(dimensionality)]) sine_token_params = OrderedDict([('power', (1, 1)),#tuple([(1, 1) for idx in range(dimensionality)])), ('freq', freq), @@ -108,7 +102,26 @@ def __init__(self, freq: tuple = (np.pi/2., 2*np.pi)): freq_equality_fraction = 0.05 # fraction of allowed frequency interval, that is considered as the same - # freqs_equality = [(freq[idx][1] - freq[idx][0]) / freq_equality_fraction for idx in range(dimensionality)] + def latex_form(label, **params): + ''' + Parameters + ---------- + label : str + label of the token, for which we construct the latex form. + **params : dict + dictionary with parameter labels as keys and tuple of parameter values + and their output text forms as values. + + Returns + ------- + form : str + LaTeX-styled text form of token. + ''' + pwr_sign = r'^{{{0}}}'.format(params["power"][1]) if params["power"][0] != 1 else '' + return label + pwr_sign + r'(' + params["freq"][1] + r' x_{1} + ' \ + + params["phase"][1] + r')' + + self._token_family.set_latex_form_constructor(latex_form) sine_equal_params = {'power': 0, 'freq': (freq[1] - freq[0]) / freq_equality_fraction, 'phase': 0.05} self._token_family.set_params(['sine',], sine_token_params, sine_equal_params) @@ -133,6 +146,28 @@ def __init__(self, labels = ['t',], dimensionality=1): self._token_family.set_status(unique_specific_token=True, unique_token_type=True, meaningful=True) + def latex_form(label, **params): + ''' + Parameters + ---------- + label : str + label of the token, for which we construct the latex form. 
+ **params : dict + dictionary with parameter labels as keys and tuple of parameter values + and their output text forms as values. + + Returns + ------- + form : str + LaTeX-styled text form of token. + ''' + form = label + if params['power'][0] > 1: + form = r'(' + form + r')^{{{0}}}'.format(params["power"][0]) + return form + + + self._token_family.set_latex_form_constructor(latex_form) grid_token_params = OrderedDict([('power', (1, 1)), ('dim', (0, dimensionality))]) grid_equal_params = {'power': 0, 'dim': 0} diff --git a/epde/interface/token_family.py b/epde/interface/token_family.py index b8033c5..b7affb5 100644 --- a/epde/interface/token_family.py +++ b/epde/interface/token_family.py @@ -8,7 +8,7 @@ import numpy as np import itertools -from typing import Union +from typing import Union, Callable import pickle @@ -119,6 +119,7 @@ def __init__(self, token_type: str, family_of_derivs: bool = False): self.params_set = False self.cache_set = False self.deriv_evaluator_set = True + self.set_latex_form_constructor() def __len__(self): assert self.params_set, 'Familiy is not fully initialized.' @@ -237,7 +238,6 @@ def set_evaluator(self, eval_function, eval_kwargs_keys=[], suppress_eval_test=T self._evaluator = eval_function else: self._evaluator = EvaluatorContained(eval_function, eval_kwargs_keys) -# self._evaluator.set_params(**eval_params) self.evaluator_set = True if self.params_set and not suppress_eval_test: self.test_evaluator() @@ -266,6 +266,9 @@ def set_deriv_evaluator(self, eval_functions, eval_kwargs_keys=[], suppress_eval if self.params_set and not suppress_eval_test: self.test_evaluator(deriv=True) + def set_latex_form_constructor(self, latex_constructor: Callable = None): + self.latex_constructor = latex_constructor + def test_evaluator(self, deriv=False): """ Method to test, if the evaluator and tokens are set properly @@ -360,7 +363,8 @@ def create(self, label=None, token_status: dict = None, else: factor_deriv_code = None new_factor = Factor(token_name=label, deriv_code=factor_deriv_code, - status=self.status, family_type=self.ftype) + status=self.status, family_type=self.ftype, + latex_constructor = self.latex_constructor) if self.status['unique_token_type']: occupied_by_factor = {token: self.token_params['power'][1] for token in self.tokens} diff --git a/epde/optimizers/blocks.py b/epde/optimizers/blocks.py index 58cd32b..00582f9 100644 --- a/epde/optimizers/blocks.py +++ b/epde/optimizers/blocks.py @@ -210,4 +210,4 @@ def traversal(self, input_obj, EA_kwargs): @property def output(self): - return self.final_vertex.output \ No newline at end of file + return self.final_vertex.output diff --git a/epde/optimizers/moeadd/moeadd.py b/epde/optimizers/moeadd/moeadd.py index 965394d..34db2ac 100644 --- a/epde/optimizers/moeadd/moeadd.py +++ b/epde/optimizers/moeadd/moeadd.py @@ -5,16 +5,15 @@ """ import numpy as np -import time import warnings from typing import Union -from copy import deepcopy -from functools import reduce +# from copy import deepcopy +# from functools import reduce import epde.globals as global_var from epde.optimizers.moeadd.population_constr import SystemsPopulationConstructor -from epde.interface.logger import Logger +from epde.optimizers.moeadd.vis import ParetoVisualizer from epde.optimizers.moeadd.strategy_elems import MOEADDSectorProcesser from epde.optimizers.moeadd.supplementary import fast_non_dominated_sorting, ndl_update, Equality, Inequality @@ -443,4 +442,10 @@ def get_hist(self, best_only: bool = True): if best_only: return [elem[0] for elem 
in self._hist]
         else:
-            return self._hist
\ No newline at end of file
+            return self._hist
+
+    def plot_pareto(self, dimensions:list, **visualizer_kwargs):
+        assert len(dimensions) == 2, 'Current approach supports only two dimensional plots'
+        visualizer = ParetoVisualizer(self.pareto_levels)
+        visualizer.plot_pareto(dimensions = dimensions, **visualizer_kwargs)
+
\ No newline at end of file
diff --git a/epde/optimizers/moeadd/supplementary.py b/epde/optimizers/moeadd/supplementary.py
index d706fd4..c7cc214 100644
--- a/epde/optimizers/moeadd/supplementary.py
+++ b/epde/optimizers/moeadd/supplementary.py
@@ -30,6 +30,7 @@
 import numpy as np
 from abc import ABC, abstractproperty, abstractmethod
+from epde.supplementary import rts
 
 def check_dominance(target, compared_with) -> bool:
     """
@@ -50,18 +51,6 @@ def check_dominance(target, compared_with) -> bool:
     """
     flag = False
-    def rts(value, sign_num: int = 5):
-        """
-        Round to a ``sign_num`` of significant digits.
-        """
-        if value == 0:
-            return 0
-        magn_top = np.log10(value)
-        idx = -(np.sign(magn_top)*np.ceil(np.abs(magn_top)) - sign_num)
-        if idx - sign_num > 1:
-            idx -= 1
-        return np.around(value, int(idx))
-
     sdn = 5 # Number of significant digits
     for obj_fun_idx in range(len(target.obj_fun)):
         if rts(target.obj_fun[obj_fun_idx], sdn) <= rts(compared_with.obj_fun[obj_fun_idx], sdn):
diff --git a/epde/optimizers/moeadd/vis.py b/epde/optimizers/moeadd/vis.py
new file mode 100644
index 0000000..e9aae54
--- /dev/null
+++ b/epde/optimizers/moeadd/vis.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import numpy as np
+import matplotlib.pyplot as plt
+import matplotlib as mpl
+
+mpl.rcParams.update(mpl.rcParamsDefault)
+plt.rcParams['text.usetex'] = True
+
+SMALL_SIZE = 12
+mpl.rc('font', size=SMALL_SIZE)
+mpl.rc('axes', titlesize=SMALL_SIZE)
+
+class ParetoVisualizer(object):
+    def __init__(self, eq_pareto_levels):
+        '''
+        The easiest way to obtain pareto_levels is from the optimizer.pareto_levels attribute.
+        '''
+        self.pareto_frontier = eq_pareto_levels
+
+    def plot_pareto(self, dimensions: tuple = (0, 1), annotate_best=True, plot_level = 1,
+                    filename = None, save_format = 'eps'):
+        assert len(
+            dimensions) == 2, 'The pareto levels are projected on the 2D plane, therefore only 2 coordinates are processible'
+        coords = [[(solution.obj_fun[dimensions[0]], solution.obj_fun[dimensions[1]])
+                   for solution in self.pareto_frontier.levels[front_idx]]
+                  for front_idx in np.arange(plot_level)]#len(self.pareto_frontier.levels))]
+        if annotate_best:
+            try:
+                annotations = [[solution.latex_form for solution in self.pareto_frontier.levels[front_idx]]
+                               for front_idx in np.arange(len(self.pareto_frontier.levels))]
+            except AttributeError:
+                annotations = [[str(solution.obj_fun) for solution in self.pareto_frontier.levels[front_idx]]
+                               for front_idx in np.arange(len(self.pareto_frontier.levels))]
+
+        coords_arrays = []
+        for coord_set in coords:
+            coords_arrays.append(np.array(coord_set))
+
+        colors = ['r', 'k', 'b', 'y', 'g'] + \
+            ['m' for idx in np.arange(len(coords_arrays) - 5)]
+
+        if len(coords_arrays) > 1:
+            x_min = min(*[np.min(coord_arr[:, 0]) for coord_arr in coords_arrays])
+            x_max = max(*[np.max(coord_arr[:, 0]) for coord_arr in coords_arrays])
+            y_min = min(*[np.min(coord_arr[:, 1]) for coord_arr in coords_arrays])
+            y_max = max(*[np.max(coord_arr[:, 1]) for coord_arr in coords_arrays])
+        else:
+            x_min = np.min(coords_arrays[0][:, 0]); x_max = np.max(coords_arrays[0][:, 0])
+            y_min = np.min(coords_arrays[0][:, 1]); y_max =
np.max(coords_arrays[0][:, 1]) + + x_interval = max(x_max - x_min, 5) + y_interval = max(y_max - y_min, 2) + + plt.grid() + plt.xlim(x_min - 0.1 * x_interval, x_max + 0.8 * x_interval) # ax.set_ + plt.ylim( y_min - 0.1 * y_interval, y_max + 0.3 * y_interval) # ax.set_ + + plt.xlabel('Objective 1') + plt.ylabel('Objective 2') + + for front_idx in np.arange(min(len(coords_arrays), plot_level)): + + plt.scatter(coords_arrays[front_idx][:, 0], + coords_arrays[front_idx][:, 1], color=colors[front_idx]) + locs_used = [] + for front_elem_idx in np.arange(coords_arrays[front_idx].shape[0]): + if any([all(np.isclose(np.array((coords_arrays[front_idx][front_elem_idx, 0], + coords_arrays[front_idx][front_elem_idx, 1])), entry)) + for entry in locs_used]): + continue + else: + locs_used.append(np.array((coords_arrays[front_idx][front_elem_idx, 0], + coords_arrays[front_idx][front_elem_idx, 1]))) + if front_idx == 0 or not annotate_best: + if annotate_best: + annotation = annotations[front_idx][front_elem_idx] + if annotation[0] != r'$': + annotation = r'$' + annotation + r'$' + print(annotation) + plt.annotate(annotations[front_idx][front_elem_idx], + (coords_arrays[front_idx][front_elem_idx, 0] + 0.4, + coords_arrays[front_idx][front_elem_idx, 1] + 0.2), + bbox = dict(boxstyle="Square,pad=0.3", + fc="white", lw=0.5))#, + # fontsize = 'xx-large') + + + if filename is not None: + plt.savefig(filename + '.' + save_format, dpi = 300, quality = 94, format=save_format) + plt.show() diff --git a/epde/optimizers/strategy.py b/epde/optimizers/strategy.py index de0f22e..5900dbd 100644 --- a/epde/optimizers/strategy.py +++ b/epde/optimizers/strategy.py @@ -75,4 +75,4 @@ def apply_block(self, label, operator_kwargs): def result(self): if not self.run_performed: raise ValueError('Trying to get the output of the strategy before running it.') - return self.linked_blocks.output \ No newline at end of file + return self.linked_blocks.output diff --git a/epde/preprocessing/deriv_calculators.py b/epde/preprocessing/deriv_calculators.py index 1f1dcfa..798fa2d 100644 --- a/epde/preprocessing/deriv_calculators.py +++ b/epde/preprocessing/deriv_calculators.py @@ -7,6 +7,10 @@ """ import numpy as np +from numba import njit +import time + +import matplotlib.pyplot as plt import multiprocessing as mp from typing import Union @@ -14,6 +18,23 @@ from abc import ABC from epde.preprocessing.cheb import process_point_cheb +def Heatmap(Matrix, interval = None, area = ((0, 1), (0, 1)), xlabel = '', ylabel = '', figsize=(8,6), filename = None, title = ''): + y, x = np.meshgrid(np.linspace(area[0][0], area[0][1], Matrix.shape[0]), np.linspace(area[1][0], area[1][1], Matrix.shape[1])) + fig, ax = plt.subplots(figsize = figsize) + plt.xlabel(xlabel) + ax.set(ylabel=ylabel) + ax.xaxis.labelpad = -10 + if interval: + c = ax.pcolormesh(x, y, np.transpose(Matrix), cmap='RdBu', vmin=interval[0], vmax=interval[1]) + else: + c = ax.pcolormesh(x, y, np.transpose(Matrix), cmap='RdBu', vmin=min(-abs(np.max(Matrix)), -abs(np.min(Matrix))), + vmax=max(abs(np.max(Matrix)), abs(np.min(Matrix)))) + # set the limits of the plot to the limits of the data + ax.axis([x.min(), x.max(), y.min(), y.max()]) + fig.colorbar(c, ax=ax) + plt.title(title) + plt.show() + if type(filename) != type(None): plt.savefig(filename + '.eps', format='eps') class AbstractDeriv(ABC): def __init__(self, *args, **kwargs): @@ -130,8 +151,12 @@ def butterworth_filter(freqs, number_of_freqs, steepness): freqs_copy = np.copy(freqs) freqs_copy = np.abs(freqs_copy) freqs_copy.sort() 
+ # print('freqs', freqs) + # print('freqs_copy', freqs_copy[number_of_freqs - 1]) + butterworth_filter_multiplier = 1 / \ (1 + (freqs / freqs_copy[number_of_freqs - 1]) ** (2 * steepness)) + # print(butterworth_filter_multiplier) return freqs * butterworth_filter_multiplier def spectral_derivative_1d(self, func: np.ndarray, grid: np.ndarray, n=None, steepness=1): @@ -249,3 +274,124 @@ def spectral_derivative_high_ord(self, func: np.ndarray, grid: list, axis: int = derivs.append((deriv_descr, cur_deriv)) return derivs + +class TotalVariation(AbstractDeriv): + @staticmethod + def initial_guess(data: np.ndarray, dimensionality: tuple): + grad = np.array([np.gradient(data, axis=dim_idx) for dim_idx, dim in enumerate(dimensionality)]) + # print(grad.shape) + # w = np.array([np.zeros(dimensionality) for idx in np.arange(len(dimensionality)**2)], + # dtype = np.ndarray).reshape([len(dimensionality), len(dimensionality),] + list(dimensionality)) + w = np.array([np.gradient(grad[int(idx/2.)], axis = idx % 2) + for idx in np.arange(len(dimensionality)**2)]).reshape([len(dimensionality), + len(dimensionality),] + list(dimensionality)) + + lap_mul = np.array([np.zeros(dimensionality) + for idx in np.arange(len(dimensionality)**2)]).reshape([len(dimensionality), + len(dimensionality),] + list(dimensionality)) + return grad, w, lap_mul + + @staticmethod + def admm_step(data: np.ndarray, steps: list, initial_u: np.ndarray, initial_w: np.ndarray, + initial_lap: np.ndarray, lbd: float, reg_strng: float, c_const: float) -> tuple: + ''' + *data* has to be already Fourier-transformed + All inputs initial_u, initial_w & initial_lap have to be transformed by DFT. + ''' + def soft_thresholding(arg: np.ndarray, lbd: float) -> np.ndarray: + norm = np.linalg.norm(arg) + return max(norm - lbd, 0) * arg / norm + + def diff_factor(N: int, d: float = 1.) 
-> np.ndarray: + #freqs = np.fft.fftfreq(N, d = d) + freqs = np.arange(N) + return np.exp(-2*np.pi*freqs/N) - 1 + + initial_u = np.fft.fftn(initial_u, s = initial_u.shape[1:]) + # print(initial_u[0].shape) + # print(np.max(np.abs(initial_u[0])), np.min(np.abs(initial_u[0]))) + Heatmap(np.abs(initial_u[0]), title = 'FFT') + initial_w_fft = np.fft.fftn(initial_w, s = initial_w.shape[2:]) + initial_lap_fft = np.fft.fftn(initial_lap, s = initial_lap.shape[2:]) + + diff_factors = np.array([np.moveaxis(np.broadcast_to(diff_factor(dim_size, d = steps[comp_idx]), + shape = initial_u[0].shape), + source = -1, destination = comp_idx) + for comp_idx, dim_size in enumerate(initial_u[0].shape)]) + + lbd_inv = lbd**(-1) + + u_freq = np.copy(initial_u) + + for grad_idx in range(initial_u.shape[0]): + u_nonzero_freq = np.zeros_like(u_freq[grad_idx]) + + section_len = u_freq[grad_idx].shape[grad_idx] + putting_shape = np.ones(u_freq[grad_idx].ndim, dtype = np.int8) + putting_shape[grad_idx] = section_len-1 + + putting_args = {'indices' : np.arange(1, u_freq[grad_idx].shape[grad_idx]).reshape(putting_shape), + 'axis' : grad_idx} + # print(f'putting_args {putting_args}') + taking_args = {'indices' : range(1, u_freq[grad_idx].shape[grad_idx]), + 'axis' : grad_idx} + # print(f'taking_args {taking_args}') + + def take(arr: np.ndarray, taking_args: dict = taking_args): + return np.take(arr, **taking_args) + + denum_part = lbd_inv * np.sum([np.abs(take(factor))**2 for factor in diff_factors]) + partial_sum = [take(diff_factors[arg_idx]) * (take(initial_w_fft[grad_idx, arg_idx]) - + take(initial_lap_fft[grad_idx, arg_idx])) + for arg_idx in range(u_freq.shape[0])] + print([(np.min(elem), np.max(elem)) for elem in partial_sum]) # * take(data) * take(data) + print(np.min(take(data)), np.max(take(data))) + print('3rd term:', np.min(reg_strng * take(diff_factors[grad_idx]) ), np.max(reg_strng * take(diff_factors[grad_idx]) )) + + grad_nonzero_upd = (lbd_inv * np.sum(partial_sum, axis = 0) + reg_strng * take(diff_factors[grad_idx]) * take(data) / + (denum_part + reg_strng/np.abs(take(diff_factors[grad_idx])))) + print('shit shape', (lbd_inv * np.sum(partial_sum, axis = 0) + reg_strng * take(diff_factors[grad_idx]) * take(data)).shape) + print(np.max(lbd_inv * np.sum(partial_sum, axis = 0) + reg_strng * take(diff_factors[grad_idx]) * take(data)), + np.min(lbd_inv * np.sum(partial_sum, axis = 0) + reg_strng * take(diff_factors[grad_idx]) * take(data))) + Heatmap(np.real(lbd_inv * np.sum(partial_sum, axis = 0) + reg_strng * take(diff_factors[grad_idx]) * take(data)), title = 'partial') + + np.put_along_axis(u_nonzero_freq, values = grad_nonzero_upd, **putting_args) + u_freq[grad_idx] = u_nonzero_freq + Heatmap(np.real(u_freq[0]), title = 'inside the optimization') + + initial_u = np.real(np.fft.ifftn(u_freq, u_freq.shape[1:])) + for i in range(initial_w.shape[0]): + for j in range(initial_w.shape[1]): + initial_w[i, j] = soft_thresholding(np.gradient(initial_u[j], axis = i) + initial_lap[i, j], lbd) + + for i in range(initial_w.shape[0]): + for j in range(initial_w.shape[1]): + initial_lap[i, j] = c_const*(initial_lap[i, j] + np.gradient(initial_u[j], axis = i) + - initial_w[i, j]) + + time.sleep(15) + return initial_u, initial_w, initial_lap + + def optimize_with_admm(self, data, lbd: float, reg_strng: float, c_const: float, nsteps: int = 1e5): + u, w, lap_mul = self.initial_guess(data=data, dimensionality=data.shape) + data_fft = np.fft.fftn(data) + print(f'For some reason has to be abysmal: {np.min(np.real(data_fft)), 
np.max(np.real(data_fft))}')
+        for epoch in range(int(nsteps)):
+            print(epoch)
+            if epoch % 100 == 0:
+                Heatmap(u[1], title=str(epoch))
+                plt.plot(u[1, :, int(u.shape[2]/2.)])
+                plt.show()
+            u, w, lap_mul = self.admm_step(data = data_fft, steps = np.ones(data.ndim), initial_u = u, initial_w = w,
+                                           initial_lap=lap_mul, lbd = lbd, reg_strng = reg_strng,
+                                           c_const = c_const)
+        return u
+
+    # def differentiate(self, data: np.ndarray, max_order: Union[int, list],
+    #                   mixed: bool = False, axis=None, *grids) -> list:
+    #     if isinstance(max_order, int):
+    #         max_order = [max_order,] * data.ndim
+    #     else:
+    #         max_order = np.full_like(max_order, np.max(max_order))
+
+    #     if len(grids) == 1 and data.ndim == 2:
diff --git a/epde/structure/factor.py b/epde/structure/factor.py
index 3fb0b6d..5aecc24 100644
--- a/epde/structure/factor.py
+++ b/epde/structure/factor.py
@@ -9,25 +9,27 @@
 import numpy as np
 import copy
 import torch
+from typing import Callable
 
 from epde.structure.Tokens import TerminalToken
 import epde.globals as global_var
-from epde.supplementary import factor_params_to_str, train_ann, use_ann_to_predict
+from epde.supplementary import factor_params_to_str, train_ann, use_ann_to_predict, exp_form
 
 
 class Factor(TerminalToken):
-    __slots__ = ['_params', '_params_description', '_hash_val',
+    __slots__ = ['_params', '_params_description', '_hash_val', '_latex_constructor',
                  'label', 'ftype', 'grid_set', 'grid_idx', 'is_deriv', 'deriv_code',
                  'cache_linked', '_status', 'equality_ranges', '_evaluator', 'saved']
 
-    def __init__(self, token_name: str, status: dict, family_type: str,
-                 randomize: bool = False, params_description=None,
-                 deriv_code=None, equality_ranges=None):
+    def __init__(self, token_name: str, status: dict, family_type: str, latex_constructor: Callable,
+                 randomize: bool = False, params_description=None, deriv_code=None,
+                 equality_ranges=None):
         self.label = token_name
         self.ftype = family_type
         self.status = status
         self.grid_set = False
         self._hash_val = np.random.randint(0, 1e9)
+        self._latex_constructor = latex_constructor
 
         self.is_deriv = not (deriv_code is None)
         self.deriv_code = deriv_code
@@ -176,6 +178,19 @@ def name(self):
         form += '}'
         return form
 
+    @property
+    def latex_name(self):
+        if self._latex_constructor is not None:
+            params_dict = {}
+            for param_idx, param_info in self.params_description.items():
+                mnt, exp = exp_form(self.params[param_idx], 3)
+                exp_str = r'\cdot 10^{{{0}}} '.format(str(exp)) if exp != 0 else ''
+
+                params_dict[param_info['name']] = (self.params[param_idx], str(mnt) + exp_str)
+            return self._latex_constructor(self.label, **params_dict)
+        else:
+            return self.name # other implementations are possible
+
     @property
     def hash_descr(self) -> int:
         return self._hash_val
@@ -222,4 +237,4 @@ def __deepcopy__(self, memo=None):
         return new_struct
 
     def use_cache(self):
-        self.cache_linked = True
+        self.cache_linked = True
\ No newline at end of file
diff --git a/epde/structure/main_structures.py b/epde/structure/main_structures.py
index 466ccaa..a309499 100644
--- a/epde/structure/main_structures.py
+++ b/epde/structure/main_structures.py
@@ -23,7 +23,7 @@
 from epde.structure.encoding import Chromosome
 from epde.interface.token_family import TFPool
 from epde.decorators import HistoryExtender, ResetEquationStatus
-from epde.supplementary import filter_powers, normalize_ts, population_sort, flatten
+from epde.supplementary import filter_powers, normalize_ts, population_sort, flatten, rts, exp_form
 from epde.structure.factor import Factor
 from
epde.structure.structure_template import ComplexStructure, check_uniqueness @@ -79,7 +79,7 @@ def use_cache(self): if not self.structure[idx].cache_linked: self.structure[idx].use_cache() - # TODO: make self.descr_variable_marker setting for defined parameter + # TODO: non-urgent, make self.descr_variable_marker setting for defined parameter @singledispatchmethod def defined(self, passed_term): @@ -272,6 +272,12 @@ def name(self): form += ' * ' return form + @property + def latex_form(self): + form = reduce(lambda x, y: x + r' \cdot ' + y, [factor.latex_name for + factor in self.structure]) + return form + def contains_deriv(self, family=None): if family is None: return any([factor.is_deriv and factor.deriv_code != [None,] for factor in self.structure]) @@ -440,7 +446,7 @@ def forbidden_token_labels(self): return forbidden_tokens def restore_property(self, deriv: bool = False, mandatory_family: bool = False): - # TODO: rewrite for an arbitrary equation property check + # TODO: non-urgent, rewrite for an arbitrary equation property check if not (deriv or mandatory_family): raise ValueError('No property passed for restoration.') while True: @@ -622,18 +628,6 @@ def weights_final(self, weights): self._weights_final = weights self.weights_final_evald = True - @property - def latex_form(self): - form = r"" - for term_idx in range(len(self.structure)): - if term_idx != self.target_idx: - form += str(self.weights_final[term_idx]) if term_idx < self.target_idx else str( - self.weights_final[term_idx-1]) - form += ' * ' + self.structure[term_idx].latex_form + ' + ' - form += str(self.weights_final[-1]) + ' = ' + \ - self.structure[self.target_idx].text_form - return form - @property def text_form(self): form = '' @@ -652,36 +646,25 @@ def text_form(self): form += 'k_' + str(len(self.structure)) + ' = 0' return form - # def solver_form(self, grids: list = None): - # raise DeprecationWarning('To be removed from the framework!') - # if self.solver_form_defined: - # # print(self.text_form) - # return self._solver_form - # else: - # self._solver_form = [] - # for term_idx in range(len(self.structure)): - # if term_idx != self.target_idx: - # term_form = self.structure[term_idx].solver_form - # weight = self.weights_final[term_idx] if term_idx < self.target_idx else self.weights_final[term_idx-1] - # term_form[0] = term_form[0] * weight - # term_form[0] = torch.flatten(term_form[0]).unsqueeze( - # 1).type(torch.FloatTensor) - # self._solver_form.append(term_form) - - # free_coeff_weight = torch.from_numpy(np.full_like(a=global_var.grid_cache.get('0'), - # fill_value=self.weights_final[-1])) - # free_coeff_weight = torch.flatten(free_coeff_weight).unsqueeze(1).type(torch.FloatTensor) - # target_weight = torch.from_numpy(np.full_like(a=global_var.grid_cache.get('0'), - # fill_value=-1.)) - # target_form = self.structure[self.target_idx].solver_form - # target_form[0] = target_form[0] * target_weight - # target_form[0] = torch.flatten(target_form[0]).unsqueeze(1).type(torch.FloatTensor) - - # self._solver_form.append([free_coeff_weight, [None], 0]) - # self._solver_form.append(target_form) - # self.solver_form_defined = True - # return self._solver_form - + @property + def latex_form(self): + form = self.structure[self.target_idx].latex_form + r' = ' + for idx, term in enumerate(self.structure): + idx_corrected = idx if idx <= self.target_idx else idx - 1 + if idx == self.target_idx or self.weights_final[idx_corrected] == 0: + continue + + digits_rounding_max = 3 + mnt, exp = 
exp_form(self.weights_final[idx_corrected], digits_rounding_max) + exp_str = r'\cdot 10^{{{0}}} '.format(str(exp)) if exp != 0 else '' + form += str(mnt) + exp_str + term.latex_form + r' + ' + + mnt, exp = exp_form(self.weights_final[-1], digits_rounding_max) + exp_str = r'\cdot 10^{{{0}}} '.format(str(exp)) if exp != 0 else '' + + form += str(mnt) + exp_str + return form + @property def state(self): return self.text_form @@ -787,7 +770,7 @@ def solver_formed_grid(training_grid=None): def check_metaparameters(metaparameters: dict): metaparam_labels = ['terms_number', 'max_factors_in_term', 'sparsity'] return True - # TODO: fix this check + # TODO: maybe fix this check, non-urgent # if any([((label not in metaparameters.keys()) and ) for label in metaparam_labels]): # print('required metaparameters:', metaparam_labels, 'metaparameters:', metaparameters) # raise ValueError('Only partial metaparameter vector has been passed.') @@ -885,17 +868,7 @@ def equation_opt_iteration(population, evol_operator, population_size, iter_inde gc.collect() population = evol_operator.apply(population, unexplained_vars) return population - - def evaluate(self, normalize=True, grids: list = None): - raise DeprecationWarning('Evaluation of system is not necessary') - if len(self.vals) == 1: - value = [equation.evaluate(normalize, return_val=True)[0] for equation in self.vals][0] - # self.vals[0].evaluate(normalize = normalize, return_val = True)[0] - else: - value = np.sum([equation.evaluate(normalize, return_val=True)[0] for equation in self.vals]) - value = np.sum(np.abs(value)) - return value - + @property def obj_fun(self): return np.array(flatten([func(self) for func in self.obj_funs])) @@ -928,13 +901,14 @@ def __eq__(self, other): @property def latex_form(self): - form = r"\begin{eqnarray*}" - for equation in self.vals: - form += equation.latex_form + r", \\ " - form += r"\end{eqnarray*}" + form = r"\begin{eqnarray*} " + for idx, equation in enumerate(self.vals): + postfix = '' if idx == len(self.vals) - 1 else r", \\ " + form += equation.latex_form + postfix + form += r" \end{eqnarray*}" + return form def __hash__(self): - # print(f'GETTING HASH VALUE OF SoEq: {self.vals.hash_descr}') return hash(self.vals.hash_descr) def __deepcopy__(self, memo=None): @@ -970,14 +944,6 @@ def solver_params(self, full_domain, grids=None): ''' Returns solver form, grid and boundary conditions ''' - # if len(self.vals) > 1: - # raise Exception('Solver form is defined only for a "system", that contains a single equation.') - # else: - # form = self.vals[0].solver_form() - # grid = solver_formed_grid() - # bconds = self.vals[0].boundary_conditions(full_domain = full_domain) - # return form, grid, bconds - equation_forms = [] bconds = [] @@ -996,7 +962,7 @@ def fitness_calculated(self): return all([equation.fitness_calculated for equation in self.vals]) def save(self, file_name='epde_systems.pickle'): - dir = os.getcwd() + directory = os.getcwd() with open(file_name, 'wb') as file: to_save = ([equation.text_form for equation in self.vals], self.tokens_for_eq + self.tokens_supp) diff --git a/epde/supplementary.py b/epde/supplementary.py index e9b305f..7cb0b4a 100644 --- a/epde/supplementary.py +++ b/epde/supplementary.py @@ -14,6 +14,24 @@ import matplotlib.pyplot as plt +def exp_form(a, sign_num: int = 4): + if np.isclose(a, 0): + return 0.0, 0 + exp = np.floor(np.log10(np.abs(a))) + return np.around(a / 10**exp, sign_num), int(exp) # np.sign(a) * + +def rts(value, sign_num: int = 5): + """ + Round to a ``sign_num`` of 
significant digits. + """ + if value == 0: + return 0 + magn_top = np.log10(value) + idx = -(np.sign(magn_top)*np.ceil(np.abs(magn_top)) - sign_num) + if idx - sign_num > 1: + idx -= 1 + return np.around(value, int(idx)) + def train_ann(grids: list, data: np.ndarray, epochs_max: int = 500): dim = 1 if np.any([s == 1 for s in data.shape]) and data.ndim == 2 else data.ndim assert len(grids) == dim, 'Dimensionality of data does not match with passed grids.' @@ -255,11 +273,11 @@ def define_derivatives(var_name='u', dimensionality=1, max_order=2): for order in range(max_order[var_idx]): var_deriv_orders.append([var_idx,] * (order+1)) if order == 0: - deriv_names.append('d' + var_name + '/dx' + str(var_idx+1)) + deriv_names.append('d' + var_name + '/dx' + str(var_idx)) else: deriv_names.append( - 'd^'+str(order+1) + var_name + '/dx'+str(var_idx+1)+'^'+str(order+1)) + 'd^'+str(order+1) + var_name + '/dx'+str(var_idx)+'^'+str(order+1)) print('Deriv orders after definition', var_deriv_orders) return deriv_names, var_deriv_orders diff --git a/epde/vis.py b/epde/vis.py deleted file mode 100644 index 9fc7e31..0000000 --- a/epde/vis.py +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Fri Feb 26 15:55:34 2021 - -@author: mike_ubuntu -""" -import numpy as np -import matplotlib.pyplot as plt - -class ParetoVisualizer(object): - def __init__(self, eq_pareto_levels): - ''' - Проще всего получить pareto_levels из атрибута optimizer.pareto_levels - ''' - self.pareto_frontier = eq_pareto_levels - - def plot_pareto(self, dimensions: tuple = (0, 1), annotate_best=True, plot_level = 1, - filename = None, save_format = 'eps'): - assert len( - dimensions) == 2, 'The pareto levels are projected on the 2D plane, therefore only 2 coordinates are processible' - coords = [[(solution.obj_fun[dimensions[0]], solution.obj_fun[dimensions[1]]) for solution in self.pareto_frontier.levels[front_idx]] - for front_idx in np.arange(len(self.pareto_frontier.levels))] - if annotate_best: - annotations = [[solution.latex_form for solution in self.pareto_frontier.levels[front_idx]] - for front_idx in np.arange(len(self.pareto_frontier.levels))] - coords_arrays = [] - for coord_set in coords: - coords_arrays.append(np.array(coord_set)) - - colors = ['r', 'k', 'b', 'y', 'g'] + \ - ['m' for idx in np.arange(len(coords_arrays) - 5)] - fig, ax = plt.subplots() - ax.set_xscale('log') - ax.grid() - for front_idx in np.arange(min(len(coords_arrays), plot_level)): - ax.scatter(coords_arrays[front_idx][:, 0], - coords_arrays[front_idx][:, 1], color=colors[front_idx]) - for front_elem_idx in np.arange(coords_arrays[front_idx].shape[0]): - if front_idx == 0 or not annotate_best: - if annotate_best: - annotation = annotations[front_idx][front_elem_idx] - if annotation[0] != '$': - annotation = '$' + annotation + '$' - ax.annotate(annotations[front_idx][front_elem_idx], - (coords_arrays[front_idx][front_elem_idx, 0], coords_arrays[front_idx][front_elem_idx, 1]), - fontsize = 'xx-large') - - - if filename is not None: - plt.savefig(filename + '.' + save_format, dpi = 300, quality = 94, format=save_format) - fig.show() diff --git a/setup.py b/setup.py index 70c0529..aedcd65 100644 --- a/setup.py +++ b/setup.py @@ -36,7 +36,7 @@ def get_requirements(): setup( name = 'epde', - version = '1.2.12', + version = '1.2.13', description = SHORT_DESCRIPTION, long_description="PLACEHOLDER", # long_description_content_type='text/x-rst',
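
Usage note (not part of the patch): the sketch below shows how the new visualization entry point added in epde/interface/interface.py is expected to be called after a multiobjective search. The search construction and fit call are placeholders that depend on the concrete problem; only visualize_solutions() and the keyword arguments it forwards to ParetoVisualizer.plot_pareto (annotate_best, plot_level, filename, save_format) come from this patch.

import epde

search = epde.EpdeSearch()          # configure the search as usual; multiobjective mode is assumed
# search.fit(...)                   # run the equation discovery first (arguments omitted here)

# Project the discovered Pareto levels onto two objectives; the best candidates are
# annotated with their LaTeX forms (SoEq.latex_form) by ParetoVisualizer internally.
search.visualize_solutions(dimensions=[0, 1], annotate_best=True, plot_level=1,
                           filename=None, save_format='eps')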
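
A second, self-contained sketch (also not part of the patch) of how the LaTeX-form machinery composes: exp_form() from epde/supplementary.py splits a coefficient into a rounded mantissa and a decimal exponent, and a latex_form constructor (copied here from the PhasedSine1DTokens variant above) turns a token label plus its parameter tuples into a LaTeX string, which is what Factor.latex_name does internally. The concrete numbers are illustrative only.

from epde.supplementary import exp_form

def latex_form(label, **params):
    # params maps a parameter name to a (numeric value, text form) tuple,
    # mirroring the dictionary assembled by Factor.latex_name.
    pwr_sign = r'^{{{0}}}'.format(params["power"][1]) if params["power"][0] != 1 else ''
    return label + pwr_sign + r'(' + params["freq"][1] + r' x_{1} + ' \
        + params["phase"][1] + r')'

mnt, expn = exp_form(1.256637, 3)                       # -> (1.257, 0)
freq_text = str(mnt) + (r'\cdot 10^{{{0}}} '.format(expn) if expn != 0 else '')
print(latex_form('sine', power=(1, '1'), freq=(1.256637, freq_text), phase=(0.42, '0.42')))
# prints: sine(1.257 x_{1} + 0.42)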