Skip to content

Commit

Permalink
Do experiments with lambda
Browse files — browse the repository at this point in the history
Branch information:
LisIva committed Apr 18, 2024
1 parent 89bb2ed commit b92db9b
Show file tree
Hide file tree
Showing 9 changed files with 58 additions and 43 deletions.
2 changes: 1 addition & 1 deletion Experiments on no noise data/experiment_burgers_sindy.py
Original file line number Diff line number Diff line change
Expand Up @@ -152,7 +152,7 @@ def hash_term(term):
arr = np.array([differences_ls_none, time_ls, num_found_eq])
arr = arr.T
df = pd.DataFrame(data=arr, columns=['MAE', 'time', 'number_found_eq'])
df.to_csv(os.path.join(Path().absolute().parent, "data_burg_sindy", f"{title}.csv"))
df.to_csv(os.path.join(Path().absolute().parent, "data_pysindy_burg", f"{title}.csv"))

if print_results:
print('\nTime for every run, s:')
Expand Down
4 changes: 2 additions & 2 deletions Experiments on no noise data/experiment_kdv.py
Original file line number Diff line number Diff line change
Expand Up @@ -126,9 +126,9 @@ def hash_term(term):
grids = np.meshgrid(t, x, indexing='ij')

''' Parameters of the experiment '''
write_csv = False
write_csv = True
print_results = True
max_iter_number = 1
max_iter_number = 50
title = 'dfs0'

time_ls = []
Expand Down
6 changes: 3 additions & 3 deletions Experiments on no noise data/experiment_kdv_sindy.py
Original file line number Diff line number Diff line change
Expand Up @@ -103,9 +103,9 @@ def hash_term(term):
grids = np.meshgrid(t, x, indexing='ij')

''' Parameters of the experiment '''
write_csv = False
write_csv = True
print_results = True
max_iter_number = 1
max_iter_number = 50
title = 'dfs0'

time_ls = []
Expand Down Expand Up @@ -154,7 +154,7 @@ def hash_term(term):
arr = np.array([differences_ls_none, time_ls, num_found_eq])
arr = arr.T
df = pd.DataFrame(data=arr, columns=['MAE', 'time', 'number_found_eq'])
df.to_csv(os.path.join(Path().absolute().parent, "data_kdv_sindy", f"{title}.csv"))
df.to_csv(os.path.join(Path().absolute().parent, "data_pysindy_kdv", f"{title}.csv"))

if print_results:
print('\nTime for every run, s:')
Expand Down
6 changes: 3 additions & 3 deletions Experiments on no noise data/experiment_wave.py
Original file line number Diff line number Diff line change
Expand Up @@ -96,9 +96,9 @@ def hash_term(term):
grids = np.meshgrid(t, x, indexing='ij')

''' Parameters of the experiment '''
write_csv = False
print_results = False
max_iter_number = 1
write_csv = True
print_results = True
max_iter_number = 50
title = 'dfs0'
''''''

Expand Down
7 changes: 4 additions & 3 deletions Experiments on noised data/noised_burgers.py
Original file line number Diff line number Diff line change
Expand Up @@ -97,10 +97,11 @@ def hash_term(term):
grids = np.meshgrid(t, x, indexing='ij')

''' Parameters of the experiment '''
write_csv = False
write_csv = True
print_results = True
max_iter_number = 50
magnitudes = [1. * 1e-5, 1.5 * 1e-5, 2 * 1e-5, 2.5 * 1e-5, 3. * 1e-5, 3.67 * 1e-5] #
max_iter_number = 5
# magnitudes = [1. * 1e-5, 1.5 * 1e-5, 2 * 1e-5, 2.5 * 1e-5, 3. * 1e-5, 3.67 * 1e-5] #
magnitudes = [3. * 1e-5]

terms = [('du/dx1',), ('du/dx2', 'u'), ('u',), ('du/dx2',), ('u', 'du/dx1'), ('du/dx1', 'du/dx2'), ]
hashed_ls = [hash_term(term) for term in terms]
Expand Down
2 changes: 1 addition & 1 deletion Experiments on noised data/noised_burgers_sindy.py
Original file line number Diff line number Diff line change
Expand Up @@ -161,7 +161,7 @@ def hash_term(term):
arr = np.array([differences_ls_none, time_ls, num_found_eq])
arr = arr.T
df = pd.DataFrame(data=arr, columns=['MAE', 'time', 'number_found_eq'])
df.to_csv(os.path.join(Path().absolute().parent, "data_burg_sindy", f"{title}.csv"))
df.to_csv(os.path.join(Path().absolute().parent, "data_pysindy_burg", f"{title}.csv"))
if print_results:
print()
print(f'\nAverage time, s: {sum(time_ls) / len(time_ls):.2f}')
Expand Down
5 changes: 3 additions & 2 deletions Experiments on noised data/noised_kdv_sindy.py
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,8 @@ def hash_term(term):
draw_time = []
draw_avgmae = []
start_gl = time.time()
magnitudes = [1. * 1e-5, 3.5 * 1e-5, 5.5 * 1e-5, 8. * 1e-5, 2.26 * 1e-4]
# magnitudes = [1. * 1e-5, 1. * 1e-4, 3.5 * 1e-5, 5.5 * 1e-5, 8. * 1e-5, 2.26 * 1e-4]
magnitudes = [1. * 1e-4]
for magnitude in magnitudes:
title = f'dfs{magnitude}'

Expand Down Expand Up @@ -162,7 +163,7 @@ def hash_term(term):
arr = np.array([differences_ls_none, time_ls, num_found_eq])
arr = arr.T
df = pd.DataFrame(data=arr, columns=['MAE', 'time', 'number_found_eq'])
df.to_csv(os.path.join(Path().absolute().parent, "data_kdv_sindy", f"{title}.csv"))
df.to_csv(os.path.join(Path().absolute().parent, "data_pysindy_kdv", f"{title}.csv"))

if print_results:
print()
Expand Down
56 changes: 33 additions & 23 deletions Experiments on noised data/noised_wave.py
Original file line number Diff line number Diff line change
Expand Up @@ -100,15 +100,20 @@ def hash_term(term):
coefficients[1] = 0.

''' Parameters of the experiment '''
write_csv = False
write_csv = True
print_results = True
max_iter_number = 50
magnitudes = [1. * 1e-2]
magnitudes = [2. * 1e-5, 2.5 * 1e-5, 3. * 1e-5, 3.2 * 1e-5, 3.47 * 1e-5]
# magnitudes = [3.47 * 1e-5]

draw_not_found = []
draw_time = []
draw_avgmae = []
start_gl = time.time()
not_found_ls = []
# string = "0.04073797307153838 * d^2u/dx2^2{power: 1.0} + 0.0014821321520873156 * du/dx1{power: 1.0} + 0.034797130718506576 = d^2u/dx1^2{power: 1.0}"
# difference_ls = find_diff_str(string, coefficients)

for magnitude in magnitudes:
title = f'dfs{magnitude}'

Expand Down Expand Up @@ -146,15 +151,18 @@ def hash_term(term):
differences_ls.append(min(difference_ls))
differences_ls_none.append(min(difference_ls))
mean_diff_ls += difference_ls
print(f"Num. eq. found: {len(difference_ls)}")
else:
differences_ls_none.append(None)
print(f"Num. eq. found: 0")

num_found_eq.append(len(difference_ls))
print('Overall time is:', time1)
print(f'Iteration processed: {i+1}/{max_iter_number}\n')
i += 1
time_ls.append(time1)

not_found_ls.append(num_found_eq.count(0))
if write_csv:
arr = np.array([differences_ls_none, time_ls, num_found_eq])
arr = arr.T
Expand Down Expand Up @@ -182,24 +190,26 @@ def hash_term(term):
draw_time.append(sum(time_ls) / len(time_ls))

end_gl = time.time()
print(f"Overall time: {end_gl - start_gl:.2f}, s.")
plt.title("SymNet")
plt.plot(magnitudes, draw_not_found, linewidth=2, markersize=9, marker='o')
plt.ylabel("No. runs with not found eq.")
plt.xlabel("Magnitude value")
plt.grid()
plt.show()

plt.plot(magnitudes, draw_time, linewidth=2, markersize=9, marker='o')
plt.title("SymNet")
plt.ylabel("Time, s.")
plt.xlabel("Magnitude value")
plt.grid()
plt.show()

plt.plot(magnitudes, draw_avgmae, linewidth=2, markersize=9, marker='o')
plt.title("SymNet")
plt.ylabel("Average MAE")
plt.xlabel("Magnitude value")
plt.grid()
plt.show()
print(f"Overall time: {(end_gl - start_gl) / 3600:.2f}, h.")
print(f"Runs where eq was not found for each magn: {not_found_ls}")
# print(f"Overall time: {end_gl - start_gl:.2f}, s.")
# plt.title("Original")
# plt.plot(magnitudes, draw_not_found, linewidth=2, markersize=9, marker='o')
# plt.ylabel("No. runs with not found eq.")
# plt.xlabel("Magnitude value")
# plt.grid()
# plt.show()
#
# plt.plot(magnitudes, draw_time, linewidth=2, markersize=9, marker='o')
# plt.title("Original")
# plt.ylabel("Time, s.")
# plt.xlabel("Magnitude value")
# plt.grid()
# plt.show()
#
# plt.plot(magnitudes, draw_avgmae, linewidth=2, markersize=9, marker='o')
# plt.title("Original")
# plt.ylabel("Average MAE")
# plt.xlabel("Magnitude value")
# plt.grid()
# plt.show()
13 changes: 8 additions & 5 deletions symnet/initcoefficients.py
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,7 @@ def closure():

def select_model(input_names, left_pool, u, derivs, shape, additional_tokens):
models, losses, left_sides = [], [], []
info = ModelsInfo()
# info = ModelsInfo()
for left_side_name in left_pool:
for sparsity in [0.001, 0.0000001]:
m_input_names, idx = clean_names(left_side_name, input_names)
Expand All @@ -106,9 +106,9 @@ def select_model(input_names, left_pool, u, derivs, shape, additional_tokens):
models.append(model)
left_sides.append(left_side_name)

info.selection_info(model, last_loss, sparsity, left_side_name)
# info.selection_info(model, last_loss, sparsity, left_side_name)

info.print_best()
# info.print_best()
idx = losses.index(min(losses))
return models[idx], left_sides[idx]

Expand All @@ -134,12 +134,15 @@ def get_csym_tsym(u, derivs, shape, input_names, pool_names, additional_tokens=N
"""
Can process only one variable! (u)
"""
# TODO: SymNet имеет 4 todo (+, pool_terms, preproc_input)

# TODO: если в левой части e.g. d^2u/dx2^2, то как получить в правой слагаемое d^2u/dx2^2 * u?

left_pool = get_left_pool(max_deriv_order)
model, left_side_name = select_model(input_names, left_pool, u, derivs, shape, sparsity, additional_tokens)
model, left_side_name = select_model(input_names, left_pool, u, derivs, shape, additional_tokens)
tsym, csym = model.coeffs(calprec=16)
# save_fig(csym)
pool_sym_ls = cast_to_symbols(pool_names)

csym_pool_ls = get_csym_pool(tsym, csym, pool_sym_ls, left_side_name)
# save_fig(np.array(csym_pool_ls), add_left=False)
return dict(zip(pool_sym_ls, csym_pool_ls)), pool_sym_ls

0 comments on commit b92db9b

Please sign in to comment.