Commit: interim params
anujanegi committed Apr 12, 2024
1 parent bfb221a commit 8181718
Showing 2 changed files with 109 additions and 97 deletions.
2 changes: 1 addition & 1 deletion bsi_zoo/run_benchmark.ipynb
@@ -1 +1 @@
{"cells": [{"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["from joblib import Memory\n", "from pathlib import Path\n", "import numpy as np\n", "import pandas as pd\n", "import time"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["from bsi_zoo.benchmark import Benchmark\n", "from bsi_zoo.estimators import (\n", " iterative_L1,\n", " iterative_L2,\n", " iterative_L1_typeII,\n", " iterative_L2_typeII,\n", " gamma_map,\n", " iterative_sqrt,\n", " fake_solver,\n", " eloreta,\n", ")\n", "from bsi_zoo.metrics import euclidean_distance, mse, emd, f1, reconstructed_noise\n", "from bsi_zoo.config import get_leadfield_path"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["n_jobs = 30\n", "nruns = 10\n", "spatial_cv = [False, True]\n", "subjects = [\"CC120166\", \"CC120313\", \"CC120264\", \"CC120313\", \"CC120309\"]\n", "metrics = [\n", " euclidean_distance,\n", " mse,\n", " emd,\n", " f1,\n", " reconstructed_noise,\n", "] # list of metric functions here\n", "nnzs = [1, 2, 3, 5]\n", "alpha_SNR = [0.99, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.01]\n", "# estimator_alphas = [\n", "# 0.01,\n", "# 0.01544452,\n", "# 0.02385332,\n", "# 0.03684031,\n", "# 0.0568981,\n", "# 0.08787639,\n", "# 0.13572088,\n", "# 0.2096144,\n", "# ] # logspaced\n", "estimator_alphas = np.logspace(0, -2, 20)[1:]\n", "memory = Memory(\".\")"]}], "metadata": {"kernelspec": {"display_name": "Python 3", "language": "python", "name": "python3"}, "language_info": {"codemirror_mode": {"name": "ipython", "version": 3}, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.4"}}, "nbformat": 4, "nbformat_minor": 2}
{"cells":[{"cell_type":"code","execution_count":75,"metadata":{},"outputs":[],"source":["import os\n","import numpy as np\n","import pandas as pd\n","\n","path = \"/home/anuja/tj/bdsg/BSI-Zoo/bsi_zoo/data/free4\"\n","files = os.listdir(path)\n","benchmark_files_free = [i for i in files if 'spatialCV' not in i]\n","benchmark_files_free_with_spatial_cv = [i for i in files if 'spatialCV' in i]\n","\n","\n","eloreta_path = \"/home/anuja/tj/bdsg/BSI-Zoo/bsi_zoo/data/eloreta\"\n","files = os.listdir(eloreta_path)\n","benchmark_files_eloreta = [i for i in files if 'benchmark' not in i]\n","\n","dfs_free = [pd.read_pickle(f'{path}/{file}') for file in benchmark_files_free]\n","df_results_free = pd.concat(dfs_free)\n","df_results_free['alpha'] = 1-df_results_free['alpha'].astype(float)\n","\n","\n","\n","dfs_free_with_spatial_cv = [pd.read_pickle(f'{path}/{file}') for file in benchmark_files_free_with_spatial_cv]\n","eloreta_dfs = [pd.read_pickle(f'{eloreta_path}/{file}') for file in benchmark_files_eloreta]\n","df_results_free_with_spatial_cv = pd.concat(dfs_free_with_spatial_cv)\n","df_results_eloreta = pd.concat(eloreta_dfs)\n","\n","# join with eloreta\n","df_results_free_with_spatial_cv = pd.concat([df_results_eloreta, df_results_free_with_spatial_cv])\n","df_results_free_with_spatial_cv['alpha'] = 1-df_results_free_with_spatial_cv['alpha'].astype(float)"]},{"cell_type":"code","execution_count":92,"metadata":{},"outputs":[],"source":["# save df_results_free as .mat file\n","import scipy.io\n","Output = {}\n","\n","scipy.io.savemat('free_spatialCV.mat', df_results_free_with_spatial_cv.to_dict('list'))"]},{"cell_type":"code","execution_count":95,"metadata":{},"outputs":[{"data":{"text/plain":["dict_keys(['__header__', '__version__', '__globals__', 'estimator', 'euclidean_distance', 'mse', 'emd', 'f1', 'reconstructed_noise', 'alpha', 'cov_type', 'n_sensors', 'n_sources', 'n_times', 'nnz', 'orientation_type', 'path_to_leadfield', 'extra_params', 'estimator__alpha', 'estimator__alpha_cv', 'error'])"]},"execution_count":95,"metadata":{},"output_type":"execute_result"}],"source":["# load .mat file /home/anuja/tj/bdsg/BSI-Zoo/bsi_zoo/data/sent to stefan/fixed.mat\n","import scipy.io\n","\n","d = scipy.io.loadmat('/home/anuja/tj/bdsg/BSI-Zoo/bsi_zoo/data/sent to stefan/free_spatialCV.mat')\n","d.keys()"]},{"cell_type":"code","execution_count":94,"metadata":{},"outputs":[{"data":{"text/plain":["dict_keys(['__header__', '__version__', '__globals__', 'Unnamed: 0', 'estimator', 'euclidean_distance', 'mse', 'emd', 'f1', 'reconstructed_noise', 'alpha', 'cov_type', 'n_times', 'nnz', 'orientation_type', 'path_to_leadfield', 'extra_params', 'estimator__alpha', 'estimator__alpha_cv', 'error'])"]},"execution_count":94,"metadata":{},"output_type":"execute_result"}],"source":["d = scipy.io.loadmat('/home/anuja/tj/bdsg/BSI-Zoo/bsi_zoo/data/sent to stefan/fixed_spatialCV.mat')\n","d.keys()"]}],"metadata":{"kernelspec":{"display_name":"Python 3","language":"python","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.10.12"}},"nbformat":4,"nbformat_minor":2}
204 changes: 108 additions & 96 deletions bsi_zoo/run_benchmark.py
@@ -18,11 +18,11 @@
from bsi_zoo.metrics import euclidean_distance, mse, emd, f1, reconstructed_noise
from bsi_zoo.config import get_leadfield_path

n_jobs = 30
nruns = 10
spatial_cv = [False, True]
subjects = ["CC120264", "CC120313", "CC120309"]
# "CC120166", "CC120313",
n_jobs = 12
nruns = 1
spatial_cv = [True]
# True]
subjects = ["CC120313", "CC120309", "CC120166", "CC120264"]
metrics = [
euclidean_distance,
mse,
@@ -42,18 +42,96 @@
# 0.13572088,
# 0.2096144,
# ] # logspaced
estimator_alphas = np.logspace(0, -2, 20)[1:]
estimator_alphas_I = np.logspace(0, -2, 20)[1:]
estimator_alphas_II = [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000, 100000]
memory = Memory(".")

for do_spatial_cv in spatial_cv:
for subject in subjects:
"""Fixed orientation parameters for the benchmark"""

# """Fixed orientation parameters for the benchmark"""

orientation_type = "fixed"
# orientation_type = "fixed"
# data_args_I = {
# # "n_sensors": [50],
# "n_times": [10],
# # "n_sources": [200],
# "nnz": nnzs,
# "cov_type": ["diag"],
# "path_to_leadfield": [get_leadfield_path(subject, type=orientation_type)],
# "orientation_type": [orientation_type],
# "alpha": alpha_SNR, # this is actually SNR
# }

# data_args_II = {
# # "n_sensors": [50],
# "n_times": [10],
# # "n_sources": [200],
# "nnz": nnzs,
# "cov_type": ["full"],
# "path_to_leadfield": [get_leadfield_path(subject, type=orientation_type)],
# "orientation_type": [orientation_type],
# "alpha": alpha_SNR, # this is actually SNR
# }

# estimators = [
# # (fake_solver, data_args_I, {"alpha": estimator_alphas_I}, {}),
# (eloreta, data_args_I, {"alpha": estimator_alphas_II}, {}),
# # (iterative_L1, data_args_I, {"alpha": estimator_alphas_I}, {}),
# # (iterative_L2, data_args_I, {"alpha": estimator_alphas_I}, {}),
# # (iterative_sqrt, data_args_I, {"alpha": estimator_alphas_I}, {}),
# # (iterative_L1_typeII, data_args_II, {"alpha": estimator_alphas_I}, {}),
# # (iterative_L2_typeII, data_args_II, {"alpha": estimator_alphas_I}, {}),
# # (gamma_map, data_args_II, {"alpha": estimator_alphas_I}, {"update_mode": 1}),
# (gamma_map, data_args_II, {"alpha": estimator_alphas_II}, {"update_mode": 2}),
# # (gamma_map, data_args_II, {"alpha": estimator_alphas_I}, {"update_mode": 3}),
# ]

# df_results = []
# for estimator, data_args, estimator_args, estimator_extra_params in estimators:
# benchmark = Benchmark(
# estimator,
# subject,
# metrics,
# data_args,
# estimator_args,
# random_state=42,
# memory=memory,
# n_jobs=n_jobs,
# do_spatial_cv=do_spatial_cv,
# estimator_extra_params=estimator_extra_params,
# )
# results = benchmark.run(nruns=nruns)
# df_results.append(results)
# # save results
# data_path = Path("bsi_zoo/data/updated_alpha_grid")
# data_path.mkdir(exist_ok=True)
# if do_spatial_cv:
# FILE_NAME = f"{estimator}_{subject}_{data_args['orientation_type'][0]}_spatialCV_{time.strftime('%b-%d-%Y_%H%M', time.localtime())}.pkl"
# else:
# FILE_NAME = f"{estimator}_{subject}_{data_args['orientation_type'][0]}_{time.strftime('%b-%d-%Y_%H%M', time.localtime())}.pkl"
# results.to_pickle(data_path / FILE_NAME)


# df_results = pd.concat(df_results, axis=0)

# data_path = Path("bsi_zoo/data/ramen")
# data_path.mkdir(exist_ok=True)
# if do_spatial_cv:
# FILE_NAME = f"benchmark_data_{subject}_{data_args['orientation_type'][0]}_spatialCV_{time.strftime('%b-%d-%Y_%H%M', time.localtime())}.pkl"
# else:
# FILE_NAME = f"benchmark_data_{subject}_{data_args['orientation_type'][0]}_{time.strftime('%b-%d-%Y_%H%M', time.localtime())}.pkl"
# df_results.to_pickle(data_path / FILE_NAME)

# print(df_results)

""" Free orientation parameters for the benchmark """

orientation_type = "free"
data_args_I = {
# "n_sensors": [50],
"n_sensors": [50],
"n_times": [10],
# "n_sources": [200],
"n_sources": [200],
"nnz": nnzs,
"cov_type": ["diag"],
"path_to_leadfield": [get_leadfield_path(subject, type=orientation_type)],
@@ -62,9 +140,9 @@
}

data_args_II = {
# "n_sensors": [50],
"n_sensors": [50],
"n_times": [10],
# "n_sources": [200],
"n_sources": [200],
"nnz": nnzs,
"cov_type": ["full"],
"path_to_leadfield": [get_leadfield_path(subject, type=orientation_type)],
@@ -73,16 +151,16 @@
}

estimators = [
(fake_solver, data_args_I, {"alpha": estimator_alphas}, {}),
# (eloreta, data_args_I, {"alpha": estimator_alphas}, {}),
# (iterative_L1, data_args_I, {"alpha": estimator_alphas}, {}),
# (iterative_L2, data_args_I, {"alpha": estimator_alphas}, {}),
# (iterative_sqrt, data_args_I, {"alpha": estimator_alphas}, {}),
# (iterative_L1_typeII, data_args_II, {"alpha": estimator_alphas}, {}),
# (iterative_L2_typeII, data_args_II, {"alpha": estimator_alphas}, {}),
# (gamma_map, data_args_II, {"alpha": estimator_alphas}, {"update_mode": 1}),
# (gamma_map, data_args_II, {"alpha": estimator_alphas}, {"update_mode": 2}),
# (gamma_map, data_args_II, {"alpha": estimator_alphas}, {"update_mode": 3}),
# (fake_solver, data_args_I, {"alpha": estimator_alphas_I}, {}),
(eloreta, data_args_I, {"alpha": estimator_alphas_II}, {}),
# (iterative_L1, data_args_I, {"alpha": estimator_alphas_I}, {}),
# (iterative_L2, data_args_I, {"alpha": estimator_alphas_I}, {}),
# (iterative_sqrt, data_args_I, {"alpha": estimator_alphas_I}, {}),
# (iterative_L1_typeII, data_args_II, {"alpha": estimator_alphas_I}, {}),
# (iterative_L2_typeII, data_args_II, {"alpha": estimator_alphas_I}, {}),
# (gamma_map, data_args_II, {"alpha": estimator_alphas_I}, {"update_mode": 1}),
# (gamma_map, data_args_II, {"alpha": estimator_alphas_II}, {"update_mode": 2}),
# (gamma_map, data_args_II, {"alpha": estimator_alphas_I}, {"update_mode": 3}),
]

df_results = []
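For reference, a short standalone sketch of the two regularization grids the estimators above draw from; the numeric values in the comments are approximate.

```python
import numpy as np

# Grid I: np.logspace(0, -2, 20) spans 1.0 down to 0.01 in 20 log-spaced steps;
# [1:] drops the leading 1.0, leaving 19 values from roughly 0.785 to 0.01.
estimator_alphas_I = np.logspace(0, -2, 20)[1:]

# Grid II: a coarse grid spanning 1e-4 to 1e5, used above for eLORETA
# (and, in the commented-out configurations, for gamma_map with update_mode=2).
estimator_alphas_II = [1e-4, 1e-3, 1e-2, 1e-1, 1, 10, 100, 1e3, 1e4, 1e5]

print(estimator_alphas_I[:3].round(3), estimator_alphas_I[-1])
# approximately: [0.785 0.616 0.483] 0.01
```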
@@ -102,14 +180,19 @@
results = benchmark.run(nruns=nruns)
df_results.append(results)
# save results
data_path = Path("bsi_zoo/data")
data_path = Path("bsi_zoo/data/eloreta")
data_path.mkdir(exist_ok=True)
FILE_NAME = f"{estimator}_{subject}_{data_args['orientation_type'][0]}_{time.strftime('%b-%d-%Y_%H%M', time.localtime())}.pkl"

if do_spatial_cv:
FILE_NAME = f"{estimator}_{subject}_{data_args['orientation_type'][0]}_spatialCV_{time.strftime('%b-%d-%Y_%H%M', time.localtime())}.pkl"
else:
FILE_NAME = f"{estimator}_{subject}_{data_args['orientation_type'][0]}_{time.strftime('%b-%d-%Y_%H%M', time.localtime())}.pkl"
results.to_pickle(data_path / FILE_NAME)
print(results)

df_results = pd.concat(df_results, axis=0)

data_path = Path("bsi_zoo/data")
data_path = Path("bsi_zoo/data/eloreta")
data_path.mkdir(exist_ok=True)
if do_spatial_cv:
FILE_NAME = f"benchmark_data_{subject}_{data_args['orientation_type'][0]}_spatialCV_{time.strftime('%b-%d-%Y_%H%M', time.localtime())}.pkl"
@@ -119,74 +202,3 @@

print(df_results)

# """ Free orientation parameters for the benchmark """

# orientation_type = "free"
# data_args_I = {
# "n_sensors": [50],
# "n_times": [10],
# "n_sources": [200],
# "nnz": nnzs,
# "cov_type": ["diag"],
# "path_to_leadfield": [get_leadfield_path(subject, type=orientation_type)],
# "orientation_type": [orientation_type],
# "alpha": alpha_SNR, # this is actually SNR
# }

# data_args_II = {
# "n_sensors": [50],
# "n_times": [10],
# "n_sources": [200],
# "nnz": nnzs,
# "cov_type": ["full"],
# "path_to_leadfield": [get_leadfield_path(subject, type=orientation_type)],
# "orientation_type": [orientation_type],
# "alpha": alpha_SNR, # this is actually SNR
# }

# estimators = [
# (fake_solver, data_args_I, {"alpha": estimator_alphas}, {}),
# (eloreta, data_args_I, {"alpha": estimator_alphas}, {}),
# (iterative_L1, data_args_I, {"alpha": estimator_alphas}, {}),
# (iterative_L2, data_args_I, {"alpha": estimator_alphas}, {}),
# (iterative_sqrt, data_args_I, {"alpha": estimator_alphas}, {}),
# (iterative_L1_typeII, data_args_II, {"alpha": estimator_alphas}, {}),
# (iterative_L2_typeII, data_args_II, {"alpha": estimator_alphas}, {}),
# # (gamma_map, data_args_II, {"alpha": estimator_alphas}, {"update_mode": 1}),
# (gamma_map, data_args_II, {"alpha": estimator_alphas}, {"update_mode": 2}),
# # (gamma_map, data_args_II, {"alpha": estimator_alphas}, {"update_mode": 3}),
# ]

# df_results = []
# for estimator, data_args, estimator_args, estimator_extra_params in estimators:
# benchmark = Benchmark(
# estimator,
# subject,
# metrics,
# data_args,
# estimator_args,
# random_state=42,
# memory=memory,
# n_jobs=n_jobs,
# do_spatial_cv=do_spatial_cv,
# estimator_extra_params=estimator_extra_params,
# )
# results = benchmark.run(nruns=nruns)
# df_results.append(results)
# # save results
# data_path = Path("bsi_zoo/data")
# data_path.mkdir(exist_ok=True)
# FILE_NAME = f"{estimator}_{subject}_{data_args['orientation_type'][0]}_{time.strftime('%b-%d-%Y_%H%M', time.localtime())}.pkl"
# results.to_pickle(data_path / FILE_NAME)

# df_results = pd.concat(df_results, axis=0)

# data_path = Path("bsi_zoo/data")
# data_path.mkdir(exist_ok=True)
# if do_spatial_cv:
# FILE_NAME = f"benchmark_data_{subject}_{data_args['orientation_type'][0]}_spatialCV_{time.strftime('%b-%d-%Y_%H%M', time.localtime())}.pkl"
# else:
# FILE_NAME = f"benchmark_data_{subject}_{data_args['orientation_type'][0]}_{time.strftime('%b-%d-%Y_%H%M', time.localtime())}.pkl"
# df_results.to_pickle(data_path / FILE_NAME)

# print(df_results)
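With this change, each estimator's results land in a timestamped pickle under bsi_zoo/data/eloreta, with a spatialCV tag in the name when spatial cross-validation is enabled. A minimal sketch of that naming scheme is below; the estimator label, subject, and flag are illustrative values only, and in run_benchmark.py the label comes from the estimator object itself.

```python
import time
from pathlib import Path

# Example values only.
estimator_label = "eloreta"
subject = "CC120313"
orientation_type = "free"
do_spatial_cv = True

data_path = Path("bsi_zoo/data/eloreta")
data_path.mkdir(exist_ok=True)  # assumes bsi_zoo/data already exists; add parents=True otherwise

stamp = time.strftime("%b-%d-%Y_%H%M", time.localtime())
tag = "_spatialCV" if do_spatial_cv else ""
FILE_NAME = f"{estimator_label}_{subject}_{orientation_type}{tag}_{stamp}.pkl"
# e.g. eloreta_CC120313_free_spatialCV_Apr-12-2024_1530.pkl
# results.to_pickle(data_path / FILE_NAME)
```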
