diff --git a/docs/requirements.txt b/docs/requirements.txt index 18f6c817..6d2090d4 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,4 +1,4 @@ jupyter-book>=0.7.0b napari -PyQt5 +qtpy napari-stress diff --git a/pyproject.toml b/pyproject.toml index 9787c3bd..8439924b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,9 @@ [build-system] requires = ["setuptools", "wheel"] build-backend = "setuptools.build_meta" + +[tool.ruff] +ignore = ["F821"] + +[tool.ruff.per-file-ignores] +"_napari_plugin.py" = ["F401"] diff --git a/src/napari_stress/__init__.py b/src/napari_stress/__init__.py index 2ecaa6b0..b2f4e861 100644 --- a/src/napari_stress/__init__.py +++ b/src/napari_stress/__init__.py @@ -35,3 +35,32 @@ from . import types from . import _vectors as vectors + +__all__ = [ + "__version__", + "measurements", + "approximation", + "reconstruction", + "sample_data", + "plotting", + "utils", + "stress_backend", + "rescale", + "smooth_sinc", + "smoothMLS2D", + "reconstruct_surface", + "decimate", + "extract_vertex_points", + "fit_ellipsoid_to_pointcloud_points", + "fit_ellipsoid_to_pointcloud_vectors", + "TimelapseConverter", + "frame_by_frame", + "fit_spherical_harmonics", + "lebedev_quadrature", + "create_manifold", + "get_droplet_point_cloud", + "get_droplet_point_cloud_4d", + "get_droplet_4d", + "types", + "vectors", +] diff --git a/src/napari_stress/_approximation/__init__.py b/src/napari_stress/_approximation/__init__.py index 18afbeb8..9cd00d6d 100644 --- a/src/napari_stress/_approximation/__init__.py +++ b/src/napari_stress/_approximation/__init__.py @@ -5,3 +5,9 @@ expand_points_on_ellipse, normals_on_ellipsoid, ) + +__all__ = [ + "least_squares_ellipsoid", + "expand_points_on_ellipse", + "normals_on_ellipsoid", +] diff --git a/src/napari_stress/_approximation/fit_ellipsoid.py b/src/napari_stress/_approximation/fit_ellipsoid.py index 74166d39..8d19ae94 100644 --- a/src/napari_stress/_approximation/fit_ellipsoid.py +++ b/src/napari_stress/_approximation/fit_ellipsoid.py @@ -64,7 +64,7 @@ def normals_on_ellipsoid(points: PointsData) -> VectorsData: F = coefficients.flatten()[5] G = coefficients.flatten()[6] H = coefficients.flatten()[7] - I = coefficients.flatten()[8] + J = coefficients.flatten()[8] xx = points[:, 0][:, None] yy = points[:, 1][:, None] @@ -72,7 +72,7 @@ def normals_on_ellipsoid(points: PointsData) -> VectorsData: grad_F_x = 2.0 * A * xx + D * yy + E * zz + G grad_F_y = 2.0 * B * yy + D * xx + F * zz + H - grad_F_z = 2.0 * C * zz + E * xx + F * yy + I + grad_F_z = 2.0 * C * zz + E * xx + F * yy + J grad_F_X = np.hstack((grad_F_x, grad_F_y, grad_F_z)) Vec_Norms = np.sqrt(np.sum(np.multiply(grad_F_X, grad_F_X), axis=1)).reshape( diff --git a/src/napari_stress/_deprecated.py b/src/napari_stress/_deprecated.py deleted file mode 100644 index 3cbcddc4..00000000 --- a/src/napari_stress/_deprecated.py +++ /dev/null @@ -1,390 +0,0 @@ -import numpy as np -import vedo - -import napari -from napari.types import PointsData, SurfaceData, ImageData, LayerDataTuple -from typing import List -import inspect - -from functools import wraps -import tqdm - -# import pandas as pd -from scipy.interpolate import RBFInterpolator, interp2d - - -def pointcloud_to_vertices4D(surfs: list) -> np.ndarray: - - n_vertices = sum([surf.N() for surf in surfs]) - vertices_4d = np.zeros([n_vertices, 4]) - - for idx, surf in enumerate(surfs): - vertices_4d[idx * surf.N() : idx * surf.N() + surf.N(), 1:] = surf.points() - vertices_4d[idx * surf.N() : idx * surf.N() + surf.N(), 0] = idx - - 
return vertices_4d - - -def vertices4d_to_pointcloud(vertices: np.ndarray) -> list: - - assert vertices.shape[1] == 4 - - frames = np.unique(vertices[:, 0]) - - surfs = [] - for idx in frames: - frame = vertices(np.where(vertices[:, 0] == idx)) - surfs.append(vedo.pointcloud.Points(frame)) - - return surfs - - -def list_of_points_to_pointsdata(points: list) -> PointsData: - """ - Convert list of pointData objects to single pointsdata object - - Parameters - ---------- - points : list - DESCRIPTION. - - Returns - ------- - PointsData - DESCRIPTION. - """ - # First, split list in list of cordinates and properties - list_of_properties = [pt[1]["properties"] for pt in points] - list_of_verts = [pt[0] for pt in points] - - # Create time-index for each point in each frame - time_frames = np.concatenate( - [[idx] * len(timepoint) for idx, timepoint in enumerate(list_of_verts)] - ) - - new_points = np.zeros((len(time_frames), 4)) - new_points[:, 0] = time_frames - new_points[:, 1:] = np.vstack(list_of_verts) - - new_props = {} - for key in list(list_of_properties[0].keys()): - new_props[key] = np.vstack([tp[key] for tp in list_of_properties]) - - return (new_points, {"properties": new_props}, "Points") - - -def _sigmoid(x, center, amplitude, slope, offset): - "https://stackoverflow.com/questions/55725139/fit-sigmoid-function-s-shape-curve-to-data-using-python" - return amplitude / (1 + np.exp(-slope * (x - center))) + offset - - -def _gaussian(x, center, sigma, amplitude): - return ( - amplitude - / np.sqrt((2 * np.pi * sigma**2)) - * np.exp(-((x - center) ** 2) / (2 * sigma**2)) - ) - - -def _detect_maxima(profile, center: float = None): - return np.argmax(profile) - - -def _detect_drop(profile, center: float = None): - return np.argmax(np.abs(np.diff(profile))) - - -def _func_args_to_list(func: callable) -> list: - - sig = inspect.signature(func) - return list(sig.parameters.keys()) - - -def frame_by_frame(function, progress_bar: bool = False): - @wraps(function) - def wrapper(*args, **kwargs): - - sig = inspect.signature(function) - annotations = [sig.parameters[key].annotation for key in sig.parameters.keys()] - - # Dictionary of functions that can convert 4D to list of data - funcs_data_to_list = { - napari.types.PointsData: points_to_list_of_points, - napari.types.SurfaceData: surface_to_list_of_surfaces, - napari.types.ImageData: image_to_list_of_images, - napari.types.LabelsData: image_to_list_of_images, - } - - # DIctionary of functions that can convert lists of data to data - funcs_list_to_data = { - napari.types.PointsData: list_of_points_to_points, - napari.types.SurfaceData: list_of_surfaces_to_surface, - napari.types.ImageData: list_of_images_to_image, - napari.types.LabelsData: list_of_images_to_image, - List[napari.types.LayerDataTuple]: list_of_layerdatatuple_to_layerdatatuple, - } - - supported_data = list(funcs_data_to_list.keys()) - - args = list(args) - n_frames = None - - # Convert 4D data to list(s) of 3D data for every supported argument - # TODO: Check if objects are actually 4D - ind_of_framed_arg = [] # remember which arguments were converted - - for idx, arg in enumerate(args): - if annotations[idx] in supported_data: - args[idx] = funcs_data_to_list[annotations[idx]](arg) - ind_of_framed_arg.append(idx) - n_frames = len(args[idx]) - - # apply function frame by frame - # TODO: Put this in a thread by default? 
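The deprecated `frame_by_frame` wrapper deleted in this file splits every 4D (t, z, y, x) argument into per-frame 3D pieces, calls the wrapped function once per frame, and reassembles the results; the maintained version that the rest of this diff imports lives in `_utils/frame_by_frame.py`. A minimal, self-contained sketch of that pattern for plain point arrays only follows — the helper names are illustrative, not part of napari-stress:

import numpy as np


def split_points_by_frame(points_4d: np.ndarray) -> list:
    """Split an (N, 4) array of (t, z, y, x) points into per-frame (M, 3) arrays."""
    frames = np.unique(points_4d[:, 0])
    return [points_4d[points_4d[:, 0] == t, 1:] for t in frames]


def stack_points_with_frame(points_per_frame: list) -> np.ndarray:
    """Prepend the frame index as a first column again and stack all frames."""
    stacked = [
        np.column_stack([np.full(len(pts), t, dtype=float), pts])
        for t, pts in enumerate(points_per_frame)
    ]
    return np.concatenate(stacked, axis=0)


def apply_frame_by_frame(func, points_4d: np.ndarray, **kwargs) -> np.ndarray:
    """Apply a 3D-points -> 3D-points function to every frame of a 4D point cloud."""
    results = [func(frame, **kwargs) for frame in split_points_by_frame(points_4d)]
    return stack_points_with_frame(results)


# usage: scale every frame of a (t, z, y, x) point cloud
# scaled = apply_frame_by_frame(lambda pts: pts * 2.0, points_4d)

The deleted implementation additionally inspects type annotations to decide which arguments to split and how to reassemble surfaces, images and layer-data tuples; the package continues to export `frame_by_frame` and `TimelapseConverter` from `__init__.py` for that general case.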
- results = [None] * n_frames - it = tqdm.tqdm(range(n_frames)) if progress_bar else range(n_frames) - for t in it: - _args = args.copy() - - # Replace argument value by frame t of argument value - for idx in ind_of_framed_arg: - _args[idx] = _args[idx][t] - - results[t] = function(*_args, **kwargs) - - return funcs_list_to_data[sig.return_annotation](results) - - return wrapper - - -def list_of_layerdatatuple_to_layerdatatuple(tuple_data: list) -> LayerDataTuple: - """Convert a list of 3D layerdatatuple objects to a single 4D LayerDataTuple""" - - # Possible conversion functions for layerdatatuples - funcs_list_to_data = { - "points": list_of_points_to_points, - "surface": list_of_surfaces_to_surface, - "image": list_of_images_to_image, - "labels": list_of_images_to_image, - } - - # Convert data to array with dimensions [result, frame, data] - data = list(np.asarray(tuple_data).transpose((1, 0, -1))) - - # Reminder: Each list entry is tuple (data, properties, type) - results = [None] * len(data) # allocate list for results - for idx, res in enumerate(data): - dtype = res[0, -1] - _result = [None] * 3 - _result[0] = funcs_list_to_data[dtype](res[:, 0]) - _result[1] = res[0, 1] # smarter way to combine properties? - _result[2] = dtype - results[idx] = _result - - return results - - -def list_of_points_to_points(points: list) -> np.ndarray: - """Convert list of 3D point data to single 4D point data.""" - - n_points = sum([len(frame) for frame in points]) - t = np.concatenate([[idx] * len(frame) for idx, frame in enumerate(points)]) - - points_out = np.zeros((n_points, 4)) - points_out[:, 1:] = np.vstack(points) - points_out[:, 0] = t - - return points_out - - -def image_to_list_of_images(image: ImageData) -> list: - """Convert 4D image to list of images""" - # TODO: Check if it actually is 4D - return list(image) - - -def points_to_list_of_points(points: PointsData) -> list: - """Convert a 4D point array to list of 3D points""" - # TODO: Check if it actually is 4D - n_frames = len(np.unique(points[:, 0])) - - points_out = [None] * n_frames - for t in range(n_frames): - points_out[t] = points[points[:, 0] == t, 1:] - - return points_out - - -def surface_to_list_of_surfaces(surface: SurfaceData) -> list: - """Convert a 4D surface to list of 3D surfaces""" - # TODO: Check if it actually is 4D - points = surface[0] - faces = np.asarray(surface[1], dtype=int) - - n_frames = len(np.unique(points[:, 0])) - points_per_frame = [sum(points[:, 0] == t) for t in range(n_frames)] - - idx_face_new_frame = [] - t = 0 - for idx, face in enumerate(faces): - if points[face[0], 0] == t: - idx_face_new_frame.append(idx) - t += 1 - idx_face_new_frame.append(len(faces)) - - surfaces = [None] * n_frames - for t in range(n_frames): - _points = points[points[:, 0] == t, 1:] - _faces = faces[idx_face_new_frame[t] : idx_face_new_frame[t + 1] - 1] - sum( - points_per_frame[:t] - ) - surfaces[t] = (_points, _faces) - - return surfaces - - -def list_of_images_to_image(images: list) -> ImageData: - """Convert a list of 3D image data to single 4D image data.""" - return np.stack(images) - - -def list_of_surfaces_to_surface(surfs: list) -> tuple: - """ - Convert list of 3D surfaces to single 4D surface. 
- """ - if isinstance(surfs[0], vedo.mesh.Mesh): - surfs = [(s.points(), s.faces()) for s in surfs] - - vertices = [surf[0] for surf in surfs] - faces = [surf[1] for surf in surfs] - values = None - if len(surfs[0]) == 3: - values = np.concatenate([surf[2] for surf in surfs]) - - vertices = list_of_points_to_points(vertices) - - n_verts = 0 - for idx, surf in enumerate(surfs): - - # Offset indices in faces list by previous amount of points - faces[idx] = n_verts + np.array(faces[idx]) - - # Add number of vertices in current surface to n_verts - n_verts += surf[0].shape[0] - - faces = np.vstack(faces) - - if values is None: - return (vertices, faces) - else: - return (vertices, faces, values) - - -def surf_fit(points, Xq, **kwargs): - - """ - First interpolates a set of points around a query point with a set of radial - basis functions in a given sample density. The inteprolated points are then approximated - with a 2D polynomial - """ - - sample_length = kwargs.get("sample_length", 0.1) - int_method = kwargs.get("int_method", "rbf") - - # get x, y and z coordinates - x = np.asarray(points[:, 0]) - y = np.asarray(points[:, 1]) - z = np.asarray(points[:, 2]) - - # # create interpolation grid - # xi = np.linspace(np.min(x), np.max(x), ((x.max() - x.min()) // sample_length).astype(int)) - # yi = np.linspace(np.min(x), np.max(x), ((y.max() - y.min()) // sample_length).astype(int)) - - # create interpolation/evaluation grid - sL = sample_length - - # add 1 sL to grid range ro ensure interpolation grid of sufficient size to calculate gradients - xgrid = np.mgrid[x.min() - sL : x.max() + sL : sL, y.min() - sL : y.max() + sL : sL] - - shape_x = xgrid.shape[1] - shape_y = xgrid.shape[2] - xgrid = np.asarray(xgrid.reshape(2, -1).T) - - # Create polynomial approximation of provided data on a regular grid with set sample length - if int_method == "rbf": - rbf = RBFInterpolator(np.vstack([x, y]).transpose(), z, epsilon=2) - _x = xgrid[:, 0] - _y = xgrid[:, 1] - _z = rbf(xgrid) - - # elif int_method =='grid': - # grid = griddata(np.vstack([x,y]).transpose(), z, xgrid.transpose(), method='linear') - - elif int_method == "Poly2d": - # Fit custom 2D Polynomial function. Some of the moments are missing in matlab - intented? 
- z_poly = poly2d(_x, _y) - coeff, r, rank, s = np.linalg.lstsq(z_poly, _z, rcond=None) - _z = poly2d(_x, _y, coeff=coeff).sum(axis=1) - - # Make data 2D (i.e., grid) again - _x = _x.reshape(shape_x, -1) - _y = _y.reshape(shape_x, -1) - _z = _z.reshape(shape_x, -1) - - if _z.shape[0] == 1: - print("Here") - pass - - # Calculate the mean curvature of the interpolated surface - H = mean_curvature(_z, sample_length) - - # fig = plt.figure() - # ax = fig.add_subplot(111, projection='3d') - # ax.scatter(_x, _y, _z) - - # Interpolate mean curvature at query point - f = interp2d(_x, _y, H, kind="linear") - H_mean = f(Xq[0], Xq[1]) - - return H_mean - - -def mean_curvature(z, spacing): - """ - Calculates mean curvature based on the partial derivatives of z based on - the formula from https://en.wikipedia.org/wiki/Mean_curvature - """ - - try: - Zy, Zx = np.gradient(z, spacing) # First-order partial derivatives - Zxy, Zxx = np.gradient(Zx, spacing) # Second-order partial derivatives (I) - Zyy, _ = np.gradient(Zy, spacing) # (II) (note that Zyx = Zxy) - except: - print("Here") - - H = ( - (1 / 2.0) - * ((1 + Zxx**2) * Zyy - 2.0 * Zx * Zy * Zxy + (1 + Zyy**2) * Zxx) - / (1 + Zxx**2 + Zyy**2) ** (1.5) - ) - - return H - - -def poly2d(x, y, coeff=np.ones(9)): - - assert len(coeff) == 9 - - return np.array( - [ - coeff[0] * np.ones(len(x)), - coeff[1] * x, - coeff[2] * y, - coeff[3] * x * y, - coeff[4] * x**2, - coeff[5] * x**2 * y, - coeff[6] * x * y**2, - coeff[7] * y**2, - coeff[8] * x**2 * y**2, - ] - ).T diff --git a/src/napari_stress/_measurements/__init__.py b/src/napari_stress/_measurements/__init__.py index 6caa2ad9..2564b412 100644 --- a/src/napari_stress/_measurements/__init__.py +++ b/src/napari_stress/_measurements/__init__.py @@ -34,3 +34,30 @@ from .toolbox import comprehensive_analysis from .measurements import distance_to_k_nearest_neighbors from .intensity import sample_intensity_along_vector, measure_intensity_on_surface + +__all__ = [ + "calculate_mean_curvature_on_manifold", + "mean_curvature_on_radial_manifold", + "average_mean_curvatures_on_manifold", + "radial_surface_averaged_mean_curvature", + "curvature_on_ellipsoid", + "mean_curvature_on_ellipse_cardinal_points", + "gauss_bonnet_test", + "mean_curvature_differences_radial_cartesian_manifolds", + "deviation_from_ellipsoidal_mode", + "temporal_autocorrelation", + "haversine_distances", + "spatio_temporal_autocorrelation", + "geodesic_distance_matrix", + "geodesic_path", + "correlation_on_surface", + "local_extrema_analysis", + "anisotropic_stress", + "tissue_stress_tensor", + "maximal_tissue_anisotropy", + "calculate_anisotropy", + "comprehensive_analysis", + "distance_to_k_nearest_neighbors", + "sample_intensity_along_vector", + "measure_intensity_on_surface", +] diff --git a/src/napari_stress/_measurements/curvature.py b/src/napari_stress/_measurements/curvature.py index fc160a54..20314279 100644 --- a/src/napari_stress/_measurements/curvature.py +++ b/src/napari_stress/_measurements/curvature.py @@ -1,15 +1,12 @@ # -*- coding: utf-8 -*- -from statistics import mean from .._stress import euclidian_k_form_SPB as euc_kf from .._spherical_harmonics.spherical_harmonics import get_normals_on_manifold from napari_tools_menu import register_function import numpy as np -from napari.types import PointsData, VectorsData, LayerDataTuple from napari.layers import Layer -from napari import Viewer from .._utils import coordinate_conversion as conversion from .._utils.frame_by_frame import frame_by_frame @@ -33,8 +30,8 @@ 
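The deprecated `surf_fit`/`mean_curvature` helpers removed above estimate mean curvature from a locally interpolated height field z(x, y). For such a graph, the formula referenced in the deleted docstring (https://en.wikipedia.org/wiki/Mean_curvature) reads

H = ((1 + Zx^2) * Zyy - 2 * Zx * Zy * Zxy + (1 + Zy^2) * Zxx) / (2 * (1 + Zx^2 + Zy^2)^(3/2))

Note that the deleted implementation appears to plug the second derivatives Zxx/Zyy into the (1 + ...^2) factors and the denominator where the first derivatives Zx/Zy belong. A minimal numpy sketch of the formula as written above — an illustration only, not the napari-stress implementation:

import numpy as np


def mean_curvature_height_field(z: np.ndarray, spacing: float) -> np.ndarray:
    """Mean curvature of a height field z(x, y) sampled on a regular grid."""
    zy, zx = np.gradient(z, spacing)     # first-order partial derivatives (axis 0 = y)
    zxy, zxx = np.gradient(zx, spacing)  # second-order partial derivatives
    zyy, _ = np.gradient(zy, spacing)    # (zyx equals zxy)
    return (
        (1 + zx**2) * zyy - 2.0 * zx * zy * zxy + (1 + zy**2) * zxx
    ) / (2.0 * (1 + zx**2 + zy**2) ** 1.5)


# sanity check: a spherical cap of radius R has |H| close to 1/R on this grid
R, spacing = 10.0, 0.1
x, y = np.meshgrid(np.arange(-5.0, 5.0, spacing), np.arange(-5.0, 5.0, spacing))
H = mean_curvature_height_field(np.sqrt(R**2 - x**2 - y**2), spacing)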
@register_function(menu="Measurement > Measure mean curvature on ellipsoid (n-STRESS)") @frame_by_frame def curvature_on_ellipsoid( - ellipsoid: VectorsData, sample_points: PointsData -) -> LayerDataTuple: + ellipsoid: "napari.types.VectorsData", sample_points: "napari.types.PointsData" +) -> "napari.types.LayerDataTuple": """ Calculate curvature at sample points on the surface of an ellipse. @@ -87,7 +84,8 @@ def curvature_on_ellipsoid( ) H_ellps_pts = (num_H_ellps / den_H_ellps).squeeze() - # calculate averaged curvatures H_0: 1st method of H0 computation, for Ellipsoid in UV points + # calculate averaged curvatures H_0: 1st method of H0 computation, + # for Ellipsoid in UV points H0_ellps_avg_ellps_UV_curvs = H_ellps_pts.mean(axis=0) H0_ellipsoid_major_minor = mean_curvature_on_ellipse_cardinal_points(ellipsoid) @@ -107,7 +105,9 @@ def curvature_on_ellipsoid( return (sample_points, properties, "points") -def mean_curvature_on_ellipse_cardinal_points(ellipsoid: VectorsData) -> list: +def mean_curvature_on_ellipse_cardinal_points( + ellipsoid: "napari.types.VectorsData", +) -> list: """ Calculate mean points on major axes tip points of ellipsoid. @@ -144,7 +144,7 @@ def mean_curvature_on_ellipse_cardinal_points(ellipsoid: VectorsData) -> list: menu="Measurement > Measure Gauss-Bonnet error on manifold (n-STRESS" ) def gauss_bonnet_test( - input_manifold: manifold, viewer: Viewer = None + input_manifold: manifold, viewer: "napari.Viewer" = None ) -> Tuple[float, float]: """ Use Gauss-Bonnet theorem to measure resolution on manifold. @@ -215,7 +215,8 @@ def calculate_mean_curvature_on_manifold( points = input_manifold.get_coordinates() centered_lbdv_pts = points - points.mean(axis=0)[None, :] - # Makre sure orientation is inward, so H is positive (for Ellipsoid, and small deviations): + # Makre sure orientation is inward, + # so H is positive (for Ellipsoid, and small deviations): Orientations = [np.dot(x, y) for x, y in zip(centered_lbdv_pts, normals)] num_pos_orr = np.sum(np.asarray(Orientations).flatten() > 0) @@ -332,7 +333,9 @@ def surface_integrated_mean_curvature(mean_curvatures: np.ndarray, manifold: man def volume_integrated_mean_curvature(input_manifold: manifold) -> float: - """Determine volume and averaged mean curvature by integrating over radii.""" + """ + Determine volume and averaged mean curvature by integrating over radii. + """ from .._stress.sph_func_SPB import S2_Integral assert input_manifold.manifold_type == "radial" diff --git a/src/napari_stress/_measurements/measurements.py b/src/napari_stress/_measurements/measurements.py index 50a29aff..bc3eedcd 100644 --- a/src/napari_stress/_measurements/measurements.py +++ b/src/napari_stress/_measurements/measurements.py @@ -1,3 +1,4 @@ +# ruff: noqa: F821 import pandas as pd @@ -19,7 +20,6 @@ def distance_to_k_nearest_neighbors( The distance to the k nearest neighbors for each point. 
""" from scipy.spatial import KDTree - import numpy as np tree = KDTree(points) dist, _ = tree.query(points, k=k + 1) diff --git a/src/napari_stress/_measurements/temporal_correlation.py b/src/napari_stress/_measurements/temporal_correlation.py index 14a07888..382a5973 100644 --- a/src/napari_stress/_measurements/temporal_correlation.py +++ b/src/napari_stress/_measurements/temporal_correlation.py @@ -78,18 +78,18 @@ def spatio_temporal_autocorrelation( inner_product[i, j, :] = result["auto_correlations_average"].flatten() # sum vals of cols for each row, gives us a matrix - summed_inner_product = np.squeeze(np.sum(inner_product, axis=1)) + sum_inner_product = np.squeeze(np.sum(inner_product, axis=1)) num_tau_samples = (np.arange(n_frames) + 1)[::-1].reshape(n_frames, 1) - avg_summed_inner_product = np.divide(summed_inner_product, num_tau_samples) - norm_t_0 = np.sum(summed_inner_product[0, :].flatten()) + avg_sum_inner_product = np.divide(sum_inner_product, num_tau_samples) + norm_t_0 = np.sum(sum_inner_product[0, :].flatten()) - normed_avg_summed_inner_product = avg_summed_inner_product / norm_t_0 + normed_avg_sum_inner_product = avg_sum_inner_product / norm_t_0 results = { - "summed_spatiotemporal_correlations": summed_inner_product, - "avg_summed_spatiotemporal_correlations": avg_summed_inner_product, - "normed_avg_summed_spatiotemporal_correlations": normed_avg_summed_inner_product, + "summed_spatiotemporal_correlations": sum_inner_product, + "avg_summed_spatiotemporal_correlations": avg_sum_inner_product, + "normed_avg_summed_spatiotemporal_correlations": normed_avg_sum_inner_product, } return results @@ -97,7 +97,8 @@ def spatio_temporal_autocorrelation( def haversine_distances(degree_lebedev: int, n_lebedev_points: int): """ - Calculate geodesic (Great Circle) distance matrix on unit sphere, from haversine formula. + Calculate geodesic (Great Circle) distance matrix on unit sphere, + from haversine formula. See Also -------- diff --git a/src/napari_stress/_measurements/toolbox.py b/src/napari_stress/_measurements/toolbox.py index 12e8b731..fda92e5d 100644 --- a/src/napari_stress/_measurements/toolbox.py +++ b/src/napari_stress/_measurements/toolbox.py @@ -169,8 +169,6 @@ def _run(self): def _export(self, results_stress_analysis): """Export results to csv file.""" - from .. import types - from .. import measurements from .. import plotting from .. 
import utils import datetime @@ -433,8 +431,8 @@ def comprehensive_analysis( features = curvature_ellipsoid["features"] metadata = curvature_ellipsoid["metadata"] mean_curvature_ellipsoid = features[_METADATAKEY_MEAN_CURVATURE] - H_major_minor = metadata[_METADATAKEY_H_E123_ELLIPSOID] - H0_arithmetic_ellipsoid = curvature_ellipsoid_sh[1] + _ = metadata[_METADATAKEY_H_E123_ELLIPSOID] + _ = curvature_ellipsoid_sh[1] H0_surface_ellipsoid = curvature_ellipsoid_sh[2] # ========================================================================= diff --git a/src/napari_stress/_plotting/__init__.py b/src/napari_stress/_plotting/__init__.py index 5542560b..c218900c 100644 --- a/src/napari_stress/_plotting/__init__.py +++ b/src/napari_stress/_plotting/__init__.py @@ -3,3 +3,9 @@ draw_chronological_kde_plot, create_all_stress_plots, ) + +__all__ = [ + "draw_chronological_lineplot_with_errors", + "draw_chronological_kde_plot", + "create_all_stress_plots", +] diff --git a/src/napari_stress/_plotting/create_plots.py b/src/napari_stress/_plotting/create_plots.py index 0ca2c9ce..2a51e54d 100644 --- a/src/napari_stress/_plotting/create_plots.py +++ b/src/napari_stress/_plotting/create_plots.py @@ -129,7 +129,6 @@ def create_all_stress_plots( import matplotlib as mpl from .._utils._aggregate_measurements import find_metadata_in_layers from .. import types - from .. import measurements from .._utils._aggregate_measurements import compile_data_from_layers # Compile data diff --git a/src/napari_stress/_preprocess.py b/src/napari_stress/_preprocess.py index 8bbce938..63cb235f 100644 --- a/src/napari_stress/_preprocess.py +++ b/src/napari_stress/_preprocess.py @@ -1,8 +1,6 @@ from napari.types import ImageData from skimage import transform -from skimage import transform - import numpy as np from ._utils.frame_by_frame import frame_by_frame diff --git a/src/napari_stress/_reconstruction/__init__.py b/src/napari_stress/_reconstruction/__init__.py index d8ee20a6..e40642cb 100644 --- a/src/napari_stress/_reconstruction/__init__.py +++ b/src/napari_stress/_reconstruction/__init__.py @@ -2,3 +2,9 @@ from .refine_surfaces import trace_refinement_of_surface from .toolbox import reconstruct_droplet from .reconstruct_surface import reconstruct_surface_from_quadrature_points + +__all__ = [ + "trace_refinement_of_surface", + "reconstruct_droplet", + "reconstruct_surface_from_quadrature_points", +] diff --git a/src/napari_stress/_reconstruction/toolbox.py b/src/napari_stress/_reconstruction/toolbox.py index 3a533d04..d98ca5cf 100644 --- a/src/napari_stress/_reconstruction/toolbox.py +++ b/src/napari_stress/_reconstruction/toolbox.py @@ -10,7 +10,7 @@ from napari_tools_menu import register_dock_widget from qtpy import uic from qtpy.QtCore import QEvent, QObject -from qtpy.QtWidgets import QWidget, QFileDialog +from qtpy.QtWidgets import QWidget from .._utils.frame_by_frame import frame_by_frame diff --git a/src/napari_stress/_sample_data/__init__.py b/src/napari_stress/_sample_data/__init__.py index b0181b3f..34d3e273 100644 --- a/src/napari_stress/_sample_data/__init__.py +++ b/src/napari_stress/_sample_data/__init__.py @@ -5,3 +5,10 @@ get_droplet_point_cloud_4d, make_blurry_ellipsoid, ) + +__all__ = [ + "get_droplet_4d", + "get_droplet_point_cloud", + "get_droplet_point_cloud_4d", + "make_blurry_ellipsoid", +] diff --git a/src/napari_stress/_spherical_harmonics/spherical_harmonics.py b/src/napari_stress/_spherical_harmonics/spherical_harmonics.py index 4100b56f..e439b343 100644 --- 
a/src/napari_stress/_spherical_harmonics/spherical_harmonics.py +++ b/src/napari_stress/_spherical_harmonics/spherical_harmonics.py @@ -24,13 +24,14 @@ def shtools_spherical_harmonics_expansion( points: PointsData, max_degree: int = 5, expansion_type: str = "radial" ) -> Tuple[PointsData, np.ndarray]: """ - Approximate a surface by spherical harmonics expansion with pyshtools implementation. + Approximate surface with pyshtools implementation. Parameters ---------- points : PointsData max_degree : int, optional - Degree of spherical harmonics to fit to the data. The default is 5. + Degree of spherical harmonics to fit to the data. + The default is 5. Returns ------- @@ -251,7 +252,8 @@ def lebedev_quadrature( sph_f.spherical_harmonics_function(x, max_degree) for x in coefficients ] - # Get {Z/Y/X} Coordinates at lebedev points, so we can leverage our code more efficiently (and uniformly) on surface: + # Get {Z/Y/X} Coordinates at lebedev points, so we can + # leverage our code more efficiently (and uniformly) on surface: LBDV_Fit = lebedev_info.lbdv_info(max_degree, number_of_quadrature_points) lebedev_points = [ euc_kf.get_quadrature_points_from_sh_function(f, LBDV_Fit, "A") @@ -294,7 +296,7 @@ def create_manifold( Manny_Dict[ "use_manifold_name" ] = False # we are NOT using named shapes in these tests - Manny_Dict["Maniold_Name_Dict"] = Manny_Name_Dict # sph point cloud at lbdv + Manny_Dict["Maniold_Name_Dict"] = Manny_Name_Dict return mnfd.manifold( Manny_Dict, manifold_type=manifold_type, raw_coordinates=points @@ -358,7 +360,8 @@ def calculate_mean_curvature_on_manifold( points = manifold.get_coordinates() centered_lbdv_pts = points - points.mean(axis=0)[None, :] - # Makre sure orientation is inward, so H is positive (for Ellipsoid, and small deviations): + # Makre sure orientation is inward, so H is positive + # (for Ellipsoid, and small deviations): Orientations = [np.dot(x, y) for x, y in zip(centered_lbdv_pts, normals)] num_pos_orr = np.sum(np.asarray(Orientations).flatten() > 0) diff --git a/src/napari_stress/_spherical_harmonics/spherical_harmonics_napari.py b/src/napari_stress/_spherical_harmonics/spherical_harmonics_napari.py index bec7f1c1..f2af7260 100644 --- a/src/napari_stress/_spherical_harmonics/spherical_harmonics_napari.py +++ b/src/napari_stress/_spherical_harmonics/spherical_harmonics_napari.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -from cmath import exp from napari.types import LayerDataTuple, PointsData from napari.layers import Points import numpy as np @@ -51,8 +50,9 @@ def fit_spherical_harmonics( Which implementation to use for spherical harmonics fit (stress or pyshtools). Default is `spherical_harmonics_methods.stress` expansion_type: expansion_type - Which coordinate to use for expansion. Can be `cartesian` or `radial`. For cartesian - expansion, x/y/z will be approximated separately with a spherical harmonics expansion + Which coordinate to use for expansion. Can be `cartesian` or + `radial`. For cartesian expansion, x/y/z will be approximated + separately with a spherical harmonics expansion or radial for radial approximation. @@ -104,18 +104,19 @@ def perform_lebedev_quadrature( viewer: napari.Viewer = None, ) -> LayerDataTuple: """ - Perform lebedev quadrature and manifold creaiton on spherical-harmonics expansion. + Perform lebedev quadrature on spherical-harmonics expansion. Parameters ---------- points : Points number_of_quadrature_points : int, optional - Number of quadrature points to represent the surface. The default is 500. 
+ Number of quadrature points to represent the surface. + The default is 500. use_minimal_point_set : bool, optional Whether or not to use the minimally required number of quadrature points instead of the number given by `number_of_quadrature_points`. Depends on the chosen `max_degree` of the previous spherical harmonics - expansion.The default is False. + expansion. The default is False. viewer : napari.Viewer, optional @@ -126,9 +127,10 @@ def perform_lebedev_quadrature( """ metadata = points.metadata - if not "spherical_harmonics_coefficients" in metadata.keys(): + if "spherical_harmonics_coefficients" not in metadata.keys(): raise ValueError( - "Missing spherical harmonics coefficients. Use spherical harmonics expansion first" + "Missing spherical harmonics coefficients. " + + "Use spherical harmonics expansion first" ) max_degree = metadata["spherical_harmonics_coefficients"].shape[-1] - 1 diff --git a/src/napari_stress/_stress/__init__.py b/src/napari_stress/_stress/__init__.py index ce41c082..2cca9e38 100644 --- a/src/napari_stress/_stress/__init__.py +++ b/src/napari_stress/_stress/__init__.py @@ -1 +1,3 @@ from .lebedev_info_SPB import lbdv_info + +__all__ = ["lbdv_info"] diff --git a/src/napari_stress/_stress/charts_SPB.py b/src/napari_stress/_stress/charts_SPB.py index f720113b..169ca557 100644 --- a/src/napari_stress/_stress/charts_SPB.py +++ b/src/napari_stress/_stress/charts_SPB.py @@ -1,11 +1,6 @@ # From https://github.com/campaslab/STRESS -# from numpy import * import numpy as np import math -from scipy import * -import mpmath -import cmath -from scipy.special import sph_harm ##### Size of Charts: ####### Chart_Min_Polar = np.pi / 4 # Minimum Phi Val in Chart @@ -68,6 +63,7 @@ def Bump_Fn( def eta_A(func, Theta, Phi): # Cutoff fn for phi in [eta_Min_Polar, eta_Max_Polar] + # if Phi=< eta_Min_Polar - eta_Polar_Decay or Phi=> eta_Max_Polar + eta_Polar_Decay if eta_Min_Polar <= Phi and Phi <= eta_Max_Polar: return func(Theta, Phi) @@ -78,7 +74,7 @@ def eta_A(func, Theta, Phi): # Cutoff fn for phi in [eta_Min_Polar, eta_Max_Pol return Bump_Fn(Phi, 1, eta_Min_Polar, eta_Polar_Decay) * func(Theta, Phi) # Bump is SYMETRIC around start else: - return 0 # if Phi=< eta_Min_Polar - eta_Polar_Decay or Phi=> eta_Max_Polar + eta_Polar_Decay + return 0 def eta_A_const( @@ -90,20 +86,18 @@ def eta_A_const( elif eta_Max_Polar < Phi and Phi < eta_Max_Polar + eta_Polar_Decay: return Bump_Fn(Phi, 1, eta_Max_Polar, eta_Polar_Decay) * const + # if Phi=< eta_Min_Polar - eta_Polar_Decay or Phi=> eta_Max_Polar + eta_Polar_Decay elif eta_Min_Polar - eta_Polar_Decay < Phi and Phi < eta_Min_Polar: return Bump_Fn(Phi, 1, eta_Min_Polar, eta_Polar_Decay) * const # Bump is SYMETRIC around start else: - return 0 # if Phi=< eta_Min_Polar - eta_Polar_Decay or Phi=> eta_Max_Polar + eta_Polar_Decay + return 0 def eta_B( func, Theta_Bar, Phi_Bar ): # Cutoff fn for Phi_Bar in [eta_Min_Polar, eta_Max_Polar] - # A_Coors = Coor_B_To_A(Theta_Bar, Phi_Bar) - # print("Equiv to (Theta, Phi) = "+'('+str(A_Coors[0]/pi)+'pi, '+str(A_Coors[1]/pi)+'pi)') - # func needs to be COUNTER rotated back to phi, theta, to be consistent def func_counter_rot(theta_bar, phi_bar): Orig_Coors = Coor_B_To_A(theta_bar, phi_bar) @@ -114,7 +108,8 @@ def func_counter_rot(theta_bar, phi_bar): return eta_A(func_counter_rot, Theta_Bar, Phi_Bar) -# NOTE: (Theta, Phi) -> (Theta_Bar, Phi_Bar) by rotating the north pole (0,0) to the east pole (0,pi/2) +# NOTE: (Theta, Phi) -> +# (Theta_Bar, Phi_Bar) by rotating the north pole (0,0) to the east pole 
(0,pi/2) def Coor_A_To_B(Theta, Phi): # Converts from A to B X1 = np.cos(Theta) * np.sin(Phi) @@ -125,8 +120,6 @@ def Coor_A_To_B(Theta, Phi): # Converts from A to B # print("Y1= "+str(Y1)) # print("Z1= "+str(Z1)) - XYZ1 = [X1, Y1, Z1] - # How Coordinate Axes are rotated X_Rot = -Z1 Y_Rot = Y1 @@ -137,7 +130,9 @@ def Coor_A_To_B(Theta, Phi): # Converts from A to B # Theta_Bar = Bar_Coors[:, 0] # Phi_Bar = Bar_Coors[:, 1] - # XYZ2 = [cos(Phi_Bar) , sin(Theta_Bar)*sin(Phi_Bar), -1*cos(Theta_Bar)*sin(Phi_Bar)] + # XYZ2 = [cos(Phi_Bar) , + # sin(Theta_Bar)*sin(Phi_Bar), + # -1*cos(Theta_Bar)*sin(Phi_Bar)] # print("Orig Cart. = "+str(XYZ1)) # print("New Cart. = "+str(XYZ2)) @@ -169,7 +164,9 @@ def Coor_B_To_A(Theta_Bar, Phi_Bar): # Converts from B back to A # Theta = A_Coors[:, 0] # Phi = A_Coors[:, 1] - # XYZ1 = [cos(Phi_Bar) , sin(Theta_Bar)*sin(Phi_Bar), -1*cos(Theta_Bar)*sin(Phi_Bar)] + # XYZ1 = [cos(Phi_Bar) , + # sin(Theta_Bar)*sin(Phi_Bar), + # -1*cos(Theta_Bar)*sin(Phi_Bar)] return A_Coors @@ -208,13 +205,13 @@ def Cart_To_Coor_B(x, y, z): ##Calculate Theta_Bar - theta_bar = np.arctan2( - y, -1 * z - ) # Actually gives us unit circle angle for theta_bar < pi, theta_bar-2*pi for theta_bar > pi + theta_bar = np.arctan2(y, -1 * z) + # Actually gives us unit circle angle for theta_bar < pi, + # theta_bar-2*pi for theta_bar > pi - theta_bar = np.where( - theta_bar >= 0, theta_bar, theta_bar + 2 * np.pi - ) # return: theta_bar (where theta_bar >= 0), return: theta_bar + 2*pi (where theta_bar < 0) + theta_bar = np.where(theta_bar >= 0, theta_bar, theta_bar + 2 * np.pi) + # return: theta_bar (where theta_bar >= 0), + # return: theta_bar + 2*pi (where theta_bar < 0) phi_bar = np.arccos(np.divide(x, r)) # numpy version has range [0, pi] @@ -226,78 +223,3 @@ def Cart_To_Coor_B(x, y, z): ####################################################################################################################### # BJG: This is already in manifold_SPB.py - -""" -def Manifold_Fn_Def(theta, phi, r_0, Shape_Name): - - if(Shape_Name == "S2"): - return 1 - - - elif(Shape_Name == "Chew_Toy"): - return 1 + r_0*np.sin(3*phi)*np.cos(theta) - - - elif(Shape_Name == "Dog_Shit"): - return 1 + r_0*np.sin(7*phi)*np.cos(theta) - - - elif(Shape_Name == "Gen_Pill"): - return (1 + r_0)/np.sqrt(1 + ((1 + r_0)**2 - 1)*np.sin(phi)**2) - - - elif(Shape_Name == "Divet"): - if(isscalar(phi)): - if(np.cos(phi) > .9): - return 1 - r_0*np.exp(-1*(.19)/(np.cos(phi)**2 - (.9)**2) + 1) - - else: - return 1 - - else: - # Vectorized for manifold functions - return np.where(np.cos(phi) > .9, 1 - r_0*np.exp(-1*(.19)/(np.cos(phi)**2 - (.9)**2) + 1), 1) - - - elif(Shape_Name == "Double_Divet"): - if(isscalar(phi)): - if(np.cos(phi) > .9): - return 1 - r_0*np.exp(-1*(.19)/(np.cos(phi)**2 - (.9)**2) + 1) - - elif(np.sin(phi)*np.cos(theta) > .9): - return 1 - r_0*np.exp(-1*(.19)/((np.sin(phi)*np.cos(theta))**2 - (.9)**2) + 1) - - else: - return 1 - - else: - # Vectorized for manifold functions - return np.where(np.cos(phi) > .9, 1 - r_0*np.exp(-1*(.19)/(np.cos(phi)**2 - (.9)**2) + 1), - np.where(np.sin(phi)*np.cos(theta) > .9, 1 - r_0*np.exp(-1*(.19)/((np.sin(phi)*np.cos(theta))**2 - (.9)**2) + 1), 1)) - - - elif(Shape_Name == "Divets_Around_Pimple"): - if(isscalar(phi)): - if(abs(np.cos(phi)) > .9): - return 1 - r_0*np.exp(-1*(.19)/(np.cos(phi)**2 - (.9)**2) + 1) - - elif(np.sin(phi)*np.cos(theta) > .9): - return 1 + r_0*np.exp(-1*(.19)/((np.sin(phi)*np.cos(theta))**2 - (.9)**2) + 1) - - else: - return 1 - - else: - # Vectorized for manifold 
functions - return np.where(abs(np.cos(phi)) > .9, 1 - r_0*np.exp(-1*(.19)/(np.cos(phi)**2 - (.9)**2) + 1), - np.where(np.sin(phi)*np.cos(theta) > .9, 1 + r_0*np.exp(-1*(.19)/((np.sin(phi)*np.cos(theta))**2 - (.9)**2) + 1), 1)) - - - elif(Shape_Name == "Hour_Glass"): - # Note: (Previously) NOT A SPHERE for R_0 = 0 - return 1 - (6*r_0)*np.exp(-1.0/(1-np.cos(phi)**2)) # previously 2 + r_0, for r_0 = .4 - - - else: - print("\n"+"ERROR: Manifold Shape Name Not Recognized"+"\n") -""" diff --git a/src/napari_stress/_stress/euclidian_k_form_SPB.py b/src/napari_stress/_stress/euclidian_k_form_SPB.py index 32563cde..44ce1f91 100644 --- a/src/napari_stress/_stress/euclidian_k_form_SPB.py +++ b/src/napari_stress/_stress/euclidian_k_form_SPB.py @@ -38,7 +38,7 @@ def get_quadrature_points_from_sh_function(SPH_Func, lbdv, Chart): # Return list of quad vals at each quad pt within Chart (CHART A order) def Extract_dPhi_Quad_Pt_Vals_From_SPH_Fn(sph_func, lbdv, Chart): - Extracted_dPhi_Quad_Vals = zero_quad_array(lbdv.lbdv_quad_pts) + # Extracted_dPhi_Quad_Vals = zero_quad_array(lbdv.lbdv_quad_pts) quad_pts = range(lbdv.lbdv_quad_pts) @@ -63,7 +63,7 @@ def Extract_dPhi_Quad_Pt_Vals_From_SPH_Fn(sph_func, lbdv, Chart): # Return list of quad vals at each quad pt within Chart (CHART A order) def Extract_dPhi_Phi_Quad_Pt_Vals_From_SPH_Fn(sph_func, lbdv, Chart): - Extracted_dPhi_Phi_Quad_Vals = zero_quad_array(lbdv.lbdv_quad_pts) + # Extracted_dPhi_Phi_Quad_Vals = zero_quad_array(lbdv.lbdv_quad_pts) quad_pts = range(lbdv.lbdv_quad_pts) @@ -101,7 +101,7 @@ def Combine_Manny_Gauss_Curvatures(Manny, lbdv, verbose=False): K_cominbed_A_pts = Combine_Chart_Quad_Vals(Manny.K_A_pts, Manny.K_B_pts, lbdv) - if verbose == True: + if verbose is True: print( "test of int on manny of K (in euc_kf) = " + str( @@ -225,40 +225,40 @@ def Sharp(dtheta_A_vals, dphi_A_vals, dtheta_B_vals, dphi_B_vals, p_deg, lbdv, M return euc_k_form(1, lbdv.lbdv_quad_pts, p_deg, Manny, Sharped_Quad_Vals) -# Converts 2-forms from Polar to Euclidean at a Quad Pt -def Two_Form_Conv_to_Euc_pt(quad_pt, lbdv, Chart, Manny): +# # Converts 2-forms from Polar to Euclidean at a Quad Pt +# def Two_Form_Conv_to_Euc_pt(quad_pt, lbdv, Chart, Manny): - # BJG: NEED TO CHANGE TO TO SPB FRAME: - """ - x_pt, y_pt, z_pt = Manny.Cart_Coor(quad_pt, lbdv, Chart) - r_sq_pt = Manny.R_Sq_Val(quad_pt, Chart) +# # BJG: NEED TO CHANGE TO TO SPB FRAME: +# """ +# x_pt, y_pt, z_pt = Manny.Cart_Coor(quad_pt, lbdv, Chart) +# r_sq_pt = Manny.R_Sq_Val(quad_pt, Chart) - if(Chart == 'A'): +# if(Chart == 'A'): - denom_A = (r_pt**2)*np.sqrt(x_pt**2 + y_pt**2) +# denom_A = (r_pt**2)*np.sqrt(x_pt**2 + y_pt**2) - dx_dy_comp = -z_pt/denom_A - dx_dz_comp = y_pt/denom_A - dy_dz_comp = -x_pt/denom_A +# dx_dy_comp = -z_pt/denom_A +# dx_dz_comp = y_pt/denom_A +# dy_dz_comp = -x_pt/denom_A - return dx_dy_comp, dx_dz_comp, dy_dz_comp +# return dx_dy_comp, dx_dz_comp, dy_dz_comp - if(Chart == 'B'): +# if(Chart == 'B'): - denom_B = (r_pt**2)*np.sqrt(y_pt**2 + z_pt**2) +# denom_B = (r_pt**2)*np.sqrt(y_pt**2 + z_pt**2) - dx_dy_comp = -z_pt/denom_B - dx_dz_comp = y_pt/denom_B - dy_dz_comp = -x_pt/denom_B +# dx_dy_comp = -z_pt/denom_B +# dx_dz_comp = y_pt/denom_B +# dy_dz_comp = -x_pt/denom_B - return dx_dy_comp, dx_dz_comp, dy_dz_comp - """ +# return dx_dy_comp, dx_dz_comp, dy_dz_comp +# """ - dx_dy_comp = Polar_Two_Form_to_Euc_dx_dy(quad_pt, Chart) - dx_dz_comp = Polar_Two_Form_to_Euc_dx_dz(quad_pt, Chart) - dy_dz_comp = Polar_Two_Form_to_Euc_dy_dz(quad_pt, Chart) +# dx_dy_comp = 
Polar_Two_Form_to_Euc_dx_dy(quad_pt, Chart) +# dx_dz_comp = Polar_Two_Form_to_Euc_dx_dz(quad_pt, Chart) +# dy_dz_comp = Polar_Two_Form_to_Euc_dy_dz(quad_pt, Chart) - return dx_dy_comp, dx_dz_comp, dy_dz_comp +# return dx_dy_comp, dx_dz_comp, dy_dz_comp # Converts 2-forms from Euclidean to Polar at a Quad Pt @@ -273,7 +273,7 @@ def Two_Form_Conv_to_Polar_pt(quad_pt, lbdv, Manny, Chart): dx_dz_comp = [] dx_dy_comp = [] - if np.isscalar(quad_pt) == True: + if np.isscalar(quad_pt) is True: theta_pt = np.asscalar(theta_pt) phi_pt = np.asscalar(phi_pt) @@ -289,19 +289,16 @@ def Two_Form_Conv_to_Polar_pt(quad_pt, lbdv, Manny, Chart): dx_dy_comp = Cross_of_Basis[2] else: - # Cross_of_Basis = Manny.Normal_Dir_Quad_Pts(lbdv, Chart) #Manny.Normal_Dir(theta_pt, phi_pt, Chart) + # Cross_of_Basis = Manny.Normal_Dir_Quad_Pts(lbdv, Chart) + # Manny.Normal_Dir(theta_pt, phi_pt, Chart) # BJG: New Change: Cross_of_Basis = Manny.Normal_Dir(quad_pt, Chart) - # print("quad_pt = "+str(quad_pt)+", Cross_of_Basis.shape = "+str(Cross_of_Basis.shape)+", Cross_of_Basis = "+str(Cross_of_Basis)) - dy_dz_comp, neg_dx_dz_comp, dx_dy_comp = np.hsplit(Cross_of_Basis, 3) dx_dz_comp = -1.0 * neg_dx_dz_comp - # print("dy_dz_comp = "+str(dy_dz_comp)+"\n"+"dx_dz_comp = "+str(dx_dz_comp)+"\n"+"dx_dy_comp = "+str(dx_dy_comp)+"\n") - # dy_dz_comp = Cross_of_Basis[:, :, 0].T # dx_dz_comp = -1*Cross_of_Basis[:, :, 1].T # dx_dy_comp = Cross_of_Basis[:, :, 2].T @@ -327,10 +324,10 @@ def Gen_Curl_1( dTheta_phi_C_B_SPH_Fn = phi_C_B_SPH_Fn.Quick_Theta_Der() # project this azmuthal derivative into local chart ('A' because we dont rotate) - dTheta_phi_C_A_quad_pt_vals = Extract_Quad_Pt_Vals_From_SPH_Fn( + dTheta_phi_C_A_quad_pt_vals = get_quadrature_points_from_sh_function( dTheta_phi_C_A_SPH_Fn, lbdv, "A" ) - dTheta_phi_C_B_quad_pt_vals = Extract_Quad_Pt_Vals_From_SPH_Fn( + dTheta_phi_C_B_quad_pt_vals = get_quadrature_points_from_sh_function( dTheta_phi_C_B_SPH_Fn, lbdv, "A" ) @@ -344,7 +341,7 @@ def Gen_Curl_1( # inv metric factor, at each point, within Chart: q_val = lbdv.lbdv_quad_pts - quad_pts = range(q_val) + # quad_pts = range(q_val) inv_met_fac_A_pts = np.where( lbdv.Chart_of_Quad_Pts > 0, 1.0 / Manny.Metric_Factor_A_pts, 0 @@ -417,23 +414,26 @@ def Integral_on_Manny(vals_at_quad_pts, Manny, lbdv): # New Version met_fac_over_sin_phi_pts_A = Manny.Metric_Factor_A_over_sin_phi_pts met_fac_over_sin_phi_pts_B = Manny.Metric_Factor_B_over_sin_phi_bar_pts - num_quad_pts = lbdv.lbdv_quad_pts + # num_quad_pts = lbdv.lbdv_quad_pts # We need to rotate integrand into chart B - rotated_vals_at_quad_pts = np.zeros((num_quad_pts, 1)) + # rotated_vals_at_quad_pts = np.zeros((num_quad_pts, 1)) - quad_pts = range(num_quad_pts) - quad_pts_rot = lbdv.Eval_Rot_Lbdv_Quad_vals( - quad_pts - ) # lbdv.Eval_Rot_Lbdv_Quad_vals(quad_pts) - rotated_vals_at_quad_pts = vals_at_quad_pts[quad_pts_rot] + # quad_pts = range(num_quad_pts) + # quad_pts_rot = lbdv.Eval_Rot_Lbdv_Quad_vals( + # quad_pts + # ) # lbdv.Eval_Rot_Lbdv_Quad_vals(quad_pts) + # # rotated_vals_at_quad_pts = vals_at_quad_pts[quad_pts_rot] + # Combine_Chart_Quad_Vals(np.multiply(met_fac_over_sin_phi_pts_A, vals_at_quad_pts), + # np.multiply(met_fac_over_sin_phi_pts_B, rotated_vals_at_quad_pts), + # lbdv) Jacobian_Integrand = np.multiply( Combine_Chart_Quad_Vals( met_fac_over_sin_phi_pts_A, met_fac_over_sin_phi_pts_B, lbdv ), vals_at_quad_pts, - ) # Combine_Chart_Quad_Vals(np.multiply(met_fac_over_sin_phi_pts_A, vals_at_quad_pts), np.multiply(met_fac_over_sin_phi_pts_B, rotated_vals_at_quad_pts), 
lbdv) + ) Jacobian_Int_On_Manny = sph_f.S2_Integral(Jacobian_Integrand, lbdv) @@ -454,8 +454,8 @@ def Lp_Rel_Error_At_Quad_On_Manny( approx_f_vals, f_vals, lbdv, p, Manny ): # Assumes f NOT 0 - Lp_Err = 0 # ||self - f||_p - Lp_f = 0 # || f ||_p + # Lp_Err = 0 # ||self - f||_p + # Lp_f = 0 # || f ||_p pointwise_errs_to_the_p = abs((approx_f_vals - f_vals) ** p) @@ -528,41 +528,6 @@ def Tangent_Projection(Vectors_at_Quad, lbdv, Manny): return Tangent_Vecs_at_Quad -""" -# Uses above code to get Normal direction in both charts: -def Normal_Dirs_Manny(lbdv, Manny): - - num_quad_pts = lbdv.lbdv_quad_pts - Normal_Dirs_at_Quad = np.zeros(( num_quad_pts, 3 )) - - sigma_theta_A_pts = Manny.Sigma_Theta_A_Pts - sigma_theta_B_pts = Manny.Sigma_Theta_B_Pts - - sigma_phi_A_pts = Manny.Sigma_Phi_A_Pts - sigma_phi_B_pts = Manny.Sigma_Phi_B_Pts - - - for quad_pt in range(num_quad_pts): - - Normal_Vec_pt = [] - - # Chart A: - if(lbdv.Chart_of_Quad_Pts[quad_pt] > 0): - - Normal_Vec_pt = np.cross(sigma_theta_A_pts[quad_pt, :].flatten(), sigma_phi_A_pts[quad_pt, :].flatten()) - - # Chart B: - else: - quad_pt_rot = lbdv.Eval_Rot_Lbdv_Quad_vals(quad_pt) - Normal_Vec_pt = np.cross(sigma_theta_B_pts[quad_pt_rot, :].flatten(), sigma_phi_B_pts[quad_pt_rot, :].flatten()) - - Normal_Vec_pt_Mag = np.sqrt( np.dot(Normal_Vec_pt, Normal_Vec_pt) ) - - Normal_Dirs_at_Quad[quad_pt, :] = Normal_Vec_pt.reshape(1,3)/Normal_Vec_pt_Mag - - return Normal_Dirs_at_Quad #np.hstack(( lbdv.X, lbdv.Y, lbdv.Z )) -""" - # Returns Riemannian Inner Product of 1-Forms input, returns Integral of def Riemann_L2_Inner_Product_One_Form(One_Form_pts_1, One_Form_pts_2, lbdv, manny): @@ -662,7 +627,8 @@ def times_fn(self, fn_vals_at_quad_pts): # def Flat(self, deriv, Matrix_Fn_Quad_pt, lbdv): def Flat(self, G_deriv, A_inv_deriv, lbdv): # Deriv = '', for G, 'theta' to use G_theta, 'phi' to use G_phi - # Matrix_Fn_Quad_pt is a 3x3 matrix function of the quad_pt, Chart (Could be eye(3)) + # Matrix_Fn_Quad_pt is a 3x3 matrix function of the quad_pt, + # Chart (Could be eye(3)) # Store the value of one_form comps, before combining: One_Form_Theta_Comp_A_vals = zero_quad_array(self.Q_value) @@ -788,7 +754,8 @@ def Ext_Der(self, lbdv): 0, ) - # Use sharp function above to convert 1-form components to euclidean vector field + # Use sharp function above to convert 1-form + # components to euclidean vector field return Sharp( f_dtheta_A_quad_vals, f_dphi_A_quad_vals, @@ -852,28 +819,28 @@ def Ext_Der(self, lbdv): # print("d_1: finished projections") # Quad Vals of theta derivatives functions for alpha_sharp_X comps: - alpha_sharp_x_A_dtheta_vals = Extract_Quad_Pt_Vals_From_SPH_Fn( + alpha_sharp_x_A_dtheta_vals = get_quadrature_points_from_sh_function( alpha_sharp_x_SPH_A_dtheta, lbdv, "A" ) - alpha_sharp_x_B_dtheta_vals = Extract_Quad_Pt_Vals_From_SPH_Fn( + alpha_sharp_x_B_dtheta_vals = get_quadrature_points_from_sh_function( alpha_sharp_x_SPH_B_dtheta, lbdv, "B" ) - alpha_sharp_y_A_dtheta_vals = Extract_Quad_Pt_Vals_From_SPH_Fn( + alpha_sharp_y_A_dtheta_vals = get_quadrature_points_from_sh_function( alpha_sharp_y_SPH_A_dtheta, lbdv, "A" ) - alpha_sharp_y_B_dtheta_vals = Extract_Quad_Pt_Vals_From_SPH_Fn( + alpha_sharp_y_B_dtheta_vals = get_quadrature_points_from_sh_function( alpha_sharp_y_SPH_B_dtheta, lbdv, "B" ) - alpha_sharp_z_A_dtheta_vals = Extract_Quad_Pt_Vals_From_SPH_Fn( + alpha_sharp_z_A_dtheta_vals = get_quadrature_points_from_sh_function( alpha_sharp_z_SPH_A_dtheta, lbdv, "A" ) - alpha_sharp_z_B_dtheta_vals = Extract_Quad_Pt_Vals_From_SPH_Fn( + 
alpha_sharp_z_B_dtheta_vals = get_quadrature_points_from_sh_function( alpha_sharp_z_SPH_B_dtheta, lbdv, "B" ) - # Quad Vals of phi derivatives functions for alpha_sharp_X comps (within Charts): + # Quad Vals of phi derivatives functions for alpha_sharp_X comps: alpha_sharp_x_A_dphi_vals = Extract_dPhi_Quad_Pt_Vals_From_SPH_Fn( alpha_sharp_x_SPH_A, lbdv, "A" ) @@ -973,7 +940,10 @@ def Y_mat(q_pt, Chart, deriv): return np.linalg.solve(Basis_Mat.T, -1 * dBasis_Mat_deriv.T).T # Values of components, in each chart: - # alpha_Sharp_X.Flat() gives us: (alpha_theta_comp_A_vals, alpha_theta_comp_B_vals, alpha_phi_comp_A_vals, alpha_phi_comp_B_vals) + # alpha_Sharp_X.Flat() gives us: (alpha_theta_comp_A_vals, + # alpha_theta_comp_B_vals, + # alpha_phi_comp_A_vals, + # alpha_phi_comp_B_vals) # print("d_1: created needed 1-forms vecs") @@ -1104,26 +1074,20 @@ def Hodge_Star(self, lbdv): quad_pts = range(self.Q_value) quad_pts_inv_rot = lbdv.Eval_Inv_Rot_Lbdv_Quad_vals(quad_pts) - # values at (theta_C, phi_C) = (theta_i, phi_i), given q_i, Difffernt points on manifold! + # values at (theta_C, phi_C) = (theta_i, phi_i), + # given q_i, Difffernt points on manifold! f_A_pts = self.quad_pt_array[quad_pts] f_B_pts = self.quad_pt_array[quad_pts_inv_rot] # metric factor, at each point, within Chart - met_fac_A_pts = ( - self.Manifold.Metric_Factor_A_pts - ) # np.where(lbdv.Chart_of_Quad_Pts > 0, self.Manifold.Metric_Factor_Quad_Pt(quad_pts, lbdv, 'A'), 0) - met_fac_B_pts = ( - self.Manifold.Metric_Factor_B_pts - ) # np.where(lbdv.Chart_of_Quad_Pts > 0, self.Manifold.Metric_Factor_Quad_Pt(quad_pts, lbdv, 'B'), 0) + met_fac_A_pts = self.Manifold.Metric_Factor_A_pts + + met_fac_B_pts = self.Manifold.Metric_Factor_B_pts # Do Hodge Star: star_f_dtheta_dphi_A_pts = np.multiply(f_A_pts, met_fac_A_pts) star_f_dtheta_dphi_B_pts = np.multiply(f_B_pts, met_fac_B_pts) - # Convert to Euclidean Coors: - # dx_dy_A_vals, dx_dz_A_vals, dy_dz_A_vals = Two_Form_Conv_to_Euc_pt(quad_pts, lbdv, 'A', self.Manifold) - # dx_dy_B_vals, dx_dz_B_vals, dy_dz_B_vals = Two_Form_Conv_to_Euc_pt(quad_pts, lbdv, 'B', self.Manifold) - dx_dy_A_vals = self.Manifold.dx_dy_A_Vals_From_Polar dx_dz_A_vals = self.Manifold.dx_dz_A_Vals_From_Polar dy_dz_A_vals = self.Manifold.dy_dz_A_Vals_From_Polar @@ -1245,13 +1209,7 @@ def Hodge_Star(self, lbdv): quad_pts = range(self.Q_value) quad_pts_inv_rot = lbdv.Eval_Inv_Rot_Lbdv_Quad_vals(quad_pts) - # inv metric factor, at each point, within Chart - - # inv_met_fac_A_pts = np.where(lbdv.Chart_of_Quad_Pts > 0, 1/self.Manifold.Metric_Factor_Quad_Pt(quad_pts, lbdv, 'A'), 0) - # inv_met_fac_B_pts = np.where(lbdv.Chart_of_Quad_Pts > 0, 1/self.Manifold.Metric_Factor_Quad_Pt(quad_pts, lbdv, 'B'), 0) - # BJG: new change: - inv_met_fac_A_pts = np.where( lbdv.Chart_of_Quad_Pts > 0, 1.0 / self.Manifold.Metric_Factor_A_pts, 0 ) @@ -1259,7 +1217,8 @@ def Hodge_Star(self, lbdv): lbdv.Chart_of_Quad_Pts > 0, 1.0 / self.Manifold.Metric_Factor_B_pts, 0 ) - # values at (theta_C, phi_C) = (theta_i, phi_i), given q_i, Difffernt points on manifold! + # values at (theta_C, phi_C) = (theta_i, phi_i), + # given q_i, Difffernt points on manifold! 
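Several hunks in this file also replace `== True` comparisons on flags such as `verbose` and `debug_mode` with `is True`, presumably to satisfy the pycodestyle/ruff rule E712 now that ruff is configured in `pyproject.toml`. The two spellings are not equivalent, and for plain boolean flags a bare truthiness test is the usual idiom; a small illustration, not code from the plugin:

debug_mode = 1                 # truthy, but not the object True

print(debug_mode == True)      # True  - equality, also matches 1 (flagged as E712)
print(debug_mode is True)      # False - identity, only the literal True passes
print(bool(debug_mode))        # True  - what a bare `if debug_mode:` tests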
beta_dx_dy_A_pts, beta_dx_dz_A_pts, beta_dy_dz_A_pts = np.hsplit( self.quad_pt_array[quad_pts], 3 ) @@ -1406,7 +1365,13 @@ def Gen_Curl_0(self, lbdv): neg_star_df_B_theta_pts, neg_star_df_B_phi_pts, ) - # Sharp(star_alpha_A_theta_pts, star_alpha_A_phi_pts, star_alpha_B_theta_pts, star_alpha_B_phi_pts, self.P_degree, lbdv, self.Manifold) + # Sharp(star_alpha_A_theta_pts, + # star_alpha_A_phi_pts, + # star_alpha_B_theta_pts, + # star_alpha_B_phi_pts, + # self.P_degree, + # lbdv, + # self.Manifold) else: print("Error: Zero Forms needed for this inner-product fn to work") @@ -1496,9 +1461,7 @@ def Gen_Curl_to_K_Form(self, lbdv): ) else: - print( - "Error: Zero Forms needed for this to work (Will implement 1-forms next)" - ) + print("Error: Zero Forms needed for this to work ") # div(v) = -\delta v^{\flat} def Divergence_1_Form(self, lbdv, debug_mode=False): @@ -1510,7 +1473,7 @@ def Divergence_1_Form(self, lbdv, debug_mode=False): ) Vec_X_SPH_Fn_dTheta_A = Vec_X_SPH_Fn_A.Quick_Theta_Der() - Vec_X_A_dTheta_quad_vals = Extract_Quad_Pt_Vals_From_SPH_Fn( + Vec_X_A_dTheta_quad_vals = get_quadrature_points_from_sh_function( Vec_X_SPH_Fn_dTheta_A, lbdv, "A" ) Vec_X_A_dPhi_quad_vals = Extract_dPhi_Quad_Pt_Vals_From_SPH_Fn( @@ -1521,10 +1484,10 @@ def Divergence_1_Form(self, lbdv, debug_mode=False): quad_pts_inv_rot = lbdv.Eval_Inv_Rot_Lbdv_Quad_vals( range(self.Manifold.num_quad_pts) ) - Vec_X_SPH_Fn_A_quad_pts_from_SPH = Extract_Quad_Pt_Vals_From_SPH_Fn( + Vec_X_SPH_Fn_A_quad_pts_from_SPH = get_quadrature_points_from_sh_function( Vec_X_SPH_Fn_A, lbdv, "A" ) - Vec_X_SPH_Fn_B_quad_pts_from_SPH = Extract_Quad_Pt_Vals_From_SPH_Fn( + Vec_X_SPH_Fn_B_quad_pts_from_SPH = get_quadrature_points_from_sh_function( Vec_X_SPH_Fn_B, lbdv, "A" ) norm_err_Vec_X_A = sph_f.Lp_Rel_Error_At_Quad( @@ -1538,7 +1501,7 @@ def Divergence_1_Form(self, lbdv, debug_mode=False): ) if ( norm_err_Vec_X_A > 1.0e-3 or norm_err_Vec_X_A > 1.0e-3 - ) and debug_mode == True: + ) and debug_mode is True: print( "norm_err_Vec_X_A test failed, norm_err_Vec_X_A = " + str(norm_err_Vec_X_A) @@ -1549,7 +1512,7 @@ def Divergence_1_Form(self, lbdv, debug_mode=False): ) Vec_X_SPH_Fn_dTheta_B = Vec_X_SPH_Fn_B.Quick_Theta_Der() - Vec_X_B_dTheta_quad_vals = Extract_Quad_Pt_Vals_From_SPH_Fn( + Vec_X_B_dTheta_quad_vals = get_quadrature_points_from_sh_function( Vec_X_SPH_Fn_dTheta_B, lbdv, "A" ) Vec_X_B_dPhi_quad_vals = Extract_dPhi_Quad_Pt_Vals_From_SPH_Fn( @@ -1562,7 +1525,7 @@ def Divergence_1_Form(self, lbdv, debug_mode=False): ) Vec_Y_SPH_Fn_dTheta_A = Vec_Y_SPH_Fn_A.Quick_Theta_Der() - Vec_Y_A_dTheta_quad_vals = Extract_Quad_Pt_Vals_From_SPH_Fn( + Vec_Y_A_dTheta_quad_vals = get_quadrature_points_from_sh_function( Vec_Y_SPH_Fn_dTheta_A, lbdv, "A" ) Vec_Y_A_dPhi_quad_vals = Extract_dPhi_Quad_Pt_Vals_From_SPH_Fn( @@ -1570,7 +1533,7 @@ def Divergence_1_Form(self, lbdv, debug_mode=False): ) Vec_Y_SPH_Fn_dTheta_B = Vec_Y_SPH_Fn_B.Quick_Theta_Der() - Vec_Y_B_dTheta_quad_vals = Extract_Quad_Pt_Vals_From_SPH_Fn( + Vec_Y_B_dTheta_quad_vals = get_quadrature_points_from_sh_function( Vec_Y_SPH_Fn_dTheta_B, lbdv, "A" ) Vec_Y_B_dPhi_quad_vals = Extract_dPhi_Quad_Pt_Vals_From_SPH_Fn( @@ -1581,10 +1544,10 @@ def Divergence_1_Form(self, lbdv, debug_mode=False): quad_pts_inv_rot = lbdv.Eval_Inv_Rot_Lbdv_Quad_vals( range(self.Manifold.num_quad_pts) ) - Vec_Y_SPH_Fn_A_quad_pts_from_SPH = Extract_Quad_Pt_Vals_From_SPH_Fn( + Vec_Y_SPH_Fn_A_quad_pts_from_SPH = get_quadrature_points_from_sh_function( Vec_Y_SPH_Fn_A, lbdv, "A" ) - Vec_Y_SPH_Fn_B_quad_pts_from_SPH = 
Extract_Quad_Pt_Vals_From_SPH_Fn( + Vec_Y_SPH_Fn_B_quad_pts_from_SPH = get_quadrature_points_from_sh_function( Vec_Y_SPH_Fn_B, lbdv, "A" ) norm_err_Vec_Y_A = sph_f.Lp_Rel_Error_At_Quad( @@ -1598,7 +1561,7 @@ def Divergence_1_Form(self, lbdv, debug_mode=False): ) if ( norm_err_Vec_Y_A > 1.0e-3 or norm_err_Vec_Y_A > 1.0e-3 - ) and debug_mode == True: + ) and debug_mode is True: print( "norm_err_Vec_Y_A test failed, norm_err_Vec_Y_A = " + str(norm_err_Vec_Y_A) @@ -1614,7 +1577,7 @@ def Divergence_1_Form(self, lbdv, debug_mode=False): ) Vec_Z_SPH_Fn_dTheta_A = Vec_Z_SPH_Fn_A.Quick_Theta_Der() - Vec_Z_A_dTheta_quad_vals = Extract_Quad_Pt_Vals_From_SPH_Fn( + Vec_Z_A_dTheta_quad_vals = get_quadrature_points_from_sh_function( Vec_Z_SPH_Fn_dTheta_A, lbdv, "A" ) Vec_Z_A_dPhi_quad_vals = Extract_dPhi_Quad_Pt_Vals_From_SPH_Fn( @@ -1622,7 +1585,7 @@ def Divergence_1_Form(self, lbdv, debug_mode=False): ) Vec_Z_SPH_Fn_dTheta_B = Vec_Z_SPH_Fn_B.Quick_Theta_Der() - Vec_Z_B_dTheta_quad_vals = Extract_Quad_Pt_Vals_From_SPH_Fn( + Vec_Z_B_dTheta_quad_vals = get_quadrature_points_from_sh_function( Vec_Z_SPH_Fn_dTheta_B, lbdv, "A" ) Vec_Z_B_dPhi_quad_vals = Extract_dPhi_Quad_Pt_Vals_From_SPH_Fn( @@ -1633,10 +1596,10 @@ def Divergence_1_Form(self, lbdv, debug_mode=False): quad_pts_inv_rot = lbdv.Eval_Inv_Rot_Lbdv_Quad_vals( range(self.Manifold.num_quad_pts) ) - Vec_Z_SPH_Fn_A_quad_pts_from_SPH = Extract_Quad_Pt_Vals_From_SPH_Fn( + Vec_Z_SPH_Fn_A_quad_pts_from_SPH = get_quadrature_points_from_sh_function( Vec_Z_SPH_Fn_A, lbdv, "A" ) - Vec_Z_SPH_Fn_B_quad_pts_from_SPH = Extract_Quad_Pt_Vals_From_SPH_Fn( + Vec_Z_SPH_Fn_B_quad_pts_from_SPH = get_quadrature_points_from_sh_function( Vec_Z_SPH_Fn_B, lbdv, "A" ) norm_err_Vec_Z_A = sph_f.Lp_Rel_Error_At_Quad( @@ -1650,7 +1613,7 @@ def Divergence_1_Form(self, lbdv, debug_mode=False): ) if ( norm_err_Vec_Z_A > 1.0e-3 or norm_err_Vec_Z_A > 1.0e-3 - ) and debug_mode == True: + ) and debug_mode is True: print( "norm_err_Vec_Z_A test failed, norm_err_Vec_Z_A = " + str(norm_err_Vec_Z_A) @@ -1719,10 +1682,6 @@ def Divergence_1_Form(self, lbdv, debug_mode=False): V_local_coors_theta_dtheta_V_B = np.zeros((self.Q_value, 1)) V_local_coors_phi_dphi_V_B = np.zeros((self.Q_value, 1)) - manny_MF_tol = self.Manifold.Tol - met_fac_A_pts = self.Manifold.Metric_Factor_A_pts - met_fac_B_pts = self.Manifold.Metric_Factor_B_pts - # for testing A^-1: g_inv_theta_theta_A_pts = self.Manifold.g_Inv_Theta_Theta_A_Pts g_inv_theta_phi_A_pts = self.Manifold.g_Inv_Theta_Phi_A_Pts @@ -1745,7 +1704,6 @@ def Divergence_1_Form(self, lbdv, debug_mode=False): # Change of Basis mats A_Mat_pt = self.Manifold.Change_Basis_Mat(quad_pt, "A") - # print("det(A_Mat_pt) = "+str( np.linalg.det(A_Mat_pt) )+", met_fac_A_pts[quad_pt] = "+str(met_fac_A_pts[quad_pt]) +", A_Mat_pt = "+str(A_Mat_pt) ) A_theta_Mat_pt = self.Manifold.dChange_Basis_Mat_theta(quad_pt, "A") A_phi_Mat_pt = self.Manifold.dChange_Basis_Mat_phi(quad_pt, "A") @@ -1760,7 +1718,7 @@ def Divergence_1_Form(self, lbdv, debug_mode=False): Id_test = np.dot(A_inv_from_G_pt, A_Mat_pt) if ( np.linalg.norm(Id_test - np.eye(3), 2) > 1.0e-8 - and debug_mode == True + and debug_mode is True ): print( "A_inv test failed at pt: " @@ -1774,14 +1732,6 @@ def Divergence_1_Form(self, lbdv, debug_mode=False): A_inv_from_G_pt, Vec_X_A_pt ) # np.linalg.solve(A_Mat_pt, Vec_X_A_pt) - """ - # test new A^-1: - Vec_X_local_coors_A_test_pt = np.dot(A_inv_from_G_pt, Vec_X_A_pt) - norm_err_A = np.linalg.norm(Vec_X_local_coors_A_pt - Vec_X_local_coors_A_test_pt , 2) - if( norm_err_A 
> 1.e-8 and debug_mode == True): - print("A_inv test failed at pt: "+str(quad_pt)+", norm_err_A = "+str(norm_err_A)) - """ - V_local_coors_theta_A[quad_pt, 0] = Vec_X_local_coors_A_pt[0] V_local_coors_phi_A[quad_pt, 0] = Vec_X_local_coors_A_pt[1] @@ -1838,7 +1788,7 @@ def Divergence_1_Form(self, lbdv, debug_mode=False): Id_test_B = np.dot(B_inv_from_G_pt, B_Mat_pt) if ( np.linalg.norm(Id_test_B - np.eye(3), 2) > 1.0e-8 - and debug_mode == True + and debug_mode is True ): print("B_inv test failed at pt: " + str(quad_pt)) All_B_inv_successful = False @@ -1930,7 +1880,7 @@ def Divergence_1_Form(self, lbdv, debug_mode=False): Debug_Dict["div_V_A_pts"] = div_V_A_pts Debug_Dict["div_V_B_pts"] = div_V_B_pts - if debug_mode == True: + if debug_mode is True: print("\n" + "All_A_inv_successful = " + str(All_A_inv_successful)) print("All_B_inv_successful = " + str(All_B_inv_successful) + "\n") return Debug_Dict @@ -1952,7 +1902,7 @@ def Explicit_LB(self, lbdv): Y_mn_SPH_Fn_dTheta_A = Y_mn_SPH_Fn_A.Quick_Theta_Der() Y_mn_SPH_Fn_dTheta_Theta_A = Y_mn_SPH_Fn_A.Quick_Theta_Der().Quick_Theta_Der() - Y_mn_A_dTheta_quad_vals = Extract_Quad_Pt_Vals_From_SPH_Fn( + Y_mn_A_dTheta_quad_vals = get_quadrature_points_from_sh_function( Y_mn_SPH_Fn_dTheta_A, lbdv, "A" ) Y_mn_A_dPhi_quad_vals = Extract_dPhi_Quad_Pt_Vals_From_SPH_Fn( @@ -1961,7 +1911,7 @@ def Explicit_LB(self, lbdv): Y_mn_A_dTheta_dPhi_quad_vals = Extract_dPhi_Quad_Pt_Vals_From_SPH_Fn( Y_mn_SPH_Fn_dTheta_A, lbdv, "A" ) - Y_mn_A_dTheta_dTheta_quad_vals = Extract_Quad_Pt_Vals_From_SPH_Fn( + Y_mn_A_dTheta_dTheta_quad_vals = get_quadrature_points_from_sh_function( Y_mn_SPH_Fn_dTheta_Theta_A, lbdv, "A" ) Y_mn_A_dPhi_dPhi_quad_vals = Extract_dPhi_Phi_Quad_Pt_Vals_From_SPH_Fn( @@ -1971,7 +1921,7 @@ def Explicit_LB(self, lbdv): Y_mn_SPH_Fn_dTheta_B = Y_mn_SPH_Fn_B.Quick_Theta_Der() Y_mn_SPH_Fn_dTheta_Theta_B = Y_mn_SPH_Fn_B.Quick_Theta_Der().Quick_Theta_Der() - Y_mn_B_dTheta_quad_vals = Extract_Quad_Pt_Vals_From_SPH_Fn( + Y_mn_B_dTheta_quad_vals = get_quadrature_points_from_sh_function( Y_mn_SPH_Fn_dTheta_B, lbdv, "A" ) Y_mn_B_dPhi_quad_vals = Extract_dPhi_Quad_Pt_Vals_From_SPH_Fn( @@ -1980,7 +1930,7 @@ def Explicit_LB(self, lbdv): Y_mn_B_dTheta_dPhi_quad_vals = Extract_dPhi_Quad_Pt_Vals_From_SPH_Fn( Y_mn_SPH_Fn_dTheta_B, lbdv, "A" ) - Y_mn_B_dTheta_dTheta_quad_vals = Extract_Quad_Pt_Vals_From_SPH_Fn( + Y_mn_B_dTheta_dTheta_quad_vals = get_quadrature_points_from_sh_function( Y_mn_SPH_Fn_dTheta_Theta_B, lbdv, "A" ) Y_mn_B_dPhi_dPhi_quad_vals = Extract_dPhi_Phi_Quad_Pt_Vals_From_SPH_Fn( @@ -2026,7 +1976,9 @@ def Explicit_LB(self, lbdv): ) return euc_k_form(0, self.Q_value, self.P_degree, self.Manifold, LB_quad_pts) - # Uses curl operators to compute LB of Zero Forms, to test whether we get better convergence this way (by ignoring intermediate k-form stuctures) + # Uses curl operators to compute LB of Zero Forms, to test whether + # we get better convergence this way + # (by ignoring intermediate k-form stuctures) def LB_Zero_Form_From_Curl(self, lbdv): if self.k_value != 0: @@ -2052,15 +2004,6 @@ def LB_Zero_Form_From_Curl(self, lbdv): return neg_LB_f_0_Form.times_const(-1) - """ - # Compute Co-Differential: k_form -> (k-1)_form - def Co_Diff(self, lbdv): - n = 2 #2-D manifold - k = self.k_value - - return self.Hodge_Star(lbdv).Ext_Der(lbdv).Hodge_Star(lbdv).times_const((-1)**(n*(k+1)+1)) #Multiply by - 1, result is (k-1)-form - """ - # LB: k_form -> k_form (d\delta + \delta d) = (d + \delta)^2 def Laplace_Beltrami(self, lbdv): if self.k_value != 2: @@ -2071,13 
+2014,14 @@ def Laplace_Beltrami(self, lbdv): if self.k_value == 1: Second_Term = self.Co_Diff(lbdv).Ext_Der(lbdv) # zero for K==0 - return linear_comb(First_Term, Second_Term, 1, 1) + return self.linear_comb(First_Term, Second_Term, 1, 1) if self.k_value == 2: Second_Term = self.Co_Diff(lbdv).Ext_Der(lbdv) return Second_Term - # Returns Riemannian Inner Product of 1-Forms input, returns vector of scalar inner-products at each point + # Returns Riemannian Inner Product of 1-Forms input, + # returns vector of scalar inner-products at each point def Riemann_Inner_Prouct_One_Form(self, One_Form_2, lbdv): # if we have faulty input @@ -2087,65 +2031,6 @@ def Riemann_Inner_Prouct_One_Form(self, One_Form_2, lbdv): else: Inner_Product_One_Form_Vals_pts = np.zeros((self.Q_value, 1)) - - """ - # Hard to vectorize matrix solves - for quad_pt in range(self.Q_value): - - # If Chart A: - if(lbdv.Chart_of_Quad_Pts[quad_pt] > 0): - - alpha_1_sharp_X_A_pt = self.quad_pt_array[quad_pt, :].T - alpha_2_sharp_X_A_pt = One_Form_2.quad_pt_array[quad_pt, :].T - - Flat_A_mat = self.Manifold.rho_A_Mats[:, :, quad_pt] - - one_form_comps_A_pt = np.dot(Flat_A_mat, alpha_1_sharp_X_A_pt) - - alpha_1_flat_theta_A_pt = one_form_comps_A_pt[0] - alpha_1_flat_phi_A_pt = one_form_comps_A_pt[1] - - A_Mat_pt = self.Manifold.Change_Basis_Mat(quad_pt, lbdv, 'A') - alpha_2_sharp_A_pt = np.linalg.solve(A_Mat_pt, alpha_2_sharp_X_A_pt) - - alpha_2_sharp_theta_A_pt = alpha_2_sharp_A_pt[0] - alpha_2_sharp_phi_A_pt = alpha_2_sharp_A_pt[1] - - #I_A_mat_pt = self.Manifold.I_Mat_Quad_Pt(quad_pt, lbdv, 'A') - - Inner_Product_A_pt = alpha_2_sharp_theta_A_pt*alpha_1_flat_theta_A_pt + alpha_2_sharp_phi_A_pt*alpha_1_flat_phi_A_pt - - Inner_Product_One_Form_Vals_pts[quad_pt, 0] = Inner_Product_A_pt - - # We fill in other pts with Chart B: - else: - - quad_pt_rot = lbdv.Eval_Rot_Lbdv_Quad_vals(quad_pt) - - #same index because we more efficiently split the computation, to avoid overlap in calculation - alpha_1_sharp_X_B_pt = self.quad_pt_array[quad_pt, :].T - alpha_2_sharp_X_B_pt = One_Form_2.quad_pt_array[quad_pt, :].T - - Flat_B_mat = self.Manifold.rho_B_Mats[:, :, quad_pt_rot] - - one_form_comps_B_pt = np.dot(Flat_B_mat, alpha_1_sharp_X_B_pt) - - alpha_1_flat_theta_B_pt = one_form_comps_B_pt[0] - alpha_1_flat_phi_B_pt = one_form_comps_B_pt[1] - - B_Mat_pt = self.Manifold.Change_Basis_Mat(quad_pt_rot, lbdv, 'B') - alpha_2_sharp_B_pt = np.linalg.solve(B_Mat_pt, alpha_2_sharp_X_B_pt) - - alpha_2_sharp_theta_B_pt = alpha_2_sharp_B_pt[0] - alpha_2_sharp_phi_B_pt = alpha_2_sharp_B_pt[1] - - #I_B_mat_pt = self.Manifold.I_Mat_Quad_Pt(quad_pt_rot, lbdv, 'B') - - Inner_Product_B_pt = alpha_2_sharp_theta_B_pt*alpha_1_flat_theta_B_pt + alpha_2_sharp_phi_B_pt*alpha_1_flat_phi_B_pt - - Inner_Product_One_Form_Vals_pts[quad_pt, 0] = Inner_Product_B_pt - """ - Inner_Product_One_Form_Vals_pts = ( np.dot(self.quad_pt_array, One_Form_2.quad_pt_array.T) .diagonal() @@ -2154,19 +2039,15 @@ def Riemann_Inner_Prouct_One_Form(self, One_Form_2, lbdv): return Inner_Product_One_Form_Vals_pts - # Returns Riemannian Inner Product of 2-Forms with ITSELF input, returns vector of scalar inner-products at each point + # Returns Riemannian Inner Product of 2-Forms with ITSELF input, + # returns vector of scalar inner-products at each point def Riemann_Self_Inner_Prouct_Two_Form(self, lbdv): # Corresponding quad pt in chart B (in Chart A Coors) quad_pts = range(self.Q_value) quad_pts_inv_rot = lbdv.Eval_Inv_Rot_Lbdv_Quad_vals(quad_pts) - # inv metric factor, at each point, within 
Chart - # inv_met_fac_A_pts = np.where(lbdv.Chart_of_Quad_Pts > 0, 1/self.Manifold.Metric_Factor_Quad_Pt(quad_pts, lbdv, 'A'), 0) - # inv_met_fac_B_pts = np.where(lbdv.Chart_of_Quad_Pts > 0, 1/self.Manifold.Metric_Factor_Quad_Pt(quad_pts, lbdv, 'B'), 0) - # BJG: New Change: - inv_met_fac_A_pts = np.where( lbdv.Chart_of_Quad_Pts > 0, 1.0 / self.Manifold.Metric_Factor_A_pts, 0 ) @@ -2174,7 +2055,8 @@ def Riemann_Self_Inner_Prouct_Two_Form(self, lbdv): lbdv.Chart_of_Quad_Pts > 0, 1.0 / self.Manifold.Metric_Factor_B_pts, 0 ) - # values at (theta_C, phi_C) = (theta_i, phi_i), given q_i, Difffernt points on manifold! + # values at (theta_C, phi_C) = (theta_i, phi_i), + # given q_i, Difffernt points on manifold! beta_dx_dy_A_pts, beta_dx_dz_A_pts, beta_dy_dz_A_pts = np.hsplit( self.quad_pt_array[quad_pts], 3 ) diff --git a/src/napari_stress/_stress/lebedev_info_SPB.py b/src/napari_stress/_stress/lebedev_info_SPB.py index 1047d082..8b5e1a5f 100644 --- a/src/napari_stress/_stress/lebedev_info_SPB.py +++ b/src/napari_stress/_stress/lebedev_info_SPB.py @@ -1,21 +1,19 @@ # From https://github.com/campaslab/STRESS #! This class Generates All of the quadrature information that sph_func, k_form use -from numpy import * -from scipy import * +import numpy as np import mpmath -import cmath from scipy.special import sph_harm -from .lebedev_write_SPB import * # lists all Lebdv quadratures -from .charts_SPB import * # For Lebedev Point Conversion +from .lebedev_write_SPB import Lebedev # lists all Lebdv quadratures +from .charts_SPB import eta_A, Cart_To_Coor_A, Domain # for pickling: # import cPickle as pkl # BJG: py2pt7 version import pickle as pkl -import os, sys +import os -# Allows us to find appropriate quadrature: +# Allows us to find appropriate quadrature:s quad_deg_lookUp = { 6: 2, 14: 3, @@ -104,7 +102,7 @@ def get_quad_degree(quad_pts): return quad_deg_lookUp[quad_pts] -####################################################################################################################### +####################################################################################### def Eval_SPH_Basis(M_Coef, N_Coef, Theta, Phi): @@ -121,15 +119,15 @@ def Der_Phi_Basis_Fn(M_Coef, N_Coef, Theta, Phi): # M_Coef < 0 Corresonds to Z^ Der_Phi_Val = [] # For Scalar Case, we use usual vectorization: - if isscalar(Theta): + if np.isscalar(Theta): # COPIED FROM SPH_DER_PHI_FN Der_Phi_Val = 0 if M_Coef == 0: # No Cotangent terms: if N_Coef > 0: return ( - sqrt((N_Coef) * (N_Coef + 1)) - * (((e ** (-1j * Theta)) * sph_harm(1, N_Coef, Theta, Phi))).real + np.sqrt((N_Coef) * (N_Coef + 1)) + * (((np.e ** (-1j * Theta)) * sph_harm(1, N_Coef, Theta, Phi))).real ) else: return 0 # d_phi Y^0_0 = 0 @@ -143,9 +141,12 @@ def Der_Phi_Basis_Fn(M_Coef, N_Coef, Theta, Phi): # M_Coef < 0 Corresonds to Z^ if m_sph < N_Coef: Der_Phi_Val += ( - sqrt((N_Coef - m_sph) * (N_Coef + m_sph + 1)) + np.sqrt((N_Coef - m_sph) * (N_Coef + m_sph + 1)) * ( - ((e ** (-1j * Theta)) * sph_harm(m_sph + 1, N_Coef, Theta, Phi)) + ( + (np.e ** (-1j * Theta)) + * sph_harm(m_sph + 1, N_Coef, Theta, Phi) + ) ).imag ) @@ -158,9 +159,12 @@ def Der_Phi_Basis_Fn(M_Coef, N_Coef, Theta, Phi): # M_Coef < 0 Corresonds to Z^ if m_sph < N_Coef: Der_Phi_Val += ( - sqrt((N_Coef - m_sph) * (N_Coef + m_sph + 1)) + np.sqrt((N_Coef - m_sph) * (N_Coef + m_sph + 1)) * ( - ((e ** (-1j * Theta)) * sph_harm(m_sph + 1, N_Coef, Theta, Phi)) + ( + (np.e ** (-1j * Theta)) + * sph_harm(m_sph + 1, N_Coef, Theta, Phi) + ) ).real ) @@ -172,7 +176,7 @@ def Der_Phi_Basis_Fn(M_Coef, 
N_Coef, Theta, Phi): # M_Coef < 0 Corresonds to Z^ if M_Coef == 0: # No Cotangent terms: if N_Coef > 0: return ( - sqrt((N_Coef) * (N_Coef + 1)) + np.sqrt((N_Coef) * (N_Coef + 1)) * (((np.exp(-1j * Theta)) * sph_harm(1, N_Coef, Theta, Phi))).real ) else: @@ -187,7 +191,7 @@ def Der_Phi_Basis_Fn(M_Coef, N_Coef, Theta, Phi): # M_Coef < 0 Corresonds to Z^ if m_sph < N_Coef: Der_Phi_Val += ( - sqrt((N_Coef - m_sph) * (N_Coef + m_sph + 1)) + np.sqrt((N_Coef - m_sph) * (N_Coef + m_sph + 1)) * ( ( (np.exp(-1j * Theta)) @@ -205,7 +209,7 @@ def Der_Phi_Basis_Fn(M_Coef, N_Coef, Theta, Phi): # M_Coef < 0 Corresonds to Z^ if m_sph < N_Coef: Der_Phi_Val += ( - sqrt((N_Coef - m_sph) * (N_Coef + m_sph + 1)) + np.sqrt((N_Coef - m_sph) * (N_Coef + m_sph + 1)) * ( ( (np.exp(-1j * Theta)) @@ -235,24 +239,24 @@ def Der_Phi_Phi_Basis_Fn( if m_sph < N_Coef: Der_Phi_Phi_Val += ( - sqrt((N_Coef - m_sph) * (N_Coef + m_sph + 1)) + np.sqrt((N_Coef - m_sph) * (N_Coef + m_sph + 1)) * (2 * m_sph + 1) * mpmath.cot(Phi) * ( - ((e ** (-1j * Theta)) * sph_harm(m_sph + 1, N_Coef, Theta, Phi)) + ((np.e ** (-1j * Theta)) * sph_harm(m_sph + 1, N_Coef, Theta, Phi)) ).imag ) if m_sph < (N_Coef - 1): Der_Phi_Phi_Val += ( - sqrt( + np.sqrt( (N_Coef - m_sph) * (N_Coef - m_sph - 1) * (N_Coef + m_sph + 1) * (N_Coef + m_sph + 2) ) * ( - ((e ** (-2j * Theta)) * sph_harm(m_sph + 2, N_Coef, Theta, Phi)) + ((np.e ** (-2j * Theta)) * sph_harm(m_sph + 2, N_Coef, Theta, Phi)) ).imag ) @@ -267,23 +271,23 @@ def Der_Phi_Phi_Basis_Fn( if m_sph < N_Coef: Der_Phi_Phi_Val += ( - sqrt((N_Coef - m_sph) * (N_Coef + m_sph + 1)) + np.sqrt((N_Coef - m_sph) * (N_Coef + m_sph + 1)) * (2 * m_sph + 1) * mpmath.cot(Phi) * ( - ((e ** (-1j * Theta)) * sph_harm(m_sph + 1, N_Coef, Theta, Phi)) + ((np.e ** (-1j * Theta)) * sph_harm(m_sph + 1, N_Coef, Theta, Phi)) ).real ) if m_sph < (N_Coef - 1): Der_Phi_Phi_Val += ( - sqrt( + np.sqrt( (N_Coef - m_sph) * (N_Coef - m_sph - 1) * (N_Coef + m_sph + 1) * (N_Coef + m_sph + 2) ) * ( - ((e ** (-2j * Theta)) * sph_harm(m_sph + 2, N_Coef, Theta, Phi)) + ((np.e ** (-2j * Theta)) * sph_harm(m_sph + 2, N_Coef, Theta, Phi)) ).real ) @@ -294,9 +298,9 @@ def Lbdv_Cart_To_Sph( Cart_Pts_Wts, ): # takes matrix with rows [x,y,z,w] -> [theta, phi, w] (r=1) - num_lbdv_pts = shape(Cart_Pts_Wts)[0] + num_lbdv_pts = np.shape(Cart_Pts_Wts)[0] - Sph_Pts_Wts = zeros((num_lbdv_pts, 3)) + Sph_Pts_Wts = np.zeros((num_lbdv_pts, 3)) for pt in range(num_lbdv_pts): @@ -321,13 +325,13 @@ def get_5810_quad_pts(): euc_quad_pts = Lebedev(5810) sph_quad_pts = Lbdv_Cart_To_Sph(euc_quad_pts) - x_pts, y_pts, z_pts, w_pts = hsplit(euc_quad_pts, 4) - theta_pts, phi_pts, w_pts = hsplit(sph_quad_pts, 3) + x_pts, y_pts, z_pts, w_pts = np.hsplit(euc_quad_pts, 4) + theta_pts, phi_pts, w_pts = np.hsplit(sph_quad_pts, 3) return x_pts, y_pts, z_pts, theta_pts, phi_pts -###################################################################################################################################################### +####################################################################################### class lbdv_info(object): # Generates (ONCE) and stores Lebedev Info @@ -347,21 +351,23 @@ def __init__( os.makedirs(PICKLE_DIR) ### GENERATE 5810 Quadrature ONCE ####### - # print("generating quad pts") # BJG: only notify if NEW mats are needed (time-consuming) + # print("generating quad pts") + # # BJG: only notify if NEW mats are needed (time-consuming) self.lbdv_quad_pts = Num_Quad_Pts # Needs to be appropriate number up to 5810 #### To see if there are errors 
in assigning points ############### self.Lbdv_Cart_Pts_Quad = Lebedev(self.lbdv_quad_pts) - self.X, self.Y, self.Z, self.W = hsplit(self.Lbdv_Cart_Pts_Quad, 4) + self.X, self.Y, self.Z, self.W = np.hsplit(self.Lbdv_Cart_Pts_Quad, 4) ################################################################### self.Lbdv_Sph_Pts_Quad = Lbdv_Cart_To_Sph(self.Lbdv_Cart_Pts_Quad) - self.theta_pts, self.phi_pts, self.weight_pts = hsplit( + self.theta_pts, self.phi_pts, self.weight_pts = np.hsplit( self.Lbdv_Sph_Pts_Quad, 3 ) - # print("quad pts done") # BJG: only notify if NEW mats are needed (time-consuming) + # print("quad pts done") + # # BJG: only notify if NEW mats are needed (time-consuming) ######################################### LBDV_Basis_at_Quad_Pts_Mats_filename = "LBDV_Basis_at_Quad_Pts" + LBDV_name @@ -373,7 +379,8 @@ def __init__( LBDV_Basis_at_Quad_Pts_Mats_filepath ): # If already pickled, we load it, and split it into the needed arrays: - # print("\n"+"Loading Pickled LBDV data Mats"+"\n") # BJG: only notify if NEW mats are needed (time-consuming) + # print("\n"+"Loading Pickled LBDV data Mats"+"\n") + # # BJG: only notify if NEW mats are needed (time-consuming) Pickled_LBDV_Basis_at_Quad_Pts_Mats = [] @@ -400,11 +407,11 @@ def __init__( print("generating basis vals") # Store for W_pt*Y^m_n at each point, in same format as coef mat - self.SPH_Basis_Wt_At_Quad_Pts = zeros( + self.SPH_Basis_Wt_At_Quad_Pts = np.zeros( (Max_SPH_Deg + 1, Max_SPH_Deg + 1, self.lbdv_quad_pts) ) # includes weights - self.SPH_Basis_At_Quad_Pts = zeros( + self.SPH_Basis_At_Quad_Pts = np.zeros( (Max_SPH_Deg + 1, Max_SPH_Deg + 1, self.lbdv_quad_pts) ) # does NOT inculude weights @@ -447,22 +454,20 @@ def __init__( ) print("generated basis vals") - ####################### Der Phi Fns To Speed Up Code ############################## + ####################### Der Phi Fns To Speed Up Code ################## print("generating dphi/ dphi_phi vals") # Create matrix to store Phi Der of all degrees used - self.SPH_Phi_Der_At_Quad_Pts = zeros( + self.SPH_Phi_Der_At_Quad_Pts = np.zeros( (Max_SPH_Deg + 1, Max_SPH_Deg + 1, self.lbdv_quad_pts) ) # Create matrix to store 2nd Phi Der of all degrees used - self.SPH_Phi_Phi_Der_At_Quad_Pts = zeros( + self.SPH_Phi_Phi_Der_At_Quad_Pts = np.zeros( (Max_SPH_Deg + 1, Max_SPH_Deg + 1, self.lbdv_quad_pts) ) - # eta_A(lambda Theta, Phi: SPH_Der_Phi_Fn(SPH_Deg, Coef_Mat, Theta, Phi), Theta, Phi) - # Fill up matrix ONCE, with above function, composed with eta_A for N_Coef in range(Max_SPH_Deg + 1): # 0,...,Max_SPH_Deg for M_Coef in range(-1 * N_Coef, N_Coef + 1): # -N,...,N @@ -536,7 +541,7 @@ def __init__( print("done with dphi/ dphi_phi vals" + "\n") ###!!! 
PICLKLE RESULTS FOR FUTURE USE !!!### - To_Pickle_LBDV_Basis_at_Quad_Pts_Mats = zeros( + To_Pickle_LBDV_Basis_at_Quad_Pts_Mats = np.zeros( (Max_SPH_Deg + 1, Max_SPH_Deg + 1, self.lbdv_quad_pts, 4) ) @@ -556,7 +561,7 @@ def __init__( with open(LBDV_Basis_at_Quad_Pts_Mats_filepath, "wb") as f_lbdv_basis: pkl.dump(To_Pickle_LBDV_Basis_at_Quad_Pts_Mats, f_lbdv_basis) - ####################### LBDV Rotation To Speed Up Code ############################## + ####################### LBDV Rotation To Speed Up Code ######################## LBDV_Chart_of_Quad_Pts_Mats_filename = "LBDV_Chart_of_Quad_Pts" + LBDV_name LBDV_Chart_of_Quad_Pts_Mats_filepath = os.path.join( @@ -567,7 +572,8 @@ def __init__( LBDV_Chart_of_Quad_Pts_Mats_filepath ): # If already pickled, we load it, and split it into the needed arrays: - # print("\n"+"Loading Pickled LBDV Chart Mats"+"\n") # BJG: only notify if NEW mats are needed (time-consuming) + # print("\n"+"Loading Pickled LBDV Chart Mats"+"\n") + # # BJG: only notify if NEW mats are needed (time-consuming) Pickled_LBDV_Charts_Quad_Pts_Mats = [] @@ -589,21 +595,14 @@ def __init__( print("generating lbdv rotation vals") - self.Rot_Lbdv_Quad_vals = zeros( + self.Rot_Lbdv_Quad_vals = np.zeros( (self.lbdv_quad_pts, 1) ) # Stores equivlent Quad_pt in Chart B, for input Quad_Pt in Chart A - self.Inv_Rot_Lbdv_Quad_vals = zeros( + self.Inv_Rot_Lbdv_Quad_vals = np.zeros( (self.lbdv_quad_pts, 1) ) # Stores equivlent Quad_pt in Chart A, for input Quad_Pt in Chart B - """ - Example: if quad_pt = i corresponds to (theta, phi) = (0, pi/2), and quad_pt = j corresponds to (0, pi), - then self.Rot_Lbdv_Quad_vals[j] = i, - and self.Inv_Rot_Lbdv_Quad_vals[i] = j, - since (theta, phi) = (0, pi) has the same euclidean coors as (theta_bar, phi_bar) = (0, pi/2) - """ - - self.Chart_of_Quad_Pts = zeros( + self.Chart_of_Quad_Pts = np.zeros( (self.lbdv_quad_pts, 1) ) # 1 if pt is in Chart A, -1 if not @@ -624,9 +623,9 @@ def __init__( # If we are able to identify rotated quad pt in Chart B rot_pt_found = False - x_pt = self.X[quad_pt] # cos(theta_pt)*sin(phi_pt) - y_pt = self.Y[quad_pt] # sin(theta_pt)*sin(phi_pt) - z_pt = self.Z[quad_pt] # cos(phi_pt) + x_pt = self.X[quad_pt] # np.cos(theta_pt)*sin(phi_pt) + y_pt = self.Y[quad_pt] # np.sin(theta_pt)*sin(phi_pt) + z_pt = self.Z[quad_pt] # np.cos(phi_pt) # Find Rotated Quad Pt at same location: for quad_pt_rot in range(self.lbdv_quad_pts): @@ -634,21 +633,21 @@ def __init__( theta_bar_pt_rot = self.theta_pts[quad_pt_rot] phi_bar_pt_rot = self.phi_pts[quad_pt_rot] - x_pt_rot = cos(phi_bar_pt_rot) - y_pt_rot = sin(theta_bar_pt_rot) * sin(phi_bar_pt_rot) - z_pt_rot = -1 * cos(theta_bar_pt_rot) * sin(phi_bar_pt_rot) + x_pt_rot = np.cos(phi_bar_pt_rot) + y_pt_rot = np.sin(theta_bar_pt_rot) * np.sin(phi_bar_pt_rot) + z_pt_rot = -1 * np.cos(theta_bar_pt_rot) * np.sin(phi_bar_pt_rot) if abs(x_pt - x_pt_rot) < 1e-7: if abs(y_pt - y_pt_rot) < 1e-7: if abs(z_pt - z_pt_rot) < 1e-7: - if rot_pt_found == False: + if rot_pt_found is False: rot_pt_found = True self.Rot_Lbdv_Quad_vals[quad_pt] = quad_pt_rot self.Inv_Rot_Lbdv_Quad_vals[quad_pt_rot] = quad_pt - if rot_pt_found == False: + if rot_pt_found is False: print("!!ROTATED QUAD PT NOT FOUND!!") print("done with lbdv rotation vals" + "\n") @@ -695,7 +694,8 @@ def Eval_SPH_Der_Phi_Phi_At_Quad_Pts(self, M, N, Quad_Pt): # Return in Matrix format for VECTORIZATION in Sph_Func: - # Fn that will retrieve all values to be used in quadrature, in nice format (of All quad points, for Y^M_N) + # Fn that will retrieve 
all values to be used in quadrature, + # in nice format (of All quad points, for Y^M_N) def Eval_SPH_Basis_Wt_M_N(self, M, N): if M >= 0: diff --git a/src/napari_stress/_stress/lebedev_write_SPB.py b/src/napari_stress/_stress/lebedev_write_SPB.py index 70bbfd6d..2dae4c66 100644 --- a/src/napari_stress/_stress/lebedev_write_SPB.py +++ b/src/napari_stress/_stress/lebedev_write_SPB.py @@ -46,9 +46,7 @@ 1975, pp. 44-51. """ -from math import * import numpy as np -from numpy import * def genOh_a00(v): @@ -67,7 +65,7 @@ def genOh_a00(v): def genOh_aa0(v): "(0,a,a) etc, a=1/sqrt(2) (12 points)" - a = sqrt(0.5) + a = np.sqrt(0.5) v = 4.0 * np.pi * v return [ (0, a, a, v), @@ -87,7 +85,7 @@ def genOh_aa0(v): def genOh_aaa(v): "(a,a,a) etc, a=1/sqrt(3) (8 points)" - a = sqrt(1.0 / 3.0) + a = np.sqrt(1.0 / 3.0) v = 4.0 * np.pi * v return [ (a, a, a, v), @@ -103,7 +101,7 @@ def genOh_aaa(v): def genOh_aab(v, a): "(a,a,b) etc, b=sqrt(1-2 a^2), a input (24 points)" - b = sqrt(1.0 - 2.0 * a * a) + b = np.sqrt(1.0 - 2.0 * a * a) v = 4.0 * np.pi * v return [ (a, a, b, v), @@ -135,7 +133,7 @@ def genOh_aab(v, a): def genOh_ab0(v, a): "(a,b,0) etc, b=sqrt(1-a^2), a input (24 points)" - b = sqrt(1.0 - a * a) + b = np.sqrt(1.0 - a * a) v = 4.0 * np.pi * v return [ (a, b, 0, v), @@ -167,7 +165,7 @@ def genOh_ab0(v, a): def genOh_abc(v, a, b): "(a,b,c) etc, c=sqrt(1-a^2-b^2), a,b input (48 points)" - c = sqrt(1.0 - a * a - b * b) + c = np.sqrt(1.0 - a * a - b * b) v = 4.0 * np.pi * v return [ (a, b, c, v), @@ -4830,7 +4828,7 @@ def Lebedev(n): try: lebPoints_cart_w = LebFunc[n]() return np.array(lebPoints_cart_w) - except: + except Exception: raise "No grid available for %d" % n return None @@ -4888,7 +4886,7 @@ def Lebedev(n): # self.theta = []; # # def cart2sph(): -# self.r = sqrt(self.x**2 + self.y**2 + self.z**2) +# self.r = np.sqrt(self.x**2 + self.y**2 + self.z**2) # self.phi = atan(self.y/self.x) # self.theta = atan(self.z/sqrt(self.x**2+self.y**2) # diff --git a/src/napari_stress/_stress/manifold_SPB.py b/src/napari_stress/_stress/manifold_SPB.py index 3d20d217..3616a871 100644 --- a/src/napari_stress/_stress/manifold_SPB.py +++ b/src/napari_stress/_stress/manifold_SPB.py @@ -1,6 +1,5 @@ # From https://github.com/campaslab/STRESS #!!! WILL DEPEND ON DIFF GEO WHEN WE GO OFF SPHERE !!! # -from numpy import * import numpy as np # import mpmath as mp @@ -8,7 +7,7 @@ # for picking manny inv mats # import cPickle as pkl # BJG: py2pt7 version import pickle as pkl -import os, sys +import os from . 
import sph_func_SPB as sph_f @@ -31,7 +30,7 @@ def Manny_Fn_Def(theta, phi, r_0, Manny_Name, IsRadial): # We may want seperate defs for each coordinate: def Manny_Fn_Def_X(theta, phi, r_0, Manny_Name, IsRadial): - if IsRadial == True: + if IsRadial is True: return ( Radial_Manifold_R_Def(theta, phi, r_0, Manny_Name) * np.cos(theta) @@ -43,7 +42,7 @@ def Manny_Fn_Def_X(theta, phi, r_0, Manny_Name, IsRadial): def Manny_Fn_Def_Y(theta, phi, r_0, Manny_Name, IsRadial): - if IsRadial == True: + if IsRadial is True: return ( Radial_Manifold_R_Def(theta, phi, r_0, Manny_Name) * np.sin(theta) @@ -55,7 +54,7 @@ def Manny_Fn_Def_Y(theta, phi, r_0, Manny_Name, IsRadial): def Manny_Fn_Def_Z(theta, phi, r_0, Manny_Name, IsRadial): - if IsRadial == True: + if IsRadial is True: return Radial_Manifold_R_Def(theta, phi, r_0, Manny_Name) * np.cos(phi) else: return Non_Radial_Manifold_Z_Def(theta, phi, r_0, Manny_Name) @@ -98,7 +97,7 @@ def Non_Radial_Manifold_X_Def(theta, phi, r_0, Manny_Name): ) # normalized arcen up to bottom hemi omega = np.pi / (r_0 + np.pi * R) # normalized speed - if isscalar(phi): + if np.isscalar(phi): if phi < phi_1: return R * np.sin(phi / (R * omega)) * np.cos(theta) elif phi < phi_2: @@ -161,7 +160,7 @@ def Non_Radial_Manifold_Y_Def(theta, phi, r_0, Manny_Name): ) # normalized arcen up to bottom hemi omega = np.pi / (r_0 + np.pi * R) # normalized speed - if isscalar(phi): + if np.isscalar(phi): if phi < phi_1: return R * np.sin(phi / (R * omega)) * np.sin(theta) elif phi < phi_2: @@ -222,7 +221,7 @@ def Non_Radial_Manifold_Z_Def(theta, phi, r_0, Manny_Name): ) # normalized arcen up to bottom hemi omega = np.pi / (r_0 + np.pi * R) # normalized speed - if isscalar(phi): + if np.isscalar(phi): if phi < phi_1: return R * np.cos(phi / (R * omega)) + r_0 / 2.0 elif phi < phi_2: @@ -303,7 +302,7 @@ def Radial_Manifold_R_Def(theta, phi, r_0, Manny_Name): return 1.0 + r_0 * np.sin(7 * phi) * np.cos(theta) elif Manny_Name == "Quint_Spikes": - c_shape = 0.5 # distortion magnitude + # c_shape = 0.5 # distortion magnitude k_shape = 10.0 # inv. length-scale of distortion decay tol_shape = 1.0e-3 @@ -487,7 +486,7 @@ def Radial_Manifold_R_Def(theta, phi, r_0, Manny_Name): phi_1 = np.arctan(R / (r_0 / 2.0)) phi_2 = np.pi - np.arctan(R / (r_0 / 2.0)) - if isscalar(phi): + if np.isscalar(phi): if phi < phi_1: return (r_0 / 2.0) * np.cos(phi) + np.sqrt( R**2 - ((r_0 / 2) * np.sin(phi)) ** 2 @@ -513,7 +512,7 @@ def Radial_Manifold_R_Def(theta, phi, r_0, Manny_Name): elif Manny_Name == "Divet": - if isscalar(phi): + if np.isscalar(phi): if np.cos(phi) > 0.9: return 1.0 - r_0 * np.exp( -1.0 * (0.19) / (np.cos(phi) ** 2 - (0.9) ** 2) + 1.0 @@ -532,7 +531,7 @@ def Radial_Manifold_R_Def(theta, phi, r_0, Manny_Name): elif Manny_Name == "Double_Divet": - if isscalar(phi): + if np.isscalar(phi): if np.cos(phi) > 0.9: return 1.0 - r_0 * np.exp( -1.0 * (0.19) / (np.cos(phi) ** 2 - (0.9) ** 2) + 1.0 @@ -566,7 +565,7 @@ def Radial_Manifold_R_Def(theta, phi, r_0, Manny_Name): elif Manny_Name == "Divets_Around_Pimple": - if isscalar(phi): + if np.isscalar(phi): if abs(np.cos(phi)) > 0.9: return 1.0 - r_0 * np.exp( -1.0 * (0.19) / (np.cos(phi) ** 2 - (0.9) ** 2) + 1.0 @@ -618,7 +617,8 @@ def Radial_Manifold_R_Def(theta, phi, r_0, Manny_Name): ) -# Takes an array of R0 Values and returns string format for Rayleigh Dissipation Studies (!!!!VTU NOT PICKLE!!!!) +# Takes an array of R0 Values and returns string format +# for Rayleigh Dissipation Studies (!!!!VTU NOT PICKLE!!!!) 
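A note on the `== True` → `is True` conversions running through manifold_SPB.py in the hunks above (presumably to satisfy a linter rule such as flake8's E712): the two spellings are only interchangeable when the flag is a genuine Python bool. A minimal sketch of the pitfall, assuming a flag could ever arrive as a NumPy boolean — something this patch does not claim, but which is easy to hit in NumPy-heavy code:

```python
import numpy as np

debug_mode = np.bool_(True)   # e.g. a flag pulled out of a NumPy array

print(debug_mode == True)     # True  -> value comparison still holds
print(debug_mode is True)     # False -> identity check silently fails
print(bool(debug_mode))       # True  -> plain `if debug_mode:` is the robust spelling
```

Where the flags are ordinary Python booleans, all three spellings agree and the change is purely stylistic; a bare truthiness test would sidestep the question entirely.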
def Max_Decimal_R0_Array(R0_Values_Array): max_dec_length = 0 @@ -646,17 +646,19 @@ def Max_Decimal_R0_Array(R0_Values_Array): return new_R0_array_labels -####################################################################################################################### +######################################################################################## -class manifold( - object -): # This now represents geo of S^2, will later be adapted to other manifolds with same topology +class manifold(object): + # This now represents geo of S^2, + # will later be adapted to other manifolds with same topology """ Use Manifold name to automatically load/pickle manny inv mats: - Format: Maniold_Official_Name = Man_Shape_Name+"R_0_"+R_0_str+"_Pdeg_"+str(deg_basis)+"_Q"+str(num_quad_pts) - Filename "Manny_Inv_Mats_" + Maniold_Official_Name + ".p", goes in 'Pickled_Manny_Inv_Mat_Files' sub-directory + Format: Maniold_Official_Name = + Man_Shape_Name+"R_0_"+R_0_str+"_Pdeg_"+str(deg_basis)+"_Q"+str(num_quad_pts) + Filename "Manny_Inv_Mats_" + + Maniold_Official_Name + ".p", goes in 'Pickled_Manny_Inv_Mat_Files' sub-directory Man_Shape_Name = "S2", "Chew_Toy", "Gen_R0_Pill", "Dog_Shit", etc R_0_str = "0pt3", "0pt0" for example. """ @@ -666,14 +668,18 @@ def __init__( Manifold_Constr_Dict: dict, manifold_type: str = "cartesian", raw_coordinates: np.ndarray = None, - ): # BJG: Need to add option to initialize from point cloud of lbdv point vals, and from named manifold + ): + # BJG: Need to add option to initialize from + # point cloud of lbdv point vals, and from named manifold # old constructor: (self, R_func, R_deg, lbdv, Maniold_Official_Name = []) # print("Constructing Manifold") # BJG: should add verbose option self.manifold_type = manifold_type self.raw_coordinates = raw_coordinates - self.Tol = 1.0e-3 # Threshold for considering a quantity as 0, in terms of where to switch charts (replaces using condtion: lbdv.Chart_of_Quad_Pts > 0 ) + # Threshold for considering a quantity as 0, in terms of where to + # switch charts (replaces using condtion: lbdv.Chart_of_Quad_Pts > 0 ) + self.Tol = 1.0e-3 self.pickling = Manifold_Constr_Dict[ "Pickle_Manny_Data" ] # BJG: May not want to pickle for moving surface and debugging @@ -684,12 +690,12 @@ def __init__( self.Man_SPH_Deg = Manifold_Constr_Dict["Manifold_SPH_deg"] self.Use_Man_Name = Manifold_Constr_Dict["use_manifold_name"] - self.Man_Shape_Dict = Manifold_Constr_Dict[ - "Maniold_Name_Dict" - ] # this is a (possibly trivial) dictionary of manifold name and r_0 value, OR {x,y,z} at quad pts + self.Man_Shape_Dict = Manifold_Constr_Dict["Maniold_Name_Dict"] + # this is a (possibly trivial) dictionary of manifold name + # and r_0 value, OR {x,y,z} at quad pts # BJG: given name and r0 value, we can use function to get points we need: - if self.Use_Man_Name == True: + if self.Use_Man_Name is True: self.IsManRad = self.Man_Shape_Dict["Is_Manifold_Radial"] self.Man_R0_Val = self.Man_Shape_Dict["Maniold_R0_Value"] @@ -763,7 +769,8 @@ def __init__( self.Z_A_Pts, self.Man_SPH_Deg, lbdv ) - # BJG: Lists of Quad Pt Vals, so we can evaluate, with necesary derivatives at these points: + # BJG: Lists of Quad Pt Vals, so we can evaluate, + # with necesary derivatives at these points: self.X_theta = self.X.Quick_Theta_Der() self.X_theta_A_Pts = self.X_theta.Eval_SPH_Coef_Mat(self.quad_pts, lbdv) @@ -1132,8 +1139,6 @@ def __init__( 0, ) - # d_k(g_ik/met_fac): - # self.E_over_Metric_Factor_dTheta_A = np.where(lbdv.Chart_of_Quad_Pts > 0, 
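The constructor hunks above and below all lean on the same masking idiom: `np.where(lbdv.Chart_of_Quad_Pts > 0, <chart-A quantity>, 0)` keeps a per-quadrature-point value only where the point actually lives in chart A. A small illustrative sketch with placeholder arrays (the names mimic, but are not, the real lbdv and manifold attributes):

```python
import numpy as np

# Stand-ins shaped like the (n_quad_pts, 1) column vectors used in the constructor.
chart_of_quad_pts = np.array([[1], [-1], [1], [-1]])        # 1 = chart A, -1 = chart B
metric_factor_A_pts = np.array([[2.0], [5.0], [4.0], [5.0]])

# Inverse metric factor only for chart-A points, zero elsewhere.  np.where
# evaluates both branches, so the chart-B entries still need to be finite
# for the division itself not to warn.
inv_met_fac_A_pts = np.where(chart_of_quad_pts > 0, 1.0 / metric_factor_A_pts, 0)
print(inv_met_fac_A_pts.ravel())   # [0.5  0.   0.25 0.  ]
```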
(self.Metric_Factor_A_pts*self.E_theta_A_pts - self.E_A_pts*self.Metric_Factor_dTheta_A_pts)/self.Metric_Factor_Squared_A, 0) self.F_over_Metric_Factor_dTheta_A = np.where( lbdv.Chart_of_Quad_Pts > 0, ( @@ -1171,9 +1176,7 @@ def __init__( / self.Metric_Factor_Squared_A, 0, ) - # self.G_over_Metric_Factor_dPhi_A = np.where(lbdv.Chart_of_Quad_Pts > 0, (self.Metric_Factor_A_pts*self.G_phi_A_pts - self.G_A_pts*self.Metric_Factor_dPhi_A_pts)/self.Metric_Factor_Squared_A, 0) - # self.E_over_Metric_Factor_dTheta_B = np.where(lbdv.Chart_of_Quad_Pts > 0, (self.Metric_Factor_B_pts*self.E_theta_B_pts - self.E_B_pts*self.Metric_Factor_dTheta_B_pts)/self.Metric_Factor_Squared_B, 0) self.F_over_Metric_Factor_dTheta_B = np.where( lbdv.Chart_of_Quad_Pts > 0, ( @@ -1211,7 +1214,6 @@ def __init__( / self.Metric_Factor_Squared_B, 0, ) - # self.G_over_Metric_Factor_dPhi_B = np.where(lbdv.Chart_of_Quad_Pts > 0, (self.Metric_Factor_B_pts*self.G_phi_B_pts - self.G_B_pts*self.Metric_Factor_dPhi_B_pts)/self.Metric_Factor_Squared_B, 0) ### ### From Sharp!/*(1-forms): @@ -1267,7 +1269,8 @@ def __init__( (self.X_phi_phi_B_Pts, self.Y_phi_phi_B_Pts, self.Z_phi_phi_B_Pts) ) - # Normal Vectors and II can be computed here as well (within Chart, for pickled fields below): + # Normal Vectors and II can be computed here as well + # (within Chart, for pickled fields below): self.Normal_Dir_X_A_Pts = ( self.Y_theta_A_Pts * self.Z_phi_A_Pts - self.Z_theta_A_Pts * self.Y_phi_A_Pts @@ -1375,7 +1378,8 @@ def __init__( + self.Normal_Vec_Z_B_Pts * self.Z_phi_phi_B_Pts ) - # We can use this to compute entries of the Weingarten Map Directly, where W = [[W_11, W_12], [W_21, W_22]]: + # We can use this to compute entries of the Weingarten Map Directly, + # where W = [[W_11, W_12], [W_21, W_22]]: self.Wein_11_A_Pts = np.where( lbdv.Chart_of_Quad_Pts > 0, (self.L_A_Pts * self.G_A_pts - self.M_A_Pts * self.F_A_pts) @@ -1460,9 +1464,6 @@ def __init__( lbdv.Chart_of_Quad_Pts > 0, -self.X_A_Pts / Denom_A, 0 ) - # print("self.dx_dy_A_Vals_From_Polar_NEW = "+str(self.dx_dy_A_Vals_From_Polar_NEW)) - # print("self.dx_dy_A_Vals_From_Polar_OLD = "+str(self.dx_dy_A_Vals_From_Polar)) - Denom_B = np.multiply( (self.R_sq_B_Pts), np.sqrt(self.Y_B_Pts**2 + self.Z_B_Pts**2) ) @@ -1500,17 +1501,17 @@ def zero_vector_of_basis_mats(): # If we are given a name for the manifold, we can use pickling Manny_Inv_Mats_filepath = [] - if Manifold_Constr_Dict["use_manifold_name"] == True: + if Manifold_Constr_Dict["use_manifold_name"] is True: Inv_Mats_Name = ( "Manny_Inv_Mats_" + self.Man_Official_Name + ".p" ) # name of file we dump/load the inv_mats from Manny_Inv_Mats_filepath = os.path.join(PICKLE_Manny_DIR, Inv_Mats_Name) if ( - Manifold_Constr_Dict["use_manifold_name"] == False - or os.path.isfile(Manny_Inv_Mats_filepath) == False + Manifold_Constr_Dict["use_manifold_name"] is False + or os.path.isfile(Manny_Inv_Mats_filepath) is False or self.Man_Official_Name == [] - or self.pickling == False + or self.pickling is False ): # If we need to (re)generate these: self.rho_A_Mats = zero_vector_of_basis_mats() # rho = G*(A^-1) @@ -1590,7 +1591,8 @@ def zero_vector_of_basis_mats(): G_phi_A_Mat_pt = self.dG_Mat_phi(quad_pt, "A") G_phi_B_Mat_pt = self.dG_Mat_phi(quad_pt, "B") - # use these to compute matrcies for flat, and derivative of cotangent components + # use these to compute matrcies for flat, + # and derivative of cotangent components rho_A_pt = np.linalg.solve(A_Mat_pt.T, G_A_Mat_pt.T).T rho_B_pt = np.linalg.solve(B_Mat_pt.T, G_B_Mat_pt.T).T @@ -1634,11 
+1636,12 @@ def zero_vector_of_basis_mats(): # self.K_A_pts[quad_pt] = linalg.det(self.Wein_Map(quad_pt, 'A')) # self.K_B_pts[quad_pt] = linalg.det(self.Wein_Map(quad_pt, 'B')) - # If we know the name, (and we allow pickling) we pickle inv_mats we just generated + # If we know the name, (and we allow pickling) + # we pickle inv_mats we just generated if ( - Manifold_Constr_Dict["use_manifold_name"] == True + Manifold_Constr_Dict["use_manifold_name"] is True and self.Man_Official_Name != [] - and self.pickling == True + and self.pickling is True ): print("pickling Manny Inv Mats for re-use" + "\n") @@ -1657,8 +1660,6 @@ def zero_vector_of_basis_mats(): To_Pickle_inv_Manny_Mats[:, :, :, 8] = self.xi_phi_A_Mats To_Pickle_inv_Manny_Mats[:, :, :, 9] = self.xi_phi_B_Mats - # print("To_Pickle_inv_Manny_Mats.shape = "+str(To_Pickle_inv_Manny_Mats.shape)) - Manny_Info_Dict = {} Manny_Info_Dict["Inv_Mats"] = To_Pickle_inv_Manny_Mats Manny_Info_Dict["K_A"] = self.K_A_pts @@ -1671,7 +1672,8 @@ def zero_vector_of_basis_mats(): pkl.dump(Manny_Info_Dict, f_manny) """ else: - #print("NOT pickling Manny Inv Mats for later re-use"+"\n") # BJG: should add verbose option + #print("NOT pickling Manny Inv Mats for later re-use"+"\n") + # BJG: should add verbose option """ # If we have already pickled the above matricies, we load them: @@ -1706,44 +1708,22 @@ def zero_vector_of_basis_mats(): self.xi_phi_B_Mats, ) = np.squeeze(np.split(Pickled_inv_Manny_Mats, 10, 3)) - """ - self.rho_A_Mats = Pickled_Inverse_Mats[:,:,:, 0] - self.rho_B_Mats = Pickled_Inverse_Mats[:,:,:, 1] - self.rho_theta_A_Mats = Pickled_Inverse_Mats[:,:,:, 2] - self.rho_theta_B_Mats = Pickled_Inverse_Mats[:,:,:, 3] - self.rho_phi_A_Mats = Pickled_Inverse_Mats[:,:,:, 4] - self.rho_phi_B_Mats = Pickled_Inverse_Mats[:,:,:, 5] - self.xi_theta_A_Mats = Pickled_Inverse_Mats[:,:,:, 6] - self.xi_theta_B_Mats = Pickled_Inverse_Mats[:,:,:, 7] - self.xi_phi_A_Mats = Pickled_Inverse_Mats[:,:,:, 8] - self.xi_phi_B_Mats = Pickled_Inverse_Mats[:,:,:, 9] - """ - # print("Done Computing Flat Tensors") # BJG: should add verbose option - """ - ### From *(2-forms) - inv_met_fac_A_pts = np.where(lbdv.Chart_of_Quad_Pts > 0, 1/self.Manifold.Metric_Factor_Quad_Pt(self.quad_pts, lbdv, 'A'), 0) - inv_met_fac_B_pts = np.where(lbdv.Chart_of_Quad_Pts > 0, 1/self.Manifold.Metric_Factor_Quad_Pt(self.quad_pts, lbdv, 'B'), 0) - - dx_dy_to_dtheta_dphi_A_pts, dx_dz_to_dtheta_dphi_A_pts, dy_dz_to_dtheta_dphi_A_pts = Two_Form_Conv_to_Polar_pt(self.quad_pts, lbdv, self.Manifold, 'A') - dx_dy_to_dtheta_dphi_B_pts, dx_dz_to_dtheta_dphi_B_pts, dy_dz_to_dtheta_dphi_B_pts = Two_Form_Conv_to_Polar_pt(self.quad_pts, lbdv, self.Manifold, 'B') - """ - # print("Manifold Constucted"+"\n") # BJG: should add verbose option def get_coordinates(self): return np.stack([self.X_A_Pts, self.Y_A_Pts, self.Z_A_Pts]).transpose() - ######### Vector (& Derivs) of Manifold, Normals, 2-form Convs ################################################################################### + ######### Vector (& Derivs) of Manifold, Normals, 2-form Convs ##################### - def Cart_Coors(quad_pt, Chart): + def Cart_Coors(self, quad_pt, Chart): if Chart == "A": return self.Cart_Coors_A[quad_pt, :] if Chart == "B": return self.Cart_Coors_B[quad_pt, :] - def R_Sq_Val(quad_pt, Chart): + def R_Sq_Val(self, quad_pt, Chart): if Chart == "A": return self.R_sq_A_Pts[quad_pt, :] if Chart == "B": @@ -1811,7 +1791,7 @@ def Polar_Two_Form_to_Euc_dy_dz(self, quad_pt, Chart): if Chart == "B": return 
self.dy_dz_B_Vals_From_Polar[quad_pt, :] - ######### Elements of First Fundamental Form and Its Ders, and W: ################################################################### + ######### Elements of First Fundamental Form and Its Ders, and W: ################## def E(self, quad_pt, Chart): if Chart == "A": @@ -1892,7 +1872,7 @@ def W_22(self, quad_pt, Chart): if Chart == "B": return self.Wein_22_B_Pts[quad_pt, 0] - ####### Fundamental Forms and Weingarten Map ########################################################################## + ####### Fundamental Forms and Weingarten Map ###################################### # First Fundamental Form def I_Mat(self, quad_pt, Chart): @@ -1924,7 +1904,7 @@ def I_phi_Mat(self, quad_pt, Chart): ] ) - ##### Weingarten Map/ Change of BASIS ################################################################################# + ##### Weingarten Map/ Change of BASIS ############################################# # Weingarten Map, derivative of (inward) normal vector def Wein_Map(self, quad_pt, Chart): @@ -1944,15 +1924,14 @@ def Normal_vec_dtheta(self, quad_pt, Chart): sigma_phi_C_pt = self.sigma_phi(quad_pt, Chart) # For Scalar Case, we use usual vectorization: - if isscalar(quad_pt): + if np.isscalar(quad_pt): return -1.0 * ( W_map[0, 0] * sigma_theta_C_pt + W_map[1, 0] * sigma_phi_C_pt ) - # For multiple quad pts, we use einstein sumation to output vector of solutions at each point: + # For multiple quad pts, we use einstein sumation + # to output vector of solutions at each point: else: - # print("W_map[0,0].shape = "+str( W_map[0,0].shape )) - # print("sigma_theta_C_pt.shape = "+str( sigma_theta_C_pt.shape )) return -1.0 * ( W_map[0, 0].reshape(len(quad_pt), 1) * sigma_theta_C_pt + W_map[1, 0].reshape(len(quad_pt), 1) * sigma_phi_C_pt @@ -1966,12 +1945,13 @@ def Normal_vec_dphi(self, quad_pt, Chart): sigma_phi_C_pt = self.sigma_phi(quad_pt, Chart) # For Scalar Case, we use usual vectorization: - if isscalar(quad_pt): + if np.isscalar(quad_pt): return -1.0 * ( W_map[0, 1] * sigma_theta_C_pt + W_map[1, 1] * sigma_phi_C_pt ) - # For multiple quad pts, we use einstein sumation to output vector of solutions at each point: + # For multiple quad pts, we use einstein + # sumation to output vector of solutions at each point: else: return -1.0 * ( W_map[0, 1].reshape(len(quad_pt), 1) * sigma_theta_C_pt @@ -1981,18 +1961,7 @@ def Normal_vec_dphi(self, quad_pt, Chart): # Matrix Used to Convert to Polar def Change_Basis_Mat(self, quad_pt, Chart): - Basis_mat = zeros((3, 3)) - """ - if(Chart == 'A'): - Basis_mat[:, 0] = self.Sigma_Theta_A_Pts[quad_pt, :].T #self.sigma_theta(theta_pt, phi_pt, Chart).T - Basis_mat[:, 1] = self.Sigma_Phi_A_Pts[quad_pt, :].T #self.sigma_phi(theta_pt, phi_pt, Chart).T - Basis_mat[:, 2] = self.Normal_Vecs_A_Pts[quad_pt, :].T #self.Normal_Vec(theta_pt, phi_pt, Chart).T - - if(Chart == 'B'): - Basis_mat[:, 0] = self.Sigma_Theta_B_Pts[quad_pt, :].T - Basis_mat[:, 1] = self.Sigma_Phi_B_Pts[quad_pt, :].T - Basis_mat[:, 2] = self.Normal_Vecs_B_Pts[quad_pt, :].T - """ + Basis_mat = np.zeros((3, 3)) Basis_mat[:, 0] = self.sigma_theta(quad_pt, Chart).T Basis_mat[:, 1] = self.sigma_phi(quad_pt, Chart).T @@ -2003,7 +1972,7 @@ def Change_Basis_Mat(self, quad_pt, Chart): # Entry-wise deriv of Basis mat, dtheta def dChange_Basis_Mat_theta(self, quad_pt, Chart): - dBasis_mat_theta = zeros((3, 3)) + dBasis_mat_theta = np.zeros((3, 3)) dBasis_mat_theta[:, 0] = self.sigma_theta_theta(quad_pt, Chart).T dBasis_mat_theta[:, 1] = self.sigma_theta_phi(quad_pt, 
Chart).T @@ -2014,7 +1983,7 @@ def dChange_Basis_Mat_theta(self, quad_pt, Chart): # Entry-wise deriv of Basis mat, dphi def dChange_Basis_Mat_phi(self, quad_pt, Chart): - dBasis_mat_phi = zeros((3, 3)) + dBasis_mat_phi = np.zeros((3, 3)) dBasis_mat_phi[:, 0] = self.sigma_theta_phi(quad_pt, Chart).T dBasis_mat_phi[:, 1] = self.sigma_phi_phi(quad_pt, Chart).T @@ -2025,21 +1994,21 @@ def dChange_Basis_Mat_phi(self, quad_pt, Chart): # I mat is embedded: def G_Mat(self, quad_pt, Chart): - G_mat = zeros((3, 3)) + G_mat = np.zeros((3, 3)) G_mat[0:2, 0:2] = self.I_Mat(quad_pt, Chart) return G_mat def dG_Mat_theta(self, quad_pt, Chart): - G_theta_mat = zeros((3, 3)) + G_theta_mat = np.zeros((3, 3)) G_theta_mat[0:2, 0:2] = self.I_theta_Mat(quad_pt, Chart) return G_theta_mat def dG_Mat_phi(self, quad_pt, Chart): - G_phi_mat = zeros((3, 3)) + G_phi_mat = np.zeros((3, 3)) G_phi_mat[0:2, 0:2] = self.I_phi_Mat(quad_pt, Chart) return G_phi_mat diff --git a/src/napari_stress/_stress/sph_func_SPB.py b/src/napari_stress/_stress/sph_func_SPB.py index 84692ac6..dcdd3001 100644 --- a/src/napari_stress/_stress/sph_func_SPB.py +++ b/src/napari_stress/_stress/sph_func_SPB.py @@ -1,15 +1,11 @@ # From https://github.com/campaslab/STRESS -#! We use spherical_harmonics_function to encapsulate data from SPH Basis of functions, regardless of chart +#! We use spherical_harmonics_function to encapsulate data +# from SPH Basis of functions, regardless of chart -from numpy import * import numpy as np -from scipy import * -import mpmath -import cmath -from scipy.special import sph_harm -from .lebedev_info_SPB import * -from .charts_SPB import * +from .lebedev_info_SPB import Der_Phi_Basis_Fn, Der_Phi_Phi_Basis_Fn, Eval_SPH_Basis +from .charts_SPB import Chart_Min_Polar, Chart_Max_Polar, Coor_B_To_A import pyshtools @@ -40,7 +36,7 @@ def Inv_Mass_Mat_on_Pullback(SPH_Deg): # Creates a SPH representation of Y^m_n def Create_Basis_Fn(m, n, basis_degree): - Coef_Mat = zeros((basis_degree + 1, basis_degree + 1)) + Coef_Mat = np.zeros((basis_degree + 1, basis_degree + 1)) if m >= 0: Coef_Mat[n - m][n] = 1.0 @@ -53,7 +49,7 @@ def Create_Basis_Fn(m, n, basis_degree): # Approximate func(theta, phi) in FE space def Proj_Func(func, SPH_Deg, lbdv): - Proj_Coef = zeros([SPH_Deg + 1, SPH_Deg + 1]) + Proj_Coef = np.zeros([SPH_Deg + 1, SPH_Deg + 1]) theta_quad_pts = lbdv.Lbdv_Sph_Pts_Quad[:, 0] phi_quad_pts = lbdv.Lbdv_Sph_Pts_Quad[:, 1] @@ -70,7 +66,7 @@ def Proj_Func(func, SPH_Deg, lbdv): for m in range(-1 * n, n + 1): Proj_Coef_mn = sum( - multiply(lbdv.Eval_SPH_Basis_Wt_M_N(m, n), func_vals_at_quad_pts) + np.multiply(lbdv.Eval_SPH_Basis_Wt_M_N(m, n), func_vals_at_quad_pts) ) / Mass_Mat_Exact(m, n) if m > 0: @@ -89,9 +85,6 @@ def Proj_Into_SPH_Charts(func, Coef_Deg, lbdv): # Coef_A = Proj_Func(lambda theta, phi: eta_A(func, theta, phi), Coef_Deg) sph_func_A = Proj_Func(func, Coef_Deg, lbdv) # using class structure - # func for chart B - # Coef_B = Proj_Func(lambda theta_bar, phi_bar: eta_B(func, theta_bar, phi_bar), Coef_Deg) - # rotate func to Chart B Coors def func_rot(theta_bar, phi_bar): @@ -100,7 +93,7 @@ def func_rot(theta_bar, phi_bar): Theta_Vals = A_Coors[0] Phi_Vals = A_Coors[1] - func_vals = func(Theta_Vals, Phi_Vals) # zeros(shape(theta_bar)) + func_vals = func(Theta_Vals, Phi_Vals) # np.zeros(shape(theta_bar)) return func_vals @@ -119,7 +112,7 @@ def Proj_Into_SPH_Charts_At_Quad_Pts(func_quad_vals, Proj_Deg, lbdv): # vals for chart B - quad_pt_vals_B = zeros((lbdv.lbdv_quad_pts, 1)) + quad_pt_vals_B = 
np.zeros((lbdv.lbdv_quad_pts, 1)) # rotate func to Chart B Coors for quad_pt in range(lbdv.lbdv_quad_pts): @@ -155,7 +148,8 @@ def Faster_Double_Proj(func_quad_vals, Proj_Deg, lbdv): I_mn += func_quad_vals[quad_pt] * lbdv.Eval_SPH_Basis_Wt_At_Quad_Pts( m, n, quad_pt ) - # Above fn sums basis vals for proj, times func, times weight at each quad pt + # Above fn sums basis vals for proj, times func, + # times weight at each quad pt Proj_mn = I_mn / Mass_Mat_Exact(m, n) @@ -168,10 +162,11 @@ def Faster_Double_Proj(func_quad_vals, Proj_Deg, lbdv): return spherical_harmonics_function(Proj_Coef, Proj_Deg) -# TAKES VALS AT QUAD PTS, to Approximate func(theta, phi)*Coef_Mat(theta, phi) in SPH Basis +# TAKES VALS AT QUAD PTS, to Approximate +# func(theta, phi)*Coef_Mat(theta, phi) in SPH Basis def Faster_Double_Proj_Product(func1_quad_vals, func2_quad_vals, Proj_Deg, lbdv): - Proj_Product_Coef = zeros( + Proj_Product_Coef = np.zeros( [Proj_Deg + 1, Proj_Deg + 1] ) # Size of basis used to represent derivative @@ -190,7 +185,8 @@ def Faster_Double_Proj_Product(func1_quad_vals, func2_quad_vals, Proj_Deg, lbdv) * func2_quad_vals[quad_pt] * lbdv.Eval_SPH_Basis_Wt_At_Quad_Pts(m, n, quad_pt) ) - # Above fn sums basis vals for proj, times func*Coef_Mat, times weight at each quad pt + # Above fn sums basis vals for proj, times func*Coef_Mat, + # times weight at each quad pt Proj_Product_mn = I_mn / Mass_Mat_Exact(m, n) @@ -216,14 +212,6 @@ def Lp_Rel_Error_At_Quad(approx_f_vals, f_vals, lbdv, p): # Assumes f NOT 0 Lp_Err_Pt = abs((approx_f_vals[quad_pt] - f_vals[quad_pt]) ** p) * weight_pt Lp_f_Pt = abs(f_vals[quad_pt] ** p) * weight_pt - """ - if(Lp_Err_Pt < 0): - print("Lp_Err_Pt = "+str(Lp_Err_Pt)+" at pt = "+str(quad_pt)) - print("(approx_f_vals[quad_pt] - f_vals[quad_pt])**p = "+str( (approx_f_vals[quad_pt] - f_vals[quad_pt])**p )) - print("abs((approx_f_vals[quad_pt] - f_vals[quad_pt])**p) = "+str( abs((approx_f_vals[quad_pt] - f_vals[quad_pt])**p) )) - print("weight_pt = "+str(weight_pt)) - print("\n") - """ Lp_Err += Lp_Err_Pt Lp_f += Lp_f_Pt @@ -262,7 +250,7 @@ def Const_SPH_Mode_Of_Func(func, lbdv): func_vals_at_quad_pts = func(theta_quad_pts, phi_quad_pts) Proj_Coef_Const_Mode = sum( - multiply(lbdv.Eval_SPH_Basis_Wt_M_N(0, 0), func_vals_at_quad_pts) + np.multiply(lbdv.Eval_SPH_Basis_Wt_M_N(0, 0), func_vals_at_quad_pts) ) / Mass_Mat_Exact(0, 0) return Proj_Coef_Const_Mode @@ -272,11 +260,11 @@ def Const_SPH_Mode_Of_Func(func, lbdv): def Avg_of_SPH_Proj_of_Func(func_vals_at_quad_pts, lbdv): Proj_Coef_Const_Mode = sum( - multiply(lbdv.Eval_SPH_Basis_Wt_M_N(0, 0), func_vals_at_quad_pts.flatten()) + np.multiply(lbdv.Eval_SPH_Basis_Wt_M_N(0, 0), func_vals_at_quad_pts.flatten()) ) / Mass_Mat_Exact(0, 0) Avg_of_SPH_Proj = ( Proj_Coef_Const_Mode * 1.0 / (2 * np.sqrt(np.pi)) - ) # multiply by height of Y^0_0 + ) # np.multiply by height of Y^0_0 return Avg_of_SPH_Proj @@ -393,7 +381,7 @@ def S2_Integral(f_quad_vals, lbdv): return S2_Int -############################################################################################################################################################# +####################################################################################### class spherical_harmonics_function( @@ -462,37 +450,43 @@ def Eval_SPH_Der_Phi_Phi(self, Theta, Phi): return Sec_Der_SPH_Val - # Should Evalaute Phi Der of Matrix at Quad_Pt (times weight), to be used in quadrature, + # Should Evalaute Phi Der of Matrix at Quad_Pt (times weight), + # to be used in quadrature, def 
Eval_SPH_Der_Phi_Coef(self, Quad_Pt, lbdv): # For Scalar Case, we use usual vectorization: - if isscalar(Quad_Pt): + if np.isscalar(Quad_Pt): return sum( - multiply(self.sph_coef, lbdv.Eval_SPH_Der_Phi_At_Quad_Pt_Mat(Quad_Pt)) + np.multiply( + self.sph_coef, lbdv.Eval_SPH_Der_Phi_At_Quad_Pt_Mat(Quad_Pt) + ) ) - # For multiple quad pts, we use einstein sumation to output vector of solutions at each point: + # For multiple quad pts, we use einstein sumation to + # output vector of solutions at each point: else: - return einsum( + return np.einsum( "ij, ijk -> k", self.sph_coef, lbdv.Eval_SPH_Der_Phi_At_Quad_Pt_Mat(Quad_Pt), ).reshape((len(Quad_Pt), 1)) - # Should Evalaute 2nd Phi Der of Matrix at Quad_Pt (times weight), to be used in quadrature, + # Should Evalaute 2nd Phi Der of Matrix at Quad_Pt (times weight), + # to be used in quadrature, def Eval_SPH_Der_Phi_Phi_Coef(self, Quad_Pt, lbdv): # For Scalar Case, we use usual vectorization: - if isscalar(Quad_Pt): + if np.isscalar(Quad_Pt): return sum( - multiply( + np.multiply( self.sph_coef, lbdv.Eval_SPH_Der_Phi_Phi_At_Quad_Pt_Mat(Quad_Pt) ) ) - # For multiple quad pts, we use einstein sumation to output vector of solutions at each point: + # For multiple quad pts, we use einstein sumation to + # output vector of solutions at each point: else: - return einsum( + return np.einsum( "ij, ijk -> k", self.sph_coef, lbdv.Eval_SPH_Der_Phi_Phi_At_Quad_Pt_Mat(Quad_Pt), @@ -502,21 +496,24 @@ def Eval_SPH_Der_Phi_Phi_Coef(self, Quad_Pt, lbdv): def Eval_SPH_Coef_Mat(self, Quad_Pt, lbdv): # For Scalar Case, we use usual vectorization: - if isscalar(Quad_Pt): + if np.isscalar(Quad_Pt): - return sum(multiply(self.sph_coef, lbdv.Eval_SPH_At_Quad_Pt_Mat(Quad_Pt))) + return sum( + np.multiply(self.sph_coef, lbdv.Eval_SPH_At_Quad_Pt_Mat(Quad_Pt)) + ) - # For multiple quad pts, we use einstein sumation to output vector of solutions at each point: + # For multiple quad pts, we use einstein sumation to + # output vector of solutions at each point: else: - return einsum( + return np.einsum( "ij, ijk -> k", self.sph_coef, lbdv.Eval_SPH_At_Quad_Pt_Mat(Quad_Pt) ).reshape((len(Quad_Pt), 1)) # Use the fact that Theta Der Formula is EXACT for our basis def Quick_Theta_Der(self): - Der_Coef = zeros(shape(self.sph_coef)) + Der_Coef = np.zeros(np.shape(self.sph_coef)) for n_coef in range(self.sph_deg + 1): for m_coef in range(1, n_coef + 1): # Theta Der of Y^0_n = 0 @@ -539,7 +536,7 @@ def Quick_Theta_Bar_Der(self): # For rotated coordinate frame # Approximate func(theta, phi)*Coef_Mat(theta, phi) in SPH Basis def Fast_Proj_Product(self, func, Proj_Deg, lbdv): - Proj_Product_Coef = zeros( + Proj_Product_Coef = np.zeros( [Proj_Deg + 1, Proj_Deg + 1] ) # Size of basis used to represent derivative @@ -558,7 +555,8 @@ def Fast_Proj_Product(self, func, Proj_Deg, lbdv): * self.Eval_SPH_Coef_Mat(quad_pt, lbdv) * lbdv.Eval_SPH_Basis_Wt_At_Quad_Pts(m, n, quad_pt) ) - # Above fn sums basis vals for proj, times func*Coef_Mat, times weight at each quad pt + # Above fn sums basis vals for proj, times func*Coef_Mat, + # times weight at each quad pt Proj_Product_mn = I_mn / Mass_Mat_Exact(m, n) @@ -570,10 +568,11 @@ def Fast_Proj_Product(self, func, Proj_Deg, lbdv): return spherical_harmonics_function(Proj_Product_Coef, Proj_Deg) - # TAKES VALS AT QUAD PTS, to Approximate func(theta, phi)*Coef_Mat(theta, phi) in SPH Basis + # TAKES VALS AT QUAD PTS, to Approximate + # func(theta, phi)*Coef_Mat(theta, phi) in SPH Basis def Faster_Proj_Product(self, func_quad_vals, Proj_Deg, lbdv): - 
Proj_Product_Coef = zeros( + Proj_Product_Coef = np.zeros( [Proj_Deg + 1, Proj_Deg + 1] ) # Size of basis used to represent derivative @@ -592,7 +591,8 @@ def Faster_Proj_Product(self, func_quad_vals, Proj_Deg, lbdv): * self.Eval_SPH_Coef_Mat(quad_pt, lbdv) * lbdv.Eval_SPH_Basis_Wt_At_Quad_Pts(m, n, quad_pt) ) - # Above fn sums basis vals for proj, times func*Coef_Mat, times weight at each quad pt + # Above fn sums basis vals for proj, + # times func*Coef_Mat, times weight at each quad pt Proj_Product_mn = I_mn / Mass_Mat_Exact(m, n) @@ -605,9 +605,9 @@ def Faster_Proj_Product(self, func_quad_vals, Proj_Deg, lbdv): return spherical_harmonics_function(Proj_Product_Coef, Proj_Deg) # Approximate Coef_Mat2(theta, phi)*Coef_Mat(theta, phi) in SPH Basis - def Fast_Proj_Product_SPH(self, spherical_harmonics_function2, Proj_Deg, lbdv): + def Fast_Proj_Product_SPH(self, SPH_Func2, Proj_Deg, lbdv): - Proj_Product_Coef = zeros( + Proj_Product_Coef = np.zeros( [Proj_Deg + 1, Proj_Deg + 1] ) # Size of basis used to represent derivative @@ -618,15 +618,16 @@ def Fast_Proj_Product_SPH(self, spherical_harmonics_function2, Proj_Deg, lbdv): I_mn = 0 for quad_pt in range(lbdv.lbdv_quad_pts): - theta_pt = lbdv.Lbdv_Sph_Pts_Quad[quad_pt][0] - phi_pt = lbdv.Lbdv_Sph_Pts_Quad[quad_pt][1] + # theta_pt = lbdv.Lbdv_Sph_Pts_Quad[quad_pt][0] + # phi_pt = lbdv.Lbdv_Sph_Pts_Quad[quad_pt][1] I_mn += ( SPH_Func2.Eval_SPH_Coef_Mat(quad_pt, lbdv) * self.Eval_SPH_Coef_Mat(quad_pt, lbdv) * lbdv.Eval_SPH_Basis_Wt_At_Quad_Pts(m, n, quad_pt) ) - # Above fn sums basis vals for proj, times func*Coef_Mat, times weight at each quad pt + # Above fn sums basis vals for proj, times func*Coef_Mat, + # times weight at each quad pt Proj_Product_mn = I_mn / Mass_Mat_Exact(m, n) @@ -643,15 +644,20 @@ def Inner_Product_SPH(self, Other_SPH): Vec1 = self.sph_coef.flatten() Vec2 = Other_SPH.sph_coef.flatten() - I_Vec = eye((self.sph_deg + 1) ** 2) + I_Vec = np.eye((self.sph_deg + 1) ** 2) return ( - sum(multiply(abs(Vec1), abs(Vec2))) / 2 - + sum(multiply(multiply(abs(Vec1), I_Vec), multiply(abs(Vec2), I_Vec))) / 2 + sum(np.multiply(abs(Vec1), abs(Vec2))) / 2 + + sum( + np.multiply( + np.multiply(abs(Vec1), I_Vec), np.multiply(abs(Vec2), I_Vec) + ) + ) + / 2 ) def L2_Norm_SPH(self): - return sqrt(self.Inner_Product_SPH(self)) + return np.sqrt(self.Inner_Product_SPH(self)) def Lp_Rel_Error_in_Chart(self, f, lbdv, p): # Assumes f NOT 0 @@ -703,7 +709,7 @@ def Flatten_Coef_Mat(self): coef_mat = self.sph_coef sph_degree = self.sph_deg - coef_vec = zeros(((sph_degree + 1) ** 2, 1)) + coef_vec = np.zeros(((sph_degree + 1) ** 2, 1)) row = 0 for n in range(sph_degree + 1): diff --git a/src/napari_stress/_surface.py b/src/napari_stress/_surface.py index 858d042d..97783b3e 100644 --- a/src/napari_stress/_surface.py +++ b/src/napari_stress/_surface.py @@ -1,8 +1,7 @@ # -*- coding: utf-8 -*- import numpy as np -import napari_process_points_and_surfaces as nppas -from napari.types import LabelsData, SurfaceData, PointsData, VectorsData +from napari.types import SurfaceData, PointsData, VectorsData from napari_stress._utils.frame_by_frame import frame_by_frame from napari_tools_menu import register_function diff --git a/src/napari_stress/_tests/test_approximation.py b/src/napari_stress/_tests/test_approximation.py index d6a1c69c..0d6135be 100644 --- a/src/napari_stress/_tests/test_approximation.py +++ b/src/napari_stress/_tests/test_approximation.py @@ -10,6 +10,8 @@ def test_lsq_ellipsoid(): fitted_points = approximation.expand_points_on_ellipse(ellipsoid, 
pointcloud) + assert fitted_points is not None + def test_lsq_ellipsoid2(): import vedo @@ -71,6 +73,8 @@ def test_ellipse_normals(): normals = approximation.normals_on_ellipsoid(pointcloud) + assert normals is not None + def test_curvature_on_ellipsoid(make_napari_viewer): from napari_stress import ( @@ -89,6 +93,10 @@ def test_curvature_on_ellipsoid(make_napari_viewer): ellipsoid_stress, fitted_points_stress ) + assert data is not None + assert features is not None + assert metadata is not None + viewer = make_napari_viewer() viewer.add_points(fitted_points_stress) viewer.add_vectors(ellipsoid_stress) diff --git a/src/napari_stress/_tests/test_measurements.py b/src/napari_stress/_tests/test_measurements.py index 05bb78c5..a2f19739 100644 --- a/src/napari_stress/_tests/test_measurements.py +++ b/src/napari_stress/_tests/test_measurements.py @@ -360,7 +360,7 @@ def test_comprehenive_stress_toolbox(make_napari_viewer): def test_comprehensive_stress_toolbox_4d(make_napari_viewer): - from napari_stress import get_droplet_point_cloud_4d, measurements + from napari_stress import get_droplet_point_cloud_4d import napari_stress import os @@ -419,7 +419,6 @@ def test_curvature(make_napari_viewer): fit_spherical_harmonics, types, ) - from magicgui import magicgui from napari.layers import Layer # We'll first create a spherical harmonics expansion and a lebedev @@ -433,8 +432,8 @@ def test_curvature(make_napari_viewer): viewer.add_points(expansion[0], **expansion[1]) lebedev_points = perform_lebedev_quadrature(viewer.layers[-1], viewer=viewer) - l = Layer.create(lebedev_points[0], lebedev_points[1], lebedev_points[2]) - viewer.add_layer(l) + lay = Layer.create(lebedev_points[0], lebedev_points[1], lebedev_points[2]) + viewer.add_layer(lay) results_layer = viewer.layers[-1] assert types._METADATAKEY_MANIFOLD in results_layer.metadata @@ -443,10 +442,19 @@ def test_curvature(make_napari_viewer): H, H0_arithmetic, H0_surface = measurements.calculate_mean_curvature_on_manifold( results_layer.metadata[types._METADATAKEY_MANIFOLD] ) + + assert H is not None + assert H0_arithmetic is not None + assert H0_surface is not None + H, H0_arithmetic, H0_surface = measurements.calculate_mean_curvature_on_manifold( results_layer ) + assert H is not None + assert H0_arithmetic is not None + assert H0_surface is not None + assert types._METADATAKEY_H0_ARITHMETIC in results_layer.metadata assert types._METADATAKEY_H0_SURFACE_INTEGRAL in results_layer.metadata assert types._METADATAKEY_MEAN_CURVATURE in results_layer.features @@ -460,10 +468,16 @@ def test_curvature(make_napari_viewer): assert types._METADATAKEY_GAUSS_BONNET_ABS in results_layer.metadata.keys() assert types._METADATAKEY_GAUSS_BONNET_REL in results_layer.metadata.keys() + assert absolute is not None + assert relative is not None + results = measurements.average_mean_curvatures_on_manifold( results_layer.metadata[types._METADATAKEY_MANIFOLD] ) + assert results is not None + results = measurements.average_mean_curvatures_on_manifold(results_layer) + assert results is not None assert types._METADATAKEY_H0_ARITHMETIC in results_layer.metadata assert types._METADATAKEY_H0_SURFACE_INTEGRAL in results_layer.metadata @@ -489,7 +503,6 @@ def test_curvature2(make_napari_viewer): from napari_stress._spherical_harmonics.spherical_harmonics_napari import ( expansion_types, ) - from magicgui import magicgui from napari.layers import Layer # We'll first create a spherical harmonics expansion and a lebedev @@ -506,8 +519,8 @@ def test_curvature2(make_napari_viewer): 
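The test changes above follow one pattern: calls whose results were previously discarded now end in an explicit check, either a bare `assert ... is not None` smoke test or, where the expected output is known, a shape assertion. A generic sketch of the pattern with a stand-in function (not the actual napari-stress API):

```python
import numpy as np

def fit_something(points):
    # stand-in for a measurement that returns an array of the same shape
    return points - points.mean(axis=0)

points = np.random.normal(size=(100, 3))

result = fit_something(points)
assert result is not None                       # smoke test: a value came back
assert np.array_equal(result.shape, (100, 3))   # stricter: pin the expected shape
```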
     viewer.add_points(expansion[0], **expansion[1])
     lebedev_points = perform_lebedev_quadrature(viewer.layers[-1], viewer=viewer)
-    l = Layer.create(lebedev_points[0], lebedev_points[1], lebedev_points[2])
-    viewer.add_layer(l)
+    lay = Layer.create(lebedev_points[0], lebedev_points[1], lebedev_points[2])
+    viewer.add_layer(lay)
 
     results_layer = viewer.layers[-1]
     assert types._METADATAKEY_MANIFOLD in results_layer.metadata
@@ -515,14 +528,22 @@
     H, H0_arithmetic, H0_surface = measurements.calculate_mean_curvature_on_manifold(
         results_layer.metadata[types._METADATAKEY_MANIFOLD]
     )
+    assert H is not None
+    assert H0_arithmetic is not None
+    assert H0_surface is not None
     H, H0_arithmetic, H0_surface = measurements.calculate_mean_curvature_on_manifold(
         results_layer
     )
+    assert H is not None
+    assert H0_arithmetic is not None
+    assert H0_surface is not None
     results = measurements.average_mean_curvatures_on_manifold(
         results_layer.metadata[types._METADATAKEY_MANIFOLD]
     )
+    assert results is not None
     results = measurements.average_mean_curvatures_on_manifold(results_layer)
+    assert results is not None
 
     assert types._METADATAKEY_H0_ARITHMETIC in results_layer.metadata
     assert types._METADATAKEY_H0_SURFACE_INTEGRAL in results_layer.metadata
@@ -548,6 +569,8 @@ def test_curvature3():
     )
     curvature = measurements.curvature_on_ellipsoid(ellipsoid, approximated_pointcloud)
 
+    assert curvature is not None
+
 
 def test_stresses():
     from napari_stress import lebedev_quadrature
@@ -556,7 +579,6 @@ def test_stresses():
         approximation,
         get_droplet_point_cloud,
         create_manifold,
-        types,
     )
     from napari_stress._spherical_harmonics.spherical_harmonics import (
         stress_spherical_harmonics_expansion,
@@ -582,6 +604,7 @@ def test_stresses():
     curvature_ellipsoid = measurements.curvature_on_ellipsoid(
         ellipsoid, ellipsoid_points
     )
+    assert curvature_ellipsoid is not None
 
     _, ellipsoid_coefficients = stress_spherical_harmonics_expansion(
         ellipsoid_points, max_degree=max_degree
@@ -603,15 +626,16 @@
         ellipsoid, H0_ellipsoid, gamma=gamma
     )
 
+    assert ellptical is not None
+    assert cartesian is not None
+
     stress, stress_tissue, stress_cell = measurements.anisotropic_stress(
         H_i, H0, H_i_ellipsoid, H0_ellipsoid, gamma
     )
+    assert stress is not None
+    assert stress_tissue is not None
+    assert stress_cell is not None
+
     measurements.anisotropic_stress(H_i, H0, H_i_ellipsoid, H0_ellipsoid, gamma)
     measurements.maximal_tissue_anisotropy(ellipsoid)
-
-
-if __name__ == "__main__":
-    import napari
-
-    test_comprehensive_stress_toolbox_4d(napari.Viewer)
diff --git a/src/napari_stress/_tests/test_sample_data.py b/src/napari_stress/_tests/test_sample_data.py
index 29674ff2..7c8ed2d6 100644
--- a/src/napari_stress/_tests/test_sample_data.py
+++ b/src/napari_stress/_tests/test_sample_data.py
@@ -3,8 +3,14 @@
 
 def test_dropplet_point_cloud():
     from napari_stress import sample_data
+    import numpy as np
 
     data = sample_data.get_droplet_point_cloud()[0]
     data_4d = sample_data.get_droplet_point_cloud_4d()[0]
     image_data_4d = sample_data.get_droplet_4d()[0]
     ellipsoid = sample_data.make_blurry_ellipsoid()[0]
+
+    assert np.array_equal(data[0].shape, (1024, 4))
+    assert data_4d[0].shape[0] == 26704
+    assert len(image_data_4d[0]) == 21
+    assert np.array_equal(ellipsoid.shape, (64, 64, 64))
diff --git a/src/napari_stress/_tests/test_spherical_harmonic_fit.py b/src/napari_stress/_tests/test_spherical_harmonic_fit.py
index a00048a0..badce9dd 100644
--- a/src/napari_stress/_tests/test_spherical_harmonic_fit.py
+++ b/src/napari_stress/_tests/test_spherical_harmonic_fit.py
@@ -36,6 +36,7 @@ def test_frontend_spherical_harmonics(make_napari_viewer):
     lebedev_points = perform_lebedev_quadrature(points_layer, viewer=viewer)
     results_layer = viewer.layers[-1]
     assert "manifold" in lebedev_points[1]["metadata"]
+    assert "spherical_harmonics_coefficients" in results_layer.metadata
 
 
 def test_front_spherical_harmonics_4d(make_napari_viewer):
@@ -94,7 +95,6 @@ def test_interoperatibility():
     from napari_stress._stress.sph_func_SPB import (
         convert_coeffcients_stress_to_pyshtools,
         convert_coefficients_pyshtools_to_stress,
-        spherical_harmonics_function,
     )
 
     deg = 10
@@ -118,30 +118,6 @@ def test_interoperatibility():
     _coeffs_stress = np.stack([coeffs_str_x, coeffs_str_y, coeffs_str_z])
     assert all((_coeffs_stress - coeffs_stress).flatten() == 0)
 
-    # # Let's check whether these two give the same values when evaluated at the
-    # # same coordinates
-
-    # theta = np.linspace(0, 2*np.pi, 50)
-    # phi = np.linspace(0, 2*np.pi, 100)
-    # THETA, PHI = np.meshgrid(theta, phi)
-    # # Stress:
-    # SH_stress_x = spherical_harmonics_function(_coeffs_stress[0], SPH_Deg=deg)
-    # stress_x = SH_stress_x.Eval_SPH(THETA, PHI)
-
-    # SH_stress_y = spherical_harmonics_function(_coeffs_stress[1], SPH_Deg=deg)
-    # stress_y = SH_stress_y.Eval_SPH(THETA, PHI)
-
-    # SH_stress_z = spherical_harmonics_function(_coeffs_stress[2], SPH_Deg=deg)
-    # stress_z = SH_stress_z.Eval_SPH(THETA, PHI)
-
-    # stress_pts = np.stack([stress_z.flatten(), stress_y.flatten(), stress_x.flatten()]).transpose()
-
-    # # Pyshtools:
-    # pysh_x = coeffs_pysh_x.expand(lon = list(np.rad2deg(THETA.flatten())), lat=list(np.rad2deg(PHI.flatten())))
-    # pysh_y = coeffs_pysh_y.expand(lon = list(np.rad2deg(THETA.flatten())), lat=list(np.rad2deg(PHI.flatten())))
-    # pysh_z = coeffs_pysh_z.expand(lon = list(np.rad2deg(THETA.flatten())), lat=list(np.rad2deg(PHI.flatten())))
-    # pysh_pts = np.stack([pysh_x, pysh_y, pysh_z]).transpose()
-
 
 
 def test_lebedev_points():
@@ -183,3 +159,5 @@
     ]:
         print(" %d : [" % i)
         lf = LebFunc[i]()
+
+        assert lf is not None
diff --git a/src/napari_stress/_tests/test_surface.py b/src/napari_stress/_tests/test_surface.py
index 516228d0..244341ed 100644
--- a/src/napari_stress/_tests/test_surface.py
+++ b/src/napari_stress/_tests/test_surface.py
@@ -10,6 +10,8 @@ def test_reconstruction():
     surface = napari_stress.reconstruct_surface(points)
 
+    assert isinstance(surface, tuple)
+
 
 def test_surface_to_points():
     ellipse = vedo.shapes.Ellipsoid()
@@ -17,19 +19,29 @@ def test_surface_to_points():
     surface = (ellipse.points(), np.asarray(ellipse.faces()))
     points = napari_stress.extract_vertex_points(surface)
 
+    assert isinstance(points, np.ndarray)
+
 
 def test_ellipsoid_points():
     pointcloud = np.random.normal(size=(1000, 3)) * 10 * np.array([1, 2, 3])[None, :]
     ellipse_points = napari_stress.fit_ellipsoid_to_pointcloud_points(
         pointcloud, inside_fraction=0.5
     )
+
+    assert np.array_equal(ellipse_points.shape, (1058, 3))
+
     axis = napari_stress.fit_ellipsoid_to_pointcloud_vectors(
         pointcloud, inside_fraction=0.5
     )
+
+    assert np.array_equal(axis.shape, (3, 2, 3))
+
     axis = napari_stress.fit_ellipsoid_to_pointcloud_vectors(
         pointcloud, inside_fraction=0.5, normalize=True
     )
+    assert np.array_equal(axis.shape, (3, 2, 3))
+
     pointcloud = vedo.shapes.Ellipsoid().points() * 10
     ellipse_points = napari_stress.fit_ellipsoid_to_pointcloud_points(
         pointcloud, inside_fraction=0.5
@@ -42,11 +54,4 @@
     )
     vectors_4d = napari_stress.fit_ellipsoid_to_pointcloud_vectors(pointcloud_4d)
-    # directions_4d = np.zeros_like(pointcloud_4d)
-    # directions_4d[:, 0] = pointcloud_4d[:, 0]
-    # directions_4d[directions_4d[:,0] == 0, 1:] = pointcloud - pointcloud.mean(axis=0)[None, :]
-    # directions_4d[directions_4d[:,0] == 1, 1:] = pointcloud - (pointcloud + 1).mean(axis=0)[None, :]
-
-
-if __name__ == "__main__":
-    test_ellipsoid_points()
+    assert np.array_equal(vectors_4d.shape, (6, 2, 4))
diff --git a/src/napari_stress/_tests/test_types.py b/src/napari_stress/_tests/test_types.py
index 3c5df749..18d0d292 100644
--- a/src/napari_stress/_tests/test_types.py
+++ b/src/napari_stress/_tests/test_types.py
@@ -1,6 +1,5 @@
 # -*- coding: utf-8 -*-
 from napari import layers
-import numpy as np
 
 import magicgui
@@ -37,17 +36,12 @@ def test_function(argument: manifold) -> manifold:
     viewer.add_points(expansion[0], **expansion[1])
     lebedev_points = perform_lebedev_quadrature(viewer.layers[-1], viewer=viewer)
-    l = layers.Layer.create(lebedev_points[0], lebedev_points[1], lebedev_points[2])
-    viewer.add_layer(l)
+    lay = layers.Layer.create(lebedev_points[0], lebedev_points[1], lebedev_points[2])
+    viewer.add_layer(lay)
 
     widget = magicgui.magicgui(test_function)
     viewer.window.add_dock_widget(widget)
 
     results = widget(viewer.layers[-1])
     assert _METADATAKEY_MEAN_CURVATURE in viewer.layers[-1].features.keys()
-
-
-if __name__ == "__main__":
-    import napari
-
-    test_custom_types(napari.Viewer)
+    assert isinstance(results, tuple)
diff --git a/src/napari_stress/_tests/test_utils.py b/src/napari_stress/_tests/test_utils.py
index c5f68cf7..f53b278d 100644
--- a/src/napari_stress/_tests/test_utils.py
+++ b/src/napari_stress/_tests/test_utils.py
@@ -1,9 +1,8 @@
 import numpy as np
 from napari.types import (
-    LayerData,
     PointsData,
-    SurfaceData,
     ImageData,
+    SurfaceData,
     VectorsData,
     LayerDataTuple,
 )
@@ -45,8 +44,10 @@ def test_decorator_points():
     points_array_3d = Converter.list_of_data_to_data([points_list[0]], PointsData)
     points_list_conv = Converter.data_to_list_of_data(points_array_4d, PointsData)
 
+    assert np.array_equal(points_array_3d, points_list[0])
+
     for pts, _pts in zip(points_list, points_list_conv):
-        assert np.array_equal(points_list, points_list_conv)
+        assert np.array_equal(pts, _pts)
@@ -77,6 +78,7 @@ def test_decorator_points_layerdatatuple():
         [list_of_ldtuples[0]], layertype=PointsData
     )
 
+    assert np.array_equal(ldtuple_3d[0][0], list_of_ldtuples[0][0])
     assert "data1" in ldtuple_4d[1]["metadata"].keys()
     assert ldtuple_4d[0][-1, 0] == 9
@@ -110,11 +112,12 @@ def test_decorator_points_layers():
     layer_4d = Layer.create(data=ldt[0], meta=ldt[1], layer_type=ldt[2])
     list_of_layers = Converter.data_to_list_of_data(layer_4d, layertype=Points)
 
+    assert len(list_of_layers) == n_frames
+
 
 def test_decorator_surfaces():
     from napari_stress import TimelapseConverter, frame_by_frame
     from napari_process_points_and_surfaces import sample_points_from_surface
-    from napari.types import SurfaceData
     from vedo import Sphere
 
     Converter = TimelapseConverter()
@@ -124,9 +127,11 @@ def test_decorator_surfaces():
         (Sphere().points() * k, np.asarray(Sphere().faces()), k * np.ones(Sphere().N()))
         for k in np.arange(1.9, 2.1, 0.1)
     ]
+    n_frames = len(surface_list)
 
     surface_array_4d = Converter.list_of_data_to_data(surface_list, SurfaceData)
     surface_array_3d = Converter.list_of_data_to_data([surface_list[0]], SurfaceData)
+    assert np.array_equal(surface_array_3d[0], surface_list[0][0])
     assert len(surface_array_4d) == 3
     surface_list_conv = Converter.data_to_list_of_data(surface_array_4d, SurfaceData)
@@ -135,8 +140,11 @@
         assert np.array_equal(surf[0], _surf[0])
         assert np.array_equal(surf[1], _surf[1])
 
-    points = frame_by_frame(sample_points_from_surface)(surface_array_4d)
-    points = frame_by_frame(sample_points_from_surface)(surface_list[0])
+    points_4d = frame_by_frame(sample_points_from_surface)(surface_array_4d)
+    points_3d = frame_by_frame(sample_points_from_surface)(surface_list[0])
+
+    assert np.array_equal(points_3d, points_4d[points_4d[:, 0] == 0][:, 1:])
+    assert np.array_equal(points_4d.shape, (1010 * n_frames, 4))
 
 
 def test_decorator_images():
@@ -151,6 +159,7 @@
     image_array_4d = Converter.list_of_data_to_data(image_list, ImageData)
     image_array_3d = Converter.list_of_data_to_data([image_list[0]], ImageData)
+    assert np.array_equal(image_array_3d.squeeze(), image_list[0])
     assert image_array_4d.shape[0] == len(image_list)
 
     image_list_conv = Converter.list_of_data_to_data(image_array_4d, ImageData)
@@ -177,8 +186,9 @@ def test_frame_by_frame_vectors():
     vectors_data_4d = Converter.list_of_data_to_data(vectors_list, VectorsData)
     vectors_data_3d = Converter.list_of_data_to_data([vectors_list[0]], VectorsData)
+    assert np.array_equal(vectors_data_3d, vectors_list[0])
     assert np.array_equal(vectors_data_4d, vectors_4d)
 
 
 if __name__ == "__main__":
-    test_decorator_points()
+    test_decorator_surfaces()
diff --git a/src/napari_stress/_utils/__init__.py b/src/napari_stress/_utils/__init__.py
index 82ab3ec8..8439aad1 100644
--- a/src/napari_stress/_utils/__init__.py
+++ b/src/napari_stress/_utils/__init__.py
@@ -3,3 +3,11 @@
 from .import_export_settings import import_settings, export_settings
 from ._aggregate_measurements import compile_data_from_layers
 from .frame_by_frame import TimelapseConverter, frame_by_frame
+
+__all__ = [
+    "import_settings",
+    "export_settings",
+    "compile_data_from_layers",
+    "TimelapseConverter",
+    "frame_by_frame",
+]
diff --git a/src/napari_stress/_utils/_aggregate_measurements.py b/src/napari_stress/_utils/_aggregate_measurements.py
index 7c235a63..da73884d 100644
--- a/src/napari_stress/_utils/_aggregate_measurements.py
+++ b/src/napari_stress/_utils/_aggregate_measurements.py
@@ -156,7 +156,6 @@ def aggregate_singular_values(
     from ..types import (
         _METADATAKEY_STRESS_TOTAL,
         _METADATAKEY_STRESS_TISSUE,
-        _METADATAKEY_STRESS_CELL,
         _METADATAKEY_AUTOCORR_TEMPORAL_TOTAL,
         _METADATAKEY_AUTOCORR_TEMPORAL_CELL,
         _METADATAKEY_AUTOCORR_TEMPORAL_TISSUE,
@@ -306,7 +305,7 @@ def aggregate_extrema_results(
     # Find layer with ALL PAIR EXTREMA data
     for layer in results_stress_analysis:
-        if not "metadata" in layer[1].keys():
+        if "metadata" not in layer[1].keys():
             continue
         if _METADATAKEY_STRESS_CELL_ALL_PAIR_ANISO in layer[1]["metadata"].keys():
             break
diff --git a/src/napari_stress/_utils/coordinate_conversion.py b/src/napari_stress/_utils/coordinate_conversion.py
index 6d6b051d..9ea5310e 100644
--- a/src/napari_stress/_utils/coordinate_conversion.py
+++ b/src/napari_stress/_utils/coordinate_conversion.py
@@ -1,11 +1,6 @@
 # -*- coding: utf-8 -*-
 import numpy as np
 from napari.types import PointsData, VectorsData
-from typing import Tuple
-
-from scipy.spatial.transform import Rotation
-
-import vedo
 
 
 def _center_from_ellipsoid(ellipsoid: VectorsData) -> np.ndarray:
@@ -74,7 +69,7 @@ def polynomial_to_parameters3D(coefficients: np.ndarray):
     R = np.dot(Tofs, np.dot(Amat, Tofs.T))
     R3 = R[0:3, 0:3]
-    R3test = R3 / R3[0, 0]
+    # R3test = R3 / R3[0, 0]
     # print('normed \n',R3test)
     s1 = -R[3, 3]
     R3S = R3 / s1
diff --git a/src/napari_stress/_utils/fit_utils.py b/src/napari_stress/_utils/fit_utils.py
index acbc67e4..c1533c1a 100644
--- a/src/napari_stress/_utils/fit_utils.py
+++ b/src/napari_stress/_utils/fit_utils.py
@@ -1,6 +1,5 @@
 import numpy as np
 import inspect
-from napari.types import PointsData
 
 from .._stress import lebedev_info_SPB as lebedev_info
diff --git a/src/napari_stress/_utils/frame_by_frame.py b/src/napari_stress/_utils/frame_by_frame.py
index 30064aaf..fc4fd0e2 100644
--- a/src/napari_stress/_utils/frame_by_frame.py
+++ b/src/napari_stress/_utils/frame_by_frame.py
@@ -128,7 +128,8 @@ def __init__(self):
             "napari.types.ImageData": self._list_of_images_to_image,
             LabelsData: self._list_of_images_to_image,
             "napari.types.LabelsData": self._list_of_images_to_image,
-            LayerDataTuple: self._list_of_layerdatatuple_to_layerdatatuple,
+            LayerDataTuple: self._list_of_ldtuple_to_layerdatatuple,
+            "napari.types.LayerDataTuple": self._list_of_ldtuple_to_layerdatatuple,
             List[
                 LayerDataTuple
             ]: self._list_of_multiple_ldtuples_to_multiple_ldt_tuples,
@@ -295,14 +296,12 @@ def _list_of_multiple_ldtuples_to_multiple_ldt_tuples(
         for idx, res_type in enumerate(layertypes):
             tuples_to_convert = data[:, idx]
             converted_tuples.append(
-                self._list_of_layerdatatuple_to_layerdatatuple(list(tuples_to_convert))
+                self._list_of_ldtuple_to_layerdatatuple(list(tuples_to_convert))
             )
 
         return converted_tuples
 
-    def _list_of_layerdatatuple_to_layerdatatuple(
-        self, tuple_data: list
-    ) -> LayerDataTuple:
+    def _list_of_ldtuple_to_layerdatatuple(self, tuple_data: list) -> LayerDataTuple:
         """
         Convert a list of 3D layerdatatuple objects to a single 4D LayerDataTuple
         """
diff --git a/src/napari_stress/types.py b/src/napari_stress/types.py
index 4498fd58..e2a2768e 100644
--- a/src/napari_stress/types.py
+++ b/src/napari_stress/types.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 from magicgui.widgets._bases import CategoricalWidget
-from typing import List, NewType
+from typing import List
 
 from napari import layers
 from napari.utils._magicgui import find_viewer_ancestor