From b05437956158845d867032ba87a6b7c80446d6e7 Mon Sep 17 00:00:00 2001 From: Mohamed Nasser Date: Thu, 5 Sep 2024 01:54:33 +0300 Subject: [PATCH] name refactoring, add imports - add imports as osipi dro - refactor function name to be snake case - added save function in utils --- .gitignore | 1 + src/osipi/DRO/DICOM_processing.py | 116 --------- src/osipi/DRO/Display.py | 35 --- src/osipi/DRO/Model.py | 226 ------------------ src/osipi/DRO/__init__.py | 18 ++ ...onc2DROSignal.py => _conc_2_dro_signal.py} | 2 - src/osipi/DRO/_dicom_processing.py | 55 +++++ ...ers_and_noise.py => _filters_and_noise.py} | 0 src/osipi/DRO/_model.py | 103 ++++++++ .../{roi_selection.py => _roi_selection.py} | 8 +- src/osipi/DRO/main.py | 213 +++++++---------- 11 files changed, 269 insertions(+), 508 deletions(-) delete mode 100644 src/osipi/DRO/DICOM_processing.py delete mode 100644 src/osipi/DRO/Display.py delete mode 100644 src/osipi/DRO/Model.py rename src/osipi/DRO/{Conc2DROSignal.py => _conc_2_dro_signal.py} (96%) create mode 100644 src/osipi/DRO/_dicom_processing.py rename src/osipi/DRO/{filters_and_noise.py => _filters_and_noise.py} (100%) create mode 100644 src/osipi/DRO/_model.py rename src/osipi/DRO/{roi_selection.py => _roi_selection.py} (92%) diff --git a/.gitignore b/.gitignore index 86b9bcf..b250319 100644 --- a/.gitignore +++ b/.gitignore @@ -14,3 +14,4 @@ docs-mk/docs/generated src/osipi/DRO/data src/osipi/DRO/__pycache__/ src/osipi/DRO/ROI_saved/ +output diff --git a/src/osipi/DRO/DICOM_processing.py b/src/osipi/DRO/DICOM_processing.py deleted file mode 100644 index e2781d9..0000000 --- a/src/osipi/DRO/DICOM_processing.py +++ /dev/null @@ -1,116 +0,0 @@ -import os - -import numpy as np -import pydicom - -from osipi.DRO.filters_and_noise import median_filter - - -def read_dicom_slices_as_4d_signal(folder_path): - """ - Read a DICOM series from a folder path. - Returns the signal data as a 4D numpy array (x, y, z, t). - """ - slices = {} - for root, _, files in os.walk(folder_path): - for file in files: - if file.endswith(".dcm"): - dicom_file = os.path.join(root, file) - slice = pydicom.read_file(dicom_file) - if slice.SliceLocation not in slices: - slices[slice.SliceLocation] = [] - slices[slice.SliceLocation].append((slice.AcquisitionTime, slice)) - - # Sort each list of slices by the first element (AcquisitionTime) - for slice_location in slices: - slices[slice_location].sort(key=lambda x: x[0]) - - spatial_shape = slices[slice_location][0][1].pixel_array.shape - - data_shape = (spatial_shape[0], spatial_shape[1], len(slices), len(slices[slice_location])) - - signal = np.zeros(data_shape) - - for z, slice_location in enumerate(sorted(slices.keys())): # Sort by slice location - for t, (_, slice) in enumerate( - sorted(slices[slice_location], key=lambda x: x[0]) - ): # Sort by acquisition time - signal[:, :, z, t] = slice.pixel_array - - return signal, slices, slices[slice_location][0][1] - - -def SignalEnhancementExtract(S, datashape, baselinepoints): - # Take baseline average - S0 = np.average(S[:, :, :, 0:baselinepoints], axis=3) # Take baseline signal - E = np.zeros_like(S) - - # Calcualte siganl enhancement - for i in range(0, datashape[-1]): - E[:, :, :, i] = S[:, :, :, i] - S0 - E[:, :, :, i] = median_filter(E[:, :, :, i]) # Median filter size (3,3) - - return E, S0, S - - -def calculate_baseline(signal, baseline): - """ - Calculate the baseline signal (S0) from pre-contrast time points. - - Parameters: - signal (numpy.ndarray): The 4D signal data (x, y, z, t). 
- baseline (int): Number of time points before contrast injection. - - Returns: - numpy.ndarray: The baseline signal (S0). - """ - S0 = np.average(signal[:, :, :, :baseline], axis=3, keepdims=True) - return S0 - - -def signal_to_R1(signal, S0, TR): - """ - Convert signal to R1 values using the Ernst equation. - - Parameters: - signal (numpy.ndarray): The 4D signal data (x, y, z, t). - S0 (numpy.ndarray): The baseline signal (S0). - TR (float): Repetition time. - - Returns: - numpy.ndarray: The R1 values. - """ - epsilon = 1e-8 # Small constant to avoid division by zero and log of zero - R1 = -1 / TR * np.log((signal + epsilon) / (S0 + epsilon)) - return R1 - - -def calc_concentration(R1, R10, r1): - """ - Calculate the concentration of the contrast agent in tissue. - - Parameters: - R1 (numpy.ndarray): The R1 values. - R10 (numpy.ndarray): The pre-contrast R1 values. - r1 (float): Relaxivity of the contrast agent. - - Returns: - numpy.ndarray: The concentration of the contrast agent in the tissue. - """ - Ctiss = (R1 - R10) / r1 - return Ctiss - - -def signal_enhancement(signal, S0, R10, r1): - """ - Calculate the signal enhancement. - - Parameters: - signal (numpy.ndarray): The 4D signal data (x, y, z, t). - other parameters same as previous function - - Returns: - numpy.ndarray: The signal enhancement. - """ - E = (R10 / r1) * (signal - S0) / S0 - return E diff --git a/src/osipi/DRO/Display.py b/src/osipi/DRO/Display.py deleted file mode 100644 index be815af..0000000 --- a/src/osipi/DRO/Display.py +++ /dev/null @@ -1,35 +0,0 @@ -from matplotlib import pyplot as plt -from matplotlib.animation import FuncAnimation - - -def animate_mri(slices, mode="time", slice_index=0, time_index=0): - fig, ax = plt.subplots() - if mode == "time": - frames = slices.shape[-1] - - def init(): - ax.imshow(slices[:, :, slice_index, 0], cmap="gray") - ax.set_title(f"Slice: {slice_index}, Time: 0") - - def animate(t): - ax.clear() - ax.imshow(slices[:, :, slice_index, t], cmap="gray") - ax.set_title(f"Slice: {slice_index}, Time: {t}") - - elif mode == "slice": - frames = slices.shape[2] - - def init(): - ax.imshow(slices[:, :, 0, time_index], cmap="gray") - ax.set_title(f"Slice: 0, Time: {time_index}") - - def animate(z): - ax.clear() - ax.imshow(slices[:, :, z, time_index], cmap="gray") - ax.set_title(f"Slice: {z}, Time: {time_index}") - - anim = FuncAnimation( - fig=fig, func=animate, frames=frames, init_func=init, interval=100, blit=False - ) - plt.show() - return anim diff --git a/src/osipi/DRO/Model.py b/src/osipi/DRO/Model.py deleted file mode 100644 index 96cdb8e..0000000 --- a/src/osipi/DRO/Model.py +++ /dev/null @@ -1,226 +0,0 @@ -import multiprocessing as mp - -import numpy as np -from scipy.integrate import cumtrapz, trapz -from scipy.optimize import curve_fit - -import osipi - - -def modifiedToftsMurase(Cp, Ctiss, dt, datashape): - # Fit Modified Tofts (Linear from Murase, 2004) - # Cp = Ea/0.45, Ctis=E/0.45 - # Matrix equation C=AB (same notation as Murase) - # C: matrix of Ctis at distinct time steps - # A: 3 Coumns, rows of tk: - # (1) Integral up to tk of Cp - # (2) - Integral up to tk of Ctiss - # (3) Cp at tk - # B: Array length 3 of parameters: - # (1) K1 + k2 dot Vp - # (2) k2 - # (3) Vp - # Use np.linalg.solve for equations form Zx=y aimed to find x - # np.linalg.solve(Z,y)=x so need to use np.linalg.solve(A,C) - # solve only works for square matrices so use .lstsq for a least squares solve - # Allocate parameter holding arrays - - K1 = np.zeros(Ctiss.shape[:-1]) # only spatial maps - k2 = 
np.zeros(Ctiss.shape[:-1]) # only spatial maps - Vp = np.zeros(Ctiss.shape[:-1]) # only spatial maps - - # Allocate matrices used from solver as defined above - C = np.zeros(datashape[-1]) - A = np.zeros((datashape[-1], 3)) - - # iterate over slices - for k in range(0, datashape[2]): - # iterate over rows - for j in range(0, datashape[0]): - # iterate over columns - for i in range(0, datashape[1]): - # Build matrices for Modified Tofts for voxel - C = Ctiss[j, i, k, :] - A[:, 0] = cumtrapz(Cp, dx=dt, initial=0) - A[:, 1] = -cumtrapz(Ctiss[j, i, k, :], dx=dt, initial=0) - A[:, 2] = Cp - # Use least squares solver - sing_B1, sing_k2, sing_Vp = np.linalg.lstsq(A, C, rcond=None)[0] - sing_K1 = sing_B1 - (sing_k2 * sing_Vp) - # Assign Ouputs into parameter maps - K1[j, i, k] = sing_K1 - k2[j, i, k] = sing_k2 - Vp[j, i, k] = sing_Vp - - return K1, k2, Vp - - -def modifiedToftsMurase1Vox(Cp, Ctiss, dt, datashape): - K1 = np.zeros(Ctiss.shape[:-1]) # only spatial maps - k2 = np.zeros(Ctiss.shape[:-1]) # only spatial maps - Vp = np.zeros(Ctiss.shape[:-1]) # only spatial maps - - # Allocate matrices used from solver as defined above - C = np.zeros(datashape[-1]) - A = np.zeros((datashape[-1], 3)) - - # Build matrices for Modified Tofts for voxel - C = Ctiss - A[:, 0] = cumtrapz(Cp, dx=dt, initial=0) - A[:, 1] = -cumtrapz(Ctiss, dx=dt, initial=0) - A[:, 2] = Cp - # Use least squares solver - B1, k2, Vp = np.linalg.lstsq(A, C, rcond=None)[0] - K1 = B1 - (k2 * Vp) - - return K1, k2, Vp - - -def fit_single_voxel_extended_tofts(ct, ca, time): - def fit_func_ET(t, kt, ve, vp): - return osipi.extended_tofts(t, ca, kt, ve, vp) - - ini = [0, 0, 0] - popt, pcov = curve_fit(fit_func_ET, time, ct, p0=ini) - return popt - - -def fit_single_voxel_tofts(ct, ca, time): - def fit_func_T(t, kt, ve): - return osipi.tofts(t, ca, kt, ve) - - ini = [0, 0] - popt, pcov = curve_fit(fit_func_T, time, ct, p0=ini) - return popt - - -def process_voxel(j, i, k, c_tiss, ca, t, type="ET"): - ct = c_tiss[j, i, k, :] - if type == "ET": - popt = fit_single_voxel_extended_tofts(ct, ca, t) - elif type == "T": - popt = fit_single_voxel_tofts(ct, ca, t) - return j, i, k, popt - - -def extended_tofts_model(ca, c_tiss, t): - ktrans = np.zeros(c_tiss.shape[:-1]) - ve = np.zeros(c_tiss.shape[:-1]) - vp = np.zeros(c_tiss.shape[:-1]) - - tasks = [ - (j, i, k, c_tiss, ca, t) - for k in range(c_tiss.shape[2]) - for j in range(c_tiss.shape[0]) - for i in range(c_tiss.shape[1]) - ] - - with mp.Pool(processes=mp.cpu_count()) as pool: - results = pool.starmap(process_voxel, tasks) - - for j, i, k, popt in results: - ktrans[j, i, k], ve[j, i, k], vp[j, i, k] = popt - - return ktrans, ve, vp - - -def extended_tofts_model_1vox(ca, c_tiss, t): - """ - Extended Tofts model for DCE-MRI DRO - ca -- arterial input function - c_tiss -- 1D array of tissue concentration data (time) - dt -- time interval between samples - """ - - ct = c_tiss[:] - popt = fit_single_voxel_extended_tofts(ct, ca, t) - - return popt - - -def forward_extended_tofts(K1, Ve, Vp, Ca, time): - x, y, z = K1.shape - t = Ca.shape[0] - c_tiss = np.zeros((y, x, z, t)) - - for k in range(0, K1.shape[2]): - for j in range(0, K1.shape[0]): - for i in range(0, K1.shape[1]): - c_tiss[i, j, k, :] = osipi.extended_tofts( - time, Ca, K1[i, j, k], Ve[i, j, k], Vp[i, j, k] - ) - - return c_tiss - - -def tofts_model(ca, c_tiss, t): - ktrans = np.zeros(c_tiss.shape[:-1]) - ve = np.zeros(c_tiss.shape[:-1]) - - tasks = [ - (j, i, k, c_tiss, ca, t, "T") - for k in range(c_tiss.shape[2]) - for j in 
range(c_tiss.shape[0])
-        for i in range(c_tiss.shape[1])
-    ]
-
-    with mp.Pool(processes=mp.cpu_count()) as pool:
-        results = pool.starmap(process_voxel, tasks)
-
-    for j, i, k, popt in results:
-        ktrans[j, i, k], ve[j, i, k] = popt
-
-    return ktrans, ve
-
-
-def ForwardsModTofts(K1, k2, Vp, Cp, dt):
-    # To be carried out as matmul C=BA
-    # Where C is the output Ctiss and B the parameters
-    # With A a matrix of cumulative integrals
-
-    x, y, z = K1.shape
-    t = Cp.shape[0]
-
-    Ctiss = np.zeros((y, x, z, t))
-
-    b1 = K1 + np.multiply(k2, Vp)  # define combined parameter
-    B = np.zeros((x, y, z, 1, 3))
-    A = np.zeros((x, y, z, 3, 1))
-
-    B[:, :, :, 0, 0] = b1
-    B[:, :, :, 0, 1] = -k2
-    B[:, :, :, 0, 2] = Vp
-
-    for tk in range(1, t):
-        A[:, :, :, 0, 0] = trapz(Cp[0 : tk + 1], dx=dt)
-        A[:, :, :, 1, 0] = trapz(Ctiss[:, :, :, 0 : tk + 1], dx=dt)
-        A[:, :, :, 2, 0] = Cp[tk]
-
-        Ctiss[:, :, :, tk] = np.matmul(B, A).squeeze()
-    return Ctiss
-
-
-def ForwardsModTofts_1vox(K1, k2, Vp, Cp, dt):
-    # To be carried out as matmul C=BA
-    # Where C is the output Ctiss and B the parameters
-    # With A a matrix of cumulative integrals
-
-    t = Cp.shape[0]
-
-    Ctiss = np.zeros(t)
-
-    b1 = K1 + k2 * Vp  # define combined parameter
-    B = np.zeros((1, 3))
-    A = np.zeros((3, 1))
-
-    B[0][0] = b1
-    B[0][1] = -k2
-    B[0][2] = Vp
-
-    for tk in range(1, t):
-        A[0][0] = trapz(Cp[0 : tk + 1], dx=dt)
-        A[1][0] = trapz(Ctiss[0 : tk + 1], dx=dt)
-        A[2][0] = Cp[tk]
-
-        Ctiss[tk] = np.matmul(B, A).squeeze()
-    return Ctiss
diff --git a/src/osipi/DRO/__init__.py b/src/osipi/DRO/__init__.py
index e69de29..59bfd88 100644
--- a/src/osipi/DRO/__init__.py
+++ b/src/osipi/DRO/__init__.py
@@ -0,0 +1,18 @@
+from ._model import (extended_tofts_model,
+                     tofts_model,
+                     extended_tofts_model_1vox,
+                     forward_extended_tofts)
+
+from ._dicom_processing import (read_dicom_slices_as_4d_signal,
+                                signal_enhancement_extract)
+
+from ._roi_selection import (ic_from_roi, roi)
+
+from ._utils import (animate_mri, save_dicoms)
+from ._filters_and_noise import median_filter
+
+from ._conc_2_dro_signal import (Conc2Sig_Linear,
+                                 calcR1_R2,
+                                 STDmap,
+                                 createR10_withref,
+                                 addnoise)
diff --git a/src/osipi/DRO/Conc2DROSignal.py b/src/osipi/DRO/_conc_2_dro_signal.py
similarity index 96%
rename from src/osipi/DRO/Conc2DROSignal.py
rename to src/osipi/DRO/_conc_2_dro_signal.py
index ad17259..5a9cdd1 100644
--- a/src/osipi/DRO/Conc2DROSignal.py
+++ b/src/osipi/DRO/_conc_2_dro_signal.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
 """
 @author: eveshalom
 """
diff --git a/src/osipi/DRO/_dicom_processing.py b/src/osipi/DRO/_dicom_processing.py
new file mode 100644
index 0000000..8ba2b10
--- /dev/null
+++ b/src/osipi/DRO/_dicom_processing.py
@@ -0,0 +1,55 @@
+import os
+
+import numpy as np
+import pydicom
+
+from osipi.DRO._filters_and_noise import median_filter
+
+
+def read_dicom_slices_as_4d_signal(folder_path):
+    """
+    Read a DICOM series from a folder path.
+    Returns the signal data as a 4D numpy array (x, y, z, t).
+    """
+    slices = {}
+    for root, _, files in os.walk(folder_path):
+        for file in files:
+            if file.endswith(".dcm"):
+                dicom_file = os.path.join(root, file)
+                slice = pydicom.dcmread(dicom_file)
+                if slice.SliceLocation not in slices:
+                    slices[slice.SliceLocation] = []
+                slices[slice.SliceLocation].append((slice.AcquisitionTime, slice))
+
+    # Sort each list of slices by the first element (AcquisitionTime)
+    for slice_location in slices:
+        slices[slice_location].sort(key=lambda x: x[0])
+
+    spatial_shape = slices[slice_location][0][1].pixel_array.shape
+
+    data_shape = (spatial_shape[0], spatial_shape[1], len(slices), len(slices[slice_location]))
+
+    signal = np.zeros(data_shape)
+
+    for z, slice_location in enumerate(sorted(slices.keys())):  # Sort by slice location
+        slices[z] = slices.pop(slice_location)
+        for t, (_, slice) in enumerate(
+            sorted(slices[z], key=lambda x: x[0])
+        ):  # Sort by acquisition time
+            signal[:, :, z, t] = slice.pixel_array
+            slices[z][t] = slice
+
+    return signal, slices, slices[0][0]
+
+
+def signal_enhancement_extract(S, datashape, baselinepoints):
+    # Take baseline average
+    S0 = np.average(S[:, :, :, 0:baselinepoints], axis=3)  # Take baseline signal
+    E = np.zeros_like(S)
+
+    # Calculate signal enhancement
+    for i in range(0, datashape[-1]):
+        E[:, :, :, i] = S[:, :, :, i] - S0
+        E[:, :, :, i] = median_filter(E[:, :, :, i])  # Median filter size (3,3)
+
+    return E, S0, S
diff --git a/src/osipi/DRO/filters_and_noise.py b/src/osipi/DRO/_filters_and_noise.py
similarity index 100%
rename from src/osipi/DRO/filters_and_noise.py
rename to src/osipi/DRO/_filters_and_noise.py
diff --git a/src/osipi/DRO/_model.py b/src/osipi/DRO/_model.py
new file mode 100644
index 0000000..d6781dc
--- /dev/null
+++ b/src/osipi/DRO/_model.py
@@ -0,0 +1,103 @@
+import multiprocessing as mp
+
+import numpy as np
+from scipy.optimize import curve_fit
+
+import osipi
+
+
+def fit_single_voxel_extended_tofts(ct, ca, time):
+    def fit_func_ET(t, kt, ve, vp):
+        return osipi.extended_tofts(t, ca, kt, ve, vp)
+
+    ini = [0, 0, 0]
+    popt, pcov = curve_fit(fit_func_ET, time, ct, p0=ini)
+    return popt
+
+
+def fit_single_voxel_tofts(ct, ca, time):
+    def fit_func_T(t, kt, ve):
+        return osipi.tofts(t, ca, kt, ve)
+
+    ini = [0, 0]
+    popt, pcov = curve_fit(fit_func_T, time, ct, p0=ini)
+    return popt
+
+
+def process_voxel(j, i, k, c_tiss, ca, t, type="ET"):
+    ct = c_tiss[j, i, k, :]
+    if type == "ET":
+        popt = fit_single_voxel_extended_tofts(ct, ca, t)
+    elif type == "T":
+        popt = fit_single_voxel_tofts(ct, ca, t)
+    return j, i, k, popt
+
+
+def extended_tofts_model(ca, c_tiss, t):
+    ktrans = np.zeros(c_tiss.shape[:-1])
+    ve = np.zeros(c_tiss.shape[:-1])
+    vp = np.zeros(c_tiss.shape[:-1])
+
+    tasks = [
+        (j, i, k, c_tiss, ca, t)
+        for k in range(c_tiss.shape[2])
+        for j in range(c_tiss.shape[0])
+        for i in range(c_tiss.shape[1])
+    ]
+
+    with mp.Pool(processes=mp.cpu_count()) as pool:
+        results = pool.starmap(process_voxel, tasks)
+
+    for j, i, k, popt in results:
+        ktrans[j, i, k], ve[j, i, k], vp[j, i, k] = popt
+
+    return ktrans, ve, vp
+
+
+def extended_tofts_model_1vox(ca, c_tiss, t):
+    """
+    Extended Tofts model for DCE-MRI DRO
+    ca -- arterial input function
+    c_tiss -- 1D array of tissue concentration data (time)
+    t -- array of time points
+    """
+
+    ct = c_tiss[:]
+    popt = fit_single_voxel_extended_tofts(ct, ca, t)
+
+    return popt
+
+
+def forward_extended_tofts(K1, Ve, Vp, Ca, time):
+    x, y, z = K1.shape
+    t = Ca.shape[0]
+    c_tiss = np.zeros((y, x, z, t))
+
+    for k in range(0, 
K1.shape[2]): + for j in range(0, K1.shape[0]): + for i in range(0, K1.shape[1]): + c_tiss[i, j, k, :] = osipi.extended_tofts( + time, Ca, K1[i, j, k], Ve[i, j, k], Vp[i, j, k] + ) + + return c_tiss + + +def tofts_model(ca, c_tiss, t): + ktrans = np.zeros(c_tiss.shape[:-1]) + ve = np.zeros(c_tiss.shape[:-1]) + + tasks = [ + (j, i, k, c_tiss, ca, t, "T") + for k in range(c_tiss.shape[2]) + for j in range(c_tiss.shape[0]) + for i in range(c_tiss.shape[1]) + ] + + with mp.Pool(processes=mp.cpu_count()) as pool: + results = pool.starmap(process_voxel, tasks) + + for j, i, k, popt in results: + ktrans[j, i, k], ve[j, i, k] = popt + + return ktrans, ve diff --git a/src/osipi/DRO/roi_selection.py b/src/osipi/DRO/_roi_selection.py similarity index 92% rename from src/osipi/DRO/roi_selection.py rename to src/osipi/DRO/_roi_selection.py index 2709f7c..32018fd 100644 --- a/src/osipi/DRO/roi_selection.py +++ b/src/osipi/DRO/_roi_selection.py @@ -52,9 +52,9 @@ def onselect(verts): return mask4d, roivox, lasso -def ICfromROI(E, mask, roivox, numaxis): - Eroi = ( +def ic_from_roi(E, mask, roi_vox, numaxis): + E_roi = ( np.sum(mask * E, axis=tuple(range(0, numaxis))) - ) / roivox # calculates average roi signal enhancement + ) / roi_vox # calculates average roi signal enhancement - return Eroi + return E_roi diff --git a/src/osipi/DRO/main.py b/src/osipi/DRO/main.py index 7cad3ba..3d84ee0 100644 --- a/src/osipi/DRO/main.py +++ b/src/osipi/DRO/main.py @@ -1,23 +1,14 @@ import numpy as np -from DICOM_processing import ( - SignalEnhancementExtract, - read_dicom_slices_as_4d_signal, -) -from Display import animate_mri -from matplotlib import pyplot as plt -from roi_selection import ICfromROI - -import osipi -from osipi.DRO.Conc2DROSignal import Conc2Sig_Linear, STDmap, addnoise, calcR1_R2, createR10_withref -from osipi.DRO.filters_and_noise import median_filter -from osipi.DRO.Model import ( - extended_tofts_model, - extended_tofts_model_1vox, - forward_extended_tofts, -) +import pydicom + +import osipi.DRO as dro if __name__ == "__main__": - signal, slices, dicom_ref = read_dicom_slices_as_4d_signal( + uidprefix = "1.3.6.1.4.1.9328.50.16." 
+ study_instance_uid = pydicom.uid.generate_uid(prefix=uidprefix) + dro_IDnum = "9215224289" + + signal, slices, dicom_ref = dro.read_dicom_slices_as_4d_signal( "data/subject2/12.000000-perfusion-17557" ) # shape of the mri data @@ -25,7 +16,7 @@ # extract the signal enhancement # E is the signal enhancement, S0 is the baseline signal, signal is the original signal - E, S0, signal = SignalEnhancementExtract(signal, data_shape, 5) + E, S0, signal = dro.signal_enhancement_extract(signal, data_shape, 5) dt = 4.8 / 60 # mins from the RIDER website DCE description t = np.linspace(0, dt * signal.shape[-1], signal.shape[-1]) # time series points @@ -42,23 +33,22 @@ z = 5 - Ea = ICfromROI(E, aifmask, roivoxa, aifmask.ndim - 1) - Ev = ICfromROI(E, sagmask, roivoxv, sagmask.ndim - 1) - S0ref = ICfromROI(S0[:, :, z], sagmask[:, :, z, 0], roivoxv, sagmask.ndim - 2) + Ea = dro.ic_from_roi(E, aifmask, roivoxa, aifmask.ndim - 1) + Ev = dro.ic_from_roi(E, sagmask, roivoxv, sagmask.ndim - 1) + S0ref = dro.ic_from_roi(S0[:, :, z], sagmask[:, :, z, 0], roivoxv, sagmask.ndim - 2) # choose a voxel with maximum enhancement to display the fitting process max_index = np.unravel_index(np.argmax(E * aifmask, axis=None), E.shape) - print(max_index) # hematocrit value for partial volume correction hct = 0.45 # fitting using extended tofts model for first 8 slices # choose only first 8 slices due to memory constrains - kt, ve, vp = extended_tofts_model(Ea, E[:, :, :8, :], t) + kt, ve, vp = dro.extended_tofts_model(Ea, E[:, :, :8, :], t) # A partial volume correction was applied using the sagittal sinus signal. - pvc_K1, pvc_Ve, pvc_Vp = extended_tofts_model_1vox(Ea, Ev, t) + pvc_K1, pvc_Ve, pvc_Vp = dro.extended_tofts_model_1vox(Ea, Ev, t) pvc = abs( (1 - hct) / pvc_Vp ) # sagittal sinus Vp should be 0.55, so calc correction factor if not @@ -69,9 +59,9 @@ cor_Vp = vp * pvc # Apply Median Filter to parameters all with footprint (3,3) - mf_Kt = median_filter(cor_Kt) - mf_Ve = median_filter(cor_Ve) - mf_Vp = median_filter(cor_Vp) + mf_Kt = dro.median_filter(cor_Kt) + mf_Ve = dro.median_filter(cor_Ve) + mf_Vp = dro.median_filter(cor_Vp) # Apply thresholds to remove negative values # limit volume fraction to max value of 1 @@ -91,25 +81,13 @@ # ) / aif_cor # calculate Cp using signal enhancement aif and concentration conversion factor # calculate the concentration in the tissue using the fitted parameters - c_tissue = forward_extended_tofts(mf_Kt, mf_Ve, mf_Vp, (Ea / aif_cor), t) + c_tissue = dro.forward_extended_tofts(mf_Kt, mf_Ve, mf_Vp, (Ea / aif_cor), t) # Choosing a specific voxel to plot Concentration curves and the fitting process kt_vox1 = mf_Kt[96, 118, 5] ve_vox1 = mf_Ve[96, 118, 5] vp_vox1 = mf_Vp[96, 118, 5] - # calculate the fitted concentration to compare with the real one - c_tissue_tofts = osipi.extended_tofts(t, Ea, kt_vox1, ve_vox1, vp_vox1) - - plt.figure(figsize=(10, 6)) - plt.plot(t, c_tissue_tofts, label="ct_Tofts") - plt.plot(t, E[96, 118, 5, :], label="Ct_raw") - plt.xlabel("Time") - plt.ylabel("Concentration") - plt.title("ct_raw vs model") - plt.legend() - plt.show() - # calculate the relaxation rates R1 and R2* for the tissue r1 = 3.9 # longitudinal relaxivity Gd-DTPA (Hz/mM) source: (Pintaske,2006) @@ -126,96 +104,81 @@ R20st_value = ( 17.24 # precontrast T1 relaxation rate (Hz) brain avg using T2* from (Siemonsen, 2008) ) - R10 = createR10_withref( + R10 = dro.createR10_withref( S0ref, S0[:, :, :8], Tr, fa, T1b, data_shape ) # precontrast R1 map (normalised to sagittal sinus) - R1, R2st = 
calcR1_R2(R10, R20st_value, r1, r2st, c_tissue) # returns R10 and R2st maps + R1, R2st = dro.calcR1_R2(R10, R20st_value, r1, r2st, c_tissue) # returns R10 and R2st maps # calculate the signal using the concentration and relaxation rates # spoiled gradient echo sequence - dro_S, M = Conc2Sig_Linear(Tr, Te, fa, R1, R2st, signal[:, :, :8, :], 1, 0) + dro_s, M = dro.Conc2Sig_Linear(Tr, Te, fa, R1, R2st, signal[:, :, :8, :], 1, 0) - stdS = STDmap(signal[:, :, :8, :], t0=5) # caluclate Standard deviation for original data + stdS = dro.STDmap(signal[:, :, :8, :], t0=5) # caluclate Standard deviation for original data # add noise to the signal - dro_Snoise = addnoise(1, stdS, dro_S, Nt=data_shape[-1]) - animate_mri(dro_Snoise, mode="time", slice_index=7, time_index=5) - - # - # trans_K1 = mf_K1.copy() - # trans_k2 = mf_k2.copy() - # trans_Vp = mf_Vp.copy() - # - # vmax_K1, vmax_k2, vmax_Vp = 1, 1, 0.2 - # vmin_K1, vmin_k2, vmin_Vp = 0.2, 0.2, 0.01 - # lb_K1, lb_k2, lb_Vp = 0.54, 0.52, 0.49 - # ub_K1, ub_k2, ub_Vp = 1.52, 1.5, 1.43 - # lim_K1, lim_k2, lim_Vp = vmax_K1 + 0.5, vmax_k2 + 0.1, vmax_Vp + 0.5 - # ub_lim = 1.01 - # - # trans_K1[trans_K1 <= vmin_K1] = trans_K1[trans_K1 <= vmin_K1] * lb_K1 - # trans_K1[trans_K1 >= lim_K1] = trans_K1[trans_K1 >= lim_K1] * ub_lim - # trans_K1[(trans_K1 >= vmax_K1) & (trans_K1 < lim_K1)] = ( - # trans_K1[(trans_K1 >= vmax_K1) & (trans_K1 < lim_K1)] * ub_K1 - # ) - # trans_K1[(trans_K1 > vmin_K1) & (trans_K1 < vmax_K1)] = trans_K1[ - # (trans_K1 > vmin_K1) & (trans_K1 < vmax_K1) - # ] * ( - # lb_K1 - # + ( - # ( - # (trans_K1[(trans_K1 > vmin_K1) & (trans_K1 < vmax_K1)] - vmin_K1) - # / (vmax_K1 - vmin_K1) - # ) - # * (ub_K1 - lb_K1) - # ) - # ) - # - # trans_k2[trans_k2 <= vmin_k2] = trans_k2[trans_k2 <= vmin_k2] * lb_k2 - # trans_k2[trans_k2 >= lim_k2] = trans_k2[trans_k2 >= lim_k2] * ub_lim - # trans_k2[(trans_k2 >= vmax_k2) & (trans_k2 < lim_k2)] = ( - # trans_k2[(trans_k2 >= vmax_k2) & (trans_k2 < lim_k2)] * ub_k2 - # ) - # trans_k2[(trans_k2 > vmin_k2) & (trans_k2 < vmax_k2)] = trans_k2[ - # (trans_k2 > vmin_k2) & (trans_k2 < vmax_k2) - # ] * ( - # lb_k2 - # + ( - # ( - # (trans_k2[(trans_k2 > vmin_k2) & (trans_k2 < vmax_k2)] - vmin_k2) - # / (vmax_k2 - vmin_k2) - # ) - # * (ub_k2 - lb_k2) - # ) - # ) - # - # trans_Vp[trans_Vp <= vmin_Vp] = trans_Vp[trans_Vp <= vmin_Vp] * lb_Vp - # trans_Vp[(trans_Vp >= lim_Vp)] = trans_Vp[trans_Vp >= lim_Vp] * ub_lim - # trans_Vp[(trans_Vp >= vmax_Vp) & (trans_Vp < lim_Vp)] = ( - # trans_Vp[(trans_Vp >= vmax_Vp) & (trans_Vp < lim_Vp)] * ub_Vp - # ) - # trans_Vp[(trans_Vp > vmin_Vp) & (trans_Vp < vmax_Vp)] = trans_Vp[ - # (trans_Vp > vmin_Vp) & (trans_Vp < vmax_Vp) - # ] * ( - # lb_Vp - # + ( - # ( - # (trans_Vp[(trans_Vp > vmin_Vp) & (trans_Vp < vmax_Vp)] - vmin_Vp) - # / (vmax_Vp - vmin_Vp) - # ) - # * (ub_Vp - lb_Vp) - # ) - # ) - # - # trans_Vp[trans_Vp > 1] = 1 - # - # Ctiss_tr = forward_extended_tofts(trans_K1, trans_k2, trans_Vp, Ea, t) - # - # R1_tr, R2st_tr = calcR1_R2(R10, R20st_value, r1, r2st, Ctiss_tr) - # dro_S_tr, M_tr = Conc2Sig_Linear(Tr, Te, fa, R1_tr, R2st_tr, signal[:, :, :8, :], 1, M) - # - # dro_Snoise_tr = addnoise(1, stdS, dro_S_tr, Nt=data_shape[-1]) - # - # animate_mri(signal, mode="time", slice_index=7, time_index=5) - # animate_mri(dro_Snoise_tr, mode="time", slice_index=7, time_index=5) - # animate_mri(dro_Snoise, mode="time", slice_index=7, time_index=5) + dro_s_noise = dro.addnoise(1, stdS, dro_s, Nt=data_shape[-1]) + + trans_Kt = mf_Kt.copy() + trans_Ve = mf_Ve.copy() + trans_Vp = 
mf_Vp.copy() + + vmax_Kt, vmax_ke, vmax_Vp = 1, 1, 0.2 + vmin_Kt, vmin_ke, vmin_Vp = 0.2, 0.2, 0.01 + lb_Kt, lb_ke, lb_Vp = 0.54, 0.52, 0.49 + ub_Kt, ub_ke, ub_Vp = 1.52, 1.5, 1.43 + lim_Kt, lim_ke, lim_Vp = vmax_Kt + 0.5, vmax_ke + 0.1, vmax_Vp + 0.5 + ub_lim = 1.01 + + trans_Kt[trans_Kt <= vmin_Kt] = trans_Kt[trans_Kt <= vmin_Kt] * lb_Kt + trans_Kt[trans_Kt >= lim_Kt] = trans_Kt[trans_Kt >= lim_Kt] * ub_lim + trans_Kt[(trans_Kt >= vmax_Kt) & (trans_Kt < lim_Kt)] = ( + trans_Kt[(trans_Kt >= vmax_Kt) & (trans_Kt < lim_Kt)] * ub_Kt + ) + trans_Kt[(trans_Kt > vmin_Kt) & (trans_Kt < vmax_Kt)] = trans_Kt[ + (trans_Kt > vmin_Kt) & (trans_Kt < vmax_Kt) + ] * ( + lb_Kt + + ( + ( + (trans_Kt[(trans_Kt > vmin_Kt) & (trans_Kt < vmax_Kt)] - vmin_Kt) + / (vmax_Kt - vmin_Kt) + ) + * (ub_Kt - lb_Kt) + ) + ) + + trans_Vp[trans_Vp > 1] = 1 + + trans_Vp[trans_Vp <= vmin_Vp] = trans_Vp[trans_Vp <= vmin_Vp] * lb_Vp + trans_Vp[(trans_Vp >= lim_Vp)] = trans_Vp[trans_Vp >= lim_Vp] * ub_lim + trans_Vp[(trans_Vp >= vmax_Vp) & (trans_Vp < lim_Vp)] = ( + trans_Vp[(trans_Vp >= vmax_Vp) & (trans_Vp < lim_Vp)] * ub_Vp + ) + trans_Vp[(trans_Vp > vmin_Vp) & (trans_Vp < vmax_Vp)] = trans_Vp[ + (trans_Vp > vmin_Vp) & (trans_Vp < vmax_Vp) + ] * ( + lb_Vp + + ( + ( + (trans_Vp[(trans_Vp > vmin_Vp) & (trans_Vp < vmax_Vp)] - vmin_Vp) + / (vmax_Vp - vmin_Vp) + ) + * (ub_Vp - lb_Vp) + ) + ) + + c_tissue_tr = dro.forward_extended_tofts(trans_Kt, trans_Ve, trans_Vp, (Ea / aif_cor), t) + + R1_tr, R2st_tr = dro.calcR1_R2(R10, R20st_value, r1, r2st, c_tissue_tr) + dro_s_tr, M_tr = dro.Conc2Sig_Linear(Tr, Te, fa, R1_tr, R2st_tr, signal[:, :, :8, :], 1, M) + + dro_s_noise_tr = dro.addnoise(1, stdS, dro_s_tr, Nt=data_shape[-1]) + + dro.animate_mri(signal, mode="time", slice_index=7, time_index=5) + dro.animate_mri(dro_s_noise_tr, mode="time", slice_index=7, time_index=5) + dro.animate_mri(dro_s_noise, mode="time", slice_index=7, time_index=5) + + save = True + + if save: + dro.save_dicoms("output", slices, dro_s_noise, dro_IDnum, study_instance_uid)
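
A minimal usage sketch of the refactored osipi.DRO surface introduced by this patch, mirroring the flow of main.py above. The DICOM folder, output directory, DRO ID and UID prefix are the placeholder values already used in main.py; the single-voxel AIF is an illustration-only stand-in for the ROI-derived AIF; animate_mri and save_dicoms are assumed to exist in the _utils module that __init__.py imports but which is not included in this diff.

import numpy as np
import pydicom

import osipi.DRO as dro

if __name__ == "__main__":
    # Load a DCE-MRI DICOM series as a (x, y, z, t) signal array.
    # The folder path is a placeholder for a local dataset.
    signal, slices, dicom_ref = dro.read_dicom_slices_as_4d_signal(
        "data/subject2/12.000000-perfusion-17557"
    )
    data_shape = signal.shape

    # Signal enhancement relative to the first 5 (pre-contrast) time points.
    E, S0, signal = dro.signal_enhancement_extract(signal, data_shape, 5)

    # Time axis in minutes (4.8 s sampling, as in main.py).
    dt = 4.8 / 60
    t = np.linspace(0, dt * signal.shape[-1], signal.shape[-1])

    # Stand-in AIF: one enhancing voxel's curve. main.py derives the AIF
    # from an ROI with ic_from_roi instead; this is only for illustration.
    ca = E[96, 118, 5, :]

    # Voxel-wise extended Tofts fit on the first 8 slices, then median-filter
    # the parameter maps and regenerate a synthetic tissue concentration.
    kt, ve, vp = dro.extended_tofts_model(ca, E[:, :, :8, :], t)
    kt, ve, vp = dro.median_filter(kt), dro.median_filter(ve), dro.median_filter(vp)
    c_tissue = dro.forward_extended_tofts(kt, ve, vp, ca, t)

    # Preview and export, mirroring the animate_mri and save_dicoms calls in
    # main.py (both helpers come from the _utils module not shown in this diff).
    dro.animate_mri(signal, mode="time", slice_index=7, time_index=5)
    study_uid = pydicom.uid.generate_uid(prefix="1.3.6.1.4.1.9328.50.16.")
    dro.save_dicoms("output", slices, signal, "9215224289", study_uid)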