From 1841014fb4dcb4030efa41820761746f206ddc2e Mon Sep 17 00:00:00 2001 From: Oscar Esteban Date: Sun, 12 Mar 2023 19:16:27 +0100 Subject: [PATCH] maint: replace ported sources with shim imports and deprecation warnings Resolves: #787. --- niworkflows/interfaces/plotting.py | 234 +---- niworkflows/reports/core.py | 587 +----------- niworkflows/reports/default.yml | 177 ---- niworkflows/reports/report.tpl | 164 ---- niworkflows/reports/tests/__init__.py | 0 niworkflows/reports/tests/test_core.py | 285 ------ niworkflows/viz/notebook.py | 63 +- niworkflows/viz/plots.py | 1129 +----------------------- niworkflows/viz/utils.py | 736 +-------------- setup.cfg | 4 + 10 files changed, 95 insertions(+), 3284 deletions(-) delete mode 100644 niworkflows/reports/default.yml delete mode 100644 niworkflows/reports/report.tpl delete mode 100644 niworkflows/reports/tests/__init__.py delete mode 100644 niworkflows/reports/tests/test_core.py diff --git a/niworkflows/interfaces/plotting.py b/niworkflows/interfaces/plotting.py index c37aa9b1d3b..874ced26cf5 100644 --- a/niworkflows/interfaces/plotting.py +++ b/niworkflows/interfaces/plotting.py @@ -21,229 +21,17 @@ # https://www.nipreps.org/community/licensing/ # """Visualization tools.""" -import numpy as np -import nibabel as nb - -from nipype.utils.filemanip import fname_presuffix -from nipype.interfaces.base import ( - File, - BaseInterfaceInputSpec, - TraitedSpec, - SimpleInterface, - traits, - isdefined, -) -from niworkflows.utils.timeseries import _cifti_timeseries, _nifti_timeseries -from niworkflows.viz.plots import ( - fMRIPlot, - compcor_variance_plot, - confounds_correlation_plot, +import warnings +from nireports.interfaces import ( + CompCorVariancePlot, + ConfoundsCorrelationPlot, + FMRISummary, ) +__all__ = ( + "CompCorVariancePlot", + "ConfoundsCorrelationPlot", + "FMRISummary", +) -class _FMRISummaryInputSpec(BaseInterfaceInputSpec): - in_func = File(exists=True, mandatory=True, desc="") - in_spikes_bg = File(exists=True, desc="") - fd = File(exists=True, desc="") - dvars = File(exists=True, desc="") - outliers = File(exists=True, desc="") - in_segm = File(exists=True, desc="") - tr = traits.Either(None, traits.Float, usedefault=True, desc="the TR") - fd_thres = traits.Float(0.2, usedefault=True, desc="") - drop_trs = traits.Int(0, usedefault=True, desc="dummy scans") - - -class _FMRISummaryOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="written file path") - - -class FMRISummary(SimpleInterface): - """Prepare an fMRI summary plot for the report.""" - - input_spec = _FMRISummaryInputSpec - output_spec = _FMRISummaryOutputSpec - - def _run_interface(self, runtime): - import pandas as pd - - self._results["out_file"] = fname_presuffix( - self.inputs.in_func, - suffix="_fmriplot.svg", - use_ext=False, - newpath=runtime.cwd, - ) - - dataframe = pd.DataFrame({ - "outliers": np.loadtxt(self.inputs.outliers, usecols=[0]).tolist(), - # Pick non-standardize dvars (col 1) - # First timepoint is NaN (difference) - "DVARS": [np.nan] - + np.loadtxt(self.inputs.dvars, skiprows=1, usecols=[1]).tolist(), - # First timepoint is zero (reference volume) - "FD": [0.0] - + np.loadtxt(self.inputs.fd, skiprows=1, usecols=[0]).tolist(), - }) if ( - isdefined(self.inputs.outliers) - and isdefined(self.inputs.dvars) - and isdefined(self.inputs.fd) - ) else None - - input_data = nb.load(self.inputs.in_func) - seg_file = self.inputs.in_segm if isdefined(self.inputs.in_segm) else None - dataset, segments = ( - _cifti_timeseries(input_data) - if 
isinstance(input_data, nb.Cifti2Image) else - _nifti_timeseries(input_data, seg_file) - ) - - fig = fMRIPlot( - dataset, - segments=segments, - spikes_files=( - [self.inputs.in_spikes_bg] - if isdefined(self.inputs.in_spikes_bg) else None - ), - tr=( - self.inputs.tr if isdefined(self.inputs.tr) else - _get_tr(input_data) - ), - confounds=dataframe, - units={"outliers": "%", "FD": "mm"}, - vlines={"FD": [self.inputs.fd_thres]}, - nskip=self.inputs.drop_trs, - ).plot() - fig.savefig(self._results["out_file"], bbox_inches="tight") - return runtime - - -class _CompCorVariancePlotInputSpec(BaseInterfaceInputSpec): - metadata_files = traits.List( - File(exists=True), - mandatory=True, - desc="List of files containing component " "metadata", - ) - metadata_sources = traits.List( - traits.Str, - desc="List of names of decompositions " - "(e.g., aCompCor, tCompCor) yielding " - "the arguments in `metadata_files`", - ) - variance_thresholds = traits.Tuple( - traits.Float(0.5), - traits.Float(0.7), - traits.Float(0.9), - usedefault=True, - desc="Levels of explained variance to include in " "plot", - ) - out_file = traits.Either( - None, File, value=None, usedefault=True, desc="Path to save plot" - ) - - -class _CompCorVariancePlotOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="Path to saved plot") - - -class CompCorVariancePlot(SimpleInterface): - """Plot the number of components necessary to explain the specified levels of variance.""" - - input_spec = _CompCorVariancePlotInputSpec - output_spec = _CompCorVariancePlotOutputSpec - - def _run_interface(self, runtime): - if self.inputs.out_file is None: - self._results["out_file"] = fname_presuffix( - self.inputs.metadata_files[0], - suffix="_compcor.svg", - use_ext=False, - newpath=runtime.cwd, - ) - else: - self._results["out_file"] = self.inputs.out_file - compcor_variance_plot( - metadata_files=self.inputs.metadata_files, - metadata_sources=self.inputs.metadata_sources, - output_file=self._results["out_file"], - varexp_thresh=self.inputs.variance_thresholds, - ) - return runtime - - -class _ConfoundsCorrelationPlotInputSpec(BaseInterfaceInputSpec): - confounds_file = File( - exists=True, mandatory=True, desc="File containing confound regressors" - ) - out_file = traits.Either( - None, File, value=None, usedefault=True, desc="Path to save plot" - ) - reference_column = traits.Str( - "global_signal", - usedefault=True, - desc="Column in the confound file for " - "which all correlation magnitudes " - "should be ranked and plotted", - ) - columns = traits.List( - traits.Str, - desc="Filter out all regressors not found in this list." - ) - max_dim = traits.Int( - 20, - usedefault=True, - desc="Maximum number of regressors to include in " - "plot. 
Regressors with highest magnitude of " - "correlation with `reference_column` will be " - "selected.", - ) - - -class _ConfoundsCorrelationPlotOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="Path to saved plot") - - -class ConfoundsCorrelationPlot(SimpleInterface): - """Plot the correlation among confound regressors.""" - - input_spec = _ConfoundsCorrelationPlotInputSpec - output_spec = _ConfoundsCorrelationPlotOutputSpec - - def _run_interface(self, runtime): - if self.inputs.out_file is None: - self._results["out_file"] = fname_presuffix( - self.inputs.confounds_file, - suffix="_confoundCorrelation.svg", - use_ext=False, - newpath=runtime.cwd, - ) - else: - self._results["out_file"] = self.inputs.out_file - confounds_correlation_plot( - confounds_file=self.inputs.confounds_file, - columns=self.inputs.columns if isdefined(self.inputs.columns) else None, - max_dim=self.inputs.max_dim, - output_file=self._results["out_file"], - reference=self.inputs.reference_column, - ) - return runtime - - -def _get_tr(img): - """ - Attempt to extract repetition time from NIfTI/CIFTI header - - Examples - -------- - >>> _get_tr(nb.load(Path(test_data) / - ... 'sub-ds205s03_task-functionallocalizer_run-01_bold_volreg.nii.gz')) - 2.2 - >>> _get_tr(nb.load(Path(test_data) / - ... 'sub-01_task-mixedgamblestask_run-02_space-fsLR_den-91k_bold.dtseries.nii')) - 2.0 - - """ - - try: - return img.header.matrix.get_index_map(0).series_step - except AttributeError: - return img.header.get_zooms()[-1] - raise RuntimeError("Could not extract TR - unknown data structure type") +warnings.warn("Please use nireports.interfaces", DeprecationWarning) diff --git a/niworkflows/reports/core.py b/niworkflows/reports/core.py index d7a460fc01a..d6bd448d16c 100644 --- a/niworkflows/reports/core.py +++ b/niworkflows/reports/core.py @@ -20,572 +20,21 @@ # # https://www.nipreps.org/community/licensing/ # -""" -Reports builder for BIDS-Apps. - -Generalizes report generation across BIDS-Apps - -""" -from pathlib import Path -import re -from itertools import compress -from collections import defaultdict -from pkg_resources import resource_filename as pkgrf -from bids.layout import BIDSLayout, add_config_paths -import jinja2 -from nipype.utils.filemanip import copyfile - -# Add a new figures spec -try: - add_config_paths(figures=pkgrf("niworkflows", "data/nipreps.json")) -except ValueError as e: - if "Configuration 'figures' already exists" != str(e): - raise - -PLURAL_SUFFIX = defaultdict(str("s").format, [("echo", "es")]) -SVG_SNIPPET = [ - """\ - -Problem loading figure {0}. If the link below works, please try \ -reloading the report in your browser. - -
-<div class="elem-filename">
-    Get figure file: <a href="./{0}" target="_blank">{0}</a>
-</div>
-""",
-    """\
-<img class="svg-reportlet" src="./{0}" style="width: 100%" />
-</div>
-<div class="elem-filename">
-    Get figure file: <a href="./{0}" target="_blank">{0}</a>
-</div>
-""", -] - - -class Smallest: - """An object that always evaluates smaller than anything else, for sorting - - >>> Smallest() < 1 - True - >>> Smallest() < "epsilon" - True - >>> sorted([1, None, 2], key=lambda x: x if x is not None else Smallest()) - [None, 1, 2] - """ - def __lt__(self, other): - return not isinstance(other, Smallest) - - def __eq__(self, other): - return isinstance(other, Smallest) - - def __gt__(self, other): - return False - - -class Element(object): - """Just a basic component of a report""" - - def __init__(self, name, title=None): - self.name = name - self.title = title - - -class Reportlet(Element): - """ - A reportlet has title, description and a list of components with either an - HTML fragment or a path to an SVG file, and possibly a caption. This is a - factory class to generate Reportlets reusing the layout from a ``Report`` - object. - - .. testsetup:: - - >>> cwd = os.getcwd() - >>> os.chdir(tmpdir) - - >>> from pkg_resources import resource_filename - >>> from shutil import copytree - >>> from bids.layout import BIDSLayout - >>> test_data_path = resource_filename('niworkflows', 'data/tests/work') - >>> testdir = Path(tmpdir) - >>> data_dir = copytree(test_data_path, str(testdir / 'work')) - >>> out_figs = testdir / 'out' / 'fmriprep' - >>> bl = BIDSLayout(str(testdir / 'work' / 'reportlets'), - ... config='figures', validate=False) - - .. doctest:: - - >>> bl.get(subject='01', desc='reconall') # doctest: +ELLIPSIS - [] - - >>> len(bl.get(subject='01', space='.*', regex_search=True)) - 2 - - >>> r = Reportlet(bl, out_figs, config={ - ... 'title': 'Some Title', 'bids': {'datatype': 'figures', 'desc': 'reconall'}, - ... 'description': 'Some description'}) - >>> r.name - 'datatype-figures_desc-reconall' - - >>> r.components[0][0].startswith('>> r = Reportlet(bl, out_figs, config={ - ... 'title': 'Some Title', 'bids': {'datatype': 'figures', 'desc': 'reconall'}, - ... 'description': 'Some description', 'static': False}) - >>> r.name - 'datatype-figures_desc-reconall' - - >>> r.components[0][0].startswith('>> r = Reportlet(bl, out_figs, config={ - ... 'title': 'Some Title', 'bids': {'datatype': 'figures', 'desc': 'summary'}, - ... 'description': 'Some description'}) - - >>> r.components[0][0].startswith('>> r.components[0][1] is None - True - - >>> r = Reportlet(bl, out_figs, config={ - ... 'title': 'Some Title', - ... 'bids': {'datatype': 'figures', 'space': '.*', 'regex_search': True}, - ... 'caption': 'Some description {space}'}) - >>> sorted(r.components)[0][1] - 'Some description MNI152NLin2009cAsym' - - >>> sorted(r.components)[1][1] - 'Some description MNI152NLin6Asym' - - - >>> r = Reportlet(bl, out_figs, config={ - ... 'title': 'Some Title', - ... 'bids': {'datatype': 'fmap', 'space': '.*', 'regex_search': True}, - ... 'caption': 'Some description {space}'}) - >>> r.is_empty() - True - - .. 
testcleanup:: - - >>> os.chdir(cwd) - - """ - - def __init__(self, layout, out_dir, config=None): - if not config: - raise RuntimeError("Reportlet must have a config object") - - self.name = config.get( - "name", "_".join("%s-%s" % i for i in sorted(config["bids"].items())) - ) - self.title = config.get("title") - self.subtitle = config.get("subtitle") - self.description = config.get("description") - - # Query the BIDS layout of reportlets - files = layout.get(**config["bids"]) - - self.components = [] - for bidsfile in files: - src = Path(bidsfile.path) - ext = "".join(src.suffixes) - desc_text = config.get("caption") - - contents = None - if ext == ".html": - contents = src.read_text().strip() - elif ext == ".svg": - entities = dict(bidsfile.entities) - if desc_text: - desc_text = desc_text.format(**entities) - - try: - html_anchor = src.relative_to(out_dir) - except ValueError: - html_anchor = src.relative_to(Path(layout.root).parent) - dst = out_dir / html_anchor - dst.parent.mkdir(parents=True, exist_ok=True) - copyfile(src, dst, copy=True, use_hardlink=True) - - contents = SVG_SNIPPET[config.get("static", True)].format(html_anchor) - - # Our current implementations of dynamic reportlets do this themselves, - # however I'll leave the code here since this is potentially something we - # will want to transfer from every figure generator to this location. - # The following code misses setting preserveAspecRatio="xMidYMid meet" - # if not is_static: - # # Remove height and width attributes from initial tag - # svglines = out_file.read_text().splitlines() - # expr = re.compile(r' (height|width)=["\'][0-9]+(\.[0-9]*)?[a-z]*["\']') - # for l, line in enumerate(svglines[:6]): - # if line.strip().startswith('>> cwd = os.getcwd() - >>> os.chdir(tmpdir) - - >>> from pkg_resources import resource_filename - >>> from shutil import copytree - >>> from bids.layout import BIDSLayout - >>> test_data_path = resource_filename('niworkflows', 'data/tests/work') - >>> testdir = Path(tmpdir) - >>> data_dir = copytree(test_data_path, str(testdir / 'work')) - >>> out_figs = testdir / 'out' / 'fmriprep' - - .. doctest:: - - >>> robj = Report(testdir / 'out', 'madeoutuuid', subject_id='01', packagename='fmriprep', - ... reportlets_dir=testdir / 'work' / 'reportlets') - >>> robj.layout.get(subject='01', desc='reconall') # doctest: +ELLIPSIS - [] - - >>> robj.generate_report() - 0 - >>> len((testdir / 'out' / 'fmriprep' / 'sub-01.html').read_text()) - 36713 - - .. 
testcleanup:: - - >>> os.chdir(cwd) - - """ - - def __init__( - self, - out_dir, - run_uuid, - config=None, - out_filename="report.html", - packagename=None, - reportlets_dir=None, - subject_id=None, - ): - self.root = Path(reportlets_dir or out_dir) - - # Initialize structuring elements - self.sections = [] - self.errors = [] - self.out_dir = Path(out_dir) - self.out_filename = out_filename - self.run_uuid = run_uuid - self.packagename = packagename - self.subject_id = subject_id - if subject_id is not None: - self.subject_id = ( - subject_id[4:] if subject_id.startswith("sub-") else subject_id - ) - self.out_filename = f"sub-{self.subject_id}.html" - - # Default template from niworkflows - self.template_path = Path(pkgrf("niworkflows", "reports/report.tpl")) - self._load_config(Path(config or pkgrf("niworkflows", "reports/default.yml"))) - assert self.template_path.exists() - - def _load_config(self, config): - from yaml import safe_load as load - - settings = load(config.read_text()) - self.packagename = self.packagename or settings.get("package", None) - - if self.packagename is not None: - self.root = self.root / self.packagename - self.out_dir = self.out_dir / self.packagename - - if self.subject_id is not None: - self.root = self.root / "sub-{}".format(self.subject_id) - - if "template_path" in settings: - self.template_path = config.parent / settings["template_path"] - - self.index(settings["sections"]) - - def init_layout(self): - self.layout = BIDSLayout(self.root, config="figures", validate=False) - - def index(self, config): - """ - Traverse the reports config definition and instantiate reportlets. - - This method also places figures in their final location. - """ - # Initialize a BIDS layout - self.init_layout() - for subrep_cfg in config: - # First determine whether we need to split by some ordering - # (ie. sessions / tasks / runs), which are separated by commas. - orderings = [ - s for s in subrep_cfg.get("ordering", "").strip().split(",") if s - ] - entities, list_combos = self._process_orderings(orderings, self.layout) - - if not list_combos: # E.g. this is an anatomical reportlet - reportlets = [ - Reportlet(self.layout, self.out_dir, config=cfg) - for cfg in subrep_cfg["reportlets"] - ] - else: - # Do not use dictionary for queries, as we need to preserve ordering - # of ordering columns. - reportlets = [] - for c in list_combos: - # do not display entities with the value None. - c_filt = list(filter(None, c)) - ent_filt = list(compress(entities, c)) - # Set a common title for this particular combination c - title = "Reports for: %s." 
% ", ".join(
-                    [
-                        '<span class="bids-entity">%s</span> <span class="bids-value">%s</span>'
-                        % (ent_filt[i], c_filt[i])
-                        for i in range(len(c_filt))
-                    ]
-                )
-                for cfg in subrep_cfg["reportlets"]:
-                    cfg["bids"].update({entities[i]: c[i] for i in range(len(c))})
-                    rlet = Reportlet(self.layout, self.out_dir, config=cfg)
-                    if not rlet.is_empty():
-                        rlet.title = title
-                        title = None
-                        reportlets.append(rlet)
-
-            # Filter out empty reportlets
-            reportlets = [r for r in reportlets if not r.is_empty()]
-            if reportlets:
-                sub_report = SubReport(
-                    subrep_cfg["name"],
-                    isnested=bool(list_combos),
-                    reportlets=reportlets,
-                    title=subrep_cfg.get("title"),
-                )
-                self.sections.append(sub_report)
-
-        # Populate errors section
-        error_dir = (
-            self.out_dir / "sub-{}".format(self.subject_id) / "log" / self.run_uuid
-        )
-        if error_dir.is_dir():
-            from ..utils.misc import read_crashfile
-
-            self.errors = [read_crashfile(str(f)) for f in error_dir.glob("crash*.*")]
-
-    def generate_report(self):
-        """Once the Report has been indexed, the final HTML can be generated"""
-        logs_path = self.out_dir / "logs"
-
-        boilerplate = []
-        boiler_idx = 0
-
-        if (logs_path / "CITATION.html").exists():
-            text = (
-                re.compile("<body>(.*?)</body>", re.DOTALL | re.IGNORECASE)
-                .findall((logs_path / "CITATION.html").read_text())[0]
-                .strip()
-            )
-            boilerplate.append(
-                (boiler_idx, "HTML", f'
<div class="boiler-html">{text}</div>
') - ) - boiler_idx += 1 - - if (logs_path / "CITATION.md").exists(): - text = (logs_path / "CITATION.md").read_text() - boilerplate.append((boiler_idx, "Markdown", f"
<pre>{text}</pre>
\n")) - boiler_idx += 1 - - if (logs_path / "CITATION.tex").exists(): - text = ( - re.compile( - r"\\begin{document}(.*?)\\end{document}", re.DOTALL | re.IGNORECASE - ) - .findall((logs_path / "CITATION.tex").read_text())[0] - .strip() - ) - boilerplate.append( - ( - boiler_idx, - "LaTeX", - f"""
<pre>{text}</pre>
-<h3>Bibliography</h3>
-<pre>{Path(pkgrf(self.packagename, 'data/boilerplate.bib')).read_text()}</pre>
-""", - ) - ) - boiler_idx += 1 - - env = jinja2.Environment( - loader=jinja2.FileSystemLoader(searchpath=str(self.template_path.parent)), - trim_blocks=True, - lstrip_blocks=True, - autoescape=False, - ) - report_tpl = env.get_template(self.template_path.name) - report_render = report_tpl.render( - sections=self.sections, errors=self.errors, boilerplate=boilerplate - ) - - # Write out report - self.out_dir.mkdir(parents=True, exist_ok=True) - (self.out_dir / self.out_filename).write_text(report_render, encoding="UTF-8") - return len(self.errors) - - @staticmethod - def _process_orderings(orderings, layout): - """ - Generate relevant combinations of orderings with observed values. - - Arguments - --------- - orderings : :obj:`list` of :obj:`list` of :obj:`str` - Sections prescribing an ordering to select across sessions, acquisitions, runs, etc. - layout : :obj:`bids.layout.BIDSLayout` - The BIDS layout - - Returns - ------- - entities: :obj:`list` of :obj:`str` - The relevant orderings that had unique values - value_combos: :obj:`list` of :obj:`tuple` - Unique value combinations for the entities - - """ - # get a set of all unique entity combinations - all_value_combos = { - tuple(bids_file.get_entities().get(k, None) for k in orderings) - for bids_file in layout.get() - } - # remove the all None member if it exists - none_member = tuple([None for k in orderings]) - if none_member in all_value_combos: - all_value_combos.remove(tuple([None for k in orderings])) - # see what values exist for each entity - unique_values = [ - {value[idx] for value in all_value_combos} for idx in range(len(orderings)) - ] - # if all values are None for an entity, we do not want to keep that entity - keep_idx = [ - False if (len(val_set) == 1 and None in val_set) or not val_set else True - for val_set in unique_values - ] - # the "kept" entities - entities = list(compress(orderings, keep_idx)) - # the "kept" value combinations - value_combos = [ - tuple(compress(value_combo, keep_idx)) for value_combo in all_value_combos - ] - # sort the value combinations alphabetically from the first entity to the last entity - value_combos.sort( - key=lambda entry: tuple( - value if value is not None else Smallest() for value in entry - ) - ) - - return entities, value_combos - - -def run_reports( - out_dir, - subject_label, - run_uuid, - config=None, - reportlets_dir=None, - packagename=None, -): - """ - Run the reports. - - .. testsetup:: - - >>> cwd = os.getcwd() - >>> os.chdir(tmpdir) - - >>> from pkg_resources import resource_filename - >>> from shutil import copytree - >>> test_data_path = resource_filename('niworkflows', 'data/tests/work') - >>> testdir = Path(tmpdir) - >>> data_dir = copytree(test_data_path, str(testdir / 'work')) - >>> (testdir / 'fmriprep').mkdir(parents=True, exist_ok=True) - - .. doctest:: - - >>> run_reports(testdir / 'out', '01', 'madeoutuuid', packagename='fmriprep', - ... reportlets_dir=testdir / 'work' / 'reportlets') - 0 - - .. 
testcleanup:: - - >>> os.chdir(cwd) - - """ - return Report( - out_dir, - run_uuid, - config=config, - subject_id=subject_label, - packagename=packagename, - reportlets_dir=reportlets_dir, - ).generate_report() - - -def generate_reports( - subject_list, output_dir, run_uuid, config=None, work_dir=None, packagename=None -): - """Execute run_reports on a list of subjects.""" - reportlets_dir = None - if work_dir is not None: - reportlets_dir = Path(work_dir) / "reportlets" - report_errors = [ - run_reports( - output_dir, - subject_label, - run_uuid, - config=config, - packagename=packagename, - reportlets_dir=reportlets_dir, - ) - for subject_label in subject_list - ] - - errno = sum(report_errors) - if errno: - import logging - - logger = logging.getLogger("cli") - error_list = ", ".join( - "%s (%d)" % (subid, err) - for subid, err in zip(subject_list, report_errors) - if err - ) - logger.error( - "Preprocessing did not finish successfully. Errors occurred while processing " - "data from participants: %s. Check the HTML reports for details.", - error_list, - ) - return errno +"""Reports builder for BIDS-Apps (deprecated; please use NiReports).""" +import warnings + +from nireports.assembler.misc import Element +from nireports.assembler.reportlet import Reportlet +from nireports.assembler.reports import SubReport, Report +from nireports.assembler.tools import run_reports, generate_reports + +__all__ = ( + "Element", + "Report", + "Reportlet", + "SubReport", + "generate_reports", + "run_reports", +) + +warnings.warn("Please use nireports.assembler", DeprecationWarning) diff --git a/niworkflows/reports/default.yml b/niworkflows/reports/default.yml deleted file mode 100644 index 86bd3bc1a24..00000000000 --- a/niworkflows/reports/default.yml +++ /dev/null @@ -1,177 +0,0 @@ -package: niworkflows -sections: -- name: Summary - reportlets: - - bids: {datatype: figures, desc: summary, suffix: T1w} -- name: Anatomical - reportlets: - - bids: - datatype: figures - desc: conform - extension: [.html] - suffix: T1w - - bids: {datatype: figures, suffix: dseg} - caption: This panel shows the template T1-weighted image (if several T1w images - were found), with contours delineating the detected brain mask and brain tissue - segmentations. - subtitle: Brain mask and brain tissue segmentation of the T1w - - bids: {datatype: figures, space: .*, suffix: T1w, regex_search: True} - caption: Spatial normalization of the T1w image to the {space} template. - description: Results of nonlinear alignment of the T1w reference one or more template - space(s). Hover on the panels with the mouse pointer to transition between both - spaces. - static: false - subtitle: Spatial normalization of the anatomical T1w reference - - bids: {datatype: figures, desc: reconall, suffix: T1w} - caption: Surfaces (white and pial) reconstructed with FreeSurfer (recon-all) - overlaid on the participant's T1w template. - subtitle: Surface reconstruction - -- name: B0 field mapping - ordering: session,acquisition,reconstruction,direction,run,fmapid - reportlets: - - bids: {datatype: figures, desc: mapped, suffix: fieldmap} - caption: Inhomogeneities of the B0 field introduce (oftentimes severe) spatial distortions - along the phase-encoding direction of the image. Some scanners produce a B0 - mapping of the field, using Spiral Echo Imaging (SEI) or postprocessing a "phase-difference" - acquisition. The plot below shows an anatomical "magnitude" reference and the corresponding - fieldmap. 
-    description: Hover over the panels with the mouse pointer to also visualize the intensity of the
-      field inhomogeneity in Hertz.
-    static: false
-    subtitle: "Preprocessed B0 mapping acquisition"
-  - bids: {datatype: figures, desc: phasediff, suffix: fieldmap}
-    caption: Inhomogeneities of the B0 field introduce (oftentimes severe) spatial distortions
-      along the phase-encoding direction of the image. A Gradient-Recalled Echo (GRE) scheme
-      allows for the mapping of the B0 inhomogeneities by subtracting the phase maps obtained
-      at two subsequent echoes. The plot below shows an anatomical "magnitude" reference and
-      the corresponding fieldmap.
-    description: Hover over the panels with the mouse pointer to also visualize the intensity of the
-      field inhomogeneity in Hertz.
-    static: false
-    subtitle: "Preprocessed mapping of phase-difference acquisition"
-  - bids: {datatype: figures, desc: pepolar, suffix: fieldmap}
-    caption: Inhomogeneities of the B0 field introduce (oftentimes severe) spatial distortions
-      along the phase-encoding direction of the image. Utilizing two or more images with different
-      phase-encoding polarities (PEPolar) or directions, it is possible to estimate the inhomogeneity
-      of the field. The plot below shows a reference EPI (echo-planar imaging) volume generated
-      using two or more EPI images with varying phase-encoding blips.
-    description: Hover on the panels with the mouse pointer to also visualize the intensity of the
-      inhomogeneity of the field in Hertz.
-    static: false
-    subtitle: "Preprocessed estimation with varying Phase-Encoding (PE) blips"
-  - bids: {datatype: figures, desc: anat, suffix: fieldmap}
-    caption: Inhomogeneities of the B0 field introduce (oftentimes severe) spatial distortions
-      along the phase-encoding direction of the image. Utilizing an anatomically-correct
-      acquisition (for instance, T1w or T2w), it is possible to estimate the inhomogeneity of
-      the field by means of nonlinear registration. The plot below shows a reference EPI
-      (echo-planar imaging) volume generated using two or more EPI images with the same PE
-      encoding, after alignment to the anatomical scan.
-    description: Hover on the panels with the mouse pointer to also visualize the intensity of the
-      inhomogeneity of the field in Hertz.
-    static: false
-    subtitle: "Preprocessed estimation by nonlinear registration to an anatomical scan (“fieldmap-less”)"
-
-- name: Functional
-  ordering: session,task,acquisition,ceagent,reconstruction,direction,run,echo
-  reportlets:
-  - bids: {datatype: figures, desc: summary, suffix: bold}
-  - bids: {datatype: figures, desc: validation, suffix: bold}
-  - bids: {datatype: figures, desc: magnitude, suffix: bold}
-    caption: Results of affine coregistration between the magnitude image of the fieldmap
-      and the reference EPI image.
-    static: false
-    subtitle: Fieldmap to EPI registration
-  - bids: {datatype: figures, desc: fieldmap, suffix: bold}
-    caption: Overlaid on the reference EPI image.
-    static: false
-    subtitle: Fieldmap
-  - bids: {datatype: figures, desc: sdc, suffix: bold}
-    caption: Results of performing susceptibility distortion correction (SDC) on the EPI.
-    static: false
-    subtitle: Susceptibility distortion correction
-  - bids: {datatype: figures, desc: forcedsyn, suffix: bold}
-    caption: The dataset contained some fieldmap information, but the argument --force-syn
-      was used. The higher-priority SDC method was used. Here, we show the results
-      of performing SyN-based SDC on the EPI for comparison.
- static: false - subtitle: Experimental fieldmap-less susceptibility distortion correction - - bids: {datatype: figures, desc: flirtnobbr, suffix: bold} - caption: FSL flirt was used to generate transformations from EPI - space to T1 Space - BBR refinement rejected. Note that Nearest Neighbor interpolation - is used in the reportlets in order to highlight potential spin-history and other - artifacts, whereas final images are resampled using Lanczos interpolation. - static: false - subtitle: Alignment of functional and anatomical MRI data (volume based) - - bids: {datatype: figures, desc: coreg, suffix: bold} - caption: mri_coreg (FreeSurfer) was used to generate transformations - from EPI space to T1 Space - bbregister refinement rejected. Note - that Nearest Neighbor interpolation is used in the reportlets in order to highlight - potential spin-history and other artifacts, whereas final images are resampled - using Lanczos interpolation. - static: false - subtitle: Alignment of functional and anatomical MRI data (volume based) - - bids: {datatype: figures, desc: flirtbbr, suffix: bold} - caption: FSL flirt was used to generate transformations from EPI-space - to T1w-space - The white matter mask calculated with FSL fast (brain - tissue segmentation) was used for BBR. Note that Nearest Neighbor interpolation - is used in the reportlets in order to highlight potential spin-history and other - artifacts, whereas final images are resampled using Lanczos interpolation. - static: false - subtitle: Alignment of functional and anatomical MRI data (surface driven) - - bids: {datatype: figures, desc: bbregister, suffix: bold} - caption: bbregister was used to generate transformations from EPI-space - to T1w-space. Note that Nearest Neighbor interpolation is used in the reportlets - in order to highlight potential spin-history and other artifacts, whereas final - images are resampled using Lanczos interpolation. - static: false - subtitle: Alignment of functional and anatomical MRI data (surface driven) - - bids: {datatype: figures, desc: rois, suffix: bold} - caption: Brain mask calculated on the BOLD signal (red contour), along with the - masks used for a/tCompCor.
The aCompCor mask (magenta contour) is a conservative
-      CSF and white-matter mask for extracting physiological and movement confounds.
-      The tCompCor mask (blue contour) contains the top 5% most variable voxels
-      within a heavily-eroded brain-mask.
-    subtitle: Brain mask and (temporal/anatomical) CompCor ROIs
-  - bids:
-      datatype: figures
-      desc: '[at]compcor'
-      extension: [.html]
-      suffix: bold
-  - bids: {datatype: figures, desc: 'compcorvar', suffix: bold}
-    caption: The cumulative variance explained by the first k components of the
-      t/aCompCor decomposition, plotted for all values of k.
-      The number of components that must be included in the model in order to
-      explain some fraction of variance in the decomposition mask can be used
-      as a feature selection criterion for confound regression.
-    subtitle: Variance explained by t/aCompCor components
-  - bids: {datatype: figures, desc: carpetplot, suffix: bold}
-    caption: Summary statistics are plotted, which may reveal trends or artifacts
-      in the BOLD data. Global signals calculated within the whole-brain (GS), within
-      the white-matter (WM) and within cerebro-spinal fluid (CSF) show the mean BOLD
-      signal in their corresponding masks. DVARS and FD show the standardized DVARS
-      and framewise-displacement measures for each time point.
A carpet plot - shows the time series for all voxels within the brain mask. Voxels are grouped - into cortical (blue), and subcortical (orange) gray matter, cerebellum (green) - and white matter and CSF (red), indicated by the color map on the left-hand - side. - subtitle: BOLD Summary - - bids: {datatype: figures, desc: 'confoundcorr', suffix: bold} - caption: | - Left: Heatmap summarizing the correlation structure among confound variables. - (Cosine bases and PCA-derived CompCor components are inherently orthogonal.) - Right: magnitude of the correlation between each confound time series and the - mean global signal. Strong correlations might be indicative of partial volume - effects and can inform decisions about feature orthogonalization prior to - confound regression. - subtitle: Correlations among nuisance regressors - - bids: {datatype: figures, desc: aroma, suffix: bold} - caption: | - Maps created with maximum intensity projection (glass brain) with a - black brain outline. Right hand side of each map: time series (top in seconds), - frequency spectrum (bottom in Hertz). Components classified as signal are plotted - in green; noise components in red. - subtitle: ICA Components classified by AROMA -- name: About - reportlets: - - bids: {datatype: figures, desc: about, suffix: T1w} diff --git a/niworkflows/reports/report.tpl b/niworkflows/reports/report.tpl deleted file mode 100644 index f0160253264..00000000000 --- a/niworkflows/reports/report.tpl +++ /dev/null @@ -1,164 +0,0 @@ - - - - - - - - - - - - - - - - - - -{% for sub_report in sections %} - {% if sub_report.reportlets %} -
-<div id="{{ sub_report.name }}">
-    <h1 class="sub-report-title">{{ sub_report.name }}</h1>
-    {% for run_report in sub_report.reportlets %}
-    <div id="{{ run_report.name }}">
-        {% if run_report.title %}<h2 class="sub-report-group">{{ run_report.title }}</h2>{% endif %}
-        {% if run_report.subtitle %}<h3 class="run-title">{{ run_report.subtitle }}</h3>{% endif %}
-        {% if run_report.description %}<p class="elem-desc">{{ run_report.description }}</p>{% endif %}
-        {% for elem in run_report.components %}
-        {% if elem[0] %}
-        {% if elem[1] %}<p class="elem-caption">{{ elem[1] }}</p>{% endif %}
-        {{ elem[0] }}
-        {% endif %}
-        {% endfor %}
-    </div>
-    {% endfor %}
-</div>
-    {% endif %}
-{% endfor %}
-
-<div id="boilerplate">
-    <h1 class="sub-report-title">Methods</h1>
-    {% if boilerplate %}
-    <p>We kindly ask to report results preprocessed with this tool using the following
-    boilerplate.</p>
-
-    {% for b in boilerplate %}
-    {{ b[2] }}
-    {% endfor %}
-    {% else %}
-    <p>Failed to generate the boilerplate</p>
-    {% endif %}
-</div>
-
-<div id="errors">
-    <h1 class="sub-report-title">Errors</h1>
-    {% for error in errors %}
-    <details>
-        <summary>Node Name: {{ error.node }}</summary>
-        File: <code>{{ error.file }}</code><br />
-        Working Directory: <code>{{ error.node_dir }}</code><br />
-        Inputs:<br />
-        <ul>
-        {% for name, spec in error.inputs %}
-            <li>{{ name }}: <code>{{ spec }}</code></li>
-        {% endfor %}
-        </ul>
-        <pre>{{ error.traceback }}</pre>
-    </details>
-    {% else %}
-    <p>No errors to report!</p>
-    {% endfor %}
-</div>
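Each emptied module in this patch keeps its public names importable through a shim that re-exports the NiReports implementation and calls warnings.warn(..., DeprecationWarning) at import time. A minimal sketch, using only the standard-library warnings machinery, of how downstream code can promote those warnings to hard failures while migrating (the import path is taken from the core.py hunk above; the warning fires only on the first import of the shim):

    import warnings

    with warnings.catch_warnings():
        # Turn the shim's DeprecationWarning into an exception so stale
        # import paths fail loudly in CI instead of warning silently.
        warnings.simplefilter("error", DeprecationWarning)
        try:
            from niworkflows.reports.core import generate_reports  # noqa: F401
        except DeprecationWarning as exc:
            print(f"Stale import, switch to nireports.assembler: {exc}")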
- - - - - diff --git a/niworkflows/reports/tests/__init__.py b/niworkflows/reports/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/niworkflows/reports/tests/test_core.py b/niworkflows/reports/tests/test_core.py deleted file mode 100644 index 279b0282a8c..00000000000 --- a/niworkflows/reports/tests/test_core.py +++ /dev/null @@ -1,285 +0,0 @@ -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -# -# Copyright 2021 The NiPreps Developers -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# We support and encourage derived works from this project, please read -# about our expectations at -# -# https://www.nipreps.org/community/licensing/ -# -""" Testing module for niworkflows.reports.core """ - -import os -from pathlib import Path -from pkg_resources import resource_filename as pkgrf -import tempfile -from itertools import product -from yaml import safe_load as load - -import matplotlib.pyplot as plt -from bids.layout.writing import build_path -from bids.layout import BIDSLayout - -import pytest - -from ..core import Report - - -@pytest.fixture() -def bids_sessions(tmpdir_factory): - f, _ = plt.subplots() - svg_dir = tmpdir_factory.mktemp("work") / "fmriprep" - svg_dir.ensure_dir() - - pattern = ( - "sub-{subject}[/ses-{session}]/{datatype}/" - "sub-{subject}[_ses-{session}][_task-{task}][_acq-{acquisition}]" - "[_ce-{ceagent}][_dir-{direction}][_rec-{reconstruction}]" - "[_mod-{modality}][_run-{run}][_echo-{echo}][_space-{space}]" - "[_desc-{desc}]_{suffix}{extension<.svg>}" - ) - subjects = ["01"] - tasks = ["t1", "t2", "t3"] - runs = ["01", "02", None] - ces = ["none", "Gd"] - descs = ["aroma", "bbregister", "carpetplot", "rois"] - # create functional data for both sessions - ses1_combos = product(subjects, ["1"], tasks, [None], runs, descs) - ses2_combos = product(subjects, ["2"], tasks, ces, [None], descs) - # have no runs in the second session (ex: dmriprep test data) - # https://github.com/nipreps/dmriprep/pull/59 - all_combos = list(ses1_combos) + list(ses2_combos) - - for subject, session, task, ce, run, desc in all_combos: - entities = { - "subject": subject, - "session": session, - "task": task, - "ceagent": ce, - "run": run, - "desc": desc, - "extension": ".svg", - "suffix": "bold", - "datatype": "figures", - } - bids_path = build_path(entities, pattern) - file_path = svg_dir / bids_path - file_path.ensure() - f.savefig(str(file_path)) - - # create anatomical data - anat_opts = [ - {"desc": "brain"}, - {"desc": "conform"}, - {"desc": "reconall"}, - {"desc": "rois"}, - {"suffix": "dseg"}, - {"space": "MNI152NLin6Asym"}, - {"space": "MNI152NLin2009cAsym"}, - ] - anat_combos = product(subjects, anat_opts) - for subject, anat_opt in anat_combos: - anat_entities = {"subject": subject, "datatype": "anat", "suffix": "t1w"} - anat_entities.update(**anat_opt) - bids_path = build_path(entities, pattern) - file_path = svg_dir / bids_path - file_path.ensure() - f.savefig(str(file_path)) - - 
return svg_dir.dirname - - -@pytest.fixture() -def test_report1(): - test_data_path = pkgrf( - "niworkflows", os.path.join("data", "tests", "work", "reportlets") - ) - out_dir = tempfile.mkdtemp() - - return Report( - Path(out_dir), - "fakeuuid", - reportlets_dir=Path(test_data_path), - subject_id="01", - packagename="fmriprep", - ) - - -@pytest.fixture() -def test_report2(bids_sessions): - out_dir = tempfile.mkdtemp() - return Report( - Path(out_dir), - "fakeuuid", - reportlets_dir=Path(bids_sessions), - subject_id="01", - packagename="fmriprep", - ) - - -@pytest.mark.parametrize( - "orderings,expected_entities,expected_value_combos", - [ - ( - ["session", "task", "run"], - ["task", "run"], - [ - ("faketask", None), - ("faketask2", None), - ("faketaskwithruns", 1), - ("faketaskwithruns", 2), - ("mixedgamblestask", 1), - ("mixedgamblestask", 2), - ("mixedgamblestask", 3), - ], - ), - ( - ["run", "task", "session"], - ["run", "task"], - [ - (None, "faketask"), - (None, "faketask2"), - (1, "faketaskwithruns"), - (1, "mixedgamblestask"), - (2, "faketaskwithruns"), - (2, "mixedgamblestask"), - (3, "mixedgamblestask"), - ], - ), - ([""], [], []), - (["session"], [], []), - ([], [], []), - (["madeupentity"], [], []), - ], -) -def test_process_orderings_small( - test_report1, orderings, expected_entities, expected_value_combos -): - report = test_report1 - report.init_layout() - entities, value_combos = report._process_orderings(orderings, report.layout) - - assert entities == expected_entities - assert expected_value_combos == value_combos - - -@pytest.mark.parametrize( - "orderings,expected_entities,first_value_combo,last_value_combo", - [ - ( - ["session", "task", "ceagent", "run"], - ["session", "task", "ceagent", "run"], - ("1", "t1", None, None), - ("2", "t3", "none", None), - ), - ( - ["run", "task", "session"], - ["run", "task", "session"], - (None, "t1", "1"), - (2, "t3", "1"), - ), - ([""], [], None, None), - (["session"], ["session"], ("1",), ("2",)), - ([], [], None, None), - (["madeupentity"], [], None, None), - ], -) -def test_process_orderings_large( - test_report2, orderings, expected_entities, first_value_combo, last_value_combo -): - report = test_report2 - report.init_layout() - entities, value_combos = report._process_orderings(orderings, report.layout) - - if not value_combos: - value_combos = [None] - - assert entities == expected_entities - assert value_combos[0] == first_value_combo - assert value_combos[-1] == last_value_combo - - -@pytest.mark.parametrize( - "ordering", - [ - ("session"), - ("task"), - ("run"), - ("session,task"), - ("session,task,run"), - ("session,task,ceagent,run"), - ("session,task,acquisition,ceagent,reconstruction,direction,run,echo"), - ("session,task,run,madeupentity"), - ], -) -def test_generated_reportlets(bids_sessions, ordering): - # make independent report - out_dir = tempfile.mkdtemp() - report = Report( - Path(out_dir), - "fakeuuid", - reportlets_dir=Path(bids_sessions), - subject_id="01", - packagename="fmriprep", - ) - config = Path(pkgrf("niworkflows", "reports/default.yml")) - settings = load(config.read_text()) - # change settings to only include some missing ordering - settings["sections"][3]["ordering"] = ordering - report.index(settings["sections"]) - # expected number of reportlets - expected_reportlets_num = len(report.layout.get(extension=".svg")) - # bids_session uses these entities - needed_entities = ["session", "task", "ceagent", "run"] - # the last section is the most recently run - reportlets_num = 
len(report.sections[-1].reportlets) - # get the number of figures in the output directory - out_layout = BIDSLayout(out_dir, config="figures", validate=False) - out_figs = len(out_layout.get()) - # if ordering does not contain all the relevant entities - # then there should be fewer reportlets than expected - if all(ent in ordering for ent in needed_entities): - assert reportlets_num == expected_reportlets_num == out_figs - else: - assert reportlets_num < expected_reportlets_num == out_figs - - -@pytest.mark.parametrize( - "subject_id,out_html", - [ - ("sub-01", "sub-01.html"), - ("sub-sub1", "sub-sub1.html"), - ("01", "sub-01.html"), - ("sub1", "sub-sub1.html"), - ], -) -def test_subject_id(tmp_path, subject_id, out_html): - reports = tmp_path / "reports" - Path( - reports - / "fmriprep" - / (subject_id if subject_id.startswith("sub-") else f"sub-{subject_id}") - ).mkdir(parents=True) - - report = Report( - str(tmp_path), - "myuniqueid", - reportlets_dir=reports, - subject_id=subject_id, - packagename="fmriprep", - ) - assert report.subject_id[:4] != "sub-" - assert report.out_filename == out_html diff --git a/niworkflows/viz/notebook.py b/niworkflows/viz/notebook.py index dc6aed68b12..151369df797 100644 --- a/niworkflows/viz/notebook.py +++ b/niworkflows/viz/notebook.py @@ -21,64 +21,9 @@ # https://www.nipreps.org/community/licensing/ # """Visualization component for Jupyter Notebooks.""" -from pathlib import Path -import numpy as np -import nibabel as nb -from .utils import compose_view, plot_registration, cuts_from_bbox +import warnings +from nireports.reportlets.notebook import display +__all__ = ("display", ) -def display( - fixed_image, - moving_image, - contour=None, - cuts=None, - fixed_label="F", - moving_label="M", -): - """Plot the flickering panels to show a registration process.""" - from IPython.display import SVG, display as _disp - - if isinstance(fixed_image, (str, Path)): - fixed_image = nb.load(str(fixed_image)) - if isinstance(moving_image, (str, Path)): - moving_image = nb.load(str(moving_image)) - - if cuts is None: - n_cuts = 7 - if contour is not None: - if isinstance(contour, (str, Path)): - contour = nb.load(str(contour)) - cuts = cuts_from_bbox(contour, cuts=n_cuts) - else: - hdr = fixed_image.header.copy() - hdr.set_data_dtype("uint8") - mask_nii = nb.Nifti1Image( - np.ones(fixed_image.shape, dtype="uint8"), fixed_image.affine, hdr - ) - cuts = cuts_from_bbox(mask_nii, cuts=n_cuts) - - # Call composer - _disp( - SVG( - compose_view( - plot_registration( - fixed_image, - "fixed-image", - estimate_brightness=True, - cuts=cuts, - label=fixed_label, - contour=contour, - compress=False, - ), - plot_registration( - moving_image, - "moving-image", - estimate_brightness=True, - cuts=cuts, - label=moving_label, - contour=contour, - compress=False, - ), - ) - ) - ) +warnings.warn("Please use nireports.reportlets.notebook", DeprecationWarning) diff --git a/niworkflows/viz/plots.py b/niworkflows/viz/plots.py index 69d9e442758..13185f8a294 100644 --- a/niworkflows/viz/plots.py +++ b/niworkflows/viz/plots.py @@ -21,1107 +21,28 @@ # https://www.nipreps.org/community/licensing/ # """Plotting tools shared across MRIQC and fMRIPrep.""" - -import numpy as np -import nibabel as nb -import pandas as pd - -import matplotlib.pyplot as plt -from matplotlib import gridspec as mgs -import matplotlib.cm as cm -from matplotlib.colors import Normalize -from matplotlib.colorbar import ColorbarBase - -DINA4_LANDSCAPE = (11.69, 8.27) - - -class fMRIPlot: - """Generates the fMRI Summary 
Plot.""" - - __slots__ = ( - "timeseries", - "segments", - "tr", - "confounds", - "spikes", - "nskip", - "sort_carpet", - "paired_carpet", - ) - - def __init__( - self, - timeseries, - segments, - confounds=None, - conf_file=None, - tr=None, - usecols=None, - units=None, - vlines=None, - spikes_files=None, - nskip=0, - sort_carpet=True, - paired_carpet=False, - ): - self.timeseries = timeseries - self.segments = segments - self.tr = tr - self.nskip = nskip - self.sort_carpet = sort_carpet - self.paired_carpet = paired_carpet - - if units is None: - units = {} - if vlines is None: - vlines = {} - self.confounds = {} - if confounds is None and conf_file: - confounds = pd.read_csv(conf_file, sep=r"[\t\s]+", usecols=usecols, index_col=False) - - if confounds is not None: - for name in confounds.columns: - self.confounds[name] = { - "values": confounds[[name]].values.squeeze().tolist(), - "units": units.get(name), - "cutoff": vlines.get(name), - } - - self.spikes = [] - if spikes_files: - for sp_file in spikes_files: - self.spikes.append((np.loadtxt(sp_file), None, False)) - - def plot(self, figure=None): - """Main plotter""" - import seaborn as sns - - sns.set_style("whitegrid") - sns.set_context("paper", font_scale=0.8) - - if figure is None: - figure = plt.gcf() - - nconfounds = len(self.confounds) - nspikes = len(self.spikes) - nrows = 1 + nconfounds + nspikes - - # Create grid - grid = mgs.GridSpec( - nrows, 1, wspace=0.0, hspace=0.05, height_ratios=[1] * (nrows - 1) + [5] - ) - - grid_id = 0 - for tsz, name, iszs in self.spikes: - spikesplot(tsz, title=name, outer_gs=grid[grid_id], tr=self.tr, zscored=iszs) - grid_id += 1 - - if self.confounds: - from seaborn import color_palette - - palette = color_palette("husl", nconfounds) - - for i, (name, kwargs) in enumerate(self.confounds.items()): - tseries = kwargs.pop("values") - confoundplot(tseries, grid[grid_id], tr=self.tr, color=palette[i], name=name, **kwargs) - grid_id += 1 - - plot_carpet( - self.timeseries, - segments=self.segments, - subplot=grid[-1], - tr=self.tr, - sort_rows=self.sort_carpet, - drop_trs=self.nskip, - cmap="paired" if self.paired_carpet else None, - ) - return figure - - -def plot_carpet( - data, - segments=None, - cmap=None, - tr=None, - detrend=True, - subplot=None, - title=None, - output_file=None, - size=(900, 1200), - sort_rows="ward", - drop_trs=0, - legend=True, -): - """ - Plot an image representation of voxel intensities across time. - - This kind of plot is known as "carpet plot" or "Power plot". - See Jonathan Power Neuroimage 2017 Jul 1; 154:150-158. - - Parameters - ---------- - data : N x T :obj:`numpy.array` - The functional data to be plotted (*N* sampling locations by *T* timepoints). - segments: :obj:`dict`, optional - A mapping between segment labels (e.g., `"Left Cortex"`) and list of indexes - in the data array. - cmap : colormap - Overrides the generation of an automated colormap. - tr : float , optional - Specify the TR, if specified it uses this value. If left as None, - # of frames is plotted instead of time. - detrend : :obj:`bool`, optional - Detrend and standardize the data prior to plotting. - subplot : matplotlib subplot, optional - Subplot to plot figure on. - title : string, optional - The title displayed on the figure. - output_file : string, or None, optional - The name of an image file to export the plot to. Valid extensions - are .png, .pdf, .svg. If output_file is not None, the plot - is saved to a file, and the display is closed. 
- size : :obj:`tuple` - Maximum number of samples to plot (voxels, timepoints) - sort_rows : :obj:`str` or :obj:`False` or :obj:`None` - Apply a clustering algorithm to reorganize the rows of the carpet. - ``""``, ``False``, and ``None`` skip clustering sorting. - ``"linkage"`` uses linkage hierarchical clustering - :obj:`scipy.cluster.hierarchy.linkage`. - Any other value that Python evaluates to ``True`` will use the - default clustering, which is :obj:`sklearn.cluster.ward_tree`. - - """ - if segments is None: - segments = { - "whole brain (voxels)": list(range(data.shape[0])) - } - - nsegments = len(segments) - if nsegments == 1: - legend = False - - if cmap is None: - colors = cm.get_cmap("tab10").colors - elif cmap == "paired": - colors = list(cm.get_cmap("Paired").colors) - colors[0], colors[1] = colors[1], colors[0] - colors[2], colors[7] = colors[7], colors[2] - - if detrend: - from nilearn.signal import clean - data = clean(data.T, t_r=tr, filter=False).T - - # We want all subplots to have the same dynamic range - vminmax = (np.percentile(data, 2), np.percentile(data, 98)) - - # Decimate number of time-series before clustering - n_dec = int((1.8 * data.shape[0]) // size[0]) - if n_dec > 1: - segments = { - lab: idx[::n_dec] for lab, idx in segments.items() if np.array(idx).shape >= (1,) - } - - # Cluster segments (if argument enabled) - if sort_rows: - from scipy.cluster.hierarchy import linkage, dendrogram - from sklearn.cluster import ward_tree - - for seg_label, seg_idx in segments.items(): - # In debugging cases, we might have ROIs too small to have enough rows to sort - if len(seg_idx) < 2: - continue - roi_data = data[seg_idx] - if isinstance(sort_rows, str) and sort_rows.lower() == "linkage": - linkage_matrix = linkage( - roi_data, method="average", metric="euclidean", optimal_ordering=True - ) - else: - children, _, n_leaves, _, distances = ward_tree(roi_data, return_distance=True) - linkage_matrix = _ward_to_linkage(children, n_leaves, distances) - - dn = dendrogram(linkage_matrix, no_plot=True) - # Override the ordering of the indices in this segment - segments[seg_label] = np.array(seg_idx)[np.array(dn["leaves"])] - - # If subplot is not defined - if subplot is None: - subplot = mgs.GridSpec(1, 1)[0] - - # Length before decimation - n_trs = data.shape[-1] - drop_trs - - # Calculate time decimation factor - t_dec = max(int((1.8 * n_trs) // size[1]), 1) - data = data[:, drop_trs::t_dec] - - # Define nested GridSpec - gs = mgs.GridSpecFromSubplotSpec( - nsegments, - 1, - subplot_spec=subplot, - hspace=0.05, - height_ratios=[len(v) for v in segments.values()] - ) - - for i, (label, indices) in enumerate(segments.items()): - # Carpet plot - ax = plt.subplot(gs[i]) - - ax.imshow( - data[indices, :], - interpolation="nearest", - aspect="auto", - cmap="gray", - vmin=vminmax[0], - vmax=vminmax[1], - ) - - # Toggle the spine objects - ax.spines["top"].set_color("none") - ax.spines["top"].set_visible(False) - ax.spines["right"].set_color("none") - ax.spines["right"].set_visible(False) - - # Make colored left axis - ax.spines["left"].set_linewidth(3) - ax.spines["left"].set_color(colors[i]) - ax.spines["left"].set_capstyle("butt") - ax.spines["left"].set_position(("outward", 2)) - - # Make all subplots have same xticks - xticks = np.linspace(0, data.shape[-1], endpoint=True, num=7) - ax.set_xticks(xticks) - ax.set_yticks([]) - ax.grid(False) - - if i == (nsegments - 1): - xlabel = "time-points (index)" - xticklabels = (xticks * n_trs / data.shape[-1]).astype("uint32") + drop_trs - 
if tr is not None: - xlabel = "time (mm:ss)" - xticklabels = [ - f"{int(t // 60):02d}:{(t % 60).round(0).astype(int):02d}" - for t in (tr * xticklabels) - ] - - ax.set_xlabel(xlabel) - ax.set_xticklabels(xticklabels) - ax.spines["bottom"].set_position(("outward", 5)) - ax.spines["bottom"].set_color("k") - ax.spines["bottom"].set_linewidth(.8) - else: - ax.set_xticklabels([]) - ax.set_xticks([]) - ax.spines["bottom"].set_color("none") - ax.spines["bottom"].set_visible(False) - - if title and i == 0: - ax.set_title(title) - - if nsegments == 1: - ax.set_ylabel(label) - - if legend: - from matplotlib.patches import Patch - from mpl_toolkits.axes_grid1.inset_locator import inset_axes - - axlegend = inset_axes( - ax, - width="100%", - height=0.01, - loc='lower center', - borderpad=-4.1, - ) - axlegend.grid(False) - axlegend.set_xticks([]) - axlegend.set_yticks([]) - axlegend.patch.set_alpha(0.0) - for loc in ("top", "bottom", "left", "right"): - axlegend.spines[loc].set_color("none") - axlegend.spines[loc].set_visible(False) - - axlegend.legend( - handles=[ - Patch(color=colors[i], label=l) - for i, l in enumerate(segments.keys()) - ], - loc="upper center", - bbox_to_anchor=(0.5, 0), - shadow=False, - fancybox=False, - ncol=min(len(segments.keys()), 5), - frameon=False, - prop={'size': 8} - ) - - if output_file is not None: - figure = plt.gcf() - figure.savefig(output_file, bbox_inches="tight") - plt.close(figure) - figure = None - return output_file - - return gs - - -def spikesplot( - ts_z, - outer_gs=None, - tr=None, - zscored=True, - spike_thresh=6.0, - title="Spike plot", - ax=None, - cmap="viridis", - hide_x=True, - nskip=0, -): - """ - A spikes plot. Thanks to Bob Dogherty (this docstring needs be improved with proper ack) - """ - - if ax is None: - ax = plt.gca() - - if outer_gs is not None: - gs = mgs.GridSpecFromSubplotSpec( - 1, 2, subplot_spec=outer_gs, width_ratios=[1, 100], wspace=0.0 - ) - ax = plt.subplot(gs[1]) - - # Define TR and number of frames - if tr is None: - tr = 1.0 - - # Load timeseries, zscored slice-wise - nslices = ts_z.shape[0] - ntsteps = ts_z.shape[1] - - # Load a colormap - my_cmap = cm.get_cmap(cmap) - norm = Normalize(vmin=0, vmax=float(nslices - 1)) - colors = [my_cmap(norm(sl)) for sl in range(nslices)] - - stem = len(np.unique(ts_z).tolist()) == 2 - # Plot one line per axial slice timeseries - for sl in range(nslices): - if not stem: - ax.plot(ts_z[sl, :], color=colors[sl], lw=0.5) - else: - markerline, stemlines, baseline = ax.stem(ts_z[sl, :]) - plt.setp(markerline, "markerfacecolor", colors[sl]) - plt.setp(baseline, "color", colors[sl], "linewidth", 1) - plt.setp(stemlines, "color", colors[sl], "linewidth", 1) - - # Handle X, Y axes - ax.grid(False) - - # Handle X axis - last = ntsteps - 1 - ax.set_xlim(0, last) - xticks = list(range(0, last)[::20]) + [last] if not hide_x else [] - ax.set_xticks(xticks) - - if not hide_x: - if tr is None: - ax.set_xlabel("time (frame #)") - else: - ax.set_xlabel("time (s)") - ax.set_xticklabels(["%.02f" % t for t in (tr * np.array(xticks)).tolist()]) - - # Handle Y axis - ylabel = "slice-wise noise average on background" - if zscored: - ylabel += " (z-scored)" - zs_max = np.abs(ts_z).max() - ax.set_ylim( - ( - -(np.abs(ts_z[:, nskip:]).max()) * 1.05, - (np.abs(ts_z[:, nskip:]).max()) * 1.05, - ) - ) - - ytick_vals = np.arange(0.0, zs_max, float(np.floor(zs_max / 2.0))) - yticks = list(reversed((-1.0 * ytick_vals[ytick_vals > 0]).tolist())) + ytick_vals.tolist() - - # TODO plot min/max or mark spikes - # yticks.insert(0, 
ts_z.min()) - # yticks += [ts_z.max()] - for val in ytick_vals: - ax.plot((0, ntsteps - 1), (-val, -val), "k:", alpha=0.2) - ax.plot((0, ntsteps - 1), (val, val), "k:", alpha=0.2) - - # Plot spike threshold - if zs_max < spike_thresh: - ax.plot((0, ntsteps - 1), (-spike_thresh, -spike_thresh), "k:") - ax.plot((0, ntsteps - 1), (spike_thresh, spike_thresh), "k:") - else: - yticks = [ - ts_z[:, nskip:].min(), - np.median(ts_z[:, nskip:]), - ts_z[:, nskip:].max(), - ] - ax.set_ylim(0, max(yticks[-1] * 1.05, (yticks[-1] - yticks[0]) * 2.0 + yticks[-1])) - # ax.set_ylim(ts_z[:, nskip:].min() * 0.95, - # ts_z[:, nskip:].max() * 1.05) - - ax.annotate( - ylabel, - xy=(0.0, 0.7), - xycoords="axes fraction", - xytext=(0, 0), - textcoords="offset points", - va="center", - ha="left", - color="gray", - size=4, - bbox={ - "boxstyle": "round", - "fc": "w", - "ec": "none", - "color": "none", - "lw": 0, - "alpha": 0.8, - }, - ) - ax.set_yticks([]) - ax.set_yticklabels([]) - - # if yticks: - # # ax.set_yticks(yticks) - # # ax.set_yticklabels(['%.02f' % y for y in yticks]) - # # Plot maximum and minimum horizontal lines - # ax.plot((0, ntsteps - 1), (yticks[0], yticks[0]), 'k:') - # ax.plot((0, ntsteps - 1), (yticks[-1], yticks[-1]), 'k:') - - for side in ["top", "right"]: - ax.spines[side].set_color("none") - ax.spines[side].set_visible(False) - - if not hide_x: - ax.spines["bottom"].set_position(("outward", 10)) - ax.xaxis.set_ticks_position("bottom") - else: - ax.spines["bottom"].set_color("none") - ax.spines["bottom"].set_visible(False) - - # ax.spines["left"].set_position(('outward', 30)) - # ax.yaxis.set_ticks_position('left') - ax.spines["left"].set_visible(False) - ax.spines["left"].set_color(None) - - # labels = [label for label in ax.yaxis.get_ticklabels()] - # labels[0].set_weight('bold') - # labels[-1].set_weight('bold') - if title: - ax.set_title(title) - return ax - - -def spikesplot_cb(position, cmap="viridis", fig=None): - # Add colorbar - if fig is None: - fig = plt.gcf() - - cax = fig.add_axes(position) - cb = ColorbarBase( - cax, - cmap=cm.get_cmap(cmap), - spacing="proportional", - orientation="horizontal", - drawedges=False, - ) - cb.set_ticks([0, 0.5, 1.0]) - cb.set_ticklabels(["Inferior", "(axial slice)", "Superior"]) - cb.outline.set_linewidth(0) - cb.ax.xaxis.set_tick_params(width=0) - return cax - - -def confoundplot( - tseries, - gs_ts, - gs_dist=None, - name=None, - units=None, - tr=None, - hide_x=True, - color="b", - nskip=0, - cutoff=None, - ylims=None, -): - import seaborn as sns - - # Define TR and number of frames - notr = False - if tr is None: - notr = True - tr = 1.0 - ntsteps = len(tseries) - tseries = np.array(tseries) - - # Define nested GridSpec - gs = mgs.GridSpecFromSubplotSpec(1, 2, subplot_spec=gs_ts, width_ratios=[1, 100], wspace=0.0) - - ax_ts = plt.subplot(gs[1]) - ax_ts.grid(False) - - # Set 10 frame markers in X axis - interval = max((ntsteps // 10, ntsteps // 5, 1)) - xticks = list(range(0, ntsteps)[::interval]) - ax_ts.set_xticks(xticks) - - if not hide_x: - if notr: - ax_ts.set_xlabel("time (frame #)") - else: - ax_ts.set_xlabel("time (s)") - labels = tr * np.array(xticks) - ax_ts.set_xticklabels(["%.02f" % t for t in labels.tolist()]) - else: - ax_ts.set_xticklabels([]) - - if name is not None: - if units is not None: - name += " [%s]" % units - - ax_ts.annotate( - name, - xy=(0.0, 0.7), - xytext=(0, 0), - xycoords="axes fraction", - textcoords="offset points", - va="center", - ha="left", - color=color, - size=8, - bbox={ - "boxstyle": "round", - "fc": 
"w", - "ec": "none", - "color": "none", - "lw": 0, - "alpha": 0.8, - }, - ) - - for side in ["top", "right"]: - ax_ts.spines[side].set_color("none") - ax_ts.spines[side].set_visible(False) - - if not hide_x: - ax_ts.spines["bottom"].set_position(("outward", 20)) - ax_ts.xaxis.set_ticks_position("bottom") - else: - ax_ts.spines["bottom"].set_color("none") - ax_ts.spines["bottom"].set_visible(False) - - # ax_ts.spines["left"].set_position(('outward', 30)) - ax_ts.spines["left"].set_color("none") - ax_ts.spines["left"].set_visible(False) - # ax_ts.yaxis.set_ticks_position('left') - - ax_ts.set_yticks([]) - ax_ts.set_yticklabels([]) - - nonnan = tseries[~np.isnan(tseries)] - if nonnan.size > 0: - # Calculate Y limits - valrange = nonnan.max() - nonnan.min() - def_ylims = [nonnan.min() - 0.1 * valrange, nonnan.max() + 0.1 * valrange] - if ylims is not None: - if ylims[0] is not None: - def_ylims[0] = min([def_ylims[0], ylims[0]]) - if ylims[1] is not None: - def_ylims[1] = max([def_ylims[1], ylims[1]]) - - # Add space for plot title and mean/SD annotation - def_ylims[0] -= 0.1 * (def_ylims[1] - def_ylims[0]) - - ax_ts.set_ylim(def_ylims) - - # Annotate stats - maxv = nonnan.max() - mean = nonnan.mean() - stdv = nonnan.std() - p95 = np.percentile(nonnan, 95.0) - else: - maxv = 0 - mean = 0 - stdv = 0 - p95 = 0 - - stats_label = ( - r"max: {max:.3f}{units} $\bullet$ mean: {mean:.3f}{units} " - r"$\bullet$ $\sigma$: {sigma:.3f}" - ).format(max=maxv, mean=mean, units=units or "", sigma=stdv) - ax_ts.annotate( - stats_label, - xy=(0.98, 0.7), - xycoords="axes fraction", - xytext=(0, 0), - textcoords="offset points", - va="center", - ha="right", - color=color, - size=4, - bbox={ - "boxstyle": "round", - "fc": "w", - "ec": "none", - "color": "none", - "lw": 0, - "alpha": 0.8, - }, - ) - - # Annotate percentile 95 - ax_ts.plot((0, ntsteps - 1), [p95] * 2, linewidth=0.1, color="lightgray") - ax_ts.annotate( - "%.2f" % p95, - xy=(0, p95), - xytext=(-1, 0), - textcoords="offset points", - va="center", - ha="right", - color="lightgray", - size=3, - ) - - if cutoff is None: - cutoff = [] - - for i, thr in enumerate(cutoff): - ax_ts.plot((0, ntsteps - 1), [thr] * 2, linewidth=0.2, color="dimgray") - - ax_ts.annotate( - "%.2f" % thr, - xy=(0, thr), - xytext=(-1, 0), - textcoords="offset points", - va="center", - ha="right", - color="dimgray", - size=3, - ) - - ax_ts.plot(tseries, color=color, linewidth=0.8) - ax_ts.set_xlim((0, ntsteps - 1)) - - if gs_dist is not None: - ax_dist = plt.subplot(gs_dist) - sns.displot(tseries, vertical=True, ax=ax_dist) - ax_dist.set_xlabel("Timesteps") - ax_dist.set_ylim(ax_ts.get_ylim()) - ax_dist.set_yticklabels([]) - - return [ax_ts, ax_dist], gs - return ax_ts, gs - - -def compcor_variance_plot( - metadata_files, - metadata_sources=None, - output_file=None, - varexp_thresh=(0.5, 0.7, 0.9), - fig=None, -): - """ - Parameters - ---------- - metadata_files: list - List of paths to files containing component metadata. If more than one - decomposition has been performed (e.g., anatomical and temporal - CompCor decompositions), then all metadata files can be provided in - the list. However, each metadata file should have a corresponding - entry in `metadata_sources`. - metadata_sources: list or None - List of source names (e.g., ['aCompCor']) for decompositions. This - list should be of the same length as `metadata_files`. - output_file: str or None - Path where the output figure should be saved. 
If this is not defined, - then the plotting axes will be returned instead of the saved figure - path. - varexp_thresh: tuple - Set of variance thresholds to include in the plot (default 0.5, 0.7, - 0.9). - fig: figure or None - Existing figure on which to plot. - - Returns - ------- - ax: axes - Plotting axes. Returned only if the `output_file` parameter is None. - output_file: str - The file where the figure is saved. - """ - metadata = {} - if metadata_sources is None: - if len(metadata_files) == 1: - metadata_sources = ["CompCor"] - else: - metadata_sources = ["Decomposition {:d}".format(i) for i in range(len(metadata_files))] - for file, source in zip(metadata_files, metadata_sources): - metadata[source] = pd.read_csv(str(file), sep=r"\s+") - metadata[source]["source"] = source - metadata = pd.concat(list(metadata.values())) - bbox_txt = { - "boxstyle": "round", - "fc": "white", - "ec": "none", - "color": "none", - "linewidth": 0, - "alpha": 0.8, - } - - decompositions = [] - data_sources = list(metadata.groupby(["source", "mask"]).groups.keys()) - for source, mask in data_sources: - if not np.isnan( - metadata.loc[(metadata["source"] == source) & (metadata["mask"] == mask)][ - "singular_value" - ].values[0] - ): - decompositions.append((source, mask)) - - if fig is not None: - ax = [fig.add_subplot(1, len(decompositions), i + 1) for i in range(len(decompositions))] - elif len(decompositions) > 1: - fig, ax = plt.subplots(1, len(decompositions), figsize=(5 * len(decompositions), 5)) - else: - ax = [plt.axes()] - - for m, (source, mask) in enumerate(decompositions): - components = metadata[(metadata["mask"] == mask) & (metadata["source"] == source)] - if len([m for s, m in decompositions if s == source]) > 1: - title_mask = " ({} mask)".format(mask) - else: - title_mask = "" - fig_title = "{}{}".format(source, title_mask) - - ax[m].plot( - np.arange(components.shape[0] + 1), - [0] + list(100 * components["cumulative_variance_explained"]), - color="purple", - linewidth=2.5, - ) - ax[m].grid(False) - ax[m].set_xlabel("number of components in model") - ax[m].set_ylabel("cumulative variance explained (%)") - ax[m].set_title(fig_title) - - varexp = {} - - for i, thr in enumerate(varexp_thresh): - varexp[thr] = ( - np.atleast_1d(np.searchsorted(components["cumulative_variance_explained"], thr)) - + 1 - ) - ax[m].axhline(y=100 * thr, color="lightgrey", linewidth=0.25) - ax[m].axvline(x=varexp[thr], color="C{}".format(i), linewidth=2, linestyle=":") - ax[m].text( - 0, - 100 * thr, - "{:.0f}".format(100 * thr), - fontsize="x-small", - bbox=bbox_txt, - ) - ax[m].text( - varexp[thr][0], - 25, - "{} components explain\n{:.0f}% of variance".format(varexp[thr][0], 100 * thr), - rotation=90, - horizontalalignment="center", - fontsize="xx-small", - bbox=bbox_txt, - ) - - ax[m].set_yticks([]) - ax[m].set_yticklabels([]) - for tick in ax[m].xaxis.get_major_ticks(): - tick.label.set_fontsize("x-small") - tick.label.set_rotation("vertical") - for side in ["top", "right", "left"]: - ax[m].spines[side].set_color("none") - ax[m].spines[side].set_visible(False) - - if output_file is not None: - figure = plt.gcf() - figure.savefig(output_file, bbox_inches="tight") - plt.close(figure) - figure = None - return output_file - return ax - - -def confounds_correlation_plot( - confounds_file, - columns=None, - figure=None, - max_dim=20, - output_file=None, - reference="global_signal", -): - """ - Generate a bar plot with the correlation of confounds. 
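-
-    A minimal sketch of the intended call (the TSV path is a placeholder for
-    a BIDS-Derivatives confounds file, not a file shipped with this package)::
-
-        confounds_correlation_plot(
-            confounds_file="sub-01_desc-confounds_timeseries.tsv",
-            reference="global_signal",
-            max_dim=20,
-            output_file="confounds_correlation.svg",
-        )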
- - Parameters - ---------- - confounds_file: :obj:`str` - File containing all confound regressors to be included in the - correlation plot. - figure: figure or None - Existing figure on which to plot. - columns: :obj:`list` or :obj:`None`. - Select a list of columns from the dataset. - max_dim: :obj:`int` - The maximum number of regressors to be included in the output plot. - Reductions (e.g., CompCor) of high-dimensional data can yield so many - regressors that the correlation structure becomes obfuscated. This - criterion selects the ``max_dim`` regressors that have the largest - correlation magnitude with ``reference`` for inclusion in the plot. - output_file: :obj:`str` or :obj:`None` - Path where the output figure should be saved. If this is not defined, - then the plotting axes will be returned instead of the saved figure - path. - reference: :obj:`str` - ``confounds_correlation_plot`` prepares a bar plot of the correlations - of each confound regressor with a reference column. By default, this - is the global signal (so that collinearities with the global signal - can readily be assessed). - - Returns - ------- - axes and gridspec - Plotting axes and gridspec. Returned only if ``output_file`` is ``None``. - output_file: :obj:`str` - The file where the figure is saved. - """ - import seaborn as sns - - confounds_data = pd.read_table(confounds_file) - - if columns: - columns = dict.fromkeys(columns) # Drop duplicates - columns[reference] = None # Make sure the reference is included - confounds_data = confounds_data[list(columns)] - - confounds_data = confounds_data.loc[ - :, np.logical_not(np.isclose(confounds_data.var(skipna=True), 0)) - ] - corr = confounds_data.corr() - - gscorr = corr.copy() - gscorr["index"] = gscorr.index - gscorr[reference] = np.abs(gscorr[reference]) - gs_descending = gscorr.sort_values(by=reference, ascending=False)["index"] - n_vars = corr.shape[0] - max_dim = min(n_vars, max_dim) - - gs_descending = gs_descending[:max_dim] - features = [p for p in corr.columns if p in gs_descending] - corr = corr.loc[features, features] - np.fill_diagonal(corr.values, 0) - - if figure is None: - plt.figure(figsize=(15, 5)) - gs = mgs.GridSpec(1, 21) - ax0 = plt.subplot(gs[0, :10]) - ax1 = plt.subplot(gs[0, 11:]) - - mask = np.zeros_like(corr, dtype=bool) - mask[np.triu_indices_from(mask)] = True - sns.heatmap(corr, linewidths=0.5, cmap="coolwarm", center=0, square=True, ax=ax0) - ax0.tick_params(axis="both", which="both", width=0) - - for tick in ax0.xaxis.get_major_ticks(): - tick.label.set_fontsize("small") - for tick in ax0.yaxis.get_major_ticks(): - tick.label.set_fontsize("small") - sns.barplot( - data=gscorr, - x="index", - y=reference, - ax=ax1, - order=gs_descending, - palette="Reds_d", - saturation=0.5, - ) - - ax1.set_xlabel("Confound time series") - ax1.set_ylabel("Magnitude of correlation with {}".format(reference)) - ax1.tick_params(axis="x", which="both", width=0) - ax1.tick_params(axis="y", which="both", width=5, length=5) - - for tick in ax1.xaxis.get_major_ticks(): - tick.label.set_fontsize("small") - tick.label.set_rotation("vertical") - for tick in ax1.yaxis.get_major_ticks(): - tick.label.set_fontsize("small") - for side in ["top", "right", "left"]: - ax1.spines[side].set_color("none") - ax1.spines[side].set_visible(False) - - if output_file is not None: - figure = plt.gcf() - figure.savefig(output_file, bbox_inches="tight") - plt.close(figure) - figure = None - return output_file - return [ax0, ax1], gs - - -def cifti_surfaces_plot( - in_cifti, - 
density="32k", - surface_type="inflated", - clip_range=(0, None), - output_file=None, - **kwargs, -): - """ - Plots a CIFTI-2 dense timeseries onto left/right mesh surfaces. - - Parameters - ---------- - in_cifti : str - CIFTI-2 dense timeseries (.dtseries.nii) - density : str - Surface density - surface_type : str - Inflation level of mesh surfaces. Supported: midthickness, inflated, veryinflated - clip_range : tuple or None - Range to clip `in_cifti` data prior to plotting. - If not None, two values must be provided as lower and upper bounds. - If values are None, no clipping is performed for that bound. - output_file: :obj:`str` or :obj:`None` - Path where the output figure should be saved. If this is not defined, - then the figure will be returned. - kwargs : dict - Keyword arguments for :obj:`nilearn.plotting.plot_surf` - - Outputs - ------- - figure : matplotlib.pyplot.figure - Surface plot figure. Returned only if ``output_file`` is ``None``. - output_file: :obj:`str` - The file where the figure is saved. - """ - from nilearn.plotting import plot_surf - - def get_surface_meshes(density, surface_type): - import templateflow.api as tf - - lh, rh = tf.get("fsLR", density=density, suffix=surface_type, extension=[".surf.gii"]) - return str(lh), str(rh) - - if density != "32k": - raise NotImplementedError("Only 32k density is currently supported.") - - img = nb.cifti2.load(in_cifti) - if img.nifti_header.get_intent()[0] != "ConnDenseSeries": - raise TypeError(f"{in_cifti} is not a dense timeseries CIFTI file") - - geo = img.header.get_index_map(1) - left_cortex, right_cortex = None, None - for bm in geo.brain_models: - if bm.brain_structure == "CIFTI_STRUCTURE_CORTEX_LEFT": - left_cortex = bm - elif bm.brain_structure == "CIFTI_STRUCTURE_CORTEX_RIGHT": - right_cortex = bm - - if left_cortex is None or right_cortex is None: - raise RuntimeError("CIFTI is missing cortex information") - - # calculate an average of the BOLD data, excluding the first 5 volumes - # as potential nonsteady states - data = img.dataobj[5:20].mean(axis=0) - - counts = (left_cortex.index_count, right_cortex.index_count) - if density == "32k" and counts != (29696, 29716): - raise ValueError("Cortex data is not in fsLR space") - - # medial wall needs to be added back in - lh_data = np.full(left_cortex.surface_number_of_vertices, np.nan) - rh_data = np.full(right_cortex.surface_number_of_vertices, np.nan) - lh_data[left_cortex.vertex_indices] = _concat_brain_struct_data([left_cortex], data) - rh_data[right_cortex.vertex_indices] = _concat_brain_struct_data([right_cortex], data) - - if clip_range: - lh_data = np.clip(lh_data, clip_range[0], clip_range[1], out=lh_data) - rh_data = np.clip(rh_data, clip_range[0], clip_range[1], out=rh_data) - mn, mx = clip_range - else: - mn, mx = None, None - - if mn is None: - mn = np.min(data) - if mx is None: - mx = np.max(data) - - cmap = kwargs.pop('cmap', 'YlOrRd_r') - cbar_map = cm.ScalarMappable(norm=Normalize(mn, mx), cmap=cmap) - - # Make background maps that rescale to a medium gray - lh_bg = np.zeros(lh_data.shape, 'int8') - rh_bg = np.zeros(rh_data.shape, 'int8') - lh_bg[:2] = [3, -2] - rh_bg[:2] = [3, -2] - - lh_mesh, rh_mesh = get_surface_meshes(density, surface_type) - lh_kwargs = dict(surf_mesh=lh_mesh, surf_map=lh_data, bg_map=lh_bg) - rh_kwargs = dict(surf_mesh=rh_mesh, surf_map=rh_data, bg_map=rh_bg) - - # Build the figure - figure = plt.figure(figsize=plt.figaspect(0.25), constrained_layout=True) - for i, view in enumerate(('lateral', 'medial')): - for j, hemi in 
enumerate(('left', 'right')): - title = f'{hemi.title()} - {view.title()}' - ax = figure.add_subplot(1, 4, i * 2 + j + 1, projection='3d', rasterized=True) - hemi_kwargs = (lh_kwargs, rh_kwargs)[j] - plot_surf( - hemi=hemi, - view=view, - title=title, - cmap=cmap, - vmin=mn, - vmax=mx, - axes=ax, - **hemi_kwargs, - **kwargs - ) - # plot_surf sets this to 8, which seems a little far out, but 6 starts clipping - ax.dist = 7 - - figure.colorbar(cbar_map, shrink=0.2, ax=figure.axes, location='bottom') - - if output_file is not None: - figure.savefig(output_file, bbox_inches="tight", dpi=400) - plt.close(figure) - return output_file - - return figure - - -def _concat_brain_struct_data(structs, data): - concat_data = np.array([], dtype=data.dtype) - for struct in structs: - struct_upper_bound = struct.index_offset + struct.index_count - struct_data = data[struct.index_offset:struct_upper_bound] - concat_data = np.concatenate((concat_data, struct_data)) - return concat_data - - -def _ward_to_linkage(children, n_leaves, distances): - """Create linkage matrix from the output of Ward clustering.""" - # create the counts of samples under each node - counts = np.zeros(children.shape[0]) - n_samples = n_leaves - for i, merge in enumerate(children): - current_count = 0 - for child_idx in merge: - current_count += 1 if child_idx < n_samples else counts[child_idx - n_samples] - counts[i] = current_count - - return np.column_stack([children, distances, counts]).astype(float) +import warnings + +from nireports.reportlets.nuisance import ( + confoundplot, + confounds_correlation_plot, + plot_carpet, + spikesplot, + spikesplot_cb, +) +from nireports.reportlets.surface import cifti_surfaces_plot +from nireports.reportlets.xca import compcor_variance_plot +from nireports.reportlets.modality.func import fMRIPlot + +__all__ = ( + "cifti_surfaces_plot", + "compcor_variance_plot", + "confoundplot", + "confounds_correlation_plot", + "fMRIPlot", + "plot_carpet", + "spikesplot", + "spikesplot_cb", +) + +warnings.warn("Please use nireports.reportlets", DeprecationWarning) diff --git a/niworkflows/viz/utils.py b/niworkflows/viz/utils.py index 25d2b9ef61c..adc65697707 100644 --- a/niworkflows/viz/utils.py +++ b/niworkflows/viz/utils.py @@ -21,706 +21,36 @@ # https://www.nipreps.org/community/licensing/ # """Helper tools for visualization purposes.""" -from pathlib import Path -from shutil import which -from tempfile import TemporaryDirectory -import subprocess -import base64 -import re -from uuid import uuid4 -from io import StringIO - -import numpy as np -import nibabel as nb - -from nipype.utils import filemanip -from .. 
import NIWORKFLOWS_LOG -from ..utils.images import rotation2canonical, rotate_affine - - -SVGNS = "http://www.w3.org/2000/svg" - - -def robust_set_limits(data, plot_params, percentiles=(15, 99.8)): - """Set (vmax, vmin) based on percentiles of the data.""" - plot_params["vmin"] = plot_params.get("vmin", np.percentile(data, percentiles[0])) - plot_params["vmax"] = plot_params.get("vmax", np.percentile(data, percentiles[1])) - return plot_params - - -def svg_compress(image, compress="auto"): - """Generate a blob SVG from a matplotlib figure, may perform compression.""" - # Check availability of svgo and cwebp - has_compress = all((which("svgo"), which("cwebp"))) - if compress is True and not has_compress: - raise RuntimeError( - "Compression is required, but svgo or cwebp are not installed" - ) - else: - compress = (compress is True or compress == "auto") and has_compress - - # Compress the SVG file using SVGO - if compress: - cmd = "svgo -i - -o - -q -p 3 --pretty" - try: - pout = subprocess.run( - cmd, - input=image.encode("utf-8"), - stdout=subprocess.PIPE, - shell=True, - check=True, - close_fds=True, - ).stdout - except OSError as e: - from errno import ENOENT - - if compress is True and e.errno == ENOENT: - raise e - else: - image = pout.decode("utf-8") - - # Convert all of the rasters inside the SVG file with 80% compressed WEBP - if compress: - new_lines = [] - with StringIO(image) as fp: - for line in fp: - if "image/png" in line: - tmp_lines = [line] - while "/>" not in line: - line = fp.readline() - tmp_lines.append(line) - content = "".join(tmp_lines).replace("\n", "").replace(", ", ",") - - left = content.split("base64,")[0] + "base64," - left = left.replace("image/png", "image/webp") - right = content.split("base64,")[1] - png_b64 = right.split('"')[0] - right = '"' + '"'.join(right.split('"')[1:]) - - cmd = "cwebp -quiet -noalpha -q 80 -o - -- -" - pout = subprocess.run( - cmd, - input=base64.b64decode(png_b64), - shell=True, - stdout=subprocess.PIPE, - check=True, - close_fds=True, - ).stdout - webpimg = base64.b64encode(pout).decode("utf-8") - new_lines.append(left + webpimg + right) - else: - new_lines.append(line) - lines = new_lines - else: - lines = image.splitlines() - - svg_start = 0 - for i, line in enumerate(lines): - if " 0.0 - - # First, project the number of masked voxels on each axes - ijk_counts = [ - mask_data.sum(2).sum(1), # project sagittal planes to transverse (i) axis - mask_data.sum(2).sum(0), # project coronal planes to to longitudinal (j) axis - mask_data.sum(1).sum(0), # project axial planes to vertical (k) axis - ] - - # If all voxels are masked in a slice (say that happens at k=10), - # then the value for ijk_counts for the projection to k (ie. ijk_counts[2]) - # at that element of the orthogonal axes (ijk_counts[2][10]) is - # the total number of voxels in that slice (ie. Ni x Nj). 
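-    # A tiny worked example (hypothetical 2x2x2 mask): if every voxel of
-    # axial slice k=1 is masked, mask_data.sum(1).sum(0) yields 4 at k=1,
-    # i.e. exactly Ni x Nj = 2 x 2.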
- # Here we define some thresholds to consider the plane as "masked" - # The thresholds vary because of the shape of the brain - # I have manually found that for the axial view requiring 30% - # of the slice elements to be masked drops almost empty boxes - # in the mosaic of axial planes (and also addresses #281) - ijk_th = np.ceil([ - (mask_data.shape[1] * mask_data.shape[2]) * 0.2, # sagittal - (mask_data.shape[0] * mask_data.shape[2]) * 0.1, # coronal - (mask_data.shape[0] * mask_data.shape[1]) * 0.3, # axial - ]).astype(int) - - vox_coords = np.zeros((4, cuts), dtype=np.float32) - vox_coords[-1, :] = 1.0 - for ax, (c, th) in enumerate(zip(ijk_counts, ijk_th)): - # Start with full plane if mask is seemingly empty - smin, smax = (0, mask_data.shape[ax] - 1) - - B = np.argwhere(c > th) - if B.size < cuts: # Threshold too high - B = np.argwhere(c > 0) - if B.size: - smin, smax = B.min(), B.max() - - vox_coords[ax, :] = np.linspace(smin, smax, num=cuts + 2)[1:-1] - - ras_coords = mask_nii.affine.dot(vox_coords)[:3, ...] - return {k: list(v) for k, v in zip(["x", "y", "z"], np.around(ras_coords, 3))} - - -def _3d_in_file(in_file): - """ if self.inputs.in_file is 3d, return it. - if 4d, pick an arbitrary volume and return that. - - if in_file is a list of files, return an arbitrary file from - the list, and an arbitrary volume from that file - """ - from nilearn import image as nlimage - - in_file = filemanip.filename_to_list(in_file)[0] - - try: - in_file = nb.load(in_file) - except AttributeError: - in_file = in_file - - if len(in_file.shape) == 3: - return in_file - - return nlimage.index_img(in_file, 0) - - -def plot_segs( - image_nii, - seg_niis, - out_file, - bbox_nii=None, - masked=False, - colors=None, - compress="auto", - **plot_params -): - """ - Generate a static mosaic with ROIs represented by their delimiting contour. - - Plot segmentation as contours over the image (e.g. anatomical). - seg_niis should be a list of files. mask_nii helps determine the cut - coordinates. plot_params will be passed on to nilearn plot_* functions. If - seg_niis is a list of size one, it behaves as if it was plotting the mask. - """ - from svgutils.transform import fromstring - from nilearn import image as nlimage - - plot_params = {} if plot_params is None else plot_params - - image_nii = _3d_in_file(image_nii) - canonical_r = rotation2canonical(image_nii) - image_nii = rotate_affine(image_nii, rot=canonical_r) - seg_niis = [rotate_affine(_3d_in_file(f), rot=canonical_r) for f in seg_niis] - data = image_nii.get_fdata() - - plot_params = robust_set_limits(data, plot_params) - - bbox_nii = ( - image_nii if bbox_nii is None - else rotate_affine(_3d_in_file(bbox_nii), rot=canonical_r) - ) - - if masked: - bbox_nii = nlimage.threshold_img(bbox_nii, 1e-3) - - cuts = cuts_from_bbox(bbox_nii, cuts=7) - plot_params["colors"] = colors or plot_params.get("colors", None) - out_files = [] - for d in plot_params.pop("dimensions", ("z", "x", "y")): - plot_params["display_mode"] = d - plot_params["cut_coords"] = cuts[d] - svg = _plot_anat_with_contours( - image_nii, segs=seg_niis, compress=compress, **plot_params - ) - # Find and replace the figure_1 id. 
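-        # (matplotlib names the first figure "figure_1" in its SVG output;
-        # a unique suffix avoids duplicated XML ids when several reportlets
-        # are inlined into a single HTML report.)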
- svg = svg.replace("figure_1", "segmentation-%s-%s" % (d, uuid4()), 1) - out_files.append(fromstring(svg)) - - return out_files - - -def _plot_anat_with_contours(image, segs=None, compress="auto", **plot_params): - from nilearn.plotting import plot_anat - - nsegs = len(segs or []) - plot_params = plot_params or {} - # plot_params' values can be None, however they MUST NOT - # be None for colors and levels from this point on. - colors = plot_params.pop("colors", None) or [] - levels = plot_params.pop("levels", None) or [] - missing = nsegs - len(colors) - if missing > 0: # missing may be negative - from seaborn import color_palette - - colors = colors + color_palette("husl", missing) - - colors = [[c] if not isinstance(c, list) else c for c in colors] - - if not levels: - levels = [[0.5]] * nsegs - - # anatomical - display = plot_anat(image, **plot_params) - - # remove plot_anat -specific parameters - plot_params.pop("display_mode") - plot_params.pop("cut_coords") - - plot_params["linewidths"] = 0.5 - for i in reversed(range(nsegs)): - plot_params["colors"] = colors[i] - display.add_contours(segs[i], levels=levels[i], **plot_params) - - svg = extract_svg(display, compress=compress) - display.close() - return svg - - -def plot_registration( - anat_nii, - div_id, - plot_params=None, - order=("z", "x", "y"), - cuts=None, - estimate_brightness=False, - label=None, - contour=None, - compress="auto", - dismiss_affine=False, -): - """ - Plots the foreground and background views - Default order is: axial, coronal, sagittal - """ - from svgutils.transform import fromstring - from nilearn.plotting import plot_anat - from nilearn import image as nlimage - - plot_params = {} if plot_params is None else plot_params - - # Use default MNI cuts if none defined - if cuts is None: - raise NotImplementedError # TODO - - # nilearn 0.10.0 uses Nifti-specific methods - anat_nii = nb.Nifti1Image.from_image(anat_nii) - - out_files = [] - if estimate_brightness: - plot_params = robust_set_limits(anat_nii.get_fdata().reshape(-1), plot_params) - - # FreeSurfer ribbon.mgz - if contour: - contour = nb.Nifti1Image.from_image(anat_nii) - - ribbon = contour is not None and np.array_equal( - np.unique(contour.get_fdata()), [0, 2, 3, 41, 42] - ) - - if ribbon: - contour_data = contour.get_fdata() % 39 - white = nlimage.new_img_like(contour, contour_data == 2) - pial = nlimage.new_img_like(contour, contour_data >= 2) - - if dismiss_affine: - canonical_r = rotation2canonical(anat_nii) - anat_nii = rotate_affine(anat_nii, rot=canonical_r) - if ribbon: - white = rotate_affine(white, rot=canonical_r) - pial = rotate_affine(pial, rot=canonical_r) - if contour: - contour = rotate_affine(contour, rot=canonical_r) - - # Plot each cut axis - for i, mode in enumerate(list(order)): - plot_params["display_mode"] = mode - plot_params["cut_coords"] = cuts[mode] - if i == 0: - plot_params["title"] = label - else: - plot_params["title"] = None - - # Generate nilearn figure - display = plot_anat(anat_nii, **plot_params) - if ribbon: - kwargs = {"levels": [0.5], "linewidths": 0.5} - display.add_contours(white, colors="b", **kwargs) - display.add_contours(pial, colors="r", **kwargs) - elif contour is not None: - display.add_contours(contour, colors="r", levels=[0.5], linewidths=0.5) - - svg = extract_svg(display, compress=compress) - display.close() - - # Find and replace the figure_1 id. 
- svg = svg.replace("figure_1", "%s-%s-%s" % (div_id, mode, uuid4()), 1) - out_files.append(fromstring(svg)) - - return out_files - - -def compose_view(bg_svgs, fg_svgs, ref=0, out_file="report.svg"): - """Compose the input svgs into one standalone svg with CSS flickering animation.""" - out_file = Path(out_file).absolute() - out_file.write_text("\n".join(_compose_view(bg_svgs, fg_svgs, ref=ref))) - return str(out_file) - - -def _compose_view(bg_svgs, fg_svgs, ref=0): - from svgutils.compose import Unit - from svgutils.transform import SVGFigure, GroupElement - - if fg_svgs is None: - fg_svgs = [] - - # Merge SVGs and get roots - svgs = bg_svgs + fg_svgs - roots = [f.getroot() for f in svgs] - - # Query the size of each - sizes = [] - for f in svgs: - viewbox = [float(v) for v in f.root.get("viewBox").split(" ")] - width = int(viewbox[2]) - height = int(viewbox[3]) - sizes.append((width, height)) - nsvgs = len(bg_svgs) - - sizes = np.array(sizes) - - # Calculate the scale to fit all widths - width = sizes[ref, 0] - scales = width / sizes[:, 0] - heights = sizes[:, 1] * scales - - # Compose the views panel: total size is the width of - # any element (used the first here) and the sum of heights - fig = SVGFigure(Unit(f"{width}px"), Unit(f"{heights[:nsvgs].sum()}px")) - - yoffset = 0 - for i, r in enumerate(roots): - r.moveto(0, yoffset, scale_x=scales[i]) - if i == (nsvgs - 1): - yoffset = 0 - else: - yoffset += heights[i] - - # Group background and foreground panels in two groups - if fg_svgs: - newroots = [ - GroupElement(roots[:nsvgs], {"class": "background-svg"}), - GroupElement(roots[nsvgs:], {"class": "foreground-svg"}), - ] - else: - newroots = roots - fig.append(newroots) - fig.root.attrib.pop("width", None) - fig.root.attrib.pop("height", None) - fig.root.set("preserveAspectRatio", "xMidYMid meet") - - with TemporaryDirectory() as tmpdirname: - out_file = Path(tmpdirname) / "tmp.svg" - fig.save(str(out_file)) - # Post processing - svg = out_file.read_text().splitlines() - - # Remove -@keyframes flickerAnimation%s { 0%% {opacity: 1;} 100%% { opacity: 0; }} -.foreground-svg { animation: 1s ease-in-out 0s alternate none infinite paused flickerAnimation%s;} -.foreground-svg:hover { animation-play-state: running;} -""" - % tuple([uuid4()] * 2), - ) - - return svg - - -def transform_to_2d(data, max_axis): - """ - Projects 3d data cube along one axis using maximum intensity with - preservation of the signs. Adapted from nilearn. - """ - import numpy as np - - # get the shape of the array we are projecting to - new_shape = list(data.shape) - del new_shape[max_axis] - - # generate a 3D indexing array that points to max abs value in the - # current projection - a1, a2 = np.indices(new_shape) - inds = [a1, a2] - inds.insert(max_axis, np.abs(data).argmax(axis=max_axis)) - - # take the values where the absolute value of the projection - # is the highest - maximum_intensity_data = data[tuple(inds)] - - return np.rot90(maximum_intensity_data) - - -def plot_melodic_components( - melodic_dir, - in_file, - tr=None, - out_file="melodic_reportlet.svg", - compress="auto", - report_mask=None, - noise_components_file=None, -): - """ - Plots the spatiotemporal components extracted by FSL MELODIC - from functional MRI data. - - Parameters - ---------- - melodic_dir : str - Path pointing to the outputs of MELODIC - in_file : str - Path pointing to the reference fMRI dataset. This file - will be used to extract the TR value, if the ``tr`` argument - is not set. 
This file will be used to calculate a mask - if ``report_mask`` is not provided. - tr : float - Repetition time in seconds - out_file : str - Path where the resulting SVG file will be stored - compress : ``'auto'`` or bool - Whether SVG should be compressed. If ``'auto'``, compression - will be executed if dependencies are installed (SVGO) - report_mask : str - Path to a brain mask corresponding to ``in_file`` - noise_components_file : str - A CSV file listing the indexes of components classified as noise - by some manual or automated (e.g. ICA-AROMA) procedure. If a - ``noise_components_file`` is provided, then components will be - plotted with red/green colors (correspondingly to whether they - are in the file -noise components, red-, or not -signal, green-). - When all or none of the components are in the file, a warning - is printed at the top. - - """ - from nilearn.image import index_img, iter_img - import nibabel as nb - import numpy as np - import pylab as plt - import seaborn as sns - from matplotlib.gridspec import GridSpec - import os - - sns.set_style("white") - current_palette = sns.color_palette() - in_nii = nb.load(in_file) - if not tr: - tr = in_nii.header.get_zooms()[3] - units = in_nii.header.get_xyzt_units() - if units: - if units[-1] == "msec": - tr = tr / 1000.0 - elif units[-1] == "usec": - tr = tr / 1000000.0 - elif units[-1] != "sec": - NIWORKFLOWS_LOG.warning( - "Unknown repetition time units " "specified - assuming seconds" - ) - else: - NIWORKFLOWS_LOG.warning( - "Repetition time units not specified - assuming seconds" - ) - - from nilearn.input_data import NiftiMasker - from nilearn.plotting import cm - - if not report_mask: - nifti_masker = NiftiMasker(mask_strategy="epi") - nifti_masker.fit(index_img(in_nii, range(2))) - mask_img = nifti_masker.mask_img_ - else: - mask_img = nb.load(report_mask) - - mask_sl = [] - for j in range(3): - mask_sl.append(transform_to_2d(mask_img.get_fdata(), j)) - - timeseries = np.loadtxt(os.path.join(melodic_dir, "melodic_mix")) - power = np.loadtxt(os.path.join(melodic_dir, "melodic_FTmix")) - stats = np.loadtxt(os.path.join(melodic_dir, "melodic_ICstats")) - n_components = stats.shape[0] - Fs = 1.0 / tr - Ny = Fs / 2 - f = Ny * (np.array(list(range(1, power.shape[0] + 1)))) / (power.shape[0]) - - # Set default colors - color_title = "k" - color_time = current_palette[0] - color_power = current_palette[1] - classified_colors = None - - warning_row = 0 # Do not allocate warning row - # Only if the components file has been provided, a warning banner will - # be issued if all or none of the components were classified as noise - if noise_components_file: - noise_components = np.loadtxt( - noise_components_file, dtype=int, delimiter=",", ndmin=1 - ) - # Activate warning row if pertinent - warning_row = int(noise_components.size in (0, n_components)) - classified_colors = {True: "r", False: "g"} - - n_rows = int((n_components + (n_components % 2)) / 2) - fig = plt.figure(figsize=(6.5 * 1.5, (n_rows + warning_row) * 0.85)) - gs = GridSpec( - n_rows * 2 + warning_row, - 9, - width_ratios=[1, 1, 1, 4, 0.001, 1, 1, 1, 4], - height_ratios=[5] * warning_row + [1.1, 1] * n_rows, - ) - - if warning_row: - ax = fig.add_subplot(gs[0, :]) - ncomps = "NONE of the" - if noise_components.size == n_components: - ncomps = "ALL" - ax.annotate( - "WARNING: {} components were classified as noise".format(ncomps), - xy=(0.0, 0.5), - xycoords="axes fraction", - xytext=(0.01, 0.5), - textcoords="axes fraction", - size=12, - color="#ea8800", - 
bbox=dict(boxstyle="round", fc="#f7dcb7", ec="#FC990E"), - ) - ax.axes.get_xaxis().set_visible(False) - ax.axes.get_yaxis().set_visible(False) - - titlefmt = "C{id:d}{noise}: Tot. var. expl. {var:.2g}%".format - ICs = nb.load(os.path.join(melodic_dir, "melodic_IC.nii.gz")) - # Ensure 4D - if ICs.ndim == 3: - ICs = ICs.slicer[..., None] - for i, img in enumerate(iter_img(ICs)): - - col = i % 2 - row = i // 2 - l_row = row * 2 + warning_row - is_noise = False - - if classified_colors: - # If a noise components list is provided, assign red/green - is_noise = (i + 1) in noise_components - color_title = color_time = color_power = classified_colors[is_noise] - - data = img.get_fdata() - for j in range(3): - ax1 = fig.add_subplot(gs[l_row:l_row + 2, j + col * 5]) - sl = transform_to_2d(data, j) - m = np.abs(sl).max() - ax1.imshow( - sl, vmin=-m, vmax=+m, cmap=cm.cold_white_hot, interpolation="nearest" - ) - ax1.contour(mask_sl[j], levels=[0.5], colors="k", linewidths=0.5) - plt.axis("off") - ax1.autoscale_view("tight") - if j == 0: - ax1.set_title( - titlefmt(id=i + 1, noise=" [noise]" * is_noise, var=stats[i, 1]), - x=0, - y=1.18, - fontsize=7, - horizontalalignment="left", - verticalalignment="top", - color=color_title, - ) - - ax2 = fig.add_subplot(gs[l_row, 3 + col * 5]) - ax3 = fig.add_subplot(gs[l_row + 1, 3 + col * 5]) - - ax2.plot( - np.arange(len(timeseries[:, i])) * tr, - timeseries[:, i], - linewidth=min(200 / len(timeseries[:, i]), 1.0), - color=color_time, - ) - ax2.set_xlim([0, len(timeseries[:, i]) * tr]) - ax2.axes.get_yaxis().set_visible(False) - ax2.autoscale_view("tight") - ax2.tick_params(axis="both", which="major", pad=0) - sns.despine(left=True, bottom=True) - for tick in ax2.xaxis.get_major_ticks(): - tick.label.set_fontsize(6) - tick.label.set_color(color_time) - - ax3.plot( - f[0:], - power[0:, i], - color=color_power, - linewidth=min(100 / len(power[0:, i]), 1.0), - ) - ax3.set_xlim([f[0], f.max()]) - ax3.axes.get_yaxis().set_visible(False) - ax3.autoscale_view("tight") - ax3.tick_params(axis="both", which="major", pad=0) - for tick in ax3.xaxis.get_major_ticks(): - tick.label.set_fontsize(6) - tick.label.set_color(color_power) - sns.despine(left=True, bottom=True) - - plt.subplots_adjust(hspace=0.5) - fig.savefig( - out_file, - dpi=300, - format="svg", - transparent=True, - bbox_inches="tight", - pad_inches=0.01, - ) - fig.clf() +import warnings + +from nireports.reportlets.utils import ( + compose_view, + cuts_from_bbox, + extract_svg, + robust_set_limits, + svg2str, + svg_compress, + transform_to_2d, + SVGNS, +) +from nireports.reportlets.mosaic import ( + plot_registration, + plot_segs, +) +from nireports.reportlets.xca import plot_melodic_components + +__all__ = ( + "compose_view", + "cuts_from_bbox", + "extract_svg", + "plot_melodic_components", + "plot_registration", + "plot_segs", + "robust_set_limits", + "svg2str", + "svg_compress", + "transform_to_2d", + "SVGNS", +) + +warnings.warn("Please use nireports.reportlets", DeprecationWarning) diff --git a/setup.cfg b/setup.cfg index f659a4db8af..407f5795491 100644 --- a/setup.cfg +++ b/setup.cfg @@ -34,6 +34,7 @@ install_requires = nilearn >= 0.5.2 nipype >= 1.8.5 traits < 6.4 + nireports >= 23.0.1 nitransforms >= 21.0.0 numpy packaging @@ -136,6 +137,9 @@ omit = */tests/* niworkflows/_version.py niworkflows/conftest.py + niworkflows/reports/* + niworkflows/interfaces/plotting.py + niworkflows/viz/* [coverage:report] # Regexes for lines to exclude from consideration