
Merge pull request #497 from flatironinstitute/dev
Dev
epnev authored Mar 27, 2019
2 parents 7180060 + 11f5715 commit 79384a6
Showing 49 changed files with 2,070 additions and 1,007 deletions.
2 changes: 1 addition & 1 deletion Dockerfile
@@ -2,7 +2,7 @@ FROM continuumio/anaconda3

RUN conda config --set always_yes yes
RUN conda update --yes conda
RUN apt-get install -y gcc g++ libgl1
RUN apt-get update && apt-get install -y gcc g++ libgl1
RUN mkdir src && cd src && git clone -b dev https://github.com/flatironinstitute/CaImAn.git && cd CaImAn && conda env create -n caiman -f environment.yml && conda install --override-channels -c conda-forge -n caiman pip
RUN /bin/bash -c "cd src/CaImAn && source activate caiman && /opt/conda/envs/caiman/bin/pip install ."
RUN /bin/bash -c "source activate caiman && caimanmanager.py install"
1 change: 1 addition & 0 deletions INSTALL-windows.md
@@ -3,6 +3,7 @@ The Windows installation process differs more widely from installation on Linux

### Process
* Increase the maximum size of your pagefile to 64G or more (http://www.tomshardware.com/faq/id-2864547/manage-virtual-memory-pagefile-windows.html) - the Windows memmap interface is sensitive to the maximum setting, and leaving it at the default can cause errors when processing larger datasets
* Remove any associations you may have made between .py files and an existing python interpreter or editor
* Download and install Anaconda (Python 3.x, not 2.x) <http://docs.continuum.io/anaconda/install>. Allow the installer to modify your PATH variable
* Use Conda to install git (with "conda install git") - use of another command-line git is acceptable, but may lead to issues depending on default settings
* Install Microsoft Build Tools for Visual Studio 2017 <https://www.visualstudio.com/downloads/#build-tools-for-visual-studio-2017>. Check the "Build Tools" box, and in the detailed view on the right check the "C/C++ CLI Tools" component too. The specifics of this occasionally change as Microsoft changes its products and website; you may need to go off-script.
95 changes: 39 additions & 56 deletions caiman/base/movies.py
@@ -28,6 +28,7 @@
from matplotlib import animation
import numpy as np
import os
from PIL import Image # $ pip install pillow
import pylab as pl
import scipy.ndimage
import scipy
@@ -41,11 +42,11 @@
import sys
import tifffile
from tqdm import tqdm
from typing import List, Tuple
import warnings
from zipfile import ZipFile
from PIL import Image # $ pip install pillow
import caiman as cm

import caiman as cm

from . import timeseries

@@ -106,12 +107,6 @@ def __new__(cls, input_arr, **kwargs):
else:
raise Exception('Input must be an ndarray, use load instead!')

def motion_correction_online(self, max_shift_w=25, max_shift_h=25, init_frames_template=100,
show_movie=False, bilateral_blur=False, template=None, min_count=1000):
return motion_correct_online(self, max_shift_w=max_shift_w, max_shift_h=max_shift_h,
init_frames_template=init_frames_template, show_movie=show_movie,
bilateral_blur=bilateral_blur, template=template, min_count=min_count)

def apply_shifts_online(self, xy_shifts, save_base_name=None):
# todo: todocument

@@ -427,7 +422,7 @@ def linf(x, a, b):
return a * x + b

try:
p0 = (y[0] - y[-1], 1e-6, y[-1])
p0:Tuple = (y[0] - y[-1], 1e-6, y[-1])
popt, _ = scipy.optimize.curve_fit(expf, x, y, p0=p0)
y_fit = expf(x, *popt)
except:
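
For orientation, a hedged, self-contained sketch of the exponential fit this hunk annotates; expf's exact form is assumed from the initial guess (a decaying exponential a*exp(-b*x) + c), and the data are synthetic:

```python
import numpy as np
import scipy.optimize

def expf(x, a, b, c):
    # assumed form, implied by p0 = (y[0] - y[-1], 1e-6, y[-1])
    return a * np.exp(-b * x) + c

x = np.arange(200, dtype=np.float64)
y = expf(x, 5.0, 0.01, 1.0) + 0.05 * np.random.randn(200)  # synthetic trace
p0 = (y[0] - y[-1], 1e-6, y[-1])
popt, _ = scipy.optimize.curve_fit(expf, x, y, p0=p0)
y_fit = expf(x, *popt)
```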
@@ -825,7 +820,7 @@ def resize(self, fx=1, fy=1, fz=1, interpolation=cv2.INTER_AREA):
max_els = 2**31 - 1
if elm > max_els:
chunk_size = old_div((max_els), d)
new_m = []
new_m:List = []
logging.debug('Resizing in chunks because of opencv bug')
for chunk in range(0, T, chunk_size):
logging.debug([chunk, np.minimum(chunk + chunk_size, T)])
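
A hedged sketch of the arithmetic behind the chunked resize above; OpenCV cannot address arrays with more than 2**31 - 1 elements, so the movie is processed one block of frames at a time (sizes here are hypothetical):

```python
T, d = 20000, 512 * 512     # hypothetical frame count and pixels per frame
max_els = 2**31 - 1         # OpenCV indexing limit
if T * d > max_els:
    chunk_size = max_els // d              # frames that fit under the limit
    for chunk in range(0, T, chunk_size):
        end = min(chunk + chunk_size, T)
        # resize frames [chunk:end] here and append the result to new_m
        print(chunk, end)
```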
@@ -1094,34 +1089,34 @@ def animate(i):
for i in range(10):
cv2.waitKey(100)

def load(file_name,fr=30,start_time=0,meta_data=None,subindices=None,shape=None,
var_name_hdf5 = 'mov', in_memory = False, is_behavior = False, bottom=0,
top=0, left=0, right=0, channel = None, outtype=np.float32):
def load(file_name, fr=30, start_time=0, meta_data=None, subindices=None,
shape=None, var_name_hdf5='mov', in_memory=False, is_behavior=False,
bottom=0, top=0, left=0, right=0, channel = None, outtype=np.float32):
"""
Load movie from file. Supports a variety of formats: tif, hdf5, npy, and memory-mapped files. Matlab support is experimental.
Args:
file_name: string
name of file. Possible extensions are tif, avi, npy (npz and hdf5 are usable only if saved by calblitz)
fr: float
frame rate
start_time: float
initial time for frame 1
meta_data: dict
dictionary containing meta information about the movie
subindices: iterable indexes
for loading only portion of the movie
shape: tuple of two values
dimension of the movie along x and y if loading from a two dimensional numpy array
num_frames_sub_idx:
when reading sbx format (experimental and unstable)
var_name_hdf5: str
if loading from hdf5 name of the variable to load
@@ -1140,39 +1135,45 @@ def load(file_name,fr=30,start_time=0,meta_data=None,subindices=None,shape=None,
Exception 'File not found!'
"""
# case we load movie from file
if max(top, bottom, left, right) > 0 and type(file_name) is str:
file_name = [file_name]

if type(file_name) is list:
if shape is not None:
raise Exception('shape not supported for multiple movie input')
logging.error('shape not supported for multiple movie input')

return load_movie_chain(file_name,fr=fr, start_time=start_time,
meta_data=meta_data, subindices=subindices,
bottom=bottom, top=top, left=left, right=right,
channel = channel, outtype=outtype)

if bottom != 0:
raise Exception('top bottom etc... not supported for single movie input')
if max(top, bottom, left, right) > 0:
logging.error('top bottom etc... not supported for single movie input')

if channel is not None:
raise Exception('channel not supported for single movie input')
logging.error('channel not supported for single movie input')

if os.path.exists(file_name):
_, extension = os.path.splitext(file_name)[:2]
extension = extension.lower()
if extension == '.tif' or extension == '.tiff': # load avi file
with tifffile.TiffFile(file_name) as tffl:
multi_page = True if tffl.series[0].shape[0] > 1 else False
if len(tffl.pages) == 1:
logging.warning('Your tif file is saved as a single page ' +
'file. Performance will be affected')
multi_page = False
if subindices is not None:
if type(subindices) is list:
try:
if multi_page:
input_arr = tffl.asarray(key=subindices[0])[:, subindices[1], subindices[2]]
except:
logging.warning('Your tif file is saved as a single page file. Performance will be affected')
else:
input_arr = tffl.asarray()
input_arr = input_arr[subindices[0], subindices[1], subindices[2]]
else:
try:
if multi_page:
input_arr = tffl.asarray(key=subindices)
except:
logging.warning('Your tif file is saved as a single page file. Performance will be affected')
else:
input_arr = tffl.asarray()
input_arr = input_arr[subindices]
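
A hedged usage sketch of the load() behavior this hunk changes; the file names are hypothetical, and note that several argument errors now log instead of raising:

```python
import caiman as cm

# Crop parameters with a single file: the string is wrapped in a list
# and delegated to load_movie_chain(), which applies the crop.
m = cm.load('example_movie.tif', fr=30, top=5, bottom=5)

# shape= combined with a list of movies is now logged as an error
# rather than raised, so the call proceeds and shape is ignored.
m2 = cm.load(['movie_a.tif', 'movie_b.tif'], shape=(512, 512))

# List-style subindices select frames, rows and columns in one pass.
m3 = cm.load('example_movie.tif',
             subindices=[slice(0, 100), slice(None), slice(None)])
```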

@@ -1287,25 +1288,7 @@ def rgb2gray(rgb):
with np.load(file_name) as f:
return movie(**f).astype(outtype)

# elif extension in ('.hdf5', '.h5'):
# with h5py.File(file_name, "r") as f:
# attrs = dict(f[var_name_hdf5].attrs)
# if meta_data in attrs:
# attrs['meta_data'] = cpk.loads(attrs['meta_data'])
#
# if subindices is None:
# return movie(f[var_name_hdf5], **attrs).astype(outtype)
# else:
# return movie(f[var_name_hdf5][subindices], **attrs).astype(outtype)

elif extension == '.h5_at':
with h5py.File(file_name, "r") as f:
if subindices is None:
return movie(f['quietBlock'], fr=fr).astype(outtype)
else:
return movie(f['quietBlock'][subindices], fr=fr).astype(outtype)

elif extension in ('.hdf5', '.h5'):
elif extension in ('.hdf5', '.h5', '.nwb'):
if is_behavior:
with h5py.File(file_name, "r") as f:
kk = list(f.keys())
@@ -1322,22 +1305,22 @@ def rgb2gray(rgb):
fkeys = list(f.keys())
if len(fkeys) == 1:
var_name_hdf5 = fkeys[0]
if var_name_hdf5 in fkeys:
if var_name_hdf5 in f:
if subindices is None:
images = np.array(f[var_name_hdf5]).squeeze()
if images.ndim > 3:
images = images[:, 0]
#if images.ndim > 3:
# images = images[:, 0]
else:
images = np.array(
f[var_name_hdf5][subindices]).squeeze()
if images.ndim > 3:
images = images[:, 0]
#if images.ndim > 3:
# images = images[:, 0]

#input_arr = images
return movie(images.astype(outtype))
else:
logging.debug('KEYS:' + str(f.keys()))
raise Exception('Key not found in hdf5n file')
raise Exception('Key not found in hdf5 file')

elif extension == '.mmap':
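
Stepping out of the diff: the hdf5 branch above now also claims .nwb files, auto-detects a single top-level key, and checks membership with `var_name_hdf5 in f`. A hedged sketch (paths and dataset names hypothetical):

```python
import caiman as cm

# One top-level dataset: the key is picked up automatically.
m = cm.load('session.hdf5')

# Several keys, or an NWB file: name the dataset explicitly.
m2 = cm.load('session.nwb', var_name_hdf5='mov', subindices=slice(0, 500))
```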

@@ -1626,7 +1609,7 @@ def from_zip_file_to_movie(zipfile_name, start_end = None):
start and end frame to extract
@return:
'''
mov = []
mov:List = []
print('unzipping file into movie object')
if start_end is not None:
num_frames = start_end[1] - start_end[0]
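
A possible use of the function this last hunk touches (archive name and frame range are hypothetical):

```python
from caiman.base.movies import from_zip_file_to_movie

# Build a movie from frames 0..499 of a zip of single-frame images.
mov = from_zip_file_to_movie('frames.zip', start_end=(0, 500))
```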
3 changes: 2 additions & 1 deletion caiman/base/rois.py
@@ -32,6 +32,7 @@
from skimage.draw import polygon
import tempfile
import time
from typing import List
import zipfile

from ..motion_correction import tile_and_correct
@@ -1260,7 +1261,7 @@ def detect_duplicates_and_subsets(binary_masks, predictions=None, r_values=None,
one, two = np.unravel_index(max_idx, overlap_tmp.shape)
max_val = overlap_tmp[one, two]

indeces_to_keep = []
indeces_to_keep:List = []
indeces_to_remove = []
while max_val > 0:
one, two = np.unravel_index(max_idx, overlap_tmp.shape)
20 changes: 13 additions & 7 deletions caiman/base/timeseries.py
@@ -19,6 +19,8 @@
import tifffile
import warnings

from caiman.paths import memmap_frames_filename

try:
cv2.setNumThreads(0)
except:
@@ -112,7 +114,8 @@ def __array_finalize__(self, obj):
self.file_name = getattr(obj, 'file_name', None)
self.meta_data = getattr(obj, 'meta_data', None)

def save(self, file_name, to32=True, order='F',imagej=False, bigtiff=True, software='CaImAn', compress=0):
def save(self, file_name, to32=True, order='F',imagej=False, bigtiff=True,
software='CaImAn', compress=0, var_name_hdf5='mov'):
"""
Save the timeseries in various formats
@@ -126,6 +129,9 @@ def save(self, file_name, to32=True, order='F',imagej=False, bigtiff=True, softw
order: 'F' or 'C'
C or Fortran order
var_name_hdf5: str
Name of the dataset within the hdf5 file
Raises:
Exception 'Extension Unknown'
@@ -191,10 +197,12 @@ def save(self, file_name, to32=True, order='F',imagej=False, bigtiff=True, softw

if self.meta_data[0] is None:
savemat(file_name, {'input_arr': np.rollaxis(
input_arr, axis=0, start=3), 'start_time': self.start_time, 'fr': self.fr, 'meta_data': [], 'file_name': f_name})
input_arr, axis=0, start=3), 'start_time': self.start_time,
'fr': self.fr, 'meta_data': [], 'file_name': f_name})
else:
savemat(file_name, {'input_arr': np.rollaxis(
input_arr, axis=0, start=3), 'start_time': self.start_time, 'fr': self.fr, 'meta_data': self.meta_data, 'file_name': f_name})
input_arr, axis=0, start=3), 'start_time': self.start_time,
'fr': self.fr, 'meta_data': self.meta_data, 'file_name': f_name})

elif extension in ('.hdf5', '.h5'):
with h5py.File(file_name, "w") as f:
@@ -203,7 +211,7 @@ def save(self, file_name, to32=True, order='F',imagej=False, bigtiff=True, softw
else:
input_arr = np.array(self)

dset = f.create_dataset("mov", data=input_arr)
dset = f.create_dataset(var_name_hdf5, data=input_arr)
dset.attrs["fr"] = self.fr
dset.attrs["start_time"] = self.start_time
try:
@@ -228,8 +236,7 @@ def save(self, file_name, to32=True, order='F',imagej=False, bigtiff=True, softw
input_arr = np.transpose(input_arr, list(range(1, len(dims) + 1)) + [0])
input_arr = np.reshape(input_arr, (np.prod(dims), T), order='F')

fname_tot = base_name + '_d1_' + str(dims[0]) + '_d2_' + str(dims[1]) + '_d3_' + str(
1 if len(dims) == 2 else dims[2]) + '_order_' + str(order) + '_frames_' + str(T) + '_.mmap'
fname_tot = memmap_frames_filename(base_name, dims, T, order)
fname_tot = os.path.join(os.path.split(file_name)[0], fname_tot)
big_mov = np.memmap(fname_tot, mode='w+', dtype=np.float32,
shape=(np.uint64(np.prod(dims)), np.uint64(T)), order=order)
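
The memmap_frames_filename helper replaces the inline filename assembly deleted above; a hedged sketch of the expected result, with the signature assumed from this call site and the pattern inferred from the old string:

```python
from caiman.paths import memmap_frames_filename

# Signature assumed from the call site above; the pattern is inferred
# from the deleted inline code.
fname = memmap_frames_filename('example', (512, 512), 1000, order='F')
# expected: 'example_d1_512_d2_512_d3_1_order_F_frames_1000_.mmap'
```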
@@ -253,7 +260,6 @@ def concatenate(*args, **kwargs):
"""
# todo: todocument return

obj = []
frRef = None
for arg in args:
for m in arg:
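
A hedged sketch of the new var_name_hdf5 option on save(); the dataset name 'raw' is arbitrary and the array is synthetic:

```python
import numpy as np
import caiman as cm

m = cm.movie(np.zeros((100, 64, 64), dtype=np.float32), fr=30)
m.save('example.hdf5', var_name_hdf5='raw')  # dataset 'raw' instead of the default 'mov'
```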
3 changes: 2 additions & 1 deletion caiman/cluster.py
@@ -411,7 +411,8 @@ def setup_cluster(backend='multiprocessing', n_processes=None, single_thread=Fal
'A cluster is already running. Terminate with dview.terminate() if you want to restart.')
if (platform.system() == 'Darwin') and (sys.version_info > (3, 0)):
try:
if 'kernel' in get_ipython().trait_names(): # If you're on OSX and you're running under Jupyter or Spyder,
if 'kernel' in get_ipython().trait_names(): # type: ignore
# If you're on OSX and you're running under Jupyter or Spyder,
# which already run the code in a forkserver-friendly way, this
# can eliminate some setup and make this a reasonable approach.
# Otherwise, setting VECLIB_MAXIMUM_THREADS=1 or using a different
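
For context, the usual start/stop pattern around the function patched above; a sketch based on the signature shown in the hunk header, with teardown via dview.terminate() as the log message recommends:

```python
import caiman as cm

c, dview, n_processes = cm.cluster.setup_cluster(
    backend='multiprocessing', n_processes=None, single_thread=False)
try:
    pass  # run parallel processing with dview here
finally:
    dview.terminate()
```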
18 changes: 5 additions & 13 deletions caiman/components_evaluation.py
@@ -18,6 +18,7 @@
import scipy
from scipy.sparse import csc_matrix
from scipy.stats import norm
from typing import Any, List
import warnings

from caiman.paths import caiman_datadir
@@ -165,7 +166,7 @@ def find_activity_intervals(C, Npeaks=5, tB=-3, tA=10, thres=0.3):
# todo todocument
import peakutils
K, T = np.shape(C)
L = []
L:List = []
for i in range(K):
if np.sum(np.abs(np.diff(C[i, :]))) == 0:
L.append([])
@@ -204,7 +205,7 @@ def classify_components_ep(Y, A, C, b, f, Athresh=0.1, Npeaks=5, tB=-3, tA=10, t
LOC = find_activity_intervals(C, Npeaks=Npeaks, tB=tB, tA=tA, thres=thres)
rval = np.zeros(K)

significant_samples = []
significant_samples:List[Any] = []
for i in range(K):
if i % 200 == 0: # Show status periodically
logging.info('Components evaluated:' + str(i))
@@ -414,20 +415,11 @@ def evaluate_components(Y, traces, A, C, b, f, final_frate, remove_baseline=True


#%%
# FIXME xrange is python2-specific
def chunker(seq, size):
for pos in xrange(0, len(seq), size):
yield seq[pos:pos + size]
#%%


def grouper(n, iterable, fillvalue=None):
"grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
try: # py3
return itertools.zip_longest(*args, fillvalue=fillvalue)
except: # py2
return itertools.izip_longest(*args, fillvalue=fillvalue)
return itertools.zip_longest(*args, fillvalue=fillvalue)

#%%
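
The simplified grouper above is Python-3 only now; a quick check against its own docstring example:

```python
import itertools

def grouper(n, iterable, fillvalue=None):
    "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
    args = [iter(iterable)] * n
    return itertools.zip_longest(*args, fillvalue=fillvalue)

print(list(grouper(3, 'ABCDEFG', 'x')))
# [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
```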

@@ -556,7 +548,7 @@ def select_components_from_metrics(A, dims, gSig, r_values, comp_SNR,
idx_components_r = np.where(r_values >= r_values_min)[0]
idx_components_raw = np.where(comp_SNR > min_SNR)[0]

idx_components = []
idx_components:Any = [] # changes type over the function

if use_cnn:
# normally 1