From 06a5cab73399fdb0f6e060321f70c9dae8f0a7f2 Mon Sep 17 00:00:00 2001 From: zm711 <92116279+zm711@users.noreply.github.com> Date: Fri, 5 Apr 2024 10:59:07 -0400 Subject: [PATCH 1/6] add igor2 for building docs --- pyproject.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 050816947..8d6f2fa51 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -79,7 +79,8 @@ docs = [ "ipython", "matplotlib", "nixio", - "pynwb" + "pynwb", + "igor2" ] dev = [ From 1c5b0899f3110973665009df0cfe47b35809c0a4 Mon Sep 17 00:00:00 2001 From: zm711 <92116279+zm711@users.noreply.github.com> Date: Fri, 5 Apr 2024 11:07:55 -0400 Subject: [PATCH 2/6] switch pip install for dhn-med-py --- pyproject.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 8d6f2fa51..89991f419 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -45,7 +45,7 @@ iocache = [ ] test = [ - "dhn_med_py>=1.0.0", + "dhn-med-py>=1.0.0", "pytest", "pytest-cov", # datalad # this dependency is covered by conda (environment_testing.yml) @@ -100,13 +100,13 @@ ced = ["sonpy"] nwb = ["pynwb"] maxwell = ["h5py"] biocam = ["h5py"] -med = ["dhn_med_py>=1.0.0"] +med = ["dhn-med-py>=1.0.0"] plexon2 = ["zugbruecke>=0.2; sys_platform!='win32'", "wenv; sys_platform!='win32'"] all = [ "coverage", "coveralls", - "dhn_med_py>=1.0.0", + "dhn-med-py>=1.0.0", "h5py", "igor2", "ipython", From 43190d55325bdb2af0900aadd4255138c0901e00 Mon Sep 17 00:00:00 2001 From: zm711 <92116279+zm711@users.noreply.github.com> Date: Fri, 5 Apr 2024 11:25:12 -0400 Subject: [PATCH 3/6] revert dhn_med_py --- pyproject.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 89991f419..8d6f2fa51 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -45,7 +45,7 @@ iocache = [ ] test = [ - "dhn-med-py>=1.0.0", + "dhn_med_py>=1.0.0", "pytest", "pytest-cov", # datalad # this dependency is covered 
by conda (environment_testing.yml) @@ -100,13 +100,13 @@ ced = ["sonpy"] nwb = ["pynwb"] maxwell = ["h5py"] biocam = ["h5py"] -med = ["dhn-med-py>=1.0.0"] +med = ["dhn_med_py>=1.0.0"] plexon2 = ["zugbruecke>=0.2; sys_platform!='win32'", "wenv; sys_platform!='win32'"] all = [ "coverage", "coveralls", - "dhn-med-py>=1.0.0", + "dhn_med_py>=1.0.0", "h5py", "igor2", "ipython", From b3538ce4f97486652c18f31afb3efbab5ba90738 Mon Sep 17 00:00:00 2001 From: zm711 <92116279+zm711@users.noreply.github.com> Date: Fri, 5 Apr 2024 13:26:03 -0400 Subject: [PATCH 4/6] add python in the env_testing --- environment_testing.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/environment_testing.yml b/environment_testing.yml index 8ff8bd984..7e5aee66a 100644 --- a/environment_testing.yml +++ b/environment_testing.yml @@ -2,5 +2,6 @@ name: neo-test-env channels: - conda-forge dependencies: + - python=3.9 - datalad - pip From 51dfa2b1b6182846913c380d0de227ee01bd37f7 Mon Sep 17 00:00:00 2001 From: zm711 <92116279+zm711@users.noreply.github.com> Date: Fri, 5 Apr 2024 14:03:50 -0400 Subject: [PATCH 5/6] fix examples displaying wrong order --- examples/plot_igorio.py | 11 ++-- examples/plot_imageseq.py | 12 ++++- examples/plot_multi_tetrode_example.py | 11 ++-- examples/plot_read_files_neo_io.py | 28 +++++++++- examples/plot_read_files_neo_rawio.py | 62 ++++++++++++++++++---- examples/plot_read_proxy_with_lazy_load.py | 25 ++++++++- examples/plot_roi_demo.py | 8 ++- 7 files changed, 136 insertions(+), 21 deletions(-) diff --git a/examples/plot_igorio.py b/examples/plot_igorio.py index 822a9e2d5..ae4cfc47b 100644 --- a/examples/plot_igorio.py +++ b/examples/plot_igorio.py @@ -3,14 +3,16 @@ =========================== """ - +########################################################### +# Import our packages import os from urllib.request import urlretrieve import zipfile import matplotlib.pyplot as plt from neo.io import get_io - +############################################################# 
+# Then download some data # Downloaded from Human Brain Project Collaboratory # Digital Reconstruction of Neocortical Microcircuitry (nmc-portal) # http://microcircuits.epfl.ch/#/animal/8ecde7d1-b2d2-11e4-b949-6003088da632 @@ -23,7 +25,10 @@ zip_ref.extract(path=".", member=filename) # extract file to dir zip_ref.close() - +###################################################### +# Once we have our data we can use `get_io` to find an +# io (Igor in this case). Then we read the analogsignals +# Finally we will make some nice plots reader = get_io(filename) signal = reader.read_analogsignal() plt.plot(signal.times, signal) diff --git a/examples/plot_imageseq.py b/examples/plot_imageseq.py index ce3e932f3..a2c7091db 100644 --- a/examples/plot_imageseq.py +++ b/examples/plot_imageseq.py @@ -4,6 +4,9 @@ """ +########################################################## +# Let's import some packages + from neo.core import ImageSequence from neo.core import RectangularRegionOfInterest, CircularRegionOfInterest, PolygonRegionOfInterest import matplotlib.pyplot as plt @@ -11,7 +14,11 @@ import random -# generate data + +############################################################ +# Now we need to generate some data +# We will just make a nice box and then we can attach this +# ImageSequence to a variety of ROIs l = [] for frame in range(50): @@ -29,6 +36,9 @@ PolygonRegionOfInterest(image_seq,(50, 25), (50, 45), (14, 65), (90, 80)), ) +############################################################### +# It is easy to plot our results using matplotlib + for i in range(len(result)): plt.figure() plt.plot(result[i].times, result[i]) diff --git a/examples/plot_multi_tetrode_example.py b/examples/plot_multi_tetrode_example.py index 0ca27d0b8..725a7dd3c 100644 --- a/examples/plot_multi_tetrode_example.py +++ b/examples/plot_multi_tetrode_example.py @@ -1,5 +1,6 @@ """ -Example for usecases.rst +Analyzing and Plotting Data with Neo Structures 
+=============================================== """ from itertools import cycle @@ -57,8 +58,10 @@ current_group.add(signals[tetrode_id]) -# Now plot the data +################################################### +# Now we will plot the data +################################################### # .. by trial plt.figure() for seg in block.segments: @@ -70,6 +73,7 @@ plt.title(f"PSTH in segment {seg.index}") plt.show() +#################################################### # ..by neuron plt.figure() @@ -81,7 +85,8 @@ plt.title(f"PSTH of unit {group.name}") plt.show() -# ..by tetrode +########################################################### +# ..by tetrode (or other electrode number) plt.figure() for i, tetrode_id in enumerate(block.annotations["tetrode_ids"]): diff --git a/examples/plot_read_files_neo_io.py b/examples/plot_read_files_neo_io.py index 95734e896..8d3eedb8b 100644 --- a/examples/plot_read_files_neo_io.py +++ b/examples/plot_read_files_neo_io.py @@ -4,6 +4,9 @@ """ +#################################################### +# Start with package import and getting a datafile + import urllib import neo @@ -15,11 +18,22 @@ localfile = "File_plexon_3.plx" urllib.request.urlretrieve(distantfile, localfile) + + +################################################### +# Now we can create our reader and read some data + # create a reader reader = neo.io.PlexonIO(filename="File_plexon_3.plx") # read the blocks blks = reader.read(lazy=False) print(blks) + +###################################################### +# Once we have our blocks we can iterate through each +# block of data and see the contents of all parts of +# that data + # access to segments for blk in blks: for seg in blk.segments: @@ -29,16 +43,28 @@ for st in seg.spiketrains: print(st) +####################################################### +# Let's look at another file type + # CED Spike2 files distantfile = url_repo + "spike2/File_spike2_1.smr" localfile = "./File_spike2_1.smr" 
urllib.request.urlretrieve(distantfile, localfile) - # create a reader reader = neo.io.Spike2IO(filename="File_spike2_1.smr") + +######################################################### +# Despite being a different raw file format we can access +# the data in the same way + # read the block bl = reader.read(lazy=False)[0] print(bl) + +########################################################## +# Similarly we can view the different types of data within +# the block (AnalogSignals and SpikeTrains) + # access to segments for seg in bl.segments: print(seg) diff --git a/examples/plot_read_files_neo_rawio.py b/examples/plot_read_files_neo_rawio.py index ccc86a33b..11a081e51 100644 --- a/examples/plot_read_files_neo_rawio.py +++ b/examples/plot_read_files_neo_rawio.py @@ -4,58 +4,98 @@ compare with read_files_neo_io.py """ +########################################################### +# First we import a RawIO from neo.rawio +# For this example we will use PlexonRawIO import urllib from neo.rawio import PlexonRawIO url_repo = "https://web.gin.g-node.org/NeuralEnsemble/ephy_testing_data/raw/master/" +############################################################## # Get Plexon files +# We will be pulling these files down, but if you have local file +# then all you need to do is specify the file location on your +# computer + distantfile = url_repo + "plexon/File_plexon_3.plx" localfile = "File_plexon_3.plx" urllib.request.urlretrieve(distantfile, localfile) -# create a reader +############################################################### +# Create a reader +# All it takes to create a reader is giving the filename +# Then we need to do the slow step of parsing the header with the +# `parse_header` function. 
This collects metadata as well as +# make all future steps much faster for us + reader = PlexonRawIO(filename="File_plexon_3.plx") reader.parse_header() print(reader) +# we can view metadata in the header print(reader.header) +############################################################### # Read signal chunks +# This is how we read raw data. We choose indices that we want or +# we can use None to mean look at all channels. We also need to +# specify the block of data (block_index) as well as the segment +# (seg_index). Then we give the index start and stop. Since we +# often thing in time to go from time to index would just require +# the sample rate (so index = time / sampling_rate) channel_indexes = None # could be channel_indexes = [0] raw_sigs = reader.get_analogsignal_chunk( block_index=0, seg_index=0, i_start=1024, i_stop=2048, channel_indexes=channel_indexes ) + +# raw_sigs are not voltages so to convert to voltages we do the follwing float_sigs = reader.rescale_signal_raw_to_float(raw_sigs, dtype="float64") +# each rawio gives you access to lots of information about your data sampling_rate = reader.get_signal_sampling_rate() t_start = reader.get_signal_t_start(block_index=0, seg_index=0) units = reader.header["signal_channels"][0]["units"] +# and we can display all of this information print(raw_sigs.shape, raw_sigs.dtype) print(float_sigs.shape, float_sigs.dtype) -print(sampling_rate, t_start, units) +print(f"{sampling_rate=}, {t_start=}, {units=}") + + +#################################################################### +# Some rawio's and file formats all provide information about spikes +# If an rawio can't read this data it will raise an error, but lucky +# for us PlexonRawIO does have spikes data!! 
# Count units and spikes per unit nb_unit = reader.spike_channels_count() print("nb_unit", nb_unit) for spike_channel_index in range(nb_unit): nb_spike = reader.spike_count(block_index=0, seg_index=0, spike_channel_index=spike_channel_index) - print("spike_channel_index", spike_channel_index, "nb_spike", nb_spike) + print(f"{spike_channel_index=}\n{nb_spike}\n") -# Read spike times +# Read spike times and rescale (just like analogsignal above) spike_timestamps = reader.get_spike_timestamps( block_index=0, seg_index=0, spike_channel_index=0, t_start=0.0, t_stop=10.0 ) -print(spike_timestamps.shape, spike_timestamps.dtype, spike_timestamps[:5]) +print(f"{spike_timestamps.shape=}\n{spike_timestamps.dtype=}\n{spike_timestamps[:5]=}") spike_times = reader.rescale_spike_timestamp(spike_timestamps, dtype="float64") -print(spike_times.shape, spike_times.dtype, spike_times[:5]) +print(f"{spike_times.shape=}\n{spike_times.dtype=}\n{spike_times[:5]}") + +####################################################################### +# Some file formats can also give waveform information. We are lucky +# again. Our file has waveform data!! # Read spike waveforms raw_waveforms = reader.get_spike_raw_waveforms( block_index=0, seg_index=0, spike_channel_index=0, t_start=0.0, t_stop=10.0 ) -print(raw_waveforms.shape, raw_waveforms.dtype, raw_waveforms[0, 0, :4]) +print(f"{raw_waveforms.shape=}\n{raw_waveforms.dtype=}\n{raw_waveforms[0, 0, :4]=}") float_waveforms = reader.rescale_waveforms_to_float(raw_waveforms, dtype="float32", spike_channel_index=0) -print(float_waveforms.shape, float_waveforms.dtype, float_waveforms[0, 0, :4]) +print(f"{float_waveforms.shape=}\n{float_waveforms.dtype=}{float_waveforms[0, 0, :4]=}") + +######################################################################### +# RawIOs can also read event timestamps. 
But looks like our luck ran out +# let's grab a new file to see this feature # Read event timestamps and times (take another file) distantfile = url_repo + "plexon/File_plexon_2.plx" @@ -66,7 +106,7 @@ reader = PlexonRawIO(filename="File_plexon_2.plx") reader.parse_header() nb_event_channel = reader.event_channels_count() -print("nb_event_channel", nb_event_channel) +print("nb_event_channel:", nb_event_channel) for chan_index in range(nb_event_channel): nb_event = reader.event_count(block_index=0, seg_index=0, event_channel_index=chan_index) print("chan_index", chan_index, "nb_event", nb_event) @@ -74,6 +114,6 @@ ev_timestamps, ev_durations, ev_labels = reader.get_event_timestamps( block_index=0, seg_index=0, event_channel_index=0, t_start=None, t_stop=None ) -print(ev_timestamps, ev_durations, ev_labels) +print(f"{ev_timestamps=}\n{ev_durations=}\n{ev_labels=}") ev_times = reader.rescale_event_timestamp(ev_timestamps, dtype="float64") -print(ev_times) +print(f"{ev_times=}") diff --git a/examples/plot_read_proxy_with_lazy_load.py b/examples/plot_read_proxy_with_lazy_load.py index 9aace0d62..f35a5508f 100644 --- a/examples/plot_read_proxy_with_lazy_load.py +++ b/examples/plot_read_proxy_with_lazy_load.py @@ -4,6 +4,9 @@ """ +################################################ +# Import our packages first + import urllib import neo import quantities as pq @@ -11,18 +14,27 @@ url_repo = "https://web.gin.g-node.org/NeuralEnsemble/ephy_testing_data/raw/master/" -# Get Plexon files + +############################################### +# Let's get a file + +# Get med file distantfile = url_repo + "micromed/File_micromed_1.TRC" localfile = "./File_micromed_1.TRC" urllib.request.urlretrieve(distantfile, localfile) +################################################## # create a reader + reader = neo.MicromedIO(filename="File_micromed_1.TRC") reader.parse_header() lim0, lim1 = -20 * pq.ms, +20 * pq.ms +##################################################### +# Now let's make a 
function that we want to apply to +# look at lazy for eager uses of the API def apply_my_fancy_average(sig_list): """basic average along triggers and then channels @@ -33,6 +45,9 @@ def apply_my_fancy_average(sig_list): sigs = np.stack(sig_list, axis=0) return np.mean(np.mean(sigs, axis=0), axis=1) +################################################## +# We start with eager where `lazy=False`. Everything +# is loaded into memory seg = reader.read_segment(lazy=False) triggers = seg.events[0] @@ -44,6 +59,11 @@ def apply_my_fancy_average(sig_list): all_sig_chunks.append(anasig_chunk) m1 = apply_my_fancy_average(all_sig_chunks) +##################################################### +# Here we do `lazy=True`, so we do lazy loading. We +# only load the events that we want into memory +# and we use a proxy object for our analogsignal until we +# load it chunk by chunk (no running out of memory!) seg = reader.read_segment(lazy=True) triggers = seg.events[0].load(time_slice=None) # this load all trigers in memory @@ -55,5 +75,8 @@ def apply_my_fancy_average(sig_list): all_sig_chunks.append(anasig_chunk) m2 = apply_my_fancy_average(all_sig_chunks) +########################################################## +# We see that either way the result is the same, but +# we do not exhaust our RAM/memory print(m1) print(m2) diff --git a/examples/plot_roi_demo.py b/examples/plot_roi_demo.py index c4b7d550e..bb057897d 100644 --- a/examples/plot_roi_demo.py +++ b/examples/plot_roi_demo.py @@ -3,6 +3,8 @@ ===================================== """ +############################################# +# Import our packages import matplotlib.pyplot as plt import numpy as np @@ -11,7 +13,7 @@ import random import quantities as pq - +################################################################## # First we create our image_sequence. 
Let's generate some data l = [] @@ -24,6 +26,8 @@ image_seq = ImageSequence(l, sampling_rate=500 * pq.Hz, spatial_scale="m", units="V") +################################################################# +# Now we will write a function for plotting an roi def plot_roi(roi, shape): img = rand(120, 100) pir = np.array(roi.pixels_in_region()).T @@ -35,6 +39,8 @@ def plot_roi(roi, shape): ax = plt.gca() ax.add_artist(shape) +################################################################## +# Finally we will plot each roi roi = CircularRegionOfInterest(image_sequence=image_seq, x=50.3, y=50.8, radius=30.2) shape = plt.Circle(roi.centre, roi.radius, color="r", fill=False) From 207b851d9399a24ee1d44f24cceab2f18fdf7140 Mon Sep 17 00:00:00 2001 From: zm711 <92116279+zm711@users.noreply.github.com> Date: Sat, 6 Apr 2024 17:44:33 -0400 Subject: [PATCH 6/6] more examples touchups --- examples/plot_igorio.py | 2 + examples/plot_imageseq.py | 7 +- examples/plot_multi_tetrode_example.py | 37 +++++++++- examples/plot_read_files_neo_rawio.py | 81 ++++++++++++++++------ examples/plot_read_proxy_with_lazy_load.py | 63 +++++++++++++---- examples/plot_roi_demo.py | 20 +++++- 6 files changed, 166 insertions(+), 44 deletions(-) diff --git a/examples/plot_igorio.py b/examples/plot_igorio.py index ae4cfc47b..71c24535b 100644 --- a/examples/plot_igorio.py +++ b/examples/plot_igorio.py @@ -16,6 +16,8 @@ # Downloaded from Human Brain Project Collaboratory # Digital Reconstruction of Neocortical Microcircuitry (nmc-portal) # http://microcircuits.epfl.ch/#/animal/8ecde7d1-b2d2-11e4-b949-6003088da632 + + datafile_url = "https://microcircuits.epfl.ch/data/released_data/B95.zip" filename_zip = "B95.zip" filename = "grouped_ephys/B95/B95_Ch0_IDRest_107.ibw" diff --git a/examples/plot_imageseq.py b/examples/plot_imageseq.py index a2c7091db..74fa04e4a 100644 --- a/examples/plot_imageseq.py +++ b/examples/plot_imageseq.py @@ -19,6 +19,7 @@ # Now we need to generate some data # We will just make a 
nice box and then we can attach this # ImageSequence to a variety of ROIs +# our ImageSequence will be 50 frames of 100x100 pixel images l = [] for frame in range(50): @@ -28,6 +29,10 @@ for x in range(100): l[frame][y].append(random.randint(0, 50)) +##################################################################### +# we then make our image sequence and pull out our results from the +# image_seq + image_seq = ImageSequence(l, sampling_rate=500 * pq.Hz, spatial_scale="m", units="V") result = image_seq.signal_from_region( @@ -44,5 +49,5 @@ plt.plot(result[i].times, result[i]) plt.xlabel("seconde") plt.ylabel("valeur") - +plt.tight_layout() plt.show() diff --git a/examples/plot_multi_tetrode_example.py b/examples/plot_multi_tetrode_example.py index 725a7dd3c..008369186 100644 --- a/examples/plot_multi_tetrode_example.py +++ b/examples/plot_multi_tetrode_example.py @@ -2,6 +2,10 @@ Analyzing and Plotting Data with Neo Structures =============================================== """ +###################################################### +# First we import some packages. Since we are making simulated +# data we will import quite a few neo features as well as use +# quantities to provide units from itertools import cycle import numpy as np @@ -9,6 +13,10 @@ import matplotlib.pyplot as plt from neo import Block, Segment, ChannelView, Group, SpikeTrain, AnalogSignal +########################################################################## +# For Neo we start with a block of data that will contain segments of data +# so we will create a block of probe data that has a couple tetrodes +# Then we will load in 3 segments (for examples trials of a stimulus) store_signals = False block = Block(name="probe data", tetrode_ids=["Tetrode #1", "Tetrode #2"]) @@ -18,8 +26,13 @@ Segment(name="trial #3", index=2), ] +# we will decide how many units each tetrode has found. 
If only science was this easy n_units = {"Tetrode #1": 2, "Tetrode #2": 5} +################################################################################## +# Neo can also have groups. Groups are structures within a block that can cross segments +# for example we could group a neuron across trials or across probes. + # Create a group for each neuron, annotate each group with the tetrode from which it was recorded groups = [] counter = 0 @@ -30,10 +43,19 @@ iter_group = cycle(groups) +########################################################################################## +# Segments are also containers of data. Segments can hold raw signal data like an AnalogSignal +# Segments can also hold spiketrain data (in a SpikeTrain). It can also hold event data (which +# we are not show in this example) + + # Create dummy data, one segment at a time for segment in block.segments: - # create two 4-channel AnalogSignals with dummy data + # create two 4-channel AnalogSignals with simulated data (because we have two tetrodes!) 
+ # note that the AnalogSignal with have numpy array-like data with units and sampling rates + # Neo keeps track of these units while also giving you the flexibility of treating the raw data + # like a numpy array signals = { "Tetrode #1": AnalogSignal(np.random.rand(1000, 4) * mV, sampling_rate=10 * kHz, tetrode_id="Tetrode #1"), "Tetrode #2": AnalogSignal(np.random.rand(1000, 4) * mV, sampling_rate=10 * kHz, tetrode_id="Tetrode #2"), @@ -41,8 +63,8 @@ if store_signals: segment.analogsignals.extend(signals.values()) - # create spike trains with dummy data - # we will pretend the spikes have been extracted from the dummy signal + # create spike trains with simulated data + # we will pretend the spikes have been extracted from the simulated signal for tetrode_id in ("Tetrode #1", "Tetrode #2"): for i in range(n_units[tetrode_id]): spiketrain = SpikeTrain(np.random.uniform(0, 100, size=30) * ms, t_stop=100 * ms) @@ -60,6 +82,12 @@ ################################################### # Now we will plot the data +# Neo doesn't provide it's own plotting functions, but +# since its data can be treated like numpy arrays +# it is easy to use standard packages like matplotlib +# for all your plotting needs +# We do a classic in neuroscience and show various ways +# to plot a PSTH (Peristimulus histogram) ################################################### # .. 
by trial @@ -71,6 +99,7 @@ count, bins = np.histogram(stlist) plt.bar(bins[:-1], count, width=bins[1] - bins[0]) plt.title(f"PSTH in segment {seg.index}") +plt.tight_layout() plt.show() #################################################### @@ -83,6 +112,7 @@ count, bins = np.histogram(stlist) plt.bar(bins[:-1], count, width=bins[1] - bins[0]) plt.title(f"PSTH of unit {group.name}") +plt.tight_layout() plt.show() ########################################################### @@ -97,4 +127,5 @@ count, bins = np.histogram(stlist) plt.bar(bins[:-1], count, width=bins[1] - bins[0]) plt.title(f"PSTH blend of tetrode {tetrode_id}") +plt.tight_layout() plt.show() diff --git a/examples/plot_read_files_neo_rawio.py b/examples/plot_read_files_neo_rawio.py index 11a081e51..1b6e5e287 100644 --- a/examples/plot_read_files_neo_rawio.py +++ b/examples/plot_read_files_neo_rawio.py @@ -11,21 +11,24 @@ import urllib from neo.rawio import PlexonRawIO -url_repo = "https://web.gin.g-node.org/NeuralEnsemble/ephy_testing_data/raw/master/" - ############################################################## # Get Plexon files -# We will be pulling these files down, but if you have local file + +# We will be pulling these files down, but if you have a local file # then all you need to do is specify the file location on your -# computer +# computer. NeuralEnsemble keeps a wide variety of freely accesible, small +# test files that can be used. So for this example we will take advantage of that +# fact. 
+url_repo = "https://web.gin.g-node.org/NeuralEnsemble/ephy_testing_data/raw/master/" distantfile = url_repo + "plexon/File_plexon_3.plx" localfile = "File_plexon_3.plx" urllib.request.urlretrieve(distantfile, localfile) ############################################################### # Create a reader -# All it takes to create a reader is giving the filename + +# All it takes to create a reader is giving the filename (or dirname) # Then we need to do the slow step of parsing the header with the # `parse_header` function. This collects metadata as well as # make all future steps much faster for us @@ -42,8 +45,9 @@ # we can use None to mean look at all channels. We also need to # specify the block of data (block_index) as well as the segment # (seg_index). Then we give the index start and stop. Since we -# often thing in time to go from time to index would just require +# often think in time: to go from time to index would just require # the sample rate (so index = time / sampling_rate) + channel_indexes = None # could be channel_indexes = [0] raw_sigs = reader.get_analogsignal_chunk( block_index=0, seg_index=0, i_start=1024, i_stop=2048, channel_indexes=channel_indexes @@ -51,47 +55,60 @@ # raw_sigs are not voltages so to convert to voltages we do the follwing float_sigs = reader.rescale_signal_raw_to_float(raw_sigs, dtype="float64") -# each rawio gives you access to lots of information about your data + +# We can see that the shapes are the same, but that the datatypes +# are different once we've rescaled our data +print("Raw Data: ", raw_sigs.shape, raw_sigs.dtype) +print("Scaled Data: ", float_sigs.shape, float_sigs.dtype) + +############################################################### +# Each rawio gives you access to lots of information about your data +# some of this information comes from functions +# other information is stored as metadata in the reader.header + sampling_rate = reader.get_signal_sampling_rate() +# Like above we need to indicate the block 
and segment t_start = reader.get_signal_t_start(block_index=0, seg_index=0) units = reader.header["signal_channels"][0]["units"] + # and we can display all of this information -print(raw_sigs.shape, raw_sigs.dtype) -print(float_sigs.shape, float_sigs.dtype) print(f"{sampling_rate=}, {t_start=}, {units=}") #################################################################### -# Some rawio's and file formats all provide information about spikes -# If an rawio can't read this data it will raise an error, but lucky +# Some rawio's and file formats also provide information about spikes +# If a rawio can't read this data it will raise an error, but lucky # for us PlexonRawIO does have spikes data!! # Count units and spikes per unit nb_unit = reader.spike_channels_count() -print("nb_unit", nb_unit) +print(f"nb_unit: {nb_unit}\n") # nb_unit stands for number of units +print("spike_channel_index nb_spike") for spike_channel_index in range(nb_unit): nb_spike = reader.spike_count(block_index=0, seg_index=0, spike_channel_index=spike_channel_index) - print(f"{spike_channel_index=}\n{nb_spike}\n") + print(f"{spike_channel_index}: {nb_spike}\n") # Read spike times and rescale (just like analogsignal above) spike_timestamps = reader.get_spike_timestamps( block_index=0, seg_index=0, spike_channel_index=0, t_start=0.0, t_stop=10.0 ) -print(f"{spike_timestamps.shape=}\n{spike_timestamps.dtype=}\n{spike_timestamps[:5]=}") + +print(f"{spike_timestamps.shape=}\n{spike_timestamps.dtype=}\n{spike_timestamps[:5]=}\n") spike_times = reader.rescale_spike_timestamp(spike_timestamps, dtype="float64") -print(f"{spike_times.shape=}\n{spike_times.dtype=}\n{spike_times[:5]}") +print(f"{spike_times.shape=}\n{spike_times.dtype=}\n{spike_times[:5]}\n") ####################################################################### # Some file formats can also give waveform information. We are lucky -# again. Our file has waveform data!! +# again our file has waveform data!! 
We forms are a 3d dataset of +# (nb_spike, nb_channel, nb_sample) # Read spike waveforms raw_waveforms = reader.get_spike_raw_waveforms( block_index=0, seg_index=0, spike_channel_index=0, t_start=0.0, t_stop=10.0 ) -print(f"{raw_waveforms.shape=}\n{raw_waveforms.dtype=}\n{raw_waveforms[0, 0, :4]=}") +print(f"{raw_waveforms.shape=}\n{raw_waveforms.dtype=}\n{raw_waveforms[0, 0, :4]=}\n") float_waveforms = reader.rescale_waveforms_to_float(raw_waveforms, dtype="float32", spike_channel_index=0) -print(f"{float_waveforms.shape=}\n{float_waveforms.dtype=}{float_waveforms[0, 0, :4]=}") +print(f"{float_waveforms.shape=}\n{float_waveforms.dtype=}{float_waveforms[0, 0, :4]=}\n") ######################################################################### # RawIOs can also read event timestamps. But looks like our luck ran out @@ -102,18 +119,36 @@ localfile = "File_plexon_2.plx" urllib.request.urlretrieve(distantfile, localfile) -# Count events per channel +######################################################################### +# Since this is a new file we need to read initialize our reader as well +# as parse the header + reader = PlexonRawIO(filename="File_plexon_2.plx") reader.parse_header() +# if we look at this header we see it is different than the header above +print(reader.header) + +########################################################################### +# Now let's look at some event data. 
This could be things like stimuli applied +# during the course of an ephys recording + nb_event_channel = reader.event_channels_count() -print("nb_event_channel:", nb_event_channel) +print(f"nb_event_channel: {nb_event_channel}") +# now iterate through the channels for chan_index in range(nb_event_channel): nb_event = reader.event_count(block_index=0, seg_index=0, event_channel_index=chan_index) - print("chan_index", chan_index, "nb_event", nb_event) + print(f"chan_index: {chan_index} nb_event: {nb_event}\n") + + +############################################################################### +# Finally we can get our actual event timestamps. Some file formats provide the +# real timestamps (timestamps in s/ms) others have raw timestamps (in samples) +# so we can do the same style of functions as above. Get the raw timestamps +# and convert to real times with a rescale function. ev_timestamps, ev_durations, ev_labels = reader.get_event_timestamps( block_index=0, seg_index=0, event_channel_index=0, t_start=None, t_stop=None ) -print(f"{ev_timestamps=}\n{ev_durations=}\n{ev_labels=}") +print(f"{ev_timestamps=}\n{ev_durations=}\n{ev_labels=}\n") ev_times = reader.rescale_event_timestamp(ev_timestamps, dtype="float64") -print(f"{ev_times=}") +print(f"{ev_times=}\n") diff --git a/examples/plot_read_proxy_with_lazy_load.py b/examples/plot_read_proxy_with_lazy_load.py index f35a5508f..15e5871a4 100644 --- a/examples/plot_read_proxy_with_lazy_load.py +++ b/examples/plot_read_proxy_with_lazy_load.py @@ -6,18 +6,22 @@ ################################################ # Import our packages first +# It is often nice to have units so we will also +# import quantities import urllib import neo import quantities as pq import numpy as np -url_repo = "https://web.gin.g-node.org/NeuralEnsemble/ephy_testing_data/raw/master/" - ############################################### # Let's get a file +# NeuralEnsemble maintains a wide variety of small test +# datasets that are free to use. 
We can use urllib to pull +# down one of these files for use +url_repo = "https://web.gin.g-node.org/NeuralEnsemble/ephy_testing_data/raw/master/" # Get med file distantfile = url_repo + "micromed/File_micromed_1.TRC" localfile = "./File_micromed_1.TRC" @@ -25,58 +29,89 @@ ################################################## # create a reader +# creating a reader for neo is easy it just requires using +# the name of the desired reader and providing either a filename +# or a directory name (reader dependent). Since we got a micromed +# file we will use MicromedIO. reader = neo.MicromedIO(filename="File_micromed_1.TRC") reader.parse_header() +############################################################ +# as always we can look view some interesting information about the +# metadata and structure of a file just by printing the reader and +# it's header +print(reader) +print(f"Header information: {reader.header}") -lim0, lim1 = -20 * pq.ms, +20 * pq.ms ##################################################### # Now let's make a function that we want to apply to -# look at lazy for eager uses of the API +# look at lazy vs eager uses of the API def apply_my_fancy_average(sig_list): """basic average along triggers and then channels here we go back to numpy with magnitude - to be able to use np.stack + to be able to use np.stack. + + Because neo uses quantities to keep track of units + we can always get just the magnitude of an array + with `.magnitude` """ sig_list = [s.magnitude for s in sig_list] sigs = np.stack(sig_list, axis=0) return np.mean(np.mean(sigs, axis=0), axis=1) +################################################# +# Let's set our limits for both cases. We will +# use quantities to include time dimensions. + +lim_start = -20 * pq.ms # 20 milliseconds before +lim_end = +20 * pq.ms # 20 milliseconds after + ################################################## -# We start with eager where `lazy=False`. 
Everything -# is loaded into memory +# We start with eager (where `lazy=False`.) Everything +# is loaded into memory. We will read a segment of data. +# This includes analog signal data and events data +# (final contents of a segment are dependent on the +# underlying IO being used) + seg = reader.read_segment(lazy=False) triggers = seg.events[0] anasig = seg.analogsignals[0] # here anasig contain the whole recording in memory all_sig_chunks = [] for t in triggers.times: - t0, t1 = (t + lim0), (t + lim1) + t0, t1 = (t + lim_start), (t + lim_end) anasig_chunk = anasig.time_slice(t0, t1) all_sig_chunks.append(anasig_chunk) + +# After pulling all data into memory and then iterating through triggers +# we end by doing our average m1 = apply_my_fancy_average(all_sig_chunks) ##################################################### -# Here we do `lazy=True`, so we do lazy loading. We -# only load the events that we want into memory +# Here we do `lazy=True`, i.e. we do lazy loading. We +# only load the data that we want into memory # and we use a proxy object for our analogsignal until we # load it chunk by chunk (no running out of memory!) 
seg = reader.read_segment(lazy=True) -triggers = seg.events[0].load(time_slice=None) # this load all trigers in memory +triggers = seg.events[0].load(time_slice=None) # this loads all triggers in memory anasigproxy = seg.analogsignals[0] # this is a proxy all_sig_chunks = [] for t in triggers.times: - t0, t1 = (t + lim0), (t + lim1) + t0, t1 = (t + lim_start), (t + lim_end) + # at this step we load actual data into memory, but notice that we only load one + # chunk of data at a time, so we reduce the memory strain anasig_chunk = anasigproxy.load(time_slice=(t0, t1)) # here real data are loaded all_sig_chunks.append(anasig_chunk) + +# Finally we apply the same average as we did above m2 = apply_my_fancy_average(all_sig_chunks) ########################################################## # We see that either way the result is the same, but # we do not exhaust our RAM/memory -print(m1) -print(m2) +print(f"Eagerly loading data and averaging: {m1}") +print(f"Lazy loading data and average {m2}") diff --git a/examples/plot_roi_demo.py b/examples/plot_roi_demo.py index bb057897d..653947074 100644 --- a/examples/plot_roi_demo.py +++ b/examples/plot_roi_demo.py @@ -3,8 +3,12 @@ ===================================== """ -############################################# + ################################################################# # Import our packages +# We can import a variety of neo objects from neo.core and since an ImageSequence +# also uses units let's import quantities which is the "units" library +# that neo uses under the hood import matplotlib.pyplot as plt import numpy as np @@ -15,6 +19,9 @@ ################################################################## # First we create our image_sequence.
Let's generate some data +# In this simulated dataset we will just make an image that is +# 100x100 pixels and then we will make 50 frames of this image +# finally we will fill with random values for the data l = [] for frame in range(50): @@ -24,8 +31,10 @@ for x in range(100): l[frame][y].append(random.randint(0, 50)) +# make an ImageSequence in Neo image_seq = ImageSequence(l, sampling_rate=500 * pq.Hz, spatial_scale="m", units="V") + ################################################################# # Now we will write a function for plotting an roi def plot_roi(roi, shape): @@ -39,22 +48,27 @@ def plot_roi(roi, shape): ax = plt.gca() ax.add_artist(shape) -################################################################## -# Finally we will plot each roi +################################################################################ +# Finally we will plot each roi to demonstrate how we could highlight regions of interest + +# First a nice circle roi = CircularRegionOfInterest(image_sequence=image_seq, x=50.3, y=50.8, radius=30.2) shape = plt.Circle(roi.centre, roi.radius, color="r", fill=False) plt.subplot(1, 3, 1) plot_roi(roi, shape) +# Next a rectangle roi = RectangularRegionOfInterest(image_sequence=image_seq, x=50.3, y=40.2, width=40.1, height=50.3) shape = plt.Rectangle((roi.x - roi.width / 2.0, roi.y - roi.height / 2.0), roi.width, roi.height, color="r", fill=False) plt.subplot(1, 3, 2) plot_roi(roi, shape) +# Finally we can make a polygon (in this case a triangle) roi = PolygonRegionOfInterest(image_seq, (20.3, 30.2), (80.7, 30.1), (55.2, 59.4)) shape = plt.Polygon(np.array(roi.vertices), closed=True, color="r", fill=False) plt.subplot(1, 3, 3) plot_roi(roi, shape) +plt.tight_layout() plt.show()