Quick benchmark with new API and new sorters (April 2021)

Quick benchmark with the new spikeinterface API and new sorters

In spring 2021, spikeinterface was deeply refactored.

During this refactoring, some sorters were added.

Here is a quick benchmark on one dataset simulated with MEArec.

In [7]:
%matplotlib inline
In [8]:
from pathlib import Path
import os
import shutil
from pprint import pprint
import getpass


import numpy as np
import matplotlib.pyplot as plt

import MEArec as mr
import neo
import quantities as pq


import spikeinterface.extractors as se
import spikeinterface.widgets as sw
import spikeinterface.sorters as ss

from spikeinterface.comparison import GroundTruthStudy
In [9]:
basedir = '/mnt/data/sam/DataSpikeSorting/'

basedir = Path(basedir)

workdir = basedir / 'mearec_bench_2021'

study_folder = workdir /'study_mearec_march_2021'

tmp_folder = workdir / 'tmp'
tmp_folder.mkdir(parents=True, exist_ok=True)

generate recording with MEArec

In [ ]:
template_filename = workdir / 'templates_Neuronexus-32_100.h5'
probe = 'Neuronexus-32'
n_cell = 15
duration = 10 * 60.

recording_filename = workdir /  f'recordings_{n_cell}cells_{probe}_{duration:0.0f}s.h5'


fs = 30000.


#~ spgen = mr.SpikeTrainGenerator()
rec_params = mr.get_default_recordings_params()

rec_params['recordings']['fs'] = fs
rec_params['recordings']['sync_rate'] = None
rec_params['recordings']['sync_jitter'] = 5
rec_params['recordings']['noise_level'] = 5
rec_params['recordings']['filter'] = False
rec_params['recordings']['chunk_duration'] = 10.
rec_params['spiketrains']['duration'] = duration
rec_params['spiketrains']['n_exc'] = n_cell
rec_params['spiketrains']['n_inh'] = 0
rec_params['templates']['n_overlap_pairs'] = None
rec_params['templates']['min_dist'] = 0

recgen = mr.gen_recordings(params=rec_params, #spgen=spgen, 
            templates=template_filename, verbose=True,
            n_jobs=1, tmp_mode='memmap',
            tmp_folder=str(tmp_folder))

mr.save_recording_generator(recgen, filename=recording_filename)

set sorter path

In [3]:
user = getpass.getuser()

kilosort_path = f'/home/{user}/Documents/SpikeInterface/code_sorters/KiloSort1'
ss.KilosortSorter.set_kilosort_path(kilosort_path)

kilosort2_path = f'/home/{user}/Documents/SpikeInterface/code_sorters/Kilosort2'
ss.Kilosort2Sorter.set_kilosort2_path(kilosort2_path)

kilosort2_5_path = f'/home/{user}/Documents/SpikeInterface/code_sorters/Kilosort2.5'
ss.Kilosort2_5Sorter.set_kilosort2_5_path(kilosort2_5_path)

kilosort3_path = f'/home/{user}/Documents/SpikeInterface/code_sorters/Kilosort3'
ss.Kilosort3Sorter.set_kilosort3_path(kilosort3_path)

ironclust_path = f'/home/{user}/Documents/SpikeInterface/code_sorters/ironclust/'
ss.IronClustSorter.set_ironclust_path(ironclust_path)
Setting KILOSORT_PATH environment variable for subprocess calls to: /home/samuel.garcia/Documents/SpikeInterface/code_sorters/KiloSort1
Setting KILOSORT2_PATH environment variable for subprocess calls to: /home/samuel.garcia/Documents/SpikeInterface/code_sorters/Kilosort2
Setting KILOSORT2_5_PATH environment variable for subprocess calls to: /home/samuel.garcia/Documents/SpikeInterface/code_sorters/Kilosort2.5
Setting KILOSORT3_PATH environment variable for subprocess calls to: /home/samuel.garcia/Documents/SpikeInterface/code_sorters/Kilosort3
Setting IRONCLUST_PATH environment variable for subprocess calls to: /home/samuel.garcia/Documents/SpikeInterface/code_sorters/ironclust

create study

In [6]:
mearec_filename = workdir / 'recordings_15cells_Neuronexus-32_600s.h5'

if study_folder.is_dir():
    shutil.rmtree(study_folder)

rec  = se.MEArecRecordingExtractor(mearec_filename)
sorting_gt = se.MEArecSortingExtractor(mearec_filename)
print(rec)
print(sorting_gt)

gt_dict = {'rec0' : (rec, sorting_gt) }

study = GroundTruthStudy.create(study_folder, gt_dict)
MEArecRecordingExtractor: 32 channels - 1 segments - 30.0kHz
  file_path: /mnt/data/sam/DataSpikeSorting/mearec_bench_2021/recordings_15cells_Neuronexus-32_600s.h5
MEArecSortingExtractor: 15 units - 1 segments - 30.0kHz
  file_path: /mnt/data/sam/DataSpikeSorting/mearec_bench_2021/recordings_15cells_Neuronexus-32_600s.h5
write_binary_recording with n_jobs 1  chunk_size None

plot probe

In [14]:
study = GroundTruthStudy(study_folder)
rec = study.get_recording()
probe = rec.get_probe()
print(probe)
from probeinterface.plotting import plot_probe
plot_probe(probe)
Probe - 32ch
Out[14]:
(<matplotlib.collections.PolyCollection at 0x7f93854cc370>,
 <matplotlib.collections.PolyCollection at 0x7f947882e7c0>)

run sorters

In [ ]:
sorter_list = ['spykingcircus', 'kilosort2', 'kilosort3', 'tridesclous']
study = GroundTruthStudy(study_folder)
study.run_sorters(sorter_list, mode_if_folder_exists='overwrite', verbose=False)
study.copy_sortings()

collect results

In [4]:
study = GroundTruthStudy(study_folder)
study.copy_sortings()


study.run_comparisons(exhaustive_gt=True, delta_time=1.5)


comparisons = study.comparisons
dataframes = study.aggregate_dataframes()
In [10]:
for (rec_name, sorter_name), comp in comparisons.items():
    print()
    print('*'*20)
    print(rec_name, sorter_name)
    print(comp.count_score)
********************
rec0 spykingcircus
              tp    fn    fp num_gt num_tested tested_id
gt_unit_id                                              
#0             0  2772     0   2772          0        -1
#1          2305     0  2127   2305       4432         0
#2             0  3009     0   3009          0        -1
#3             0  2503     0   2503          0        -1
#4          3135     0     4   3135       3139         2
#5             0  2081     0   2081          0        -1
#6          2192     0     2   2192       2194         5
#7          2723     0    55   2723       2778         3
#8             0  3453     0   3453          0        -1
#9             0  2334     0   2334          0        -1
#10         2280    15     8   2295       2288        11
#11         2588     8    12   2596       2600        10
#12         2721   333  1503   3054       4224         8
#13            0  3020     0   3020          0        -1
#14         3612     0  1070   3612       4682         6

********************
rec0 kilosort2
              tp   fn  fp num_gt num_tested tested_id
gt_unit_id                                           
#0          2765    7   6   2772       2771        29
#1          2299    6   0   2305       2299         8
#2          3008    1   0   3009       3008        19
#3          2502    1   2   2503       2504        25
#4          3117   18   0   3135       3117        10
#5          2076    5   1   2081       2077         7
#6          2188    4   0   2192       2188         3
#7          2717    6   0   2723       2717        26
#8          3447    6   0   3453       3447         4
#9          2323   11   5   2334       2328         6
#10         2112  183  54   2295       2166        31
#11         2592    4   0   2596       2592        11
#12         3051    3   0   3054       3051        14
#13         3019    1   0   3020       3019         1
#14         3603    9   0   3612       3603        22

********************
rec0 tridesclous
              tp  fn  fp num_gt num_tested tested_id
gt_unit_id                                          
#0          2727  45  22   2772       2749        14
#1          2294  11   0   2305       2294         4
#2          3003   6   1   3009       3004         1
#3          2467  36  20   2503       2487         9
#4          3123  12   9   3135       3132        13
#5          2047  34   6   2081       2053        10
#6          2159  33  12   2192       2171         7
#7          2695  28   0   2723       2695         6
#8          3420  33   1   3453       3421         5
#9          2293  41  63   2334       2356        12
#10         2230  65  24   2295       2254         3
#11         2532  64  18   2596       2550         2
#12         3023  31  21   3054       3044         0
#13         2979  41  10   3020       2989         8
#14         3588  24  12   3612       3600        11

********************
rec0 kilosort3
              tp    fn  fp num_gt num_tested tested_id
gt_unit_id                                            
#0          2734    38  12   2772       2746         3
#1          2302     3   0   2305       2302        29
#2          3005     4   2   3009       3007        77
#3          2450    53  96   2503       2546        74
#4          2906   229  26   3135       2932         7
#5          2067    14  42   2081       2109         2
#6          1381   811  56   2192       1437        14
#7          2712    11   2   2723       2714        76
#8          3447     6   0   3453       3447         0
#9          2288    46   3   2334       2291         1
#10         1424   871  52   2295       1476        35
#11            0  2596   0   2596          0        -1
#12         3041    13   0   3054       3041        23
#13         1580  1440   0   3020       1580        11
#14         3573    39  97   3612       3670        32

Agreement matrix

In [11]:
for (rec_name, sorter_name), comp in comparisons.items():
    fig, ax = plt.subplots()
    sw.plot_agreement_matrix(comp, ax=ax)
    fig.suptitle(rec_name+'   '+ sorter_name)

Accuracy vs SNR
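
A hedged sketch of how accuracy vs SNR could be plotted from the aggregated results. It assumes that aggregate_dataframes() returns a 'perf_by_unit' table with 'sorter_name', 'gt_unit_id' and 'accuracy' columns, and that study.get_metrics() returns a per-unit dataframe with an 'snr' column; both are assumptions about this API version.

In [ ]:
# hedged sketch (assumed columns, not executed here): accuracy vs SNR per sorter
perf = dataframes['perf_by_unit']
metrics = study.get_metrics(rec_name='rec0')

fig, ax = plt.subplots()
for sorter_name in perf['sorter_name'].unique():
    mask = perf['sorter_name'] == sorter_name
    # look up the SNR of each ground-truth unit for this sorter
    snr = metrics.loc[perf.loc[mask, 'gt_unit_id'], 'snr']
    ax.scatter(snr, perf.loc[mask, 'accuracy'], label=sorter_name)
ax.set_xlabel('SNR')
ax.set_ylabel('accuracy')
ax.legend()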


Compare old vs new spikeinterface API

Compare "old" vs "new " spikeinterface API

Author : Samuel Garcia 29 March 2021

In spring 2021, the spikeinterface team planned a "big refactoring" of the spikeinterface tool suite.

Main changes are:

  • use neo as much as possible for extractors
  • handle multi-segment recordings (see the sketch after this list)
  • improve performance (pre- and post-processing)
  • add a WaveformExtractor class
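
As a quick illustration of the multi-segment point, here is a minimal sketch of the new API (not part of the original benchmark; the file name is hypothetical):

In [ ]:
# minimal multi-segment sketch: in the new API every recording exposes
# its segments explicitly, and traces are requested per segment
import spikeinterface.extractors as se

rec = se.MEArecRecordingExtractor('recordings.h5')  # hypothetical file
print(rec.get_num_segments())
traces = rec.get_traces(segment_index=0, start_frame=0, end_frame=30000)
print(traces.shape)  # (num_samples, num_channels)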

Here I will benchmark 2 aspects of the "new API":

  • filter with 10 workers on a multi-core machine
  • extract waveforms with 1 worker vs 10 workers

The benchmark is done on a 10 min SpikeGLX file with 384 channels.

The sorting is done with kilosort3.

My machine has 2 Intel(R) Xeon(R) Silver 4210 CPUs @ 2.20GHz with 20 cores each.

In [5]:
from pathlib import Path
import shutil
import time
import matplotlib.pyplot as plt

base_folder = Path('/mnt/data/sam/DataSpikeSorting/eduarda_arthur') 
data_folder = base_folder / 'raw_awake'

Filter with OLD API

Here we:

  1. open the file
  2. lazy filter
  3. cache it
  4. dump to json

The "cache" step is in fact the "compute and save" step.

In [6]:
import spikeextractors as se
import spiketoolkit as st

print('spikeextractors version', se.__version__)
print('spiketoolkit version', st.__version__)

# step 1: open
file_path = data_folder / 'raw_awake_01_g0_t0.imec0.ap.bin'
recording = se.SpikeGLXRecordingExtractor(file_path)

# step 2: lazy filter
rec_filtered = st.preprocessing.bandpass_filter(recording, freq_min=300., freq_max=6000.)
print(rec_filtered)

save_folder = base_folder / 'raw_awake_filtered_old'
if save_folder.is_dir():
    shutil.rmtree(save_folder)
save_folder.mkdir()

save_file = save_folder / 'filetred_recording.dat'
dump_file = save_folder / 'filetred_recording.json'

# step 3: cache
t0 = time.perf_counter()
cached = se.CacheRecordingExtractor(rec_filtered, chunk_mb=50, n_jobs=10, 
    save_path=save_file)
t1 = time.perf_counter()
run_time_filter_old = t1-t0
print('Old spikeextractors cache', run_time_filter_old)

# step 4: dump
cached.dump_to_json(dump_file)
spikeextractors version 0.9.5
spiketoolkit version 0.7.4
<spiketoolkit.preprocessing.bandpass_filter.BandpassFilterRecording object at 0x7f648d3ee130>
Old spikeextractors cache 801.9439885600004

Filter with NEW API

Here we:

  1. open the file
  2. lazy filter
  3. save it

The "save" step is in fact the "compute and save" step.

In [7]:
 
import spikeinterface as si

import spikeinterface.extractors as se
import spikeinterface.toolkit as st
print('spikeinterface version', si.__version__)

# step 1: open
recording = se.SpikeGLXRecordingExtractor(data_folder)
print(recording)

# step 2: lazy filter
rec_filtered = st.bandpass_filter(recording, freq_min=300., freq_max=6000.)
print(rec_filtered)


filter_path = base_folder / 'raw_awake_filtered'
if filter_path.is_dir():
    shutil.rmtree(filter_path)

# step 3 : compute and save with 10 workers
t0 = time.perf_counter()
cached = rec_filtered.save(folder=filter_path,
    format='binary', dtype='int16',
    n_jobs=10,  total_memory="50M", progress_bar=True)
t1 = time.perf_counter()
run_time_filter_new = t1 -t0
print('New spikeinterface filter + save binary', run_time_filter_new)
spikeinterface version 0.90.0
SpikeGLXRecordingExtractor: 385 channels - 1 segments - 30.0kHz
BandpassFilterRecording: 385 channels - 1 segments - 30.0kHz
write_binary_recording with n_jobs 10  chunk_size 3246
write_binary_recording: 100%|██████████| 5546/5546 [00:51<00:00, 108.39it/s]
New spikeinterface filter + save binary 54.79437772196252

Extract waveform with OLD API

Here we use get_unit_waveforms from spiketoolkit.

We do the computation with 1 and then 10 jobs.

In [21]:
from spikeextractors.baseextractor import BaseExtractor
import spikeextractors as se
import spiketoolkit as st
print('spikeextractors version', se.__version__)
print('spiketoolkit version', st.__version__)
spikeextractors version 0.9.5
spiketoolkit version 0.7.4
In [24]:
save_folder = base_folder / 'raw_awake_filtered_old'
dump_file = save_folder / 'filetred_recording.json'
recording = BaseExtractor.load_extractor_from_json(dump_file)

sorting_KS3 = se.KiloSortSortingExtractor(base_folder / 'output_kilosort3')
waveform_folder = base_folder / 'waveforms_extractor_old_1_job'
if waveform_folder.is_dir():
    shutil.rmtree(waveform_folder)
waveform_folder.mkdir()
sorting_KS3.set_tmp_folder(waveform_folder)

t0 = time.perf_counter()
wf, indexes, channels = st.postprocessing.get_unit_waveforms(recording, sorting_KS3,
            max_spikes_per_unit=500, return_idxs=True, chunk_mb=50, n_jobs=1,
            memmap=True)
t1 = time.perf_counter()
run_time_waveform_old_1jobs = t1 - t0
print('OLD API get_unit_waveforms 1 job', run_time_waveform_old_1jobs)
OLD API get_unit_waveforms 1 job 513.5964983040467
In [30]:
save_folder = base_folder / 'raw_awake_filtered_old'
dump_file = save_folder / 'filetred_recording.json'
recording = BaseExtractor.load_extractor_from_json(dump_file)

sorting_KS3_bis = se.KiloSortSortingExtractor(base_folder / 'output_kilosort3')
waveform_folder = base_folder / 'waveforms_extractor_old_10_jobs_'
if waveform_folder.is_dir():
    shutil.rmtree(waveform_folder)
waveform_folder.mkdir()
sorting_KS3_bis.set_tmp_folder(waveform_folder)

t0 = time.perf_counter()
wf, indexes, channels = st.postprocessing.get_unit_waveforms(recording, sorting_KS3_bis,
            max_spikes_per_unit=500, return_idxs=True, chunk_mb=500, n_jobs=10,
            memmap=True, verbose=True)
t1 = time.perf_counter()
run_time_waveform_old_10jobs = t1 - t0
print('OLD API get_unit_waveforms 10 jobs', run_time_waveform_old_10jobs)
Number of chunks: 553 - Number of jobs: 10
Impossible to delete temp file: /mnt/data/sam/DataSpikeSorting/eduarda_arthur/waveforms_extractor_old_10_jobs Error [Errno 16] Device or resource busy: '.nfs0000000004ce04d3000007b8'
OLD API get_unit_waveforms 10 jobs 823.8002076600096

Extract waveform with NEW API

The spikeinterface 0.90 API introduces a more flexible object, WaveformExtractor, to do the same thing (extract snippets).

Here are some code examples and speed benchmarks.

In [39]:
import spikeinterface.extractors as se
from spikeinterface import WaveformExtractor, load_extractor
print('spikeinterface version', si.__version__)

filter_path = base_folder / 'raw_awake_filtered'
filtered_recording = load_extractor(filter_path)

sorting_KS3 = se.KiloSortSortingExtractor(base_folder / 'output_kilosort3')
print(sorting_KS3)
spikeinterface version 0.90.0
KiloSortSortingExtractor: 184 units - 1 segments - 30.0kHz
In [41]:
# 1 worker
waveform_folder = base_folder / 'waveforms_extractor_1_job_new_'
if waveform_folder.is_dir():
    shutil.rmtree(waveform_folder)
we = WaveformExtractor.create(filtered_recording, sorting_KS3, waveform_folder)

t0 = time.perf_counter()
we.set_params(ms_before=3., ms_after=4., max_spikes_per_unit=500)
we.run(n_jobs=1, total_memory="50M", progress_bar=True)
t1 = time.perf_counter()
run_time_waveform_new_1jobs = t1 - t0
print('New WaveformExtractor 1 job', run_time_waveform_new_1jobs)
100%|##########| 278/278 [01:42<00:00,  2.72it/s]
New WaveformExtractor 1 job 115.03656197001692
In [42]:
# 10 workers
waveform_folder = base_folder / 'waveforms_extractor_10_job_new_'
if waveform_folder.is_dir():
    shutil.rmtree(waveform_folder)
we = WaveformExtractor.create(filtered_recording, sorting_KS3, waveform_folder)

t0 = time.perf_counter()
we.set_params(ms_before=3., ms_after=4., max_spikes_per_unit=500)
we.run(n_jobs=10, total_memory="500M", progress_bar=True)
t1 = time.perf_counter()
run_time_waveform_new_10jobs = t1 - t0
print('New WaveformExtractor 10 jobs', run_time_waveform_new_10jobs)
100%|██████████| 278/278 [00:31<00:00,  8.87it/s]
New WaveformExtractor 10 jobs 48.819815920025576
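
Beyond speed, the WaveformExtractor gives direct access to the snippets and average templates per unit, which is where the added flexibility shows. A minimal sketch using the extractor computed above; the shape comments are assumptions about the default layout:

In [ ]:
# retrieve snippets and the average template for one unit
unit_id = sorting_KS3.get_unit_ids()[0]
wfs = we.get_waveforms(unit_id)
print(wfs.shape)       # expected (num_spikes, num_samples, num_channels)
template = we.get_template(unit_id)
print(template.shape)  # expected (num_samples, num_channels)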

Conclusion

For the filter with 10 workers, the speedup is x14.

For the waveform extractor with 1 worker, the speedup is x4.

For the waveform extractor with 10 workers, the speedup is x16.

In [11]:
speedup_filter = run_time_filter_old / run_time_filter_new
print('speedup filter', speedup_filter)
speedup filter 14.635515939778026
In [43]:
speedup_waveform_1jobs = run_time_waveform_old_1jobs / run_time_waveform_new_1jobs
print('speedup waveforms 1 job', speedup_waveform_1jobs)

speedup_waveform_10jobs = run_time_waveform_old_10jobs / run_time_waveform_new_10jobs
print('speedup waveforms 10 jobs', speedup_waveform_10jobs)
speedup waveforms 1 job 4.464637064152789
speedup waveforms 10 jobs 16.874299751754943
In [ ]: