diff --git a/.bumpversion.toml b/.bumpversion.toml
new file mode 100644
index 00000000..3cdaae4f
--- /dev/null
+++ b/.bumpversion.toml
@@ -0,0 +1,27 @@
+
+[tool.bumpversion]
+parse = "(?P<major>\\d+)\\.(?P<minor>\\d+)\\.(?P<patch>\\d+)"
+serialize = ["{major}.{minor}.{patch}"]
+regex = false
+current_version = "1.0.4"
+ignore_missing_version = false
+search = "{current_version}"
+replace = "{new_version}"
+tag = false
+sign_tags = false
+tag_name = "{new_version}"
+tag_message = "Bump version: {current_version} → {new_version}"
+allow_dirty = false
+commit = false
+message = "Bump version: {current_version} → {new_version}"
+commit_args = ""
+
+[[tool.bumpversion.files]]
+filename = "pyproject.toml"
+search = "version = \"{current_version}\""
+replace = "version = \"{new_version}\""
+
+[[tool.bumpversion.files]]
+filename = "docs/conf.py"
+search = "release = \"{current_version}\""
+replace = "release = \"{new_version}\""
\ No newline at end of file
diff --git a/.github/workflows/publish-distribution.yaml b/.github/workflows/publish-distribution.yaml
new file mode 100644
index 00000000..97fb9f75
--- /dev/null
+++ b/.github/workflows/publish-distribution.yaml
@@ -0,0 +1,36 @@
+name: Publish Distribution to PyPI
+
+on:
+  push:
+    tags:
+      - '[0-9]+.[0-9]+.[0-9]+'
+
+jobs:
+  build-and-publish-final-dist:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Check out code
+        uses: actions/checkout@v4
+
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.11'
+
+      - name: Install Poetry
+        run: |
+          pip install poetry
+
+      - name: Install dependencies
+        run: |
+          poetry install --without dev
+
+      - name: Build the package
+        run: |
+          poetry build
+
+      - name: Publish to PyPI
+        env:
+          POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
+        run: |
+          poetry publish
diff --git a/.readthedocs.yaml b/.readthedocs.yaml
index 16a9598a..3b008d81 100644
--- a/.readthedocs.yaml
+++ b/.readthedocs.yaml
@@ -10,11 +10,17 @@ build:
     # https://python-poetry.org/docs/#installing-manually
     - pip install poetry
     # Tell poetry to not use a virtual environment
-    - poetry config virtualenvs.create false
+    # - poetry config virtualenvs.create false
   post_install:
     # Install dependencies with 'docs' dependency group
     # https://python-poetry.org/docs/managing-dependencies/#dependency-groups
-    - poetry install --with dev
+    # VIRTUAL_ENV needs to be set manually for now.
+    # See https://github.com/readthedocs/readthedocs.org/pull/11152/
+    - VIRTUAL_ENV=$READTHEDOCS_VIRTUALENV_PATH poetry install --with dev
 sphinx:
-  configuration: docs/conf.py
\ No newline at end of file
+  configuration: docs/conf.py
+
+# python:
+#   install:
+#     - requirements: docs/requirements.txt
\ No newline at end of file
diff --git a/RELEASE.rst b/RELEASE.rst
new file mode 100644
index 00000000..761c8b77
--- /dev/null
+++ b/RELEASE.rst
@@ -0,0 +1,70 @@
+Version Release Guidelines
+==========================
+
+This document describes the guidelines for releasing new versions of the library. We follow semantic versioning, which means our version numbers have three parts: MAJOR.MINOR.PATCH.
+
+- MAJOR version when you make incompatible API changes
+- MINOR version when you add functionality in a backwards-compatible manner
+- PATCH version when you make backwards-compatible bug fixes
+
+
+1. Install the `bump-my-version` package:
+
+   ```
+   pip install --upgrade bump-my-version
+   ```
+--------------------
+
+2. Create a new branch for the release from the dev branch:
+
+   ```
+   git checkout -b release/x.y.z
+   ```
+--------------------
+
+3. Update the version number using the `bump-my-version` command:
+
+   ```
+   bump-my-version bump patch
+   ```
+   or
+   ```
+   bump-my-version bump minor
+   ```
+   or
+   ```
+   bump-my-version bump major
+   ```
+--------------------
+
+4. Commit the changes with the following message and push the changes to the release branch:
+
+   ```
+   git commit -m "Bump version: {current_version} → {new_version}"
+   ```
+
+   ```
+   git push origin release/x.y.z
+   ```
+
+--------------------
+
+5. Create a pull request from the release branch to the dev branch.
+
+6. Once the pull request is approved and merged, create a new pull request from the dev branch to the master branch.
+
+7. Once the pull request is approved and merged, create the tag on the master branch to invoke the package publishing workflow:
+
+   ```
+   git tag -a x.y.z -m "Release x.y.z"
+   ```
+
+   ```
+   git push origin x.y.z
+   ```
+--------------------
+
+8. Once the tag is pushed, the package publishing workflow will be triggered and the package will be published to PyPI.
+
+9. Once the package is published, create a new release on GitHub with the tag name and the release notes (generate them automatically).
+
diff --git a/docs/_static/.gitkeep b/docs/_static/.gitkeep
new file mode 100644
index 00000000..e69de29b
diff --git a/docs/conf.py b/docs/conf.py
index e17de93e..7854277a 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -14,7 +14,7 @@
 project = "NeuroBench"
 copyright = "2024, Jason Yik, Noah Pacik-Nelson, Korneel Van Den Berghe"
 author = "Jason Yik, Noah Pacik-Nelson, Korneel Van Den Berghe"
-release = "v1.0.0"
+release = "1.0.4"
 
 # -- General configuration ---------------------------------------------------
 # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
diff --git a/docs/neurobench.benchmarks.rst b/docs/neurobench.benchmarks.rst
index 3f5d3bb6..19df9857 100644
--- a/docs/neurobench.benchmarks.rst
+++ b/docs/neurobench.benchmarks.rst
@@ -4,8 +4,8 @@ neurobench.benchmarks
 Benchmark
 ^^^^^^^^^
 
-.. automodule:: neurobench.benchmarks.benchmark
-   :members:
+.. automodule:: neurobench.benchmarks
+   :members: Benchmark
    :undoc-members:
    :show-inheritance:
diff --git a/docs/neurobench.datasets.rst b/docs/neurobench.datasets.rst
index ae9c51b8..4ae1f52b 100644
--- a/docs/neurobench.datasets.rst
+++ b/docs/neurobench.datasets.rst
@@ -15,21 +15,6 @@ as linear 16-bit, single-channel, pulse code modulated values, at a 16 kHz sampl
    :show-inheritance:
 
 
-DVS Gestures
-^^^^^^^^^^^^
-
-The IBM Dynamic Vision Sensor (DVS) Gesture dataset is composed of recordings of 29 distinct individuals executing 10 different
-types of gestures, including but not limited to clapping, waving, etc. Additionally, an 11th gesture class is included that comprises
-gestures that cannot be categorized within the first 10 classes. The gestures are recorded under four distinct lighting conditions,
-and each gesture is associated with a label that indicates the corresponding lighting condition under which it was performed.
-
-.. automodule:: neurobench.datasets.dvs_gesture
-   :special-members: __init__, __getitem__
-   :members:
-   :undoc-members:
-   :show-inheritance:
-
-
 Prophesee Megapixel Automotive
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 00000000..ef9bdecb
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,4 @@
+sphinx-rtd-theme
+tqdm
+tonic
+numpy
\ No newline at end of file
diff --git a/docs/tutorial/index.rst b/docs/tutorial/index.rst
index 7072657e..c4300457 100644
--- a/docs/tutorial/index.rst
+++ b/docs/tutorial/index.rst
@@ -52,7 +52,10 @@ using snnTorch.
         snn.Leaky(beta=beta, spike_grad=spike_grad, init_hidden=True, output=True),
     )
 
-To get started, we will load our desired dataset in a dataloader:
+To get started, we will load our desired dataset in a dataloader. Note that any
+torch DataLoader can be used for the benchmark; it is not constrained to the datasets
+available in the harness. Check out the `Tonic library <https://tonic.readthedocs.io/en/latest/>`_,
+an excellent resource for neuromorphic datasets!
 
 .. code:: python
 
diff --git a/neurobench/benchmarks/__init__.py b/neurobench/benchmarks/__init__.py
index 16e605b0..1db9265e 100644
--- a/neurobench/benchmarks/__init__.py
+++ b/neurobench/benchmarks/__init__.py
@@ -1 +1 @@
-from .benchmark import *
+from .benchmark import Benchmark
diff --git a/neurobench/benchmarks/benchmark.py b/neurobench/benchmarks/benchmark.py
index 39c5b803..183645da 100644
--- a/neurobench/benchmarks/benchmark.py
+++ b/neurobench/benchmarks/benchmark.py
@@ -5,7 +5,12 @@
 from . import static_metrics, workload_metrics
 
 # workload metrics which require hooks
-requires_hooks = ["activation_sparsity", "number_neuron_updates", "synaptic_operations"]
+requires_hooks = [
+    "activation_sparsity",
+    "number_neuron_updates",
+    "synaptic_operations",
+    "membrane_updates",
+]
 
 
 class Benchmark:
diff --git a/neurobench/benchmarks/hooks.py b/neurobench/benchmarks/hooks.py
index c5390862..ee0ad6d8 100644
--- a/neurobench/benchmarks/hooks.py
+++ b/neurobench/benchmarks/hooks.py
@@ -21,6 +21,8 @@ def __init__(self, layer, connection_layer=None, prev_act_layer_hook=None):
         """
         self.activation_outputs = []
         self.activation_inputs = []
+        self.pre_fire_mem_potential = []
+        self.post_fire_mem_potential = []
         if layer is not None:
             self.hook = layer.register_forward_hook(self.hook_fn)
             self.hook_pre = layer.register_forward_pre_hook(self.pre_hook_fn)
@@ -46,6 +48,8 @@ def pre_hook_fn(self, layer, input):
         """
         self.activation_inputs.append(input)
+        if self.spiking:
+            self.pre_fire_mem_potential.append(layer.mem)
 
     def hook_fn(self, layer, input, output):
         """
@@ -62,6 +66,7 @@ def hook_fn(self, layer, input, output):
         """
         if self.spiking:
             self.activation_outputs.append(output[0])
+            self.post_fire_mem_potential.append(layer.mem)
         else:
             self.activation_outputs.append(output)
 
@@ -75,6 +80,8 @@ def reset(self):
         """Resets the stored activation outputs and inputs."""
         self.activation_outputs = []
         self.activation_inputs = []
+        self.pre_fire_mem_potential = []
+        self.post_fire_mem_potential = []
 
     def close(self):
         """Remove the registered hook."""
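The hook changes above snapshot the layer's membrane state around each forward step. For readers unfamiliar with this mechanism, here is a minimal standalone sketch of the same pattern outside the harness. It is illustrative only: the `lif`, `pre_mem`, and `post_mem` names are ours, and it assumes an snnTorch `Leaky` layer whose `mem` attribute is populated once a first forward pass has run, the same assumption `ActivationHook` makes.

```python
import torch
import snntorch as snn

# Hypothetical demo of the pre/post membrane snapshot pattern used by ActivationHook.
lif = snn.Leaky(beta=0.9, init_hidden=True)
x = torch.ones(4, 10)  # (batch, features) input current for one timestep
lif(x)                 # warm-up step so lif.mem takes its final shape

pre_mem, post_mem = [], []  # analogous to pre/post_fire_mem_potential
lif.register_forward_pre_hook(lambda layer, inp: pre_mem.append(layer.mem))
lif.register_forward_hook(lambda layer, inp, out: post_mem.append(layer.mem))

for _ in range(3):  # three further timesteps
    lif(x)

# Membranes that differ between the two snapshots of a step count as updated.
updates = sum(int(torch.count_nonzero(a - b)) for a, b in zip(pre_mem, post_mem))
print(updates)
```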
diff --git a/neurobench/benchmarks/workload_metrics.py b/neurobench/benchmarks/workload_metrics.py
index 5e5fca2f..114f0831 100644
--- a/neurobench/benchmarks/workload_metrics.py
+++ b/neurobench/benchmarks/workload_metrics.py
@@ -2,6 +2,7 @@
 import numpy as np
 from ..utils import check_shape, make_binary_copy, single_layer_MACs
 from .hooks import ActivationHook, LayerHook
+from collections import defaultdict
 
 
 class AccumulatedMetric:
@@ -121,6 +122,68 @@ def activation_sparsity(model, preds, data):
     return sparsity
 
 
+class membrane_updates(AccumulatedMetric):
+    """
+    Number of membrane potential updates.
+
+    This metric can only be used for spiking models implemented with snnTorch.
+
+    """
+
+    def __init__(self):
+        """Init metric state."""
+        self.total_samples = 0
+        self.neuron_membrane_updates = defaultdict(int)
+
+    def reset(self):
+        """Reset metric state."""
+        self.total_samples = 0
+        self.neuron_membrane_updates = defaultdict(int)
+
+    def __call__(self, model, preds, data):
+        """
+        Number of membrane updates of the model forward.
+
+        Args:
+            model: A NeuroBenchModel.
+            preds: A tensor of model predictions.
+            data: A tuple of data and labels.
+        Returns:
+            float: Number of membrane potential updates.
+
+        """
+        for hook in model.activation_hooks:
+            for index_mem in range(len(hook.pre_fire_mem_potential) - 1):
+                pre_fire_mem = hook.pre_fire_mem_potential[index_mem + 1]
+                post_fire_mem = hook.post_fire_mem_potential[index_mem + 1]
+                nr_updates = torch.count_nonzero(pre_fire_mem - post_fire_mem)
+                self.neuron_membrane_updates[str(type(hook.layer))] += int(nr_updates)
+            self.neuron_membrane_updates[str(type(hook.layer))] += int(
+                torch.numel(hook.post_fire_mem_potential[0])
+            )
+        self.total_samples += data[0].size(0)
+        return self.compute()
+
+    def compute(self):
+        """
+        Compute membrane updates using accumulated data.
+
+        Returns:
+            float: Total number of updates to neuron membrane potentials, summed over
+            all neurons and normalized by the number of samples processed.
+
+        """
+        if self.total_samples == 0:
+            return 0
+
+        total_mem_updates = 0
+        for key in self.neuron_membrane_updates:
+            total_mem_updates += self.neuron_membrane_updates[key]
+
+        total_updates_per_sample = total_mem_updates / self.total_samples
+        return total_updates_per_sample
+
+
 def number_neuron_updates(model, preds, data):
     """
     Number of times each neuron type is updated.
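For reviewers who want to try the new metric: it is requested by name like the other workload metrics, and the `requires_hooks` change above registers the membrane snapshots automatically. Below is a minimal sketch, not a verified script; it reuses the `net` and `test_set_loader` from the DVS Gesture example later in this diff and assumes the benchmark accepts empty pre/postprocessor lists, as the existing examples suggest.

```python
from neurobench.models import SNNTorchModel
from neurobench.benchmarks import Benchmark

model = SNNTorchModel(net)  # `net` as in the DVS Gesture example below

static_metrics = ["footprint", "connection_sparsity"]
workload_metrics = ["membrane_updates"]  # new metric, requested by name

# No pre/postprocessors are needed just to count membrane updates.
benchmark = Benchmark(model, test_set_loader, [], [], [static_metrics, workload_metrics])
results = benchmark.run()
print(results["membrane_updates"])  # mean membrane-potential updates per sample
```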
diff --git a/neurobench/datasets/MSWC_dataset.py b/neurobench/datasets/MSWC_dataset.py index 5a0bc1bc..c5eb92bf 100644 --- a/neurobench/datasets/MSWC_dataset.py +++ b/neurobench/datasets/MSWC_dataset.py @@ -257,6 +257,7 @@ class MSWC(Dataset): Subset version (https://huggingface.co/datasets/NeuroBench/mswc_fscil_subset) of the original MSWC dataset (https://mlcommons.org/en/multilingual-spoken-words/) for a few-shot class-incremental learning (FSCIL) task consisting of 200 voice commands keywords: + - 100 base classes available for pre-training with: - 500 train samples - 100 validation samples diff --git a/neurobench/datasets/__init__.py b/neurobench/datasets/__init__.py index 3a5110c8..07067cfa 100644 --- a/neurobench/datasets/__init__.py +++ b/neurobench/datasets/__init__.py @@ -21,12 +21,6 @@ def Gen4DetectionDataLoader(*args, **kwargs): )(*args, **kwargs) -def DVSGesture(*args, **kwargs): - return _lazy_import("neurobench.datasets", ".dvs_gesture", "DVSGesture")( - *args, **kwargs - ) - - def MackeyGlass(*args, **kwargs): return _lazy_import("neurobench.datasets", ".mackey_glass", "MackeyGlass")( *args, **kwargs diff --git a/neurobench/datasets/dvs_gesture.py b/neurobench/datasets/dvs_gesture.py deleted file mode 100644 index e4e1db05..00000000 --- a/neurobench/datasets/dvs_gesture.py +++ /dev/null @@ -1,294 +0,0 @@ -import torch -from torch.utils.data import DataLoader - -from tonic.datasets import DVSGesture as tonic_DVSGesture - -# from glob import glob - -from neurobench.datasets.dataset import NeuroBenchDataset - -import os -import numpy as np -import matplotlib.pyplot as plt - -# make animation -from matplotlib.animation import FuncAnimation - - -class DVSGesture(NeuroBenchDataset): - """ - Installs DVSGesture Dataset with individual events in each file, if not yet - installed, else pass the path of the tonic DVSGesture install. - - Data information: - - Event rate: 1MHz -> dt 1e-6 - - Sample length: 1.7 seconds - - Default timestep for frames: 5 ms - - For possible preprocessing functions, see: - https://docs.prophesee.ai/stable/tutorials/ml/data_processing/event_preprocessing.html?highlight=metavision_ml%20preprocessing - - """ - - def __init__( - self, path, split="testing", data_type="frames", preprocessing="stack" - ): - """ - Initialization will load in data from path if possible, else will download - dataset into path. - - Args: - path (str): Path of DVS Gesture dataset folder if applicable, else the destination of DVS Gesture dataset. - split (str): Return testing or training data. - data_type (str): If 'frames', returns frames with preprocessing applied; else returns raw events. - preprocessing (str): Preprocessing to get frames from raw events. - - """ - # download or load data - if split == "training": - self.dataset = tonic_DVSGesture(save_to=path) - else: - self.dataset = tonic_DVSGesture(save_to=path, train=False) - - self.filenames = self.dataset.data - self.path = path - self.prepr = preprocessing - self.data_type = data_type - - # sample parameters: - self._deltat = 5000 # DVS is in microseconds -> deltat = 5ms - self._T = 1700 # in ms, sample time is 1.7 sec - self.random_window = False - - def __len__(self): - """ - Returns the number of samples in the dataset. - - Returns: - int: The number of samples in the dataset. - - """ - return len(self.filenames) - - def __getitem__(self, idx): - """ - Getter method for test data in the DataLoader. - - Args: - idx (int): Index of the sample. 
- - Returns: - sample (tensor): Individual data sample, which can be a sequence of frames or raw data. - target (tensor): Corresponding gesture label. - - """ - structured_array = self.dataset[idx][0] - - # label = torch.nn.functional.one_hot(torch.tensor(self.dataset[idx][1]), num_classes=11) - label = torch.tensor(self.dataset[idx][1]) - - # get data - x_data = np.array(structured_array["x"], dtype=np.int16) - y_data = np.array(structured_array["y"], dtype=np.int16) - p_data = np.array(structured_array["p"], dtype=bool) - t_data = np.array( - structured_array["t"], dtype=np.int64 - ) # time is in microseconds - - xypt = torch.stack( - ( - torch.tensor(x_data), - torch.tensor(y_data), - torch.tensor(p_data), - torch.tensor(t_data), - ), - dim=1, - ) - - # create sample - t_end = ( - t_data[-1] - self._T * 1000 - ) # find latest time at which we can sample including buffer of factor 1.5 (*1000 to convert to microseconds) - start_time = np.random.randint(0, t_end) if self.random_window else 0 - sample = xypt[ - (start_time <= xypt[:, 3]) & (xypt[:, 3] <= (start_time + self._T * 1000)) - ] - sample[:, 3] = sample[:, 3] - sample[0, 3] # shift timestamps - tbins = self._T * 1000 // self._deltat - if self.data_type == "frames": - # add own preprocessing functions - if self.prepr == "histo_diff": - events = histogram_difference_preprocessing( - sample, - tbins=tbins, - delta_t=self._deltat, - h_og=128, - w_og=128, - display_frame=False, - ) - return events, label - - elif self.prepr == "stack": - events = stack_preprocessing( - sample, - delta_t=self._deltat, - tbins=tbins, - h_og=128, - w_og=128, - display_frame=False, - ) - return events, label - - return sample, label - - def set_sample_params(self, delta_t=5, length=1700, random_window=False): - """ - Sets sample parameters used if frames are created from events. - - Args: - delta_t (int): Time steps to stack events into frames (in milliseconds). - length (int): Length in milliseconds of each sample. - random_window (bool): If True, the sample will be a random time window of length within the gesture. - - """ - self._deltat = delta_t * 1000 # convert to microseconds - self._T = length - self.random_window = random_window - - -def stack_preprocessing( - xypt, delta_t=5000, tbins=200, h_og=128, w_og=128, channels=3, display_frame=False -): - """ - Applies stack preprocessing to events. If at least one event has occurred at (x,y) - in delta_t corresponding channel (pos or neg) will be 1, else zero. - - Args: - delta_t (int): Time steps to stack events into frames (in milliseconds). - tbins (int): Number of frames required. - h_og (int): Number of pixels in height. - w_og (int): Number of pixels in width. - channels (int): Number of channels in each frame (default 3 for plotting purposes). - display_frame (bool): If True, will create an animation to visualize event frames. 
- - """ - frames = np.zeros((tbins, channels, h_og, w_og)) - for frame in frames: - # delete prev neg times - xypt_new = xypt[xypt[:, 3] >= 0] - xypt = xypt_new - - # change timestamps - xypt[:, 3] = xypt[:, 3] - delta_t - - xypt_sub = xypt[xypt[:, 3] <= 0] # events for the current frame - pos_pol = np.unique(xypt_sub[xypt_sub[:, 2] is True][:, :2], axis=0) - neg_pol = np.unique(xypt_sub[xypt_sub[:, 2] is False][:, :2], axis=0) - - frame[0, :, :][pos_pol[:, 0], pos_pol[:, 1]] = 1 - frame[1, :, :][neg_pol[:, 0], neg_pol[:, 1]] = 1 - - if display_frame: - frame = frame.astype(float) / np.max(frame) - - animation = FuncAnimation( - fig, update, frames=tbins, fargs=(frames,), interval=delta_t / 1000 - ) - animation.save("test.gif") - plt.suptitle("Stack preprocessing") - plt.show() - - return frames - - -def histogram_difference_preprocessing( - xypt, delta_t=5000, tbins=200, h_og=128, w_og=128, channels=3, display_frame=False -): - """ - Applies histogram preprocessing to events. For every positive (pos) or negative - (neg) event that has occurred at (x,y) in delta_t, 1 will be added to (x,y) in the - corresponding channel (pos or neg). - - Args: - delta_t (int): Time steps to stack events into frames (in milliseconds). - tbins (int): Number of frames required. - h_og (int): Number of pixels in height. - w_og (int): Number of pixels in width. - channels (int): Number of channels in each frame (default 3 for plotting purposes). - display_frame (bool): If True, will create an animation to visualize event frames. - - """ - histogram = np.zeros((tbins, channels, h_og, w_og)) - for frame in histogram: - # delete prev neg times - xypt_new = xypt[xypt[:, 3] >= 0] - xypt = xypt_new - - # change timestamps - xypt[:, 3] = xypt[:, 3] - delta_t - - xypt_sub = xypt[xypt[:, 3] <= 0] # events for the current frame - pos_pol, pos_count = np.unique( - xypt_sub[xypt_sub[:, 2] is True][:, :2], axis=0, return_counts=True - ) - neg_pol, neg_count = np.unique( - xypt_sub[xypt_sub[:, 2] is False][:, :2], axis=0, return_counts=True - ) - - counts_dict = {} - - # Update counts from the positives - for value, count in zip(pos_pol, pos_count): - counts_dict[tuple(value)] = counts_dict.get(tuple(value), 0) + count - - # Update counts from the negatives - for value, count in zip(neg_pol, neg_count): - counts_dict[tuple(value)] = counts_dict.get(tuple(value), 0) - count - - # Convert the dictionary into a NumPy array - array_data = [[*key, value] for key, value in counts_dict.items()] - result_array = np.array(array_data) - pos_pol = result_array[result_array[:, 2] > 0] - neg_pol = result_array[result_array[:, 2] < 0] - frame[0, :, :][pos_pol[:, 0], pos_pol[:, 1]] = pos_pol[:, 2] - frame[1, :, :][neg_pol[:, 0], neg_pol[:, 1]] = -neg_pol[ - :, 2 - ] # avoid clipping between [0,1] - - if display_frame: - frame = frame.astype(float) / np.max(frame) - - animation = FuncAnimation( - fig, update, frames=tbins, fargs=(histogram,), interval=5 - ) - animation.save("waving_hand.gif", fps=1 / (5e-3)) - - plt.suptitle("Histogram difference method") - plt.show() - - return histogram - - -fig, ax = plt.subplots() - - -def update(frame, frames): - """Helper function for animation.""" - ax.clear() - image = frames[frame].transpose(1, 2, 0) - - ax.imshow(image, cmap="brg") # You can adjust the colormap as needed - ax.set_title(f"Frame {frame}") - - -if __name__ == "__main__": - path = os.curdir - dataset = DVSGesture( - os.path.join(path, "data/dvs_gesture"), - split="testing", - preprocessing="histo_diff", - ) - - dataloader = 
DataLoader(dataset, batch_size=16, shuffle=True) - for local_batch, local_labels in dataloader: - print(local_batch[0].shape, local_labels.shape) diff --git a/neurobench/datasets/primate_reaching.py b/neurobench/datasets/primate_reaching.py index 08ce2f49..60825c53 100644 --- a/neurobench/datasets/primate_reaching.py +++ b/neurobench/datasets/primate_reaching.py @@ -99,7 +99,7 @@ def __init__( "indy_20160630_01.mat": "197413a5339630ea926cbd22b8b43338", "indy_20160622_01.mat": "c33d5fff31320d709d23fe445561fb6e", "loco_20170301_05.mat": "47342da09f9c950050c9213c3df38ea3", - "loco_20170217_02.mat": "739b70762d838f3a1f358733c426bb02", + "loco_20170215_02.mat": "739b70762d838f3a1f358733c426bb02", "loco_20170210_03.mat": "4cae63b58c4cb9c8abd44929216c703b", } @@ -224,7 +224,7 @@ def load_data(self): # Define the segments' start & end indices self.start_end_indices = np.array(self.get_flag_index(target_pos)) - self.time_segments = np.array(self.split_into_segments(self.start_end_indices)) + self.time_segments = np.array(self.split_into_segments(self.start_end_indices, target_pos.shape[1])) spike_train = np.zeros((*spikes.shape, len(new_t)), dtype=np.int8) @@ -290,7 +290,8 @@ def split_data(self): train_len = math.floor(self.train_ratio * sub_length) val_len = math.floor((sub_length - train_len) / 2) - offset = int(np.round(self.bin_width / SAMPLING_RATE)) * self.num_steps + # offset = int(np.round(self.bin_width / SAMPLING_RATE)) * self.num_steps + offset = 0 # split the data into 4 equal parts # for each part, split the data according to training, testing and validation split @@ -331,8 +332,10 @@ def remove_segments_by_length(self): )[0] @staticmethod - def split_into_segments(indices): + def split_into_segments(indices, last_idx): """Combine the start and end index into a NumPy array.""" + indices = np.insert(indices, 0, 0) + indices = np.append(indices, [last_idx]) start_end = np.array([indices[:-1], indices[1:]]) return np.transpose(start_end) diff --git a/neurobench/examples/dvs_gesture/CSNN.py b/neurobench/examples/dvs_gesture/CSNN.py deleted file mode 100644 index 17c8d26e..00000000 --- a/neurobench/examples/dvs_gesture/CSNN.py +++ /dev/null @@ -1,137 +0,0 @@ -import torch -from torch.utils.data import DataLoader - -import snntorch as snn - -from torch import nn -from snntorch import surrogate -from snntorch import utils -# datasets -from neurobench.datasets.dvs_gesture import DVSGesture -from neurobench.models import SNNTorchModel -from neurobench.postprocessing.postprocessor import choose_max_count - -from tqdm import tqdm - -# from torch.profiler import profile, record_function, ProfilerActivity - -class Conv_SNN(nn.Module): - def __init__(self): - super(Conv_SNN,self).__init__() - beta = .9 - alpha = 0.95 # a 1st order if alpha = 0 - - self.reduce = nn.AvgPool3d(kernel_size=(1, 4, 4), stride=(1, 4, 4)) - grad = surrogate.fast_sigmoid() - stride = 2 - self.pool1 = nn.AvgPool2d(2,stride=stride) - self.conv1 = nn.Conv2d(3,24,3,1) - self.syn1 = snn.Synaptic(alpha=alpha, beta= beta, spike_grad = grad) - - self.pool2 = nn.AvgPool2d(2,stride=stride) - self.conv2 = nn.Conv2d(24,24,3, 1) - self.syn2 = snn.Synaptic(alpha=alpha, beta= beta, spike_grad = grad) - - self.pool3 = nn.AvgPool2d(2,stride=stride) - self.conv3 = nn.Conv2d(24, 64,3,1) - self.syn3 = snn.Synaptic(alpha=alpha, beta= beta, spike_grad = grad) - - self.lin1 = nn.Linear(1024,11) - self.syn4 = snn.Synaptic(alpha=alpha, beta= beta, spike_grad = grad) - - self.lin2 = nn.Linear(128,11) - - self.mem1, self.cur1 = 
self.syn1.init_synaptic() - self.mem2, self.cur2 = self.syn2.init_synaptic() - self.mem3, self.cur3 = self.syn3.init_synaptic() - self.mem4, self.cur4 = self.syn4.init_synaptic() - - - def forward(self, frame, warmup_frames = 0): - frame = self.reduce(frame).to(dtype=torch.float32) - # frame = frame.to(dtype=torch.float32) - # frame = transforms.Resize((32,32))(frame).to(dtype=torch.float32) - x = self.conv1(frame) - x = self.pool1(x) - # print(x.shape) - x, self.mem1, self.cur1 = self.syn1(x, self.mem1,self.cur1) - - x = self.conv2(x) - x = self.pool2(x) - x, self.mem2, self.cur2 = self.syn2(x, self.mem2,self.cur2) - - x = self.conv3(x) - - x, self.mem3, self.cur3 = self.syn3(x, self.mem3,self.cur3) - - x = x.view(x.shape[0],-1) - x = self.lin1(x) - - spks, self.mem4, self.cur4 = self.syn4(x, self.mem4,self.cur4) - return spks.reshape(-1,11).detach(), self.mem4 - def reset(self): - self.mem1, self.cur1 = self.syn1.init_synaptic() - self.mem2, self.cur2 = self.syn2.init_synaptic() - self.mem3, self.cur3 = self.syn3.init_synaptic() - self.mem4, self.cur4 = self.syn4.init_synaptic() - - def single_forward(self, frames, warmup_frames = 0): - self.reset() - - out_spk = 0 - # from [nr_batches,nr_frames,c,h,w] -> [nr_frames,nr_batches,c,h,w] - # frames = frames.transpose(1,0) - - # Data is expected to be shape (batch, timestep, features*) - for step in range(frames.shape[1]): - frame = frames[:,step,:,:,:] - frame = self.reduce(frame).to(dtype=torch.float32) - # frame = frame.to(dtype=torch.float32) - # frame = transforms.Resize((32,32))(frame).to(dtype=torch.float32) - x = self.conv1(frame) - x = self.pool1(x) - x, self.mem1, self.cur1 = self.syn1(x, self.mem1,self.cur1) - x = self.conv2(x) - x = self.pool2(x) - x, self.mem2, self.cur2 = self.syn2(x, self.mem2,self.cur2) - x = self.conv3(x) - - x, self.mem3, self.cur3 = self.syn3(x, self.mem3,self.cur3) - x = x.view(x.shape[0],-1) - x = self.lin1(x) - - x, self.mem4, self.cur4 = self.syn4(x, self.mem4,self.cur4) - - if step >= warmup_frames: - out_spk += x - - - prediction = torch.nn.functional.softmax(out_spk.reshape(11,-1), dim=0) - return prediction - - def fit(self, dataloader_training, warmup_frames, optimizer, device, nr_episodes = 10): - for _ in tqdm(range(nr_episodes)): - for frames, labels in dataloader_training: - # Print GPU memory - # with profile(activities=[ProfilerActivity.CPU], record_shapes=True, profile_memory=True) as prof: - # with record_function("model_inference"): - prediction = self.single_forward(frames.to(device), warmup_frames) - - # Add a delay to allow profiler to collect data - # import time - # time.sleep(5) - # print(prof.key_averages().table(sort_by="self_cpu_memory_usage")) - # prof.export_chrome_trace("trace.json") - # print(prof.key_averages(group_by_input_shape=True).table(sort_by="cuda_memory_usage", row_limit=10)) - - label_tensor = labels.clone().detach().to(device) - targets_one_hot = torch.nn.functional.one_hot(label_tensor, num_classes=11).transpose(1,0) - - loss = torch.nn.functional.smooth_l1_loss(prediction,targets_one_hot) - optimizer.zero_grad() - loss.backward() - optimizer.step() - print(loss.item()) - - - diff --git a/neurobench/examples/dvs_gesture/DVS_Gesture_tutorial.ipynb b/neurobench/examples/dvs_gesture/DVS_Gesture_tutorial.ipynb new file mode 100644 index 00000000..2f971fbf --- /dev/null +++ b/neurobench/examples/dvs_gesture/DVS_Gesture_tutorial.ipynb @@ -0,0 +1,306 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "yGm4fad3M-Sr" + }, + "source": [ + "# DVS 
Gesture Benchmark Tutorial\n",
+    "\n",
+    "This tutorial aims to provide insight into how the NeuroBench framework is organized and how you can use it to benchmark your own models!\n",
+    "\n",
+    "## About DVS Gesture:\n",
+    "The IBM Dynamic Vision Sensor (DVS) Gesture dataset is composed of recordings of 29 distinct individuals executing 10 different types of gestures, including but not limited to clapping, waving, etc. Additionally, an 11th gesture class is included that comprises gestures that cannot be categorized within the first 10 classes. The gestures are recorded under four distinct lighting conditions, and each gesture is associated with a label that indicates the corresponding lighting condition under which it was performed.\n",
+    "\n",
+    "### Benchmark Task:\n",
+    "The task is to classify gestures and achieve high accuracy. This tutorial demonstrates the benchmark with a trained convolutional spiking neural network."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "First, we will import the relevant libraries. We will use the [Tonic library](https://tonic.readthedocs.io/en/latest/) for loading and pre-processing the data, and the model wrapper, post-processor, and benchmark object from NeuroBench."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "lqtM6XbMM_hO"
+   },
+   "outputs": [],
+   "source": [
+    "# Tonic library is used for DVS Gesture dataset loading and processing\n",
+    "import tonic\n",
+    "import tonic.transforms as transforms\n",
+    "from torch.utils.data import DataLoader\n",
+    "\n",
+    "from neurobench.models import SNNTorchModel\n",
+    "from neurobench.postprocessing import choose_max_count\n",
+    "from neurobench.benchmarks import Benchmark"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "R7HMjVPX7LZh"
+   },
+   "source": [
+    "For this tutorial, we will make use of a four-layer convolutional SNN, written using snnTorch."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "r0yYDNRZ7UxY" + }, + "outputs": [], + "source": [ + "import torch\n", + "import torch.nn as nn\n", + "import snntorch as snn\n", + "from snntorch import surrogate\n", + "\n", + "class Net(torch.nn.Module):\n", + " def __init__(self):\n", + " super().__init__()\n", + "\n", + " # Hyperparameters\n", + " beta_1 = 0.9999903192467171\n", + " beta_2 = 0.7291118090686332\n", + " beta_3 = 0.9364650136740154\n", + " beta_4 = 0.8348241794080301\n", + " threshold_1 = 3.511291184386264\n", + " threshold_2 = 3.494437965584431\n", + " threshold_3 = 1.5986853560315544\n", + " threshold_4 = 0.3641469130041378\n", + " spike_grad = surrogate.atan()\n", + " dropout = 0.5956071342984011\n", + " \n", + " # Initialize layers\n", + " self.conv1 = nn.Conv2d(2, 16, 5, padding=\"same\")\n", + " self.pool1 = nn.MaxPool2d(2)\n", + " self.lif1 = snn.Leaky(beta=beta_1, threshold=threshold_1, spike_grad=spike_grad, init_hidden=True)\n", + " \n", + " self.conv2 = nn.Conv2d(16, 32, 5, padding=\"same\")\n", + " self.pool2 = nn.MaxPool2d(2)\n", + " self.lif2 = snn.Leaky(beta=beta_2, threshold=threshold_2, spike_grad=spike_grad, init_hidden=True)\n", + " \n", + " self.conv3 = nn.Conv2d(32, 64, 5, padding=\"same\")\n", + " self.pool3 = nn.MaxPool2d(2)\n", + " self.lif3 = snn.Leaky(beta=beta_3, threshold=threshold_3, spike_grad=spike_grad, init_hidden=True)\n", + " \n", + " self.linear1 = nn.Linear(64*4*4, 11)\n", + " self.dropout_4 = nn.Dropout(dropout)\n", + " self.lif4 = snn.Leaky(beta=beta_4, threshold=threshold_4, spike_grad=spike_grad, init_hidden=True, output=True)\n", + "\n", + " def forward(self, x):\n", + " # x is expected to be in shape (batch, channels, height, width) = (B, 2, 32, 32)\n", + " \n", + " # Layer 1\n", + " y = self.conv1(x)\n", + " y = self.pool1(y)\n", + " spk1 = self.lif1(y)\n", + "\n", + " # Layer 2\n", + " y = self.conv2(spk1)\n", + " y = self.pool2(y)\n", + " spk2 = self.lif2(y)\n", + "\n", + " # Layer 3\n", + " y = self.conv3(spk2)\n", + " y = self.pool3(y)\n", + " spk3 = self.lif3(y)\n", + "\n", + " # Layer 4\n", + " y = self.linear1(spk3.flatten(1))\n", + " y = self.dropout_4(y)\n", + " spk4, mem4 = self.lif4(y)\n", + "\n", + " return spk4, mem4" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "VNIgTfvuOMe-" + }, + "source": [ + "We load a pre-trained model. The model is wrapped in the SNNTorchModel wrapper, which includes boilerplate inference code and interfaces with the top-level Benchmark class." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "chZeyUTAOQ6B" + }, + "outputs": [], + "source": [ + "device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n", + "\n", + "net = Net()\n", + "net.load_state_dict(torch.load(\"model_data/dvs_gesture_snn\", map_location=device))\n", + "\n", + "model = SNNTorchModel(net)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, we will load the dataset. Here, we are using the DVSGesture dataset from the Tonic library, as well as transforms to turn the events into frames that can be processed." 
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "x4jOfnt6OeIH"
+   },
+   "outputs": [],
+   "source": [
+    "# Load the dataset, here we are using the Tonic library\n",
+    "data_dir = \"../../../data/dvs_gesture\" # data in repo root dir\n",
+    "test_transform = transforms.Compose([transforms.Denoise(filter_time=10000),\n",
+    "                                     transforms.Downsample(spatial_factor=0.25),\n",
+    "                                     transforms.ToFrame(sensor_size=(32, 32, 2),\n",
+    "                                                        n_time_bins=150),\n",
+    "                                     ])\n",
+    "test_set = tonic.datasets.DVSGesture(save_to=data_dir, transform=test_transform, train=False)\n",
+    "test_set_loader = DataLoader(test_set, batch_size=16,\n",
+    "                             collate_fn=tonic.collation.PadTensors(batch_first=True))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "UfRfdvXvOqRP"
+   },
+   "source": [
+    "Specify any pre-processors and post-processors you want to use. These will be applied to your data before it is fed into the model, and to the output spikes, respectively.\n",
+    "Here, the transforms listed above account for all necessary pre-processing. The post-processor counts up the spikes corresponding to the output labels, and chooses the label with the max count."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "3GHY8vTROwzP"
+   },
+   "outputs": [],
+   "source": [
+    "preprocessors = []\n",
+    "postprocessors = [choose_max_count]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "o9doNsI0O0Jl"
+   },
+   "source": [
+    "Next, specify the metrics which you want to calculate. The metrics include static metrics, which are computed before any model inference, and workload metrics, which show inference results.\n",
+    "\n",
+    "- Footprint: Bytes used to store the model parameters and buffers.\n",
+    "- Connection sparsity: Proportion of zero weights in the model.\n",
+    "- Classification accuracy: Accuracy of gesture predictions.\n",
+    "- Activation sparsity: Proportion of zero activations, averaged over all neurons, timesteps, and samples.\n",
+    "- Synaptic operations: Number of weight-activation operations, averaged over gesture samples.\n",
+    "    - Effective MACs: Number of non-zero multiply-accumulate synops, where the activations are not spikes with values -1 or 1.\n",
+    "    - Effective ACs: Number of non-zero accumulate synops, where the activations are -1 or 1 only.\n",
+    "    - Dense: Total zero and non-zero synops."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "sDUczVTkPOsQ"
+   },
+   "outputs": [],
+   "source": [
+    "static_metrics = [\"footprint\", \"connection_sparsity\"]\n",
+    "workload_metrics = [\"classification_accuracy\", \"activation_sparsity\", \"synaptic_operations\"]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "KXQYfiJpPTZb"
+   },
+   "source": [
+    "Next, we instantiate the benchmark. We pass the model, the dataloader, the preprocessors, the postprocessor, and the list of the static and workload metrics which we want to measure:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "U0_N96ADPeO5"
+   },
+   "outputs": [],
+   "source": [
+    "benchmark = Benchmark(model, test_set_loader, preprocessors, postprocessors, [static_metrics, workload_metrics])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "6ytLJ-dUPp0b"
+   },
+   "source": [
+    "Now, let's run the benchmark and print our results!"
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Ldww7kiYPsU2" + }, + "outputs": [], + "source": [ + "results = benchmark.run()\n", + "print(results)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Expected output:\n", + "{'footprint': 304828, 'connection_sparsity': 0.0, \n", + "'classification_accuracy': 0.8636363636363633, 'activation_sparsity': 0.9507192967815323, \n", + "'synaptic_operations': {'Effective_MACs': 9227011.575757576, 'Effective_ACs': 30564577.174242426, 'Dense': 891206400.0}}" + ] + } + ], + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/neurobench/examples/dvs_gesture/benchmark.py b/neurobench/examples/dvs_gesture/benchmark.py deleted file mode 100644 index 817a09cc..00000000 --- a/neurobench/examples/dvs_gesture/benchmark.py +++ /dev/null @@ -1,36 +0,0 @@ -import torch -from torch.utils.data import DataLoader - -import snntorch as snn - -from torch import nn -from snntorch import surrogate - -from neurobench.datasets import DVSGesture -from neurobench.models import SNNTorchModel -from neurobench.benchmarks import Benchmark -from neurobench.postprocessing.postprocessor import aggregate,choose_max_count - -from CSNN import Conv_SNN - -# data in repo root dir -test_set = DVSGesture("../../../data/dvs_gesture/", split="testing", preprocessing="stack") -test_set_loader = DataLoader(test_set, batch_size=16, shuffle=True,drop_last=True) - -net = Conv_SNN() - -# The pre-trained model is not available, this demo loads an untrained model. 
-net.load_state_dict(torch.load('./model_data/DVS_SNN_untrained.pth')) - -## Define model ## -model = SNNTorchModel(net) - -# postprocessors -postprocessors = [choose_max_count] - -static_metrics = ["footprint", "connection_sparsity"] -workload_metrics = ["synaptic_operations", "activation_sparsity", "classification_accuracy"] - -benchmark = Benchmark(model, test_set_loader, [], postprocessors, [static_metrics, workload_metrics]) -results = benchmark.run() -print(results) \ No newline at end of file diff --git a/neurobench/examples/dvs_gesture/benchmark_snn.py b/neurobench/examples/dvs_gesture/benchmark_snn.py new file mode 100644 index 00000000..09df35fe --- /dev/null +++ b/neurobench/examples/dvs_gesture/benchmark_snn.py @@ -0,0 +1,44 @@ +import torch +from snn import Net + +# Tonic library is used for DVS Gesture dataset loading and processing +import tonic +import tonic.transforms as transforms +from torch.utils.data import DataLoader + +from neurobench.models import SNNTorchModel +from neurobench.postprocessing import choose_max_count +from neurobench.benchmarks import Benchmark + +device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") + +net = Net() +net.load_state_dict(torch.load("model_data/dvs_gesture_snn", map_location=device)) + +model = SNNTorchModel(net) + +# Load the dataset, here we are using the Tonic library +data_dir = "../../../data/dvs_gesture" # data in repo root dir +test_transform = transforms.Compose([transforms.Denoise(filter_time=10000), + transforms.Downsample(spatial_factor=0.25), + transforms.ToFrame(sensor_size=(32, 32, 2), + n_time_bins=150), + ]) +test_set = tonic.datasets.DVSGesture(save_to=data_dir, transform=test_transform, train=False) +test_set_loader = DataLoader(test_set, batch_size=16, + collate_fn=tonic.collation.PadTensors(batch_first=True)) + +preprocessors = [] +postprocessors = [choose_max_count] + +static_metrics = ["footprint", "connection_sparsity"] +workload_metrics = ["classification_accuracy", "activation_sparsity", "synaptic_operations"] + +benchmark = Benchmark(model, test_set_loader, preprocessors, postprocessors, [static_metrics, workload_metrics]) +results = benchmark.run(device=device) +print(results) + +# Results: +# {'footprint': 304828, 'connection_sparsity': 0.0, +# 'classification_accuracy': 0.8636363636363633, 'activation_sparsity': 0.9507192967815323, +# 'synaptic_operations': {'Effective_MACs': 9227011.575757576, 'Effective_ACs': 30564577.174242426, 'Dense': 891206400.0}} \ No newline at end of file diff --git a/neurobench/examples/dvs_gesture/model_data/DVS_SNN_untrained.pth b/neurobench/examples/dvs_gesture/model_data/DVS_SNN_untrained.pth deleted file mode 100644 index 0b7d5d3e..00000000 Binary files a/neurobench/examples/dvs_gesture/model_data/DVS_SNN_untrained.pth and /dev/null differ diff --git a/neurobench/examples/dvs_gesture/model_data/dvs_gesture_snn b/neurobench/examples/dvs_gesture/model_data/dvs_gesture_snn new file mode 100644 index 00000000..dd5eafa2 Binary files /dev/null and b/neurobench/examples/dvs_gesture/model_data/dvs_gesture_snn differ diff --git a/neurobench/examples/dvs_gesture/snn.py b/neurobench/examples/dvs_gesture/snn.py new file mode 100644 index 00000000..5364ed78 --- /dev/null +++ b/neurobench/examples/dvs_gesture/snn.py @@ -0,0 +1,62 @@ +import torch +import torch.nn as nn +import snntorch as snn +from snntorch import surrogate + +class Net(torch.nn.Module): + def __init__(self): + super().__init__() + + # Hyperparameters + beta_1 = 0.9999903192467171 + beta_2 = 
0.7291118090686332 + beta_3 = 0.9364650136740154 + beta_4 = 0.8348241794080301 + threshold_1 = 3.511291184386264 + threshold_2 = 3.494437965584431 + threshold_3 = 1.5986853560315544 + threshold_4 = 0.3641469130041378 + spike_grad = surrogate.atan() + dropout = 0.5956071342984011 + + # Initialize layers + self.conv1 = nn.Conv2d(2, 16, 5, padding="same") + self.pool1 = nn.MaxPool2d(2) + self.lif1 = snn.Leaky(beta=beta_1, threshold=threshold_1, spike_grad=spike_grad, init_hidden=True) + + self.conv2 = nn.Conv2d(16, 32, 5, padding="same") + self.pool2 = nn.MaxPool2d(2) + self.lif2 = snn.Leaky(beta=beta_2, threshold=threshold_2, spike_grad=spike_grad, init_hidden=True) + + self.conv3 = nn.Conv2d(32, 64, 5, padding="same") + self.pool3 = nn.MaxPool2d(2) + self.lif3 = snn.Leaky(beta=beta_3, threshold=threshold_3, spike_grad=spike_grad, init_hidden=True) + + self.linear1 = nn.Linear(64*4*4, 11) + self.dropout_4 = nn.Dropout(dropout) + self.lif4 = snn.Leaky(beta=beta_4, threshold=threshold_4, spike_grad=spike_grad, init_hidden=True, output=True) + + def forward(self, x): + # x is expected to be in shape (batch, channels, height, width) = (B, 2, 32, 32) + + # Layer 1 + y = self.conv1(x) + y = self.pool1(y) + spk1 = self.lif1(y) + + # Layer 2 + y = self.conv2(spk1) + y = self.pool2(y) + spk2 = self.lif2(y) + + # Layer 3 + y = self.conv3(spk2) + y = self.pool3(y) + spk3 = self.lif3(y) + + # Layer 4 + y = self.linear1(spk3.flatten(1)) + y = self.dropout_4(y) + spk4, mem4 = self.lif4(y) + + return spk4, mem4 \ No newline at end of file diff --git a/neurobench/examples/dvs_gesture/train_snn.py b/neurobench/examples/dvs_gesture/train_snn.py new file mode 100644 index 00000000..75309418 --- /dev/null +++ b/neurobench/examples/dvs_gesture/train_snn.py @@ -0,0 +1,138 @@ +import snntorch as snn +from snntorch import functional as SF +from snntorch import surrogate + +import torch +import torch.nn as nn +import torch.backends.cudnn as cudnn +import snntorch.utils as utils + +import numpy as np + +import tonic +import tonic.transforms as transforms +from tonic import DiskCachedDataset +from torch.utils.data import DataLoader + +from snn import Net + +from tqdm import tqdm + +device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") + +# Set the random seed for PyTorch +def rand_seed(n): + torch.manual_seed(n) + if torch.cuda.is_available(): + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + torch.cuda.manual_seed_all(n) + +# The SNNTorch forward pass +def forward_pass(net, data): + spk_rec = [] + utils.reset(net) + for step in range(data.shape[1]): + spk_out, _ = net(data[:, step, ...]) + spk_rec.append(spk_out) + return torch.stack(spk_rec) + +lr = 0.008273059787948487 +batch_size = 64 +train_time_bin = 25 +test_time_bin = 150 +epochs = 100 +data_dir = './data' + +def dataloader(): + # sensor_size = tonic.datasets.DVSGesture.sensor_size + sensor_size = (32, 32, 2) + + train_transform = transforms.Compose([transforms.Denoise(filter_time=10000), + transforms.Downsample(spatial_factor=0.25), + transforms.ToFrame(sensor_size=sensor_size, + n_time_bins=train_time_bin), + ]) + + test_transform = transforms.Compose([transforms.Denoise(filter_time=10000), + transforms.Downsample(spatial_factor=0.25), + transforms.ToFrame(sensor_size=sensor_size, + n_time_bins=test_time_bin), + ]) + + trainset = tonic.datasets.DVSGesture(save_to=data_dir, transform=train_transform, train=True) + testset = tonic.datasets.DVSGesture(save_to=data_dir, transform=test_transform, 
train=False) + + cached_trainset = DiskCachedDataset(trainset, cache_path='./data/cache/dvs/train') + cached_testset = DiskCachedDataset(testset, cache_path='./data/cache/dvs/test') + + train_loader = DataLoader(cached_trainset, batch_size=batch_size, + collate_fn=tonic.collation.PadTensors(batch_first=True)) + # test whole validation set at once so that accuracy is exact + test_loader = DataLoader(cached_testset, batch_size=512, + collate_fn=tonic.collation.PadTensors(batch_first=True)) + + return train_loader, test_loader + +if __name__ == '__main__': + + rand_seed(1234) + + train_loader, test_loader = dataloader() + + net = Net().to(device) + + optimizer = torch.optim.Adam(net.parameters(), lr=lr, betas=(0.9, 0.999)) + + loss_fn = SF.mse_count_loss() + scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=8250, eta_min=0, last_epoch=-1) + + # Training Start + best_acc = 0 + for epoch in range(epochs): + print(f"Epoch {epoch}:") + train_loss = [] + train_acc = [] + net.train() + for data, targets in tqdm(train_loader): + data = data.to(device) + targets = targets.to(device) + + spk_rec = forward_pass(net, data) + loss_val = loss_fn(spk_rec, targets) + + train_loss.append(loss_val.item()) + train_acc.append(SF.accuracy_rate(spk_rec, targets)) + + optimizer.zero_grad() + loss_val.backward() + optimizer.step() + scheduler.step() + + print(f"Train Loss: {np.mean(train_loss):.3f}") + print(f"Train Accuracy: {np.mean(train_acc) * 100:.2f}%") + + val_loss = [] + val_acc = [] + net.eval() + for data, targets in tqdm(iter(test_loader)): + data = data.to(device) + targets = targets.to(device) + + spk_rec = forward_pass(net, data) + + val_loss.append(loss_fn(spk_rec, targets).item()) + val_acc.append(SF.accuracy_rate(spk_rec, targets)) + + print(f"Test Loss: {np.mean(val_loss):.3f}") + print(f"Test Accuracy: {np.mean(val_acc) * 100:.2f}%") + + if np.mean(val_acc) > best_acc: + print("New Best Test Accuracy. 
Saving...") + best_acc = np.mean(val_acc) + torch.save(net.state_dict(), "./model_data/dvs_gesture_snn") + + print(f"---------------------\n") + + # Load the weights into the network for inference and benchmarking + net.load_state_dict(torch.load("./model_data/dvs_gesture_snn")) diff --git a/neurobench/examples/dvs_gesture/training.py b/neurobench/examples/dvs_gesture/training.py deleted file mode 100644 index 3b1756f1..00000000 --- a/neurobench/examples/dvs_gesture/training.py +++ /dev/null @@ -1,31 +0,0 @@ -import torch -from torch.utils.data import DataLoader - -import snntorch as snn - -from torch import nn -from snntorch import surrogate - -from neurobench.datasets import DVSGesture -from neurobench.models import SNNTorchModel -from neurobench.benchmarks import Benchmark -from neurobench.postprocessing.postprocessor import aggregate,choose_max_count - -from CSNN import Conv_SNN - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -device = 'cpu' - -# data in repo root dir -data = DVSGesture('../../../data/dvs_gesture/', split='testing', preprocessing='stack') -dataloader_training = DataLoader(data, 10,shuffle=False) -model = Conv_SNN().to(device) -data_1 = [(torch.tensor(data[0][0]).unsqueeze(0),torch.tensor(data[0][1]).unsqueeze(0))] -data_2 = [next(iter(dataloader_training))] - -torch.save(model.state_dict(), 'model_data/DVS_SNN_untrained.pth') - -optimizer = torch.optim.Adamax(model.parameters(),lr=1.2e-3,betas=[0.9,0.95]) -# model.fit(dataloader_training=dataloader_training,device=device, warmup_frames=70, optimizer=optimizer, nr_episodes=1000) -# model.fit(dataloader_training=data_2,device=device, warmup_frames=70, optimizer=optimizer, nr_episodes=10) -# torch.save(model.state_dict(), 'neurobench/examples/dvs_gesture/model_data/DVS_SNN_trained.pth') \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 200c4d3c..5b27ff47 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "neurobench" -version = "1.0.3" +version = "1.0.4" description = "Collaborative, Fair, and Representative Benchmarks for Neuromorphic Computing" authors = ["NeuroBench Team "] readme = "README.rst" diff --git a/tests/test_datasets.py b/tests/test_datasets.py index 8ff2d2ad..3e216ef8 100644 --- a/tests/test_datasets.py +++ b/tests/test_datasets.py @@ -3,7 +3,6 @@ from neurobench.datasets import SpeechCommands from neurobench.datasets import Gen4DetectionDataLoader from neurobench.datasets import PrimateReaching -from neurobench.datasets import DVSGesture from neurobench.datasets import MackeyGlass from neurobench.datasets import WISDM from torch.utils.data import DataLoader @@ -61,21 +60,6 @@ def test_speech_commands(): assert int(ds[0][1]) == 0 -def test_dvs_gesture(): - path = dataset_path + "dvs_gesture/" - try: - assert os.path.exists(path) - except AssertionError: - raise FileExistsError(f"Can't find {path}") - ds = DVSGesture(path) - - assert len(ds) > 0 - assert list(ds[0][0].shape) == [340, 3, 128, 128] - - assert int(ds[0][1]) >= 0 - assert int(ds[0][1]) <= 10 - - def test_mackey_glass(): filepath = dataset_path + "mackey_glass/mg_17.npy" try: diff --git a/tests/test_metrics.py b/tests/test_metrics.py index 6d0b425c..2b6e221c 100644 --- a/tests/test_metrics.py +++ b/tests/test_metrics.py @@ -21,6 +21,7 @@ detect_activations_connections, synaptic_operations, number_neuron_updates, + membrane_updates, ) from torch.profiler import profile, record_function, ProfilerActivity @@ -201,7 +202,7 @@ def test_r2(): data = ( 
torch.randn(2, batch_size), - torch.tensor(targets).transpose(0, 1), + torch.tensor(targets, dtype=torch.float).transpose(0, 1), ) # input and targets preds = [ @@ -531,6 +532,33 @@ def test_neuron_update_metric(): print("Passed neuron update metric") +def test_membrane_potential_updates(): + + # test snn layers + net_snn = nn.Sequential( + # nn.Flatten(), + nn.Linear(20, 5, bias=False), + snn.Leaky( + beta=0.9, spike_grad=surrogate.fast_sigmoid(), init_hidden=True, output=True + ), + ) + + # simulate spiking input with only ones + inp = torch.ones(5, 10, 20) # batch size, time steps, input size + + model = SNNTorchModel(net_snn) + + detect_activations_connections(model) + + out = model(inp) + mem_updates = membrane_updates() + tot_mem_updates = mem_updates(model, out, (inp, 0)) + + assert tot_mem_updates == 50 + + print("Passed membrane updates") + + class simple_LSTM(nn.Module): """Nonsense LSTM for operations testing Should be 615 MACs."""
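A note on the expected value in `test_membrane_potential_updates`: assuming `SNNTorchModel` steps the wrapped network once per entry of the time dimension (input shape `(batch, timesteps, features)`, as the harness expects), the hooks record ten `(5, 5)` membrane snapshots, and the asserted value of 50 falls out as in this sketch:

```python
# Why the test expects exactly 50, under the assumptions above
# (input of ones, batch of 5, 10 timesteps, 5 Leaky neurons):
batch, steps, neurons = 5, 10, 5

first_step = batch * neurons  # numel(post_fire_mem_potential[0]): every
                              # membrane is written once when first populated
later_steps = (steps - 1) * batch * neurons  # steps 1..9: constant nonzero input
                                             # changes every membrane every step
per_sample = (first_step + later_steps) / batch  # metric normalizes by total samples
assert per_sample == 50.0
```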