diff --git a/src/lava/__init__.py b/src/lava/__init__.py new file mode 100644 index 000000000..fd8f628c8 --- /dev/null +++ b/src/lava/__init__.py @@ -0,0 +1,56 @@ +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +# Creates a pkgutil-style namespace package that extends the namespace over +# multiple directory structures. +__path__ = __import__('pkgutil').extend_path(__path__, __name__) + +###### +# IMPORTS + +# Import the most common classes and functions to the top level to enable +# >>> import lava +# as the only required import for the most common Lava programs. + +# MOST COMMON PROCESSES +from lava.proc.conv.process import Conv +from lava.proc.dense.process import Dense, DelayDense, LearningDense +from lava.proc.io.dataloader import SpikeDataloader, StateDataloader +from lava.proc.io.source import RingBuffer as SourceRingBuffer +from lava.proc.io.sink import RingBuffer as SinkRingBuffer +from lava.proc.io.encoder import DeltaEncoder +from lava.proc.io.injector import Injector +from lava.proc.io.extractor import Extractor +from lava.proc.io.reset import Reset +from lava.proc.learning_rules.r_stdp_learning_rule import RewardModulatedSTDP +from lava.proc.learning_rules.stdp_learning_rule import STDPLoihi +from lava.proc.lif.process import LIF, LearningLIF, LIFRefractory +from lava.proc.monitor.process import Monitor +from lava.proc.receiver.process import Receiver +from lava.proc.rf.process import RF +from lava.proc.rf_iz.process import RF_IZ +from lava.proc.sdn.process import Sigma, Delta, SigmaDelta, ActivationMode +from lava.proc.sparse.process import Sparse, LearningSparse, DelaySparse + +# RUN CONFIGURATIONS & CONDITIONS +from lava.magma.core.run_configs import Loihi2SimCfg, Loihi2HwCfg +from lava.magma.core.run_conditions import RunContinuous, RunSteps + +# MAGMA +from lava.magma.core.process.process import LogConfig + +# UTILS +from lava.utils import loihi +from lava.utils import plots +from lava.utils.serialization import save, load + +__all__ = ['Conv', 'Dense', 'DelayDense', 'LearningDense', 'SpikeDataloader', + 'StateDataloader', 'SourceRingBuffer', 'SinkRingBuffer', + 'DeltaEncoder', 'Injector', 'Extractor', 'Reset', + 'RewardModulatedSTDP', 'STDPLoihi', 'LIF', 'LearningLIF', + 'LIFRefractory', 'Monitor', 'Receiver', 'RF', 'RF_IZ', + 'Sigma', 'Delta', 'SigmaDelta', 'ActivationMode', 'Sparse', + 'LearningSparse', 'DelaySparse', 'Loihi2HwCfg', 'Loihi2SimCfg', + 'RunContinuous', 'RunSteps', 'LogConfig', 'loihi', 'plots', + 'save', 'load'] diff --git a/src/lava/proc/io/__init__.py b/src/lava/proc/io/__init__.py index 4db497580..824c133ac 100644 --- a/src/lava/proc/io/__init__.py +++ b/src/lava/proc/io/__init__.py @@ -2,6 +2,6 @@ # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ -from . 
import reset, source, sink, dataloader, encoder +from lava.proc.io import reset, source, sink, dataloader, encoder __all__ = ['reset', 'source', 'sink', 'dataloader', 'encoder'] diff --git a/tests/lava/test_init_imports.py b/tests/lava/test_init_imports.py new file mode 100644 index 000000000..68ecfbbd1 --- /dev/null +++ b/tests/lava/test_init_imports.py @@ -0,0 +1,17 @@ +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import unittest +import importlib.util + + +class TestInitImports(unittest.TestCase): + + def test_lava_init_file_imports_lif_class(self) -> None: + module_spec = importlib.util.find_spec("lava") + lava_module = importlib.util.module_from_spec(module_spec) + module_spec.loader.exec_module(lava_module) + + lif_importable = hasattr(lava_module, "LIF") + self.assertTrue(lif_importable) diff --git a/tests/lava/tutorials/test_tutorials.py b/tests/lava/tutorials/test_tutorials.py index 3f752c081..05b192af3 100644 --- a/tests/lava/tutorials/test_tutorials.py +++ b/tests/lava/tutorials/test_tutorials.py @@ -1,7 +1,8 @@ # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ +import importlib.util import glob import os import platform @@ -12,11 +13,8 @@ import unittest from test import support -import lava import nbformat -import tutorials - class TestTutorials(unittest.TestCase): """Export notebook, execute to check for errors.""" @@ -75,15 +73,10 @@ def _update_pythonpath( os.chdir(base_dir + "/" + dir_name) env = os.environ.copy() - module_path = [lava.__path__.__dict__["_path"][0]] - - module_path.extend( - [os.path.dirname(module_path[0]), env.get("PYTHONPATH", "")] - ) sys_path = ":".join(map(str, sys.path)) env_path = env.get("PYTHONPATH", "") - mod_path = ":".join(map(str, module_path)) + mod_path = ":".join(map(str, [get_module_path("lava"), env_path])) env["PYTHONPATH"] = env_path + ":" + mod_path + ":" + sys_path @@ -157,15 +150,14 @@ def _run_notebook(self, notebook: str, e2e_tutorial: bool = False): end to end tutorial, by default False """ cwd = os.getcwd() - tutorials_temp_directory = tutorials.__path__.__dict__["_path"][0] - tutorials_directory = "" + tutorials_module_path = get_module_path("tutorials") if not e2e_tutorial: - tutorials_temp_directory = tutorials_temp_directory + "/in_depth" + tutorials_module_path = tutorials_module_path + "/in_depth" else: - tutorials_temp_directory = tutorials_temp_directory + "/end_to_end" + tutorials_module_path = tutorials_module_path + "/end_to_end" - tutorials_directory = os.path.realpath(tutorials_temp_directory) + tutorials_directory = os.path.realpath(tutorials_module_path) os.chdir(tutorials_directory) errors_record = {} @@ -178,7 +170,7 @@ def _run_notebook(self, notebook: str, e2e_tutorial: bool = False): self.assertTrue( len(discovered_notebooks) != 0, - "Notebook not found. Input to function {}".format(notebook), + f"Notebook not found. 
Input to function {notebook}", ) # If the notebook is found execute it and store any errors @@ -194,10 +186,8 @@ def _run_notebook(self, notebook: str, e2e_tutorial: bool = False): self.assertFalse( errors_record, - "Failed to execute Jupyter Notebooks \ - with errors: \n {}".format( - errors_record - ), + f"Failed to execute Jupyter Notebooks " + f"with errors: \n {errors_record}", ) finally: os.chdir(cwd) @@ -292,5 +282,15 @@ def test_in_depth_clp_01(self): "clp/tutorial01_one-shot_learning_with_novelty_detection.ipynb") +def get_module_path(module_name: str) -> str: + spec = importlib.util.find_spec(module_name) + + # Treat packages with init-files separately. + if spec.origin is None: + return spec.submodule_search_locations[0] + + return os.path.dirname(spec.origin) + + if __name__ == "__main__": support.run_unittest(TestTutorials) diff --git a/tutorials/end_to_end/tutorial00_tour_through_lava.ipynb b/tutorials/end_to_end/tutorial00_tour_through_lava.ipynb index 65e9abbf4..61e23ee0f 100644 --- a/tutorials/end_to_end/tutorial00_tour_through_lava.ipynb +++ b/tutorials/end_to_end/tutorial00_tour_through_lava.ipynb @@ -4,7 +4,11 @@ "attachments": {}, "cell_type": "markdown", "id": "3ebce42a", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "*Copyright (C) 2022 Intel Corporation*
\n", "*SPDX-License-Identifier: BSD-3-Clause*
\n", @@ -46,14 +50,18 @@ "cell_type": "code", "execution_count": 1, "id": "f5f304d1", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "\u001b[0;31mInit signature:\u001b[0m \u001b[0mLIF\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mDocstring:\u001b[0m \n", + "\u001B[0;31mInit signature:\u001B[0m \u001B[0mLIF\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m*\u001B[0m\u001B[0margs\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0;34m**\u001B[0m\u001B[0mkwargs\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n", + "\u001B[0;31mDocstring:\u001B[0m \n", "Leaky-Integrate-and-Fire (LIF) neural Process.\n", "\n", "LIF dynamics abstracts to:\n", @@ -91,16 +99,15 @@ ">>> lif = LIF(shape=(200, 15), du=10, dv=5)\n", "This will create 200x15 LIF neurons that all have the same current decay\n", "of 10 and voltage decay of 5.\n", - "\u001b[0;31mInit docstring:\u001b[0m Initializes a new Process.\n", - "\u001b[0;31mFile:\u001b[0m ~/Documents/lava/src/lava/proc/lif/process.py\n", - "\u001b[0;31mType:\u001b[0m ProcessPostInitCaller\n", - "\u001b[0;31mSubclasses:\u001b[0m LIFReset" + "\u001B[0;31mInit docstring:\u001B[0m Initializes a new Process.\n", + "\u001B[0;31mFile:\u001B[0m ~/Documents/lava/src/lava/proc/lif/process.py\n", + "\u001B[0;31mType:\u001B[0m ProcessPostInitCaller\n", + "\u001B[0;31mSubclasses:\u001B[0m LIFReset" ] } ], "source": [ - "from lava.proc.lif.process import LIF\n", - "from lava.proc.dense.process import Dense\n", + "from lava import LIF, Dense\n", "\n", "LIF?" ] @@ -108,7 +115,11 @@ { "cell_type": "markdown", "id": "b4dce60e", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "The docstring gives insights about the parameters and internal dynamics of the `LIF` neuron. 
`Dense` is used to connect to a neuron population in an all-to-all fashion, often implemented as a matrix-vector product.\n", "\n", @@ -119,7 +130,11 @@ "cell_type": "code", "execution_count": 2, "id": "dbd808cb", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "import numpy as np\n", @@ -147,7 +162,11 @@ "attachments": {}, "cell_type": "markdown", "id": "1fbfed43", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "As you can see, we can either specify parameters with scalars, then all units share the same initial value for this parameter, or with a tuple (or list, or numpy array) to set the parameter individually per unit.\n", "\n", @@ -177,7 +196,11 @@ "cell_type": "code", "execution_count": 3, "id": "3f8f656a", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -203,7 +226,11 @@ { "cell_type": "markdown", "id": "1c5da64b", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "Now that we know about the input and output `Ports` of the `LIF` and `Dense` `Processes`, we can `connect` the network to complete the LIF-Dense-LIF structure.\n", "\n", @@ -216,7 +243,11 @@ "cell_type": "code", "execution_count": 4, "id": "657063e9", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "lif1.s_out.connect(dense.s_in)\n", @@ -227,7 +258,11 @@ "attachments": {}, "cell_type": "markdown", "id": "7f0add01", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "### Variables\n", "\n", @@ -240,7 +275,11 @@ "cell_type": "code", "execution_count": 5, "id": "d6be4fa0", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -265,14 +304,22 @@ "attachments": {}, "cell_type": "markdown", "id": "971d5ed7", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [] }, { "attachments": {}, "cell_type": "markdown", "id": "7574279a", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "We can take a look at the random weights of `Dense` by calling the `get` function.\n", "\n", @@ -285,7 +332,11 @@ "cell_type": "code", "execution_count": 6, "id": "e60c16db", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "data": { @@ -307,7 +358,11 @@ "attachments": {}, "cell_type": "markdown", "id": "49a7f22e", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "### Record internal Vars over time\n", "\n", @@ -321,10 +376,14 @@ "cell_type": "code", "execution_count": 7, "id": "635bf66b", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ - "from lava.proc.monitor.process import Monitor\n", + "from lava import Monitor\n", "\n", "monitor_lif1 = Monitor()\n", "monitor_lif2 = Monitor()\n", @@ -338,7 +397,11 @@ { "cell_type": "markdown", "id": "ce0c6495", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "### Execution\n", "\n", @@ -350,7 +413,11 @@ { "cell_type": "markdown", "id": "9a43d818", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Run Conditions\n", "\n", @@ -361,10 +428,14 @@ "cell_type": "code", "execution_count": 8, "id": "0cf86c34", - "metadata": {}, + "metadata": { + "pycharm": { + "name": 
"#%%\n" + } + }, "outputs": [], "source": [ - "from lava.magma.core.run_conditions import RunContinuous\n", + "from lava import RunContinuous\n", "\n", "run_condition = RunContinuous()" ] @@ -372,7 +443,11 @@ { "cell_type": "markdown", "id": "865e2ca9", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "The second option is `RunSteps`, which allows you to define an exact amount of time steps the network should run." ] @@ -381,10 +456,14 @@ "cell_type": "code", "execution_count": 9, "id": "91fbce5e", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ - "from lava.magma.core.run_conditions import RunSteps\n", + "from lava import RunSteps\n", "\n", "run_condition = RunSteps(num_steps=num_steps)" ] @@ -392,7 +471,11 @@ { "cell_type": "markdown", "id": "2366d304", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "For this example. we will use `RunSteps` and let the network run exactly `num_steps` time steps.\n", "\n", @@ -409,18 +492,26 @@ "cell_type": "code", "execution_count": 10, "id": "14c301f7", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ - "from lava.magma.core.run_configs import Loihi1SimCfg\n", + "from lava import Loihi2SimCfg\n", "\n", - "run_cfg = Loihi1SimCfg(select_tag=\"floating_pt\")" + "run_cfg = Loihi2SimCfg(select_tag=\"floating_pt\")" ] }, { "cell_type": "markdown", "id": "baf95f1f", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Execute\n", "\n", @@ -431,7 +522,11 @@ "cell_type": "code", "execution_count": 11, "id": "331f71b7", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "lif2.run(condition=run_condition, run_cfg=run_cfg)" @@ -440,7 +535,11 @@ { "cell_type": "markdown", "id": "1d8ea488", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "### Retrieve recorded data\n", "\n", @@ -451,7 +550,11 @@ "cell_type": "code", "execution_count": 12, "id": "582215cd", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "data_lif1 = monitor_lif1.get_data()\n", @@ -461,7 +564,11 @@ { "cell_type": "markdown", "id": "22f44fba", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "Alternatively, we can also use the provided `plot` functionality of the `Monitor`, to plot the recorded data. As we can see, the bias of the first `LIF` population drives the membrane potential to the threshold which generates output spikes. Those output spikes are passed through the `Dense` layer as input to the second `LIF` population." ] @@ -471,7 +578,10 @@ "execution_count": 13, "id": "32f48b10", "metadata": { - "scrolled": true + "scrolled": true, + "pycharm": { + "name": "#%%\n" + } }, "outputs": [ { @@ -504,7 +614,11 @@ "attachments": {}, "cell_type": "markdown", "id": "cf5fcea8", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "As a last step we must stop the runtime by calling the `stop` function. `Stop` will terminate the `Runtime` and all states will be lost." 
] @@ -513,7 +627,11 @@ "cell_type": "code", "execution_count": 14, "id": "0ddcd735", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "lif2.stop()" @@ -522,7 +640,11 @@ { "cell_type": "markdown", "id": "26af5f1d", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "### Summary\n", "\n", @@ -538,7 +660,11 @@ { "cell_type": "markdown", "id": "cf9529fa", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "### Learn more about\n", "- [Processes](https://github.com/lava-nc/lava/blob/main/tutorials/in_depth/tutorial02_processes.ipynb) and [hierarchical Processes](https://github.com/lava-nc/lava/blob/main/tutorials/in_depth/tutorial06_hierarchical_processes.ipynb)\n", @@ -550,7 +676,11 @@ { "cell_type": "markdown", "id": "be6d778c", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## 2. Create a custom Process\n", "\n", @@ -565,7 +695,11 @@ "cell_type": "code", "execution_count": 15, "id": "656ac8dc", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "from lava.magma.core.process.process import AbstractProcess\n", @@ -576,7 +710,11 @@ { "cell_type": "markdown", "id": "bbc3acf4", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "All `Processes` in Lava inherit from a common base class called `AbstractProcess`. Additionally, we need `Var` for storing the spike probability and `OutPort` to define the output connections for our `SpikeGenerator`." ] @@ -585,7 +723,11 @@ "cell_type": "code", "execution_count": 16, "id": "00bde8ce", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "class SpikeGenerator(AbstractProcess):\n", @@ -607,7 +749,11 @@ { "cell_type": "markdown", "id": "6f392041", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "The constructor of `Var` requires the shape of the data to be stored and some initial value. We use this functionality to store the spike data. Similarly, we define an `OutPort` for our `SpikeGenerator`. " ] @@ -615,7 +761,11 @@ { "cell_type": "markdown", "id": "ea8e287c", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "### Create a new ProcessModel \n", "As mentioned earlier, the `Process` only defines the interface but not the behavior of the `SpikeGenerator`. We will do that in a separate `ProcessModel` which has the advantage that we can define the behavior of a `Process` on different hardware backends without changing the interface (see figure below). More details about the different kinds of `ProcessModels` can be found in the dedicated in-depth tutorials ([here](https://github.com/lava-nc/lava/blob/main/tutorials/in_depth/tutorial03_process_models.ipynb) and [here](https://github.com/lava-nc/lava/blob/main/tutorials/in_depth/tutorial06_hierarchical_processes.ipynb)). 
Lava automatically selects the correct `ProcessModel` for each `Process` given the `RunConfig`.\n", @@ -631,7 +781,11 @@ "cell_type": "code", "execution_count": 17, "id": "c68411dc", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "from lava.magma.core.model.py.model import PyLoihiProcessModel\n", @@ -646,7 +800,11 @@ "attachments": {}, "cell_type": "markdown", "id": "524df362", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "All `ProcessModels` defined to run on CPU are written in Python and inherit from the common class called `PyLoihiProcessModel`. Further, we use the decorators `requires` and `implements` to define which computational resources (i.e. CPU, GPU, Loihi1NeuroCore, Loihi2NeuroCore) are required to execute this `ProcessModel` and which `Process` it implements. Finally, we need to specify the types of `Vars` and `Ports` in our `SpikeGenerator` using `LavaPyType` and `PyOutPort`.\n", "\n", @@ -663,7 +821,11 @@ "cell_type": "code", "execution_count": 18, "id": "068cb965", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "@implements(proc=SpikeGenerator, protocol=LoihiProtocol)\n", @@ -685,7 +847,11 @@ "attachments": {}, "cell_type": "markdown", "id": "642ad797", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "
\n", "Note: For the `SpikeGenerator` we only needed an `OutPort` which provides the `send` function to send data. For the `InPort` the corresponding function to receive data is called `recv`.\n", @@ -698,7 +864,11 @@ "cell_type": "code", "execution_count": 19, "id": "069e687d", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "# Create processes\n", @@ -737,7 +907,11 @@ { "cell_type": "markdown", "id": "a58971d8", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "### Use the custom SpikeGenerator\n", "\n", @@ -751,7 +925,11 @@ "cell_type": "code", "execution_count": 20, "id": "4e1a6b58", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "# Instantiate SpikeGenerator\n", @@ -770,7 +948,11 @@ { "cell_type": "markdown", "id": "b2d8fc88", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "### Execute and plot\n", "\n", @@ -781,7 +963,11 @@ "cell_type": "code", "execution_count": 21, "id": "af9da307", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "lif2.run(condition=run_condition, run_cfg=run_cfg)" @@ -790,7 +976,11 @@ { "cell_type": "markdown", "id": "6227e1bd", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "And now, we can retrieve the recorded data and plot the membrane potentials of the two `LIF` populations." ] @@ -799,7 +989,11 @@ "cell_type": "code", "execution_count": 22, "id": "7cbc2b21", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "data": { @@ -826,7 +1020,11 @@ { "cell_type": "markdown", "id": "12243ae4", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "As we can see, the spikes provided by the `SpikeGenerator` are sucessfully sent to the first `LIF` population. which in turn sends its output spikes to the second `LIF` population." ] @@ -835,7 +1033,11 @@ "cell_type": "code", "execution_count": 23, "id": "a922b4aa", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "lif2.stop()" @@ -845,7 +1047,11 @@ "attachments": {}, "cell_type": "markdown", "id": "a9fadbe3", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "### Summary\n", "\n", @@ -889,4 +1095,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} +} \ No newline at end of file diff --git a/tutorials/end_to_end/tutorial01_mnist_digit_classification.ipynb b/tutorials/end_to_end/tutorial01_mnist_digit_classification.ipynb index 51ee4b7e8..28f88f5b3 100644 --- a/tutorials/end_to_end/tutorial01_mnist_digit_classification.ipynb +++ b/tutorials/end_to_end/tutorial01_mnist_digit_classification.ipynb @@ -3,7 +3,11 @@ { "attachments": {}, "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "*Copyright (C) 2021 Intel Corporation*
\n", "*SPDX-License-Identifier: BSD-3-Clause*
\n", @@ -59,7 +63,11 @@ { "cell_type": "code", "execution_count": 1, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "import os\n", @@ -69,7 +77,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Lava Processes\n", "\n", @@ -85,7 +97,11 @@ { "cell_type": "code", "execution_count": 2, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "# Import Process level primitives\n", @@ -97,7 +113,11 @@ { "cell_type": "code", "execution_count": 3, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "class SpikeInput(AbstractProcess):\n", @@ -176,7 +196,10 @@ { "cell_type": "markdown", "metadata": { - "tags": [] + "tags": [], + "pycharm": { + "name": "#%% md\n" + } }, "source": [ "#### ProcessModels for Python execution\n" @@ -185,7 +208,11 @@ { "cell_type": "code", "execution_count": 4, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "# Import parent classes for ProcessModels\n", @@ -427,7 +454,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Connecting Processes" ] @@ -435,7 +466,11 @@ { "cell_type": "code", "execution_count": 8, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "num_images = 25\n", @@ -459,7 +494,11 @@ { "attachments": {}, "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "If you receive an ``UnpicklingError`` when instantiating the ``ImageClassifier``, make sure to download the pretrained weights from GitHub LFS in the current directory using:\n", "```bash\n", @@ -489,7 +528,11 @@ { "cell_type": "code", "execution_count": 9, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -541,7 +584,11 @@ { "attachments": {}, "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "> **Important Note**:\n", ">\n", @@ -592,4 +639,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} +} \ No newline at end of file diff --git a/tutorials/end_to_end/tutorial02_excitatory_inhibitory_network.ipynb b/tutorials/end_to_end/tutorial02_excitatory_inhibitory_network.ipynb index 73db52e1b..0872d6e05 100644 --- a/tutorials/end_to_end/tutorial02_excitatory_inhibitory_network.ipynb +++ b/tutorials/end_to_end/tutorial02_excitatory_inhibitory_network.ipynb @@ -3,7 +3,11 @@ { "cell_type": "markdown", "id": "3fd51524", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "*Copyright (C) 2022-23 Intel Corporation*
\n", "*SPDX-License-Identifier: BSD-3-Clause*
\n", @@ -17,7 +21,11 @@ { "cell_type": "markdown", "id": "8ac8ba24", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "**Motivation**: In this tutorial, we will build a Lava Process for a neural networks of excitatory and inhibitory neurons (E/I network).
\n", "E/I networks are a fundamental example of neural networks mimicking the structure of the brain and exhibiting rich dynamical behavior.
" @@ -26,7 +34,11 @@ { "cell_type": "markdown", "id": "3fbb06e6", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### This tutorial assumes that you:\n", "- have the [Lava framework installed](../in_depth/tutorial01_installing_lava.ipynb \"Tutorial on Installing Lava\")\n", @@ -52,7 +64,11 @@ { "cell_type": "markdown", "id": "89344cf6", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### General imports" ] @@ -61,7 +77,11 @@ "cell_type": "code", "execution_count": 1, "id": "257a6fe8", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "import numpy as np\n", @@ -71,7 +91,11 @@ { "cell_type": "markdown", "id": "de1acd9c", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### E/I Network Lava Process\n", "We define the structure of the E/I Network Lava Process class.
" @@ -81,7 +105,11 @@ "cell_type": "code", "execution_count": 2, "id": "497c0d06", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "# Import Process level primitives.\n", @@ -94,7 +122,11 @@ "cell_type": "code", "execution_count": 3, "id": "159d9263", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "class EINetwork(AbstractProcess):\n", @@ -132,7 +164,11 @@ { "cell_type": "markdown", "id": "01b9eabc", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### ProcessModels for Python execution" ] @@ -141,7 +177,11 @@ "cell_type": "code", "execution_count": 4, "id": "bc319315", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "# Import parent classes for ProcessModels for Hierarchical Processes.\n", @@ -158,7 +198,11 @@ { "cell_type": "markdown", "id": "4ed07fb5", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "### Rate neurons\n", "We next turn to the different implementations of the E/I Network.\n", @@ -181,13 +225,16 @@ "cell_type": "code", "execution_count": 5, "id": "44795c5b", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "from lava.magma.core.model.py.type import LavaPyType\n", "from lava.magma.core.model.py.ports import PyInPort, PyOutPort\n", "from lava.magma.core.resources import CPU\n", - "from lava.magma.core.model.model import AbstractProcessModel\n", "\n", "from scipy.special import erf\n", "\n", @@ -276,7 +323,11 @@ { "cell_type": "markdown", "id": "1cd93212", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Defining the parameters for the network\n", "Next, we need to constrain the network with the needed parameters.
\n", @@ -291,7 +342,11 @@ "cell_type": "code", "execution_count": 6, "id": "1caca08a", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "# Fix the randomness.\n", @@ -334,7 +389,11 @@ { "cell_type": "markdown", "id": "7e500634", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "Finally, we have to set the weights given the above constraints. To this end, we sample the weights randomly from a Gaussian distribution with zero-mean and a standard deviation that scales with the ```q_factor```." ] @@ -343,7 +402,11 @@ "cell_type": "code", "execution_count": 7, "id": "7cadbf55", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "def generate_gaussian_weights(dim, num_neurons_exc, q_factor, g_factor):\n", @@ -403,7 +466,11 @@ { "cell_type": "markdown", "id": "ac80469e", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Execution and Results" ] @@ -412,17 +479,18 @@ "cell_type": "code", "execution_count": 8, "id": "e609ac8c", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ - "from lava.magma.core.run_conditions import RunSteps\n", - "from lava.magma.core.run_configs import Loihi1SimCfg\n", - "# Import monitoring Process.\n", - "from lava.proc.monitor.process import Monitor\n", + "from lava import RunSteps, Loihi2SimCfg, Monitor\n", "\n", "# Configurations for execution.\n", "num_steps = 1000\n", - "rcfg = Loihi1SimCfg(select_tag='rate_neurons')\n", + "rcfg = Loihi2SimCfg(select_tag='rate_neurons')\n", "run_cond = RunSteps(num_steps=num_steps)\n", "\n", "# Instantiating network and IO processes.\n", @@ -440,7 +508,11 @@ { "cell_type": "markdown", "id": "7febf4a0", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Visualizing the activity\n", "We first have a look at the activity of the network by plotting the numerical value of the state of the first $50$ neurons." @@ -450,7 +522,11 @@ "cell_type": "code", "execution_count": 9, "id": "f4cdddfe", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "data": { @@ -474,7 +550,11 @@ { "cell_type": "markdown", "id": "70982ef0", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "We observe that after an initial period the network settles in a fixed point.
\n", "As it turns out, this is a global stable fixed point of the network dynamics: If we applied a small perturbation, the network would return to the stable state.
\n", @@ -485,7 +565,11 @@ { "cell_type": "markdown", "id": "0f48bb1a", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Further analysis\n", "We introduce the *auto-correlation function* $c(\\tau)$.
\n", @@ -504,7 +588,11 @@ "cell_type": "code", "execution_count": 10, "id": "67d93c4f", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "def auto_cov_fct(acts, max_lag=100, offset=200):\n", @@ -547,7 +635,11 @@ "cell_type": "code", "execution_count": 11, "id": "830d4529", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "data": { @@ -574,7 +666,11 @@ { "cell_type": "markdown", "id": "7815504e", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "As expected, there is covariance has its maximum at a time lag of $0$.
\n", "Examining the covariance function, we first note its values are small ($<<1$) implying low dimensional dynamics of the network.
\n", @@ -584,7 +680,11 @@ { "cell_type": "markdown", "id": "25538ff1", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Controlling the network\n", "We saw that the states of the neurons quickly converged to a globally stable fixed point.
\n", @@ -596,7 +696,11 @@ "cell_type": "code", "execution_count": 12, "id": "d12da1b0", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "# Defining new, larger q_factor.\n", @@ -613,7 +717,7 @@ "\n", "# Configurations for execution.\n", "num_steps = 1000\n", - "rcfg = Loihi1SimCfg(select_tag='rate_neurons')\n", + "rcfg = Loihi2SimCfg(select_tag='rate_neurons')\n", "run_cond = RunSteps(num_steps=num_steps)\n", "\n", "# Instantiating network and IO processes.\n", @@ -632,7 +736,11 @@ "cell_type": "code", "execution_count": 13, "id": "a708a851", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "data": { @@ -656,7 +764,11 @@ { "cell_type": "markdown", "id": "57019969", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "We find that after increasing the `q_factor`, the network shows a very different behavior. The stable fixed point is gone, instead we observe chaotic network dynamics:
\n", "The single neuron trajectories behave unpredictably and fluctuate widely, a small perturbation would lead to completely different state." @@ -666,7 +778,11 @@ "cell_type": "code", "execution_count": 14, "id": "36411f14", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "data": { @@ -693,7 +809,11 @@ { "cell_type": "markdown", "id": "4341b0b7", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "We moreover see that for positive time lags the auto-covariance function still is large.
\n", "This means that the network has memory of its previous states: The state at a given point in time influences strongly the subsequent path of the trajectories of the neurons.
\n", @@ -703,7 +823,11 @@ { "cell_type": "markdown", "id": "596b5dab", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "### LIF Neurons\n", "We now turn to a E/I networks implementing its dynamic behavior with leaky integrate-and-fire neurons.
\n", @@ -717,11 +841,14 @@ "cell_type": "code", "execution_count": 15, "id": "6dc54408", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ - "from lava.proc.dense.process import Dense\n", - "from lava.proc.lif.process import LIF\n", + "from lava import Dense, LIF\n", "from convert_params import convert_rate_to_lif_params\n", "\n", "@implements(proc=EINetwork, protocol=LoihiProtocol)\n", @@ -809,7 +936,11 @@ { "cell_type": "markdown", "id": "bd11399c", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Execution and Results\n", "In order to execute the LIF E/I network and the infrastructure to monitor the activity, we introduce a ```CustomRunConfig``` where we specify which ProcessModel we select for execution." @@ -819,11 +950,14 @@ "cell_type": "code", "execution_count": 16, "id": "37865890", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ - "from lava.magma.core.run_conditions import RunSteps\n", - "from lava.magma.core.run_configs import Loihi1SimCfg\n", + "from lava import RunSteps, Loihi2SimCfg, SinkRingBuffer\n", "# Import io processes.\n", "from lava.proc import io\n", "\n", @@ -835,7 +969,7 @@ "num_steps = 1000\n", "run_cond = RunSteps(num_steps=num_steps)\n", "\n", - "class CustomRunConfigFloat(Loihi1SimCfg):\n", + "class CustomRunConfigFloat(Loihi2SimCfg):\n", " def select(self, proc, proc_models):\n", " # Customize run config to always use float model for io.sink.RingBuffer.\n", " if isinstance(proc, io.sink.RingBuffer):\n", @@ -851,7 +985,7 @@ "\n", "# Instantiating network and IO processes.\n", "lif_network_balanced = EINetwork( **network_params_balanced, convert=True)\n", - "outport_plug = io.sink.RingBuffer(shape=shape, buffer=num_steps)\n", + "outport_plug = SinkRingBuffer(shape=shape, buffer=num_steps)\n", "\n", "# Instantiate Monitors to record the voltage and the current of the LIF neurons.\n", "monitor_v = Monitor()\n", @@ -874,7 +1008,11 @@ { "cell_type": "markdown", "id": "3c02ce1d", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Visualizing the activity\n", "First, we visually inspect to spiking activity of the neurons in the network.
\n", @@ -885,7 +1023,11 @@ "cell_type": "code", "execution_count": 17, "id": "80307fe2", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "data": { @@ -899,14 +1041,18 @@ } ], "source": [ - "from lava.utils.plots import raster_plot\n", - "fig = raster_plot(spikes=spks_balanced)" + "from lava import plots\n", + "fig = plots.raster_plot(spikes=spks_balanced)" ] }, { "cell_type": "markdown", "id": "81e1c3a9", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "After an initial synchronous burst (all neurons are simultaneously driven to the threshold by the external current), we observe an immediate decoupling of the single neuron activities due to the recurrent connectivity.
\n", "Overall, we see a heterogeneous network state with asynchronous as well as synchronous spiking across neurons.
\n", @@ -920,7 +1066,11 @@ "cell_type": "code", "execution_count": 18, "id": "f5cc39b8", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "window_size = 25\n", @@ -931,7 +1081,11 @@ { "cell_type": "markdown", "id": "5b858cd5", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "After having an estimate of the rate, we compare the temporally-averaged mean rate of both networks in the first state.
\n", "To avoid boundary effects of the binning, we disregard time steps at the beginning and the end." @@ -941,7 +1095,11 @@ "cell_type": "code", "execution_count": 19, "id": "d6c68e05", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "data": { @@ -974,7 +1132,11 @@ { "cell_type": "markdown", "id": "db10dec2", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "Both networks behave similarly inasmuch the rates are stationary with only very small fluctuations around the baseline in the LIF case.
\n", "Next, we turn to the auto-covariance function." @@ -984,7 +1146,11 @@ "cell_type": "code", "execution_count": 20, "id": "9036e802", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "data": { @@ -1020,7 +1186,11 @@ { "cell_type": "markdown", "id": "52339454", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "Examining the auto-covariance function, we first note that again the overall values are small. Moreover, we see that for non-vanishing time lags the auto-covariance function quickly decays.
\n", "This means that the network has no memory of its previous states: Already after few time step we lost almost all information of the previous network state, former states leave little trace in the overall network activity.
\n", @@ -1030,7 +1200,11 @@ { "cell_type": "markdown", "id": "91374486", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Controlling the network\n", "Next, we pass the rate network parameters for which we increased the `q_factor` to the spiking E/I network.
\n", @@ -1041,7 +1215,11 @@ "cell_type": "code", "execution_count": 21, "id": "78d091ea", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "num_steps = 1000\n", @@ -1074,7 +1252,11 @@ "cell_type": "code", "execution_count": 22, "id": "bd6aba46", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "data": { @@ -1088,13 +1270,17 @@ } ], "source": [ - "fig = raster_plot(spikes=spks_critical)" + "fig = plots.raster_plot(spikes=spks_critical)" ] }, { "cell_type": "markdown", "id": "3fcf5169", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "Here we see a qualitatively different network activity where the recurrent connections play a more dominant role:
\n", "At seemingly random times, single neurons enter an active states of variable length.
\n", @@ -1105,7 +1291,11 @@ "cell_type": "code", "execution_count": 23, "id": "36559ace", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "window = np.ones(window_size)\n", @@ -1116,7 +1306,11 @@ { "cell_type": "markdown", "id": "94745334", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "We again compare the rate of both networks in the same state." ] @@ -1125,7 +1319,11 @@ "cell_type": "code", "execution_count": 24, "id": "25e27549", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "data": { @@ -1158,7 +1356,11 @@ { "cell_type": "markdown", "id": "a1134ac2", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "Again, we observe a similar behavior on the rate level:
\n", "In both networks the mean rate fluctuates on a longer time scale with larger values around the baseline in a similar range.
\n", @@ -1169,7 +1371,11 @@ "cell_type": "code", "execution_count": 25, "id": "2c79c458", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "data": { @@ -1198,7 +1404,11 @@ { "cell_type": "markdown", "id": "302dc265", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "We observe in the auto-covariance function of the LIF network a slowly decay, akin to the rate network.
\n", "Even though both auto-covariance functions are not identical, they qualitatively match in that both networks exhibit long-lasting temporal correlations and an activity at the edge of chaos.
\n", @@ -1208,7 +1418,11 @@ { "cell_type": "markdown", "id": "c4278dc3", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### DIfferent recurrent activation regimes\n", "After having observed these two radically different dynamical states also in the LIF network, we next turn to the question how they come about.
\n", @@ -1220,7 +1434,11 @@ "cell_type": "code", "execution_count": 26, "id": "a9f2f809", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "def calculate_activation(weights, spks, num_exc_neurons):\n", @@ -1264,7 +1482,11 @@ { "cell_type": "markdown", "id": "3cc203e4", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "Since the network needs some time to settle in it's dynamical state, we discard the first $200$ time steps." ] @@ -1273,7 +1495,11 @@ "cell_type": "code", "execution_count": 27, "id": "f199fbe1", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "offset = 200\n", @@ -1292,7 +1518,11 @@ { "cell_type": "markdown", "id": "ac240c27", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "First, we look at the distribution of activation of a random neuron in both network states." ] @@ -1301,7 +1531,11 @@ "cell_type": "code", "execution_count": 28, "id": "fb934b23-0bdd-41fe-a798-493e0fd75024", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "data": { @@ -1341,7 +1575,11 @@ { "cell_type": "markdown", "id": "a3afd82d", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "Next, we plot the distribution of the temporal average:" ] @@ -1350,7 +1588,11 @@ "cell_type": "code", "execution_count": 29, "id": "22933de6", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "data": { @@ -1388,7 +1630,11 @@ { "cell_type": "markdown", "id": "a22be55c", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "We first note that the the total activation is close to zero with a slight shift to negative values, this prevents the divergence of activity.
\n", "Secondly, we observe that the width of the distributions is orders of magnitude larger in the high weight case as compared to the low weight network.
\n", @@ -1400,7 +1646,10 @@ "execution_count": 30, "id": "d7d38f3d", "metadata": { - "scrolled": true + "scrolled": true, + "pycharm": { + "name": "#%%\n" + } }, "outputs": [ { @@ -1438,7 +1687,11 @@ { "cell_type": "markdown", "id": "88197c24", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "We see that the temporal evolution of the total activation in the low weights case is much narrower than in the high weights network.
\n", "Moreover, we see that in the high weights network, the fluctuations of the activations evolve on a very long time scale as compared to the other network.
\n", @@ -1448,7 +1701,11 @@ { "cell_type": "markdown", "id": "a3358a9d", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "### Running a ProcessModel bit-accurate with Loihi\n", "So far, we have used neuron models and weights that are internally represented as floating point numbers.
\n", @@ -1460,7 +1717,11 @@ "cell_type": "code", "execution_count": 31, "id": "3cf38774", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "def _scaling_funct(params):\n", @@ -1575,7 +1836,11 @@ { "cell_type": "markdown", "id": "4b43d69c", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "After having defined some primitive conversion functionality we next convert the parameters for the critical network.
\n", "To constrain the values that we need to represent in the bit-accurate model, we have to find the dynamical range of the state parameters of the network, namely ```u``` and ```v``` of the LIF neurons." @@ -1585,7 +1850,11 @@ "cell_type": "code", "execution_count": 32, "id": "3d0043ce-2f6f-4f37-8b61-4a8607bac86b", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "data": { @@ -1619,7 +1888,11 @@ { "cell_type": "markdown", "id": "933c833f-f492-46ad-b2af-d101cb401f33", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "We note that for both variables the distributions attain large (small) values with low probability. We hence will remove them in the dynamical range to increase the precision of the overall representation. We do so by choosing $0.2$ and $0.8$ quantiles as minimal resp. maximal values for the dynamic ranges.
\n", "We finally also need to pass some information about the concrete implementation, e.g. the precision and the bit shifts performed.
" @@ -1629,7 +1902,11 @@ "cell_type": "code", "execution_count": 33, "id": "daab0580-90a7-4e55-97c3-fcf596399f74", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "u_low = np.quantile(data_u_critical.flatten(), 0.2)\n", @@ -1653,7 +1930,11 @@ { "cell_type": "markdown", "id": "792f70a8-4ad6-4827-a131-9606df1026c7", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "Using the mapped parameters, we construct the fully-fledged parameter dictionary for the E/I network Process using the LIF SubProcessModel." ] @@ -1662,7 +1943,11 @@ "cell_type": "code", "execution_count": 34, "id": "c3cfecc6", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "# Set up parameters for bit accurate model\n", @@ -1687,7 +1972,11 @@ { "cell_type": "markdown", "id": "a51256aa", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Execution of bit accurate model" ] @@ -1696,7 +1985,11 @@ "cell_type": "code", "execution_count": 35, "id": "8b10fe25", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "# Import bit accurate ProcessModels.\n", @@ -1708,7 +2001,7 @@ "run_cond = RunSteps(num_steps=num_steps)\n", "\n", "# Define custom Run Config for execution of bit accurate models.\n", - "class CustomRunConfigFixed(Loihi1SimCfg):\n", + "class CustomRunConfigFixed(Loihi2SimCfg):\n", " def select(self, proc, proc_models):\n", " # Customize run config to always use float model for io.sink.RingBuffer.\n", " if isinstance(proc, io.sink.RingBuffer):\n", @@ -1739,7 +2032,11 @@ "cell_type": "code", "execution_count": 36, "id": "a29e3abe", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "data": { @@ -1753,15 +2050,19 @@ } ], "source": [ - "fig = raster_plot(spikes=spks_critical, color='orange', alpha=0.3)\n", - "raster_plot(spikes=spks_critical_fixed, fig=fig, alpha=0.3, color='b')\n", + "fig = plots.raster_plot(spikes=spks_critical, color='orange', alpha=0.3)\n", + "plots.raster_plot(spikes=spks_critical_fixed, fig=fig, alpha=0.3, color='b')\n", "plt.show()" ] }, { "cell_type": "markdown", "id": "836dc55a", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "Comparing the spike times after the parameter conversion, we find that after the first initial time steps, the spike times start diverging, even though certain structural similarities remain.
\n", "This, however, is expected: Since the systems is in a chaotic state, slight differences in the variables lead to a completely different output after some time steps. This is generally the behavior in spiking neural network.
\n", @@ -1772,7 +2073,11 @@ "cell_type": "code", "execution_count": 37, "id": "b5881949", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "window = np.ones(window_size)\n", @@ -1785,7 +2090,11 @@ "cell_type": "code", "execution_count": 38, "id": "4fb468dd", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "data": { @@ -1820,7 +2129,11 @@ "cell_type": "code", "execution_count": 39, "id": "f2b8de22", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "data": { @@ -1847,7 +2160,11 @@ { "cell_type": "markdown", "id": "f142a6bd", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## How to learn more?\n", "\n", @@ -1885,4 +2202,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} +} \ No newline at end of file diff --git a/tutorials/in_depth/three_factor_learning/tutorial01_Reward_Modulated_STDP.ipynb b/tutorials/in_depth/three_factor_learning/tutorial01_Reward_Modulated_STDP.ipynb index 233c79130..485f1464f 100644 --- a/tutorials/in_depth/three_factor_learning/tutorial01_Reward_Modulated_STDP.ipynb +++ b/tutorials/in_depth/three_factor_learning/tutorial01_Reward_Modulated_STDP.ipynb @@ -2,7 +2,11 @@ "cells": [ { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "*Copyright (C) 2022 Intel Corporation*
\n", "*SPDX-License-Identifier: BSD-3-Clause*
\n", @@ -26,7 +30,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "### Defining three-factor learning rule interfaces in Lava\n", "\n", @@ -47,7 +55,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "NOTE: Learning parameters are adapted from online implemention of [Spike-Timing Dependent Plasticity (STDP)](http://www.scholarpedia.org/article/Spike-timing_dependent_plasticity \"Spike-Timing Dependent Plasticity\") and can vary based on implementation. " ] @@ -55,7 +67,11 @@ { "cell_type": "code", "execution_count": 1, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "from lava.proc.learning_rules.r_stdp_learning_rule import RewardModulatedSTDP\n", @@ -74,7 +90,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "### Defining a simple learning network with localized reward signals\n", "\n", @@ -87,28 +107,44 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "![R_STDP_structure](https://raw.githubusercontent.com/lava-nc/lava-docs/three_factor_learning/_static/images/tutorial_learning/r_stdp__tutorial_structure.svg)" ] }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "The simple spiking network shown above translates to a connected Lava process architecture as shown below. " ] }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "![R_STDP_architecture](https://raw.githubusercontent.com/lava-nc/lava-docs/three_factor_learning/_static/images/tutorial_learning/r_stdp_tutorial_architecture.svg)" ] }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "_NOTE : Though the RSTDPLIF Process can currently only be executed on CPU backend, it is modeled from Loihi 2's ability to compute custom post-traces in microcoded neuron instructions. 
Lava support for on-chip execution will be available soon!_" ] @@ -116,7 +152,11 @@ { "cell_type": "code", "execution_count": 2, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "import numpy as np\n", @@ -136,7 +176,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Initialize network parameters and weights" ] @@ -144,7 +188,11 @@ { "cell_type": "code", "execution_count": 3, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "# Pre-synaptic neuron parameters \n", @@ -178,7 +226,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Generate binary input and graded reward spikes" ] @@ -186,7 +238,11 @@ { "cell_type": "code", "execution_count": 4, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "from utils import generate_post_spikes" @@ -195,7 +251,11 @@ { "cell_type": "code", "execution_count": 5, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "# Number of simulation time steps\n", @@ -224,7 +284,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Initialize Network Processes" ] @@ -232,7 +296,11 @@ { "cell_type": "code", "execution_count": 6, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "from lava.proc.lif.process import LIF\n", @@ -244,7 +312,11 @@ { "cell_type": "code", "execution_count": 7, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "# Create input devices\n", @@ -290,7 +362,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "### Connect Network Processes" ] @@ -298,7 +374,11 @@ { "cell_type": "code", "execution_count": 8, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "# Connect network\n", @@ -339,7 +419,11 @@ { "cell_type": "code", "execution_count": 9, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "from lava.proc.monitor.process import Monitor\n", @@ -367,7 +451,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "### Run the network\n", "\n", @@ -377,7 +465,11 @@ { "cell_type": "code", "execution_count": 10, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "from lava.magma.core.run_conditions import RunSteps\n", @@ -387,7 +479,11 @@ { "cell_type": "code", "execution_count": 11, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "# Running\n", @@ -397,7 +493,11 @@ { "cell_type": "code", "execution_count": 12, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "# Get data from monitors\n", @@ -415,7 +515,11 @@ { "cell_type": "code", "execution_count": 13, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "# Stopping\n", @@ -424,14 +528,22 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, 
"source": [ "### Visualize the learning results" ] }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Plot eligibility trace dynamics" ] @@ -439,7 +551,11 @@ { "cell_type": "code", "execution_count": 14, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "from utils import plot_spikes, plot_time_series, plot_time_series_subplots, plot_spikes_time_series" @@ -448,7 +564,11 @@ { "cell_type": "code", "execution_count": 15, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "data": { @@ -474,7 +594,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "This plot shows the spike times at which the pre-synaptic neuron and the post-synaptic neurons 'A' and 'B' fired across time-steps. The co-relation between the spike timing of the pre-synaptic neuron and the post-synaptic neurons are used to do learning updates throughout the rest of the tutorial. " ] @@ -482,7 +606,11 @@ { "cell_type": "code", "execution_count": 16, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "data": { @@ -509,7 +637,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "The first plot shows the spike times at which the pre-synaptic neuron fired. The pre-traces are updated based on the pre-synaptic spike train shown in the first plot. At the instant of a pre-synaptic spike, the pre-trace value is incremented by the 'pre_trace_kernel_magnitude' described in the learning rule. Subsequently, the trace value is decayed by a factor of 'pre_trace_decay_tau', with respect to the trace value at that instant, until the event of the next pre-synaptic spike. For example, the first pre-synaptic spike happens at time step 15 as shown in the first plot by the first red dash. Therefore, the pre-trace value is increamented by a value of 16 ('pre_trace_kernel_magnitude'), at time step 15, after which the decaying of the trace ensues until the next pre-synaptic spike at time step 46. Similar update dynamics are used to update the post-traces also. " ] @@ -517,7 +649,11 @@ { "cell_type": "code", "execution_count": 17, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "data": { @@ -566,7 +702,11 @@ { "cell_type": "code", "execution_count": 18, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "data": { @@ -595,14 +735,22 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "The tag dynamics replicate the STDP learning rule with an additional decay behaviour to represent an eligibility trace. The update to the tag trace is based on the co-relation between pre and post-synaptic spikes. Consider the trace dynamics for 'Neuron A', the post-synaptic neuron A, fires at time-step 6, which is indicated by the first green dash in the 'spike arrival' plot. In Loihi 2, the update of the traces are done after both the pre-synaptic and post-synaptic neurons have fired. At the advent of pre-synaptic spike at time step 15, the tag trace of the post-synaptic neuron A which fired before the pre-synaptic neuron, is decremented by the dot product of the pre-trace value and the post-trace value at time step 15. This behaviour can be seen in the plot shown above. 
The evolution of the tag dynamics follows the same principle, producing both depression and potentiation according to the correlation between pre- and post-synaptic spikes. " ] }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Plot reward trace dynamics" ] @@ -610,7 +758,11 @@ { "cell_type": "code", "execution_count": 19, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "data": { @@ -659,14 +811,22 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "In Loihi 2, individual post-synaptic neurons can be microcoded with reward traces that can be driven heterogeneously by synaptic inputs that differ in both magnitude and time. The reward trace plot shows the difference in the third-factor traces received by the two post-synaptic neurons. The learnable synaptic weight is updated at every 't_epoch' based on the instantaneous value of the eligibility trace and the reward trace. In an 'R-STDP' rule, the synaptic weight is updated only when the reward/third-factor value is non-zero. The weights of 'Neuron A' can accordingly be seen to change only during the interval from time step 25 to time step 50, after which the weight trace value remains constant. This behaviour demonstrates the functionality of the Reward-Modulated Spike-Timing Dependent Plasticity rule we defined in this tutorial." ] }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "### Advanced Topic: Implementing custom learning rule interfaces\n", "\n", @@ -692,7 +852,11 @@ { "cell_type": "code", "execution_count": 20, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "from lava.magma.core.learning.learning_rule import Loihi3FLearningRule\n", @@ -779,7 +943,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "### How to learn more?\n", "\n", @@ -821,4 +989,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} +} \ No newline at end of file diff --git a/tutorials/in_depth/tutorial01_installing_lava.ipynb b/tutorials/in_depth/tutorial01_installing_lava.ipynb index afb67b042..5f43c188b 100644 --- a/tutorials/in_depth/tutorial01_installing_lava.ipynb +++ b/tutorials/in_depth/tutorial01_installing_lava.ipynb @@ -2,7 +2,11 @@ "cells": [ { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "*Copyright (C) 2021 Intel Corporation*
\n", "*SPDX-License-Identifier: BSD-3-Clause*
\n", @@ -131,7 +135,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## 5. Tutorials\n", "\n", @@ -154,7 +162,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## How to learn more?\n", "\n", @@ -187,4 +199,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} +} \ No newline at end of file diff --git a/tutorials/in_depth/tutorial02_processes.ipynb b/tutorials/in_depth/tutorial02_processes.ipynb index b2041db8c..a63cba73a 100644 --- a/tutorials/in_depth/tutorial02_processes.ipynb +++ b/tutorials/in_depth/tutorial02_processes.ipynb @@ -2,7 +2,11 @@ "cells": [ { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "*Copyright (C) 2021 Intel Corporation*
\n", "*SPDX-License-Identifier: BSD-3-Clause*
\n", @@ -17,7 +21,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Recommended tutorials before starting:\n", "\n", @@ -47,7 +55,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## How to build a _Process_?\n", "\n", @@ -59,7 +71,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### _AbstractProcess_: Defining _Vars_, _Ports_, and the API\n", "\n", @@ -87,7 +103,11 @@ { "cell_type": "code", "execution_count": 1, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "import numpy as np\n", @@ -129,7 +149,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "You may have noticed that most of the _Vars_ were initialized by scalar integers. But the synaptic current _u_ illustrates that _Vars_ can in general be initialized with numeric objects that have a dimensionality equal or less than specified by its _shape_ argument. The initial value will be scaled up to match the _Var_ dimension at run time.\n", "\n", @@ -142,7 +166,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### _ProcessModel_: Defining the behavior of a _Process_\n", "\n", @@ -156,7 +184,11 @@ { "cell_type": "code", "execution_count": 2, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "import numpy as np\n", @@ -193,7 +225,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Instantiating the _Process_\n", "\n", @@ -203,7 +239,11 @@ { "cell_type": "code", "execution_count": 3, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "n_neurons = 3\n", @@ -213,7 +253,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Interacting with _Processes_\n", "\n", @@ -227,7 +271,11 @@ { "cell_type": "code", "execution_count": 4, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -243,14 +291,22 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "As described above, the _Var_ _v_ has in this example been initialized as a scalar value that describes the membrane voltage of all three neurons simultaneously." 
] }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Using custom APIs\n", "\n", @@ -260,7 +316,11 @@ { "cell_type": "code", "execution_count": 5, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -282,7 +342,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Executing a _Process_\n", "\n", @@ -294,18 +358,25 @@ { "cell_type": "code", "execution_count": 6, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ - "from lava.magma.core.run_configs import Loihi1SimCfg\n", - "from lava.magma.core.run_conditions import RunSteps\n", + "from lava import Loihi2SimCfg, RunSteps\n", "\n", - "lif.run(condition=RunSteps(num_steps=1), run_cfg=Loihi1SimCfg())" + "lif.run(condition=RunSteps(num_steps=1), run_cfg=Loihi2SimCfg())" ] }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "The voltage of each LIF neuron should now have increased by the bias value, 3, from their initial values of 0. Check if the neurons have evolved as expected." ] @@ -313,7 +384,11 @@ { "cell_type": "code", "execution_count": 7, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -329,7 +404,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Update _Vars_\n", "\n", @@ -339,7 +418,11 @@ { "cell_type": "code", "execution_count": 8, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -356,7 +439,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "Note that the _set()_ method becomes available once the _Process_ has been run. Prior to the first run, use the *\\_\\_init\\_\\_* function of the _Process_ to set _Vars_.\n", "\n", @@ -368,7 +455,11 @@ { "cell_type": "code", "execution_count": 9, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "lif.stop()" @@ -376,7 +467,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## How to learn more?\n", "\n", diff --git a/tutorials/in_depth/tutorial03_process_models.ipynb b/tutorials/in_depth/tutorial03_process_models.ipynb index 198cb3562..564760900 100644 --- a/tutorials/in_depth/tutorial03_process_models.ipynb +++ b/tutorials/in_depth/tutorial03_process_models.ipynb @@ -2,7 +2,11 @@ "cells": [ { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "*Copyright (C) 2021 Intel Corporation*
\n", "*SPDX-License-Identifier: BSD-3-Clause*
\n", @@ -19,21 +23,33 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "" ] }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "In this tutorial, we walk through the creation of multiple _LeafProcessModels_ that could be used to implement the behavior of a Leaky Integrate-and-Fire (LIF) neuron _Process_." ] }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Recommended tutorials before starting: \n", "- [Installing Lava](./tutorial01_installing_lava.ipynb \"Tutorial on Installing Lava\")\n", @@ -42,14 +58,22 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Create a LIF _Process_" ] }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "First, we will define our LIF _Process_ exactly as it is defined in the `Magma` core library of Lava. (For more information on defining Lava Processes, see the [previous tutorial](./tutorial02_processes.ipynb).) Here the LIF neural _Process_ accepts activity from synaptic inputs via _InPort_ `a_in` and outputs spiking activity via _OutPort_ `s_out`." ] @@ -57,7 +81,11 @@ { "cell_type": "code", "execution_count": 1, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "from lava.magma.core.process.process import AbstractProcess\n", @@ -106,28 +134,44 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Create a Python _LeafProcessModel_ that implements the LIF _Process_" ] }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "Now, we will create a Python _ProcessModel_, or _PyProcessModel_, that runs on a CPU compute resource and implements the LIF _Process_ behavior." ] }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Setup" ] }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "We begin by importing the required Lava classes.\n", "First, we setup our compute resources (CPU) and our _SyncProtocol_. A _SyncProtocol_ defines how and when parallel _Processes_ synchronize. Here we use the _LoihiProtoicol_ which defines the synchronization phases required for execution on the Loihi chip, but users may also specify a completely asynchronous protocol or define a custom _SyncProtocol_. The decorators imported will be necessary to specify the resource _Requirements_ and _SyncProtocol_ of our _ProcessModel_. " @@ -136,7 +180,11 @@ { "cell_type": "code", "execution_count": 2, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "import numpy as np\n", @@ -148,7 +196,10 @@ { "cell_type": "markdown", "metadata": { - "tags": [] + "tags": [], + "pycharm": { + "name": "#%% md\n" + } }, "source": [ "Now we import the parent class from which our _ProcessModel_ inherits, as well as our required _Port_ and _Variable_ types. _PyLoihiProcessModel_ is the abstract class for a Python _ProcessModel_ that implements the _LoihiProtocol_. Our _ProcessModel_ needs _Ports_ and _Variables_ that mirror those the LIF _Process_. 
The in-ports and out-ports of a Python _ProcessModel_ have types _PyInPort_ and _PyOutPort_, respectively, while variables have type _LavaPyType_." @@ -157,7 +208,11 @@ { "cell_type": "code", "execution_count": 3, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "from lava.magma.core.model.py.model import PyLoihiProcessModel\n", @@ -167,7 +222,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Defining a _PyLifModel_ for LIF" ] @@ -175,7 +234,10 @@ { "cell_type": "markdown", "metadata": { - "tags": [] + "tags": [], + "pycharm": { + "name": "#%% md\n" + } }, "source": [ "We now define a _LeafProcessModel_ `PyLifModel` that implements the behavior of the LIF _Process_.\n", @@ -190,7 +252,11 @@ { "cell_type": "code", "execution_count": 4, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "import numpy as np\n", @@ -228,7 +294,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Compile and run _PyLifModel_" ] @@ -236,7 +306,11 @@ { "cell_type": "code", "execution_count": 5, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -259,14 +333,22 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Selecting 1 _ProcessModel_: More on _LeafProcessModel_ attributes and relations" ] }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "We have demonstrated multiple _ProcessModel_ implementations of a single LIF _Process_. How is one of several _ProcessModels_ then selected as the implementation of a _Process_ during runtime? To answer that question, we take a deeper dive into the attributes of a _LeafProcessModel_ and the relationship between a _LeafProcessModel_, a _Process_, and a _SyncProtocol_. \n", "\n", @@ -278,7 +360,10 @@ { "cell_type": "markdown", "metadata": { - "tags": [] + "tags": [], + "pycharm": { + "name": "#%% md\n" + } }, "source": [ "## How to learn more?\n", @@ -312,4 +397,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} +} \ No newline at end of file diff --git a/tutorials/in_depth/tutorial04_execution.ipynb b/tutorials/in_depth/tutorial04_execution.ipynb index 35544e1ac..85ab999eb 100644 --- a/tutorials/in_depth/tutorial04_execution.ipynb +++ b/tutorials/in_depth/tutorial04_execution.ipynb @@ -44,14 +44,18 @@ }, "outputs": [], "source": [ - "from lava.magma.core.run_conditions import RunSteps\n", + "from lava import RunSteps\n", "\n", "run_condition = RunSteps(num_steps=42, blocking=False)" ] }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "The run condition _RunContinuous_ enables you to run a _Process_ continuously. In this case, the _Process_ will run indefinitely until you explicitly call `pause()` or `stop()` (see below). This call never blocks the program flow (blocking=False)." 
] @@ -66,14 +70,18 @@ }, "outputs": [], "source": [ - "from lava.magma.core.run_conditions import RunContinuous\n", + "from lava import RunContinuous\n", "\n", "run_condition = RunContinuous()" ] }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Run configurations\n", "A _RunConfig_ specifies on what devices the _Processes_ should be executed.\n", @@ -99,14 +107,18 @@ }, "outputs": [], "source": [ - "from lava.magma.core.run_configs import Loihi1SimCfg\n", + "from lava import Loihi2SimCfg\n", "\n", - "run_cfg = Loihi1SimCfg()" + "run_cfg = Loihi2SimCfg()" ] }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "We can now use both a _RunCondition_ and a _RunConfig_ to execute a simple leaky integrate-and-fire (LIF) neuron." ] @@ -114,23 +126,29 @@ { "cell_type": "code", "execution_count": 4, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ - "from lava.proc.lif.process import LIF\n", - "from lava.magma.core.run_conditions import RunSteps\n", - "from lava.magma.core.run_configs import Loihi1SimCfg\n", + "from lava import LIF, RunSteps, Loihi2SimCfg\n", "\n", "# create a Process for a LIF neuron\n", "lif = LIF(shape=(1,))\n", "\n", "# execute that Process for 42 time steps in simulation\n", - "lif.run(condition=RunSteps(num_steps=42), run_cfg=Loihi1SimCfg())" + "lif.run(condition=RunSteps(num_steps=42), run_cfg=Loihi2SimCfg())" ] }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Running multiple _Processes_\n", "\n", @@ -140,14 +158,15 @@ { "cell_type": "code", "execution_count": 5, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "import numpy as np\n", - "from lava.proc.lif.process import LIF\n", - "from lava.proc.dense.process import Dense\n", - "from lava.magma.core.run_conditions import RunSteps\n", - "from lava.magma.core.run_configs import Loihi1SimCfg\n", + "from lava import LIF, Dense, RunSteps, Loihi2SimCfg\n", "\n", "# create processes\n", "lif1 = LIF(shape=(1,))\n", @@ -160,12 +179,16 @@ "dense.a_out.connect(lif2.a_in)\n", "\n", "# execute Process lif2 and all Processes connected to it (dense, lif1)\n", - "lif2.run(condition=RunSteps(num_steps=42), run_cfg=Loihi1SimCfg())" + "lif2.run(condition=RunSteps(num_steps=42), run_cfg=Loihi2SimCfg())" ] }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Pausing, resuming, and stopping execution\n", "\n", @@ -201,14 +224,12 @@ ], "source": [ "import numpy as np\n", - "from lava.proc.lif.process import LIF\n", - "from lava.magma.core.run_conditions import RunContinuous\n", - "from lava.magma.core.run_configs import Loihi1SimCfg\n", + "from lava import LIF, RunContinuous, Loihi2SimCfg\n", "\n", "lif3 = LIF(shape=(1, ))\n", "\n", "# start continuous execution\n", - "lif3.run(condition=RunContinuous(), run_cfg=Loihi1SimCfg())\n", + "lif3.run(condition=RunContinuous(), run_cfg=Loihi2SimCfg())\n", "\n", "# pause execution\n", "lif3.pause()\n", @@ -220,7 +241,7 @@ "lif3.v.set(np.array([0]))\n", "\n", "# resume continuous execution\n", - "lif3.run(condition=RunContinuous(), run_cfg=Loihi1SimCfg())\n", + "lif3.run(condition=RunContinuous(), run_cfg=Loihi2SimCfg())\n", "\n", "# terminate execution;\n", "# after this, you no longer have access to the state 
of lif\n", @@ -229,7 +250,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Manual compilation and execution\n", "\n", @@ -253,8 +278,7 @@ }, "outputs": [], "source": [ - "from lava.proc.lif.process import LIF\n", - "from lava.proc.dense.process import Dense\n", + "from lava import LIF, Dense\n", "\n", "lif1 = LIF(shape=(1,))\n", "dense = Dense(weights=np.eye(1))\n", @@ -314,13 +338,13 @@ "outputs": [], "source": [ "from lava.magma.compiler.compiler import Compiler\n", - "from lava.magma.core.run_configs import Loihi1SimCfg\n", + "from lava import Loihi2SimCfg\n", "\n", "# create a compiler\n", "compiler = Compiler()\n", "\n", "# compile the Process (and all connected Processes) into an executable\n", - "executable = compiler.compile(lif2, run_cfg=Loihi1SimCfg())" + "executable = compiler.compile(lif2, run_cfg=Loihi2SimCfg())" ] }, { @@ -347,7 +371,7 @@ "outputs": [], "source": [ "from lava.magma.runtime.runtime import Runtime\n", - "from lava.magma.core.run_conditions import RunSteps\n", + "from lava import RunSteps\n", "from lava.magma.core.process.message_interface_enum import ActorType\n", "\n", "# create and initialize a runtime\n", @@ -384,10 +408,7 @@ }, "outputs": [], "source": [ - "from lava.proc.lif.process import LIF\n", - "from lava.proc.dense.process import Dense\n", - "from lava.magma.core.run_conditions import RunSteps\n", - "from lava.magma.core.run_configs import Loihi1SimCfg\n", + "from lava import LIF, Dense, RunSteps, Loihi2SimCfg\n", "\n", "# create Processes\n", "lif = LIF(shape=(1,))\n", @@ -397,7 +418,7 @@ "lif.s_out.connect(dense.s_in)\n", "\n", "# execute Processes\n", - "lif.run(condition=RunSteps(num_steps=42), run_cfg=Loihi1SimCfg())\n", + "lif.run(condition=RunSteps(num_steps=42), run_cfg=Loihi2SimCfg())\n", "\n", "# stop Processes\n", "lif.stop()" @@ -405,7 +426,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## How to learn more?\n", "\n", @@ -443,4 +468,4 @@ }, "nbformat": 4, "nbformat_minor": 1 -} +} \ No newline at end of file diff --git a/tutorials/in_depth/tutorial05_connect_processes.ipynb b/tutorials/in_depth/tutorial05_connect_processes.ipynb index cb4ce250e..b001c190a 100644 --- a/tutorials/in_depth/tutorial05_connect_processes.ipynb +++ b/tutorials/in_depth/tutorial05_connect_processes.ipynb @@ -42,7 +42,11 @@ "cell_type": "code", "execution_count": 1, "id": "911c7c62", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "from lava.magma.core.process.process import AbstractProcess\n", @@ -52,7 +56,11 @@ { "cell_type": "markdown", "id": "b359cb60", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "As first step we define the _Processes_ _P1_ and _P2_ with their respective _Ports_ _out_ and _inp_." 
] @@ -61,7 +69,11 @@ "cell_type": "code", "execution_count": 2, "id": "628ec281", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "# Minimal process with an OutPort\n", @@ -83,7 +95,11 @@ { "cell_type": "markdown", "id": "5517b6a2", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "_Processes_ _P1_ and _P2_ each require a corresponding _ProcessModel_ which implements their _Ports_, as well as a simple RunConfig for sending and receiving data.\n", "\n", @@ -96,7 +112,11 @@ "cell_type": "code", "execution_count": 3, "id": "1bf11a81", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "import numpy as np\n", @@ -112,7 +132,11 @@ "cell_type": "code", "execution_count": 4, "id": "4cab9b45", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "# A minimal PyProcModel implementing P1\n", @@ -144,7 +168,11 @@ { "cell_type": "markdown", "id": "b5fdc5ac", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "Next, the processes _P1_ and _P2_ are instantiated and the output _Port_ _out_ from _Process_ _P1_ is connected with the input _Port_ _inp_ of _Process_ _P2_." ] @@ -153,7 +181,11 @@ "cell_type": "code", "execution_count": 5, "id": "9f3ff826", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "sender = P1()\n", @@ -172,7 +204,11 @@ { "cell_type": "markdown", "id": "dc243685", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "Calling `run()` on either of these _Processes_ will first call the _Compiler_. During compilation the specified connection is set up by creating a channel between _P1_ and _P2_. Now data can be transferred during execution as seen by the output print statements." ] @@ -181,7 +217,11 @@ "cell_type": "code", "execution_count": 6, "id": "41a6fb52", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "from lava.magma.core.run_configs import Loihi1SimCfg\n", @@ -193,7 +233,10 @@ "execution_count": 7, "id": "f457f0ce", "metadata": { - "scrolled": true + "scrolled": true, + "pycharm": { + "name": "#%%\n" + } }, "outputs": [ { @@ -213,7 +256,11 @@ { "cell_type": "markdown", "id": "f7f639ca", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "The instance `sender` of P1 sent the data `[1 2]` via its _OutPort_ `out` to the _InPort_ `in` of the instance `recv` of P2, where the data is received." ] }, { "cell_type": "markdown", "id": "708a1c95", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Possible connections\n", "This first example was very simple. In principle, _Processes_ can have multiple input and output _Ports_ which can be freely connected with each other. 
Also, _Processes_ which execute on different compute resources can be connected in the same way.\n", @@ -238,7 +289,11 @@ { "cell_type": "markdown", "id": "490422b6", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Connect multiple _InPorts_ from a single _OutPort_\n", "\n", @@ -249,7 +304,11 @@ "cell_type": "code", "execution_count": 8, "id": "597e7d19", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "sender = P1()\n", @@ -276,7 +335,11 @@ "cell_type": "code", "execution_count": 9, "id": "f2dccd5f", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -297,7 +360,11 @@ { "cell_type": "markdown", "id": "d6a69af4", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "The instance `sender` of P1 sent the data `[1 2]` to the 3 instances `recv1, recv2, recv3` of P2." ] }, { "cell_type": "markdown", "id": "69ae6cee", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Connecting multiple _OutPorts_ to a single _InPort_\n", "\n", @@ -318,7 +389,11 @@ "cell_type": "code", "execution_count": 10, "id": "b1f8659a", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "sender1 = P1()\n", @@ -345,7 +420,11 @@ "cell_type": "code", "execution_count": 11, "id": "b94ed812", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -366,7 +445,11 @@ { "cell_type": "markdown", "id": "17348f04", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "The 3 instances `sender1, sender2, sender3` of P1 sent the data `[1 2]` to the instance `recv` of P2, where the data was summed up to `[3 6]`." ] }, { "cell_type": "markdown", "id": "24ceb7ca", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## How to learn more?\n", "\n", @@ -407,4 +494,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} +} \ No newline at end of file diff --git a/tutorials/in_depth/tutorial06_hierarchical_processes.ipynb b/tutorials/in_depth/tutorial06_hierarchical_processes.ipynb index 575c027dd..eb777e6ad 100644 --- a/tutorials/in_depth/tutorial06_hierarchical_processes.ipynb +++ b/tutorials/in_depth/tutorial06_hierarchical_processes.ipynb @@ -3,7 +3,11 @@ { "cell_type": "markdown", "id": "3874ace0", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "*Copyright (C) 2021 Intel Corporation*
\n", "*SPDX-License-Identifier: BSD-3-Clause*
\n", @@ -23,7 +27,11 @@ { "cell_type": "markdown", "id": "caf49931", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Recommended tutorials before starting: \n", "\n", @@ -37,7 +45,11 @@ { "cell_type": "markdown", "id": "93b41ea3", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Create LIF and Dense _Processes_ and _ProcessModels_" ] @@ -45,7 +57,11 @@ { "cell_type": "markdown", "id": "0bdc5ce8", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "The [ProcessModel Tutorial](./tutorial03_process_models.ipynb) walks through the creation of a LIF _Process_ and an implementing _PyLoihiProcessModel_. Our DenseLayer _Process_ additionally requires a Dense Lava _Process_ and _ProcessModel_ that have the behavior of a dense set of synaptic connections and weights. The Dense Connection _Process_ can be used to connect neural _Processes_. For completeness, we'll first briefly show an example LIF and Dense _Process_ and _PyLoihiProcessModel_." ] @@ -53,7 +69,11 @@ { "cell_type": "markdown", "id": "d60a928a", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Create a Dense connection _Process_" ] @@ -62,7 +82,11 @@ "cell_type": "code", "execution_count": 1, "id": "4fc1ffa1", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "from lava.magma.core.process.process import AbstractProcess\n", @@ -87,7 +111,11 @@ { "cell_type": "markdown", "id": "b8767584", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Create a Python Dense connection _ProcessModel_ implementing the Loihi Sync Protocol and requiring a CPU compute resource" ] @@ -96,7 +124,11 @@ "cell_type": "code", "execution_count": 2, "id": "019c4fc7", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "import numpy as np\n", @@ -124,7 +156,11 @@ { "cell_type": "markdown", "id": "f40e9bb8", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Create a LIF neuron _Process_" ] @@ -133,7 +169,11 @@ "cell_type": "code", "execution_count": 3, "id": "ff1de3df", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "from lava.magma.core.process.process import AbstractProcess\n", @@ -177,7 +217,11 @@ { "cell_type": "markdown", "id": "978efd0d", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Create a Python LIF neuron _ProcessModel_ implementing the Loihi Sync Protocol and requiring a CPU compute resource" ] @@ -186,7 +230,11 @@ "cell_type": "code", "execution_count": 4, "id": "d3a66cf5", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "import numpy as np\n", @@ -222,7 +270,11 @@ { "cell_type": "markdown", "id": "1583a5dc", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Create a DenseLayer Hierarchical _Process_ that encompasses Dense and LIF _Process_ behavior" ] @@ -230,7 +282,11 @@ { "cell_type": "markdown", "id": "894846d3", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "Now, we create a DenseLayer _Hierarchical Process_ combining LIF neural _Processes_ and Dense connection _Processes_. 
Our _Hierarchical Process_ contains all of the variables (`u`, `v`, `bias`, `du`, `dv` and `vth`) native to the LIF _Process_ plus the `weights` variable native to the Dense _Process_. The InPort to our _Hierarchical Process_ is `s_in`, which represents the spike inputs to our Dense synaptic connections. These Dense connections synapse onto a population of LIF neurons. The OutPort of our _Hierarchical Process_ is `s_out`, which represents the spikes output by the layer of LIF neurons. We do not have to define the _OutPort_ of the _Dense_ Process nor the _InPort_ of the _LIF_ Process in the _DenseLayer_ Process, as they are only used internally and won't be exposed." ] @@ -239,7 +295,11 @@ "cell_type": "code", "execution_count": 5, "id": "7c646865", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "class DenseLayer(AbstractProcess):\n", @@ -270,7 +330,11 @@ { "cell_type": "markdown", "id": "476d0b27", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Create a _SubProcessModel_ that implements the DenseLayer _Process_ using Dense and LIF child _Processes_" ] }, { "cell_type": "markdown", "id": "bade89b3", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "Now, we will create the _SubProcessModel_ that implements our DenseLayer _Process_. This inherits from the _AbstractSubProcessModel_ class. Recall that _SubProcessModels_ also inherit the compute resource requirements from the _ProcessModels_ of their child _Processes_. In this example, we will use the LIF and Dense _ProcessModels_ requiring a CPU compute resource that were defined earlier in the tutorial, and `SubDenseLayerModel` will therefore implicitly require the CPU compute resource. 
\n", "\n", @@ -299,7 +367,11 @@ "cell_type": "code", "execution_count": 6, "id": "fd97abd6", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "import numpy as np\n", @@ -349,7 +421,11 @@ { "cell_type": "markdown", "id": "a75db393", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Run the DenseLayer _Process_" ] @@ -357,7 +433,11 @@ { "cell_type": "markdown", "id": "96b73f3e", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Run Connected DenseLayer _Processes_" ] @@ -366,7 +446,11 @@ "cell_type": "code", "execution_count": 7, "id": "b1db8925", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -447,9 +531,7 @@ } ], "source": [ - "from lava.magma.core.run_configs import RunConfig, Loihi1SimCfg\n", - "from lava.magma.core.run_conditions import RunSteps\n", - "from lava.proc.io import sink, source\n", + "from lava import Loihi2SimCfg, RunSteps\n", "\n", "dim = (3, 3)\n", "# Create the weight matrix.\n", @@ -465,7 +547,7 @@ "print('Layer 1 weights: \\n', layer1.weights.get(),'\\n')\n", "print('\\n ----- \\n')\n", "\n", - "rcfg = Loihi1SimCfg(select_tag='floating_pt', select_sub_proc_model=True)\n", + "rcfg = Loihi2SimCfg(select_tag='floating_pt', select_sub_proc_model=True)\n", "\n", "for t in range(9):\n", " # Run the entire network of Processes.\n", @@ -483,7 +565,11 @@ { "cell_type": "markdown", "id": "0d41b968", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## How to learn more?\n", "\n", @@ -521,4 +607,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} +} \ No newline at end of file diff --git a/tutorials/in_depth/tutorial07_remote_memory_access.ipynb b/tutorials/in_depth/tutorial07_remote_memory_access.ipynb index b7c67abee..b94f74c97 100644 --- a/tutorials/in_depth/tutorial07_remote_memory_access.ipynb +++ b/tutorials/in_depth/tutorial07_remote_memory_access.ipynb @@ -3,7 +3,11 @@ { "cell_type": "markdown", "id": "8ff39dda", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "*Copyright (C) 2021 Intel Corporation*
\n", "*SPDX-License-Identifier: BSD-3-Clause*
\n", @@ -30,7 +34,11 @@ { "cell_type": "markdown", "id": "d3568645", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Recommended tutorials before starting: \n", "\n", @@ -45,7 +53,11 @@ { "cell_type": "markdown", "id": "b2e5ed15", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Create a minimal _Process_ and _ProcessModel_ with a _RefPort_" ] @@ -53,7 +65,11 @@ { "cell_type": "markdown", "id": "9407a4f8", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "The [ProcessModel Tutorial](./tutorial03_process_models.ipynb) walks through the creation of _Processes_ and corresponding _ProcessModels_. In order to demonstrate RefPorts we create a minimal process P1 with a _RefPort_ `ref` and a minimal process P2 with a _Var_ `var`. \n", "\n", @@ -64,7 +80,11 @@ "cell_type": "code", "execution_count": 1, "id": "b72d269a", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "from lava.magma.core.process.process import AbstractProcess\n", @@ -89,7 +109,11 @@ { "cell_type": "markdown", "id": "d3beee93", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Create a Python Process Model implementing the Loihi Sync Protocol and requiring a CPU compute resource\n", "We also create the corresponding _ProcessModels_ PyProcModel1 and PyProcModel2 which implement the process P1 and P2. The value of the _Var_ of P2 `var` is initialized with the value 5. The behavior we implement prints out the value of the `var` in P1 every time step, demonstrating the **read** ability of a _RefPort_ `ref`. Afterwards we set the value of `var` by adding the current time step to it and write it with `ref`, demonstrating the **write** abiltity of a _RefPort_." @@ -99,7 +123,11 @@ "cell_type": "code", "execution_count": 2, "id": "b9b8bad0", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "import numpy as np\n", @@ -142,7 +170,11 @@ { "cell_type": "markdown", "id": "614aa4be", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Run the _Processes_\n", "The _RefPort_ `ref` needs to be connected with the _Var_ `var`, before execution. The expected output will be the initial value 5 of `var` at the beginning, followed by 6 (5+1), 8 (6+2), 11 (8+3), 15 (11+4)." @@ -152,7 +184,11 @@ "cell_type": "code", "execution_count": 3, "id": "47698fc0", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -167,8 +203,7 @@ } ], "source": [ - "from lava.magma.core.run_configs import Loihi1SimCfg\n", - "from lava.magma.core.run_conditions import RunSteps\n", + "from lava import Loihi2SimCfg, RunSteps\n", "\n", "# Create process P1 and P2\n", "proc1 = P1()\n", @@ -178,14 +213,18 @@ "proc1.ref.connect_var(proc2.var)\n", "\n", "# Run the network for 5 time steps\n", - "proc1.run(condition=RunSteps(num_steps=5), run_cfg=Loihi1SimCfg())\n", + "proc1.run(condition=RunSteps(num_steps=5), run_cfg=Loihi2SimCfg())\n", "proc1.stop()" ] }, { "cell_type": "markdown", "id": "b9bf440f", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Implicit and explicit VarPorts\n", "In the example above we demonstrated the read and write ability of a _RefPort_ which used an **implicit** _VarPort_ to connect to the _Var_. 
An implicit _VarPort_ is created when `connect_var(..)` is used to connect a _RefPort_ with a _Var_. A _RefPort_ can also be connected to a _VarPort_ **explicitly** defined in a _Process_ using `connect(..)`. In order to demonstrate explicit _VarPorts_ we redefine _Process_ P2 and the corresponding _ProcessModel_." @@ -195,7 +234,11 @@ "cell_type": "code", "execution_count": 4, "id": "d9dd7405", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "from lava.magma.core.process.ports.ports import VarPort\n", @@ -212,7 +255,11 @@ "cell_type": "code", "execution_count": 5, "id": "e2215c6d", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "from lava.magma.core.model.py.ports import PyVarPort\n", @@ -228,7 +275,11 @@ { "cell_type": "markdown", "id": "683df3ff", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "This time the _RefPort_ `ref` is connected to the explicitly defined _VarPort_ `var_port`. The output is the same as before." ] @@ -237,7 +288,11 @@ "cell_type": "code", "execution_count": 6, "id": "bfc0ec3b", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -260,14 +315,18 @@ "proc1.ref.connect(proc2.var_port)\n", "\n", "# Run the network for 5 time steps\n", - "proc1.run(condition=RunSteps(num_steps=5), run_cfg=Loihi1SimCfg())\n", + "proc1.run(condition=RunSteps(num_steps=5), run_cfg=Loihi2SimCfg())\n", "proc1.stop()" ] }, { "cell_type": "markdown", "id": "ff8db42e", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Options to connect RefPorts and VarPorts\n", "_RefPorts_ can be connected in different ways to _Vars_ and _VarPorts_. _RefPorts_ and _VarPorts_ can also be connected to themselves in case of hierarchical processes. 
\n", @@ -285,7 +344,11 @@ { "cell_type": "markdown", "id": "1eb2d987", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## How to learn more?\n", "\n", @@ -316,4 +379,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} +} \ No newline at end of file diff --git a/tutorials/in_depth/tutorial08_stdp.ipynb b/tutorials/in_depth/tutorial08_stdp.ipynb index 24b40adec..0b020638d 100644 --- a/tutorials/in_depth/tutorial08_stdp.ipynb +++ b/tutorials/in_depth/tutorial08_stdp.ipynb @@ -118,7 +118,7 @@ }, "outputs": [], "source": [ - "from lava.proc.learning_rules.stdp_learning_rule import STDPLoihi" + "from lava import STDPLoihi" ] }, { @@ -207,9 +207,7 @@ }, "outputs": [], "source": [ - "from lava.proc.lif.process import LIF\n", - "from lava.proc.io.source import RingBuffer\n", - "from lava.proc.dense.process import LearningDense, Dense" + "from lava import LIF, SourceRingBuffer, LearningDense, Dense" ] }, { @@ -223,8 +221,8 @@ "outputs": [], "source": [ "# Create input devices\n", - "pattern_pre = RingBuffer(data=spike_raster_pre.astype(int))\n", - "pattern_post = RingBuffer(data=spike_raster_post.astype(int))\n", + "pattern_pre = SourceRingBuffer(data=spike_raster_pre.astype(int))\n", + "pattern_post = SourceRingBuffer(data=spike_raster_post.astype(int))\n", "\n", "# Create input connectivity\n", "conn_inp_pre = Dense(weights=wgt_inp)\n", @@ -293,7 +291,7 @@ }, "outputs": [], "source": [ - "from lava.proc.monitor.process import Monitor" + "from lava import Monitor" ] }, { @@ -344,8 +342,7 @@ }, "outputs": [], "source": [ - "from lava.magma.core.run_conditions import RunSteps\n", - "from lava.magma.core.run_configs import Loihi2SimCfg" + "from lava import RunSteps, Loihi2SimCfg" ] }, { diff --git a/tutorials/in_depth/tutorial09_custom_learning_rules.ipynb b/tutorials/in_depth/tutorial09_custom_learning_rules.ipynb index fc933fcd1..1a7cb1e59 100644 --- a/tutorials/in_depth/tutorial09_custom_learning_rules.ipynb +++ b/tutorials/in_depth/tutorial09_custom_learning_rules.ipynb @@ -222,7 +222,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "### Network parameters" ] @@ -348,9 +352,7 @@ }, "outputs": [], "source": [ - "from lava.proc.lif.process import LIF\n", - "from lava.proc.io.source import RingBuffer\n", - "from lava.proc.dense.process import LearningDense, Dense" + "from lava import LIF, SourceRingBuffer, LearningDense, Dense" ] }, { @@ -364,8 +366,8 @@ "outputs": [], "source": [ "# Create input devices\n", - "pattern_pre = RingBuffer(data=spike_raster_pre.astype(int))\n", - "pattern_post = RingBuffer(data=spike_raster_post.astype(int))\n", + "pattern_pre = SourceRingBuffer(data=spike_raster_pre.astype(int))\n", + "pattern_post = SourceRingBuffer(data=spike_raster_post.astype(int))\n", "\n", "# Create input connectivity\n", "conn_inp_pre = Dense(weights=wgt_inp)\n", @@ -408,7 +410,7 @@ "lif_pre.s_out.connect(plast_conn.s_in)\n", "plast_conn.a_out.connect(lif_post.a_in)\n", "\n", - "# Connect back-propagating actionpotential (BAP)\n", + "# Connect back-propagating action potential (BAP)\n", "lif_post.s_out.connect(plast_conn.s_in_bap)" ] }, @@ -434,7 +436,7 @@ }, "outputs": [], "source": [ - "from lava.proc.monitor.process import Monitor" + "from lava import Monitor" ] }, { @@ -485,8 +487,7 @@ }, "outputs": [], "source": [ - "from lava.magma.core.run_conditions import RunSteps\n", - "from lava.magma.core.run_configs import Loihi2SimCfg" + "from lava import RunSteps, Loihi2SimCfg" ] }, { 
@@ -838,4 +839,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} +} \ No newline at end of file diff --git a/tutorials/in_depth/tutorial10_sigma_delta_neurons.ipynb b/tutorials/in_depth/tutorial10_sigma_delta_neurons.ipynb index a1f973055..5d59ad195 100644 --- a/tutorials/in_depth/tutorial10_sigma_delta_neurons.ipynb +++ b/tutorials/in_depth/tutorial10_sigma_delta_neurons.ipynb @@ -3,7 +3,11 @@ { "cell_type": "markdown", "id": "cd1ecf7e-d99e-412c-ace3-57c24c5710f0", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "*Copyright (C) 2022 Intel Corporation*
\n", "*SPDX-License-Identifier: BSD-3-Clause*
\n", @@ -37,25 +41,28 @@ "cell_type": "code", "execution_count": 1, "id": "dd2f6874-4968-4770-a1eb-3a8e53527926", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "import numpy as np\n", "import matplotlib.pyplot as plt\n", "import PIL\n", "import urllib\n", - "from lava.proc.io.source import RingBuffer as Input\n", - "from lava.proc.dense.process import Dense\n", - "from lava.proc.sdn.process import SigmaDelta, ActivationMode, Delta\n", - "from lava.proc.monitor.process import Monitor\n", - "from lava.magma.core.run_conditions import RunSteps\n", - "from lava.magma.core.run_configs import Loihi2SimCfg" + "from lava import SourceRingBuffer, Dense, SigmaDelta, Delta, Monitor, RunSteps, Loihi2SimCfg, ActivationMode" ] }, { "cell_type": "markdown", "id": "d80bf4a2-656c-45cb-8f85-2ce2164e7085", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Create input time-series and neuron\n", "\n", @@ -66,7 +73,11 @@ "cell_type": "code", "execution_count": 2, "id": "faf81413-6ede-4390-b44a-43ead0a0ccd3", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -125,7 +136,11 @@ { "cell_type": "markdown", "id": "01adb8d7-bca3-44f5-ae8a-0f35a44cc67f", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "For this, and any other time-series, a standard ANN must multiply and accumulate (MAC) once per timestep." ] @@ -133,7 +148,11 @@ { "cell_type": "markdown", "id": "ef2cac1a-04e6-4745-a395-ca83483ed83c", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Create encoding and Sigma-Delta neuron\n", "\n", @@ -144,11 +163,15 @@ "cell_type": "code", "execution_count": 3, "id": "601ef3dd-59c7-4f80-b55c-dc70d2eff893", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "# Input process\n", - "inp = Input(data=inp_data)\n", + "inp = SourceRingBuffer(data=inp_data)\n", "inp_proj = Dense(weights=np.eye(1),\n", " num_message_bits=8)\n", "\n", @@ -176,7 +199,11 @@ { "cell_type": "markdown", "id": "7e830eb9-c503-44b8-816b-030dd1e8a597", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Record states\n", "\n", @@ -187,7 +214,11 @@ "cell_type": "code", "execution_count": 4, "id": "c42f0be4-79c8-4058-8cad-c553009c99fa", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "mon = Monitor()\n", @@ -205,7 +236,11 @@ { "cell_type": "markdown", "id": "28906da2-d392-4360-bcfc-0786ae2bd8c7", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Execution\n", "\n", @@ -216,7 +251,11 @@ "cell_type": "code", "execution_count": 5, "id": "79bed4bf-7c10-4933-8294-2277e92e0919", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "neuron.run(condition=RunSteps(num_steps=t_sim), \n", @@ -233,7 +272,11 @@ { "cell_type": "markdown", "id": "475cf815-0e2e-438b-b592-6a6a3204932e", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Plot\n", "\n", @@ -248,7 +291,11 @@ "cell_type": "code", "execution_count": 6, "id": "c4fa00cf-a964-4d35-abdc-1a03a49a6d38", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "data": { @@ -303,7 +350,11 @@ { "cell_type": "markdown", "id": 
"c3f7c1b2-4c3f-4151-9a57-cb2d6381e817", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Computational costs \n", "\n", @@ -314,7 +365,11 @@ "cell_type": "code", "execution_count": 7, "id": "c56bfd8a-1933-44fd-a2b2-b81623bce771", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -334,7 +389,11 @@ { "cell_type": "markdown", "id": "72fbcd55-1d37-4b29-8083-56027e4ce53d", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## How to learn more?\n", "\n", @@ -367,4 +426,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} +} \ No newline at end of file diff --git a/tutorials/in_depth/tutorial11_serialization.ipynb b/tutorials/in_depth/tutorial11_serialization.ipynb index 0c4362900..5f7dd57fc 100644 --- a/tutorials/in_depth/tutorial11_serialization.ipynb +++ b/tutorials/in_depth/tutorial11_serialization.ipynb @@ -2,7 +2,11 @@ "cells": [ { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "*Copyright (C) 2023 Intel Corporation*
\n", "*SPDX-License-Identifier: BSD-3-Clause*
\n", @@ -28,7 +32,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Interface\n", "\n", @@ -47,7 +55,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Example" ] @@ -55,21 +67,25 @@ { "cell_type": "code", "execution_count": 17, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "import numpy as np\n", "import tempfile\n", - "from lava.proc.lif.process import LIF\n", - "from lava.proc.dense.process import Dense\n", - "from lava.magma.core.run_configs import Loihi2SimCfg\n", - "from lava.magma.core.run_conditions import RunSteps\n", - "from lava.utils.serialization import save, load" + "from lava import LIF, Dense, Loihi2SimCfg, RunSteps, save, load" ] }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "A simple LIF->Dense->LIF network is configured." ] @@ -77,7 +93,11 @@ { "cell_type": "code", "execution_count": 18, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "pre_size = 2\n", @@ -99,7 +119,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "In order to store a compiled version of the Lava network an `executable` is needed. Calling `compile` on a Lava process compiles the process and all its connected processes and returns an `executable`." ] @@ -107,7 +131,11 @@ { "cell_type": "code", "execution_count": 19, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "# Create executable\n", @@ -116,7 +144,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "Storing all Lava processes and the corresponding executable in a file using `save`.\n", "> **ℹ️ Note** \n", @@ -128,7 +160,11 @@ { "cell_type": "code", "execution_count": 20, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "# Context manager is used only to ensure clean up\n", @@ -144,7 +180,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "The list of processes was assigned to `procs`. These process instances are not equal to the instances created initially, although they contain the same information and behave the same way. Also the loaded executable is not equal to the initially created executable.\n", "\n", @@ -154,7 +194,11 @@ { "cell_type": "code", "execution_count": 21, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -172,7 +216,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "Using the loaded `executable` a runtime needs to be created in order to execute the compiled Lava network. If `create_runtime` is not used, then the processes will be compiled again when calling `run`. For pre-compiled networks this should be avoided." 
] @@ -180,7 +228,11 @@ { "cell_type": "code", "execution_count": 22, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "# Create the runtime from the loaded executable\n", @@ -189,7 +241,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "Finally, the network is executed and the voltage and current of the input and output LIF neurons are stored." ] @@ -197,7 +253,11 @@ { "cell_type": "code", "execution_count": 23, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "num_steps = 5\n", @@ -217,7 +277,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "Showing the results." ] @@ -225,7 +289,11 @@ { "cell_type": "code", "execution_count": 24, - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -250,14 +318,22 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "A hierarchical process can also be stored to a file and all used sub-processes will be available after retrieving it again from a file. This is helpful to create a pre-compiled network, which can be accessed and executed through a single variable." ] }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## How to learn more?\n", "\n", @@ -289,4 +365,4 @@ }, "nbformat": 4, "nbformat_minor": 2 -} +} \ No newline at end of file