diff --git a/documentation/5/_downloads/1eed1518b8928bc24e721527ef1f9970/mnist_mb_classifier.ipynb b/documentation/5/_downloads/1eed1518b8928bc24e721527ef1f9970/mnist_mb_classifier.ipynb index 2928dc2d7..a61527000 100644 --- a/documentation/5/_downloads/1eed1518b8928bc24e721527ef1f9970/mnist_mb_classifier.ipynb +++ b/documentation/5/_downloads/1eed1518b8928bc24e721527ef1f9970/mnist_mb_classifier.ipynb @@ -35,7 +35,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.12" + "version": "3.8.0" } }, "nbformat": 4, diff --git a/documentation/5/_downloads/26866b29b5e6aaf47b3ba4c4dbee6718/userproject_jupyter.zip b/documentation/5/_downloads/26866b29b5e6aaf47b3ba4c4dbee6718/userproject_jupyter.zip index 6664f33ff..6d4cf95b6 100644 Binary files a/documentation/5/_downloads/26866b29b5e6aaf47b3ba4c4dbee6718/userproject_jupyter.zip and b/documentation/5/_downloads/26866b29b5e6aaf47b3ba4c4dbee6718/userproject_jupyter.zip differ diff --git a/documentation/5/_downloads/3e0a0a579abc8e5181823f00a5779b71/userproject_python.zip b/documentation/5/_downloads/3e0a0a579abc8e5181823f00a5779b71/userproject_python.zip index 851c38301..cacf56dfa 100644 Binary files a/documentation/5/_downloads/3e0a0a579abc8e5181823f00a5779b71/userproject_python.zip and b/documentation/5/_downloads/3e0a0a579abc8e5181823f00a5779b71/userproject_python.zip differ diff --git a/documentation/5/_downloads/64c7fcd62013f68609d54d02d741b663/potjans_microcircuit.ipynb b/documentation/5/_downloads/64c7fcd62013f68609d54d02d741b663/potjans_microcircuit.ipynb index 2ae0ae9fc..008f11e6a 100644 --- a/documentation/5/_downloads/64c7fcd62013f68609d54d02d741b663/potjans_microcircuit.ipynb +++ b/documentation/5/_downloads/64c7fcd62013f68609d54d02d741b663/potjans_microcircuit.ipynb @@ -35,7 +35,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.12" + "version": "3.8.0" } }, "nbformat": 4, diff --git a/documentation/5/_downloads/f57116a727438d4e47eb66f1b799aa48/superspike_demo.ipynb b/documentation/5/_downloads/f57116a727438d4e47eb66f1b799aa48/superspike_demo.ipynb index 8263a6d20..1ff964dcb 100644 --- a/documentation/5/_downloads/f57116a727438d4e47eb66f1b799aa48/superspike_demo.ipynb +++ b/documentation/5/_downloads/f57116a727438d4e47eb66f1b799aa48/superspike_demo.ipynb @@ -35,7 +35,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.12" + "version": "3.8.0" } }, "nbformat": 4, diff --git a/documentation/5/_sources/sg_execution_times.rst.txt b/documentation/5/_sources/sg_execution_times.rst.txt index a0875a4e2..4957555dd 100644 --- a/documentation/5/_sources/sg_execution_times.rst.txt +++ b/documentation/5/_sources/sg_execution_times.rst.txt @@ -32,12 +32,12 @@ Computation times * - Example - Time - Mem (MB) - * - :ref:`sphx_glr_userproject_mnist_mb_classifier.py` (``..\userproject\mnist_mb_classifier.py``) + * - :ref:`sphx_glr_userproject_mnist_mb_classifier.py` (``../userproject/mnist_mb_classifier.py``) - 00:00.000 - 0.0 - * - :ref:`sphx_glr_userproject_potjans_microcircuit.py` (``..\userproject\potjans_microcircuit.py``) + * - :ref:`sphx_glr_userproject_potjans_microcircuit_pygenn.py` (``../userproject/potjans_microcircuit_pygenn.py``) - 00:00.000 - 0.0 - * - :ref:`sphx_glr_userproject_superspike_demo.py` (``..\userproject\superspike_demo.py``) + * - :ref:`sphx_glr_userproject_superspike_demo.py` (``../userproject/superspike_demo.py``) - 00:00.000 - 0.0 diff --git 
a/documentation/5/_sources/source/pygenn.rst.txt b/documentation/5/_sources/source/pygenn.rst.txt index a1937c5ce..6875f8b1b 100644 --- a/documentation/5/_sources/source/pygenn.rst.txt +++ b/documentation/5/_sources/source/pygenn.rst.txt @@ -73,6 +73,30 @@ pygenn.init\_var\_snippets module :undoc-members: :show-inheritance: +pygenn.libgenn\_cuda\_backend\_dynamic module +--------------------------------------------- + +.. automodule:: pygenn.libgenn_cuda_backend_dynamic + :members: + :undoc-members: + :show-inheritance: + +pygenn.libgenn\_dynamic module +------------------------------ + +.. automodule:: pygenn.libgenn_dynamic + :members: + :undoc-members: + :show-inheritance: + +pygenn.libgenn\_single\_threaded\_cpu\_backend\_dynamic module +-------------------------------------------------------------- + +.. automodule:: pygenn.libgenn_single_threaded_cpu_backend_dynamic + :members: + :undoc-members: + :show-inheritance: + pygenn.model\_preprocessor module --------------------------------- diff --git a/documentation/5/_sources/userproject/mnist_mb_classifier.rst.txt b/documentation/5/_sources/userproject/mnist_mb_classifier.rst.txt index 1705ea0ba..b5e4e8cc6 100644 --- a/documentation/5/_sources/userproject/mnist_mb_classifier.rst.txt +++ b/documentation/5/_sources/userproject/mnist_mb_classifier.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "userproject\mnist_mb_classifier.py" +.. "userproject/mnist_mb_classifier.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html diff --git a/documentation/5/_sources/userproject/potjans_microcircuit.rst.txt b/documentation/5/_sources/userproject/potjans_microcircuit.rst.txt index cd828a02f..4a0be75c8 100644 --- a/documentation/5/_sources/userproject/potjans_microcircuit.rst.txt +++ b/documentation/5/_sources/userproject/potjans_microcircuit.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "userproject\potjans_microcircuit.py" +.. "userproject/potjans_microcircuit.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html diff --git a/documentation/5/_sources/userproject/superspike_demo.rst.txt b/documentation/5/_sources/userproject/superspike_demo.rst.txt index 4f260b334..816c74a21 100644 --- a/documentation/5/_sources/userproject/superspike_demo.rst.txt +++ b/documentation/5/_sources/userproject/superspike_demo.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "userproject\superspike_demo.py" +.. "userproject/superspike_demo.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html diff --git a/documentation/5/bibliography.html b/documentation/5/bibliography.html index 996da7dd8..cd0bee059 100644 --- a/documentation/5/bibliography.html +++ b/documentation/5/bibliography.html @@ -3,7 +3,7 @@
Computation times table (sg_execution_times.html), updated so that the Windows-style ..\userproject\ paths become ../userproject/ and the microcircuit entry is renamed from potjans_microcircuit.py to potjans_microcircuit_pygenn.py; all three entries report 00:00.000 and 0.0 MB:
MNIST classification using an insect-inspired mushroom body model (../userproject/mnist_mb_classifier.py)
sphx_glr_userproject_potjans_microcircuit_pygenn.py (../userproject/potjans_microcircuit_pygenn.py)
PyGeNN implementation of SuperSpike (../userproject/superspike_demo.py)
current_source_model (CurrentSourceModelBase | str) – current source model either as a string referencing a built-in model (see current_source_models) or an instance of CurrentSourceModelBase (for example returned by create_current_source_model())
pop (NeuronGroup) – neuron population to inject current into
params (Dict[str, int | float]) – parameter values for the current source model (see `Parameters`_)
vars (Dict[str, VarInit | int | float | ndarray | Sequence]) – initial variable values or initialisers for the current source model (see `Variables`_)
var_refs (Dict[str, VarReference]) – references to neuron variables in pop, typically created using create_var_ref() (see `Variables references`_)
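A minimal sketch of injecting current with the built-in "DC" model might look like the following; the model, population and current-source names are arbitrary and the parameter values purely illustrative::

    from pygenn import GeNNModel

    model = GeNNModel("float", "current_source_example")
    pop = model.add_neuron_population("pop", 10, "Izhikevich",
                                      {"a": 0.02, "b": 0.2, "c": -65.0, "d": 8.0},
                                      {"V": -65.0, "U": -13.0})

    # inject a constant 0.5 nA current into every neuron of "pop"
    # using the built-in "DC" current source model
    model.add_current_source("stim", "DC", pop, {"amp": 0.5})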
custom_connectivity_update_model (CustomConnectivityUpdateModelBase | str) – custom connectivity update model either as a string referencing a built-in model (see custom_connectivity_update_models) or an instance of CustomConnectivityUpdateModelBase (for example returned by create_custom_connectivity_update_model())
params (Dict[str, int | float]) – parameter values for the custom connectivity model (see `Parameters`_)
vars (Dict[str, VarInit | int | float | ndarray | Sequence]) – initial synaptic variable values or initialisers (see `Variables`_)
pre_vars (Dict[str, VarInit | int | float | ndarray | Sequence]) – initial presynaptic variable values or initialisers (see `Variables`_)
post_vars (Dict[str, VarInit | int | float | ndarray | Sequence]) – initial postsynaptic variable values or initialisers (see `Variables`_)
var_refs (Dict[str, WUVarReference]) – references to synaptic variables, typically created using create_wu_var_ref() (see `Variables references`_)
pre_var_refs (Dict[str, VarReference]) – references to presynaptic variables, typically created using create_var_ref() (see `Variables references`_)
post_var_refs (Dict[str, VarReference]) – references to postsynaptic variables, typically created using create_var_ref() (see `Variables references`_)
custom_update_model (CustomUpdateModelBase | str) – custom update model either as a string referencing a built-in model (see custom_update_models) or an instance of CustomUpdateModelBase (for example returned by create_custom_update_model())
params (Dict[str, int | float]) – parameter values for the custom update model (see `Parameters`_)
vars (Dict[str, VarInit | int | float | ndarray | Sequence]) – initial variable values or initialisers for the custom update model (see `Variables`_)
var_refs (Dict[str, VarReference] | Dict[str, WUVarReference]) – references to variables in other populations to access from this update, typically created using either create_var_ref() or create_wu_var_ref() (see `Variables references`_)
egp_refs (Dict[str, EGPReference]) – references to extra global parameters in other populations to access from this update, typically created using create_egp_ref() (see `Extra global parameter references`_)
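As an illustrative sketch of how these pieces fit together (the update-group name "Reset", the model name and the reset semantics are hypothetical, not part of the API), a custom update that resets a referenced membrane potential could be defined and added roughly as follows::

    from pygenn import GeNNModel, create_custom_update_model, create_var_ref

    model = GeNNModel("float", "custom_update_example")
    pop = model.add_neuron_population("pop", 100, "Izhikevich",
                                      {"a": 0.02, "b": 0.2, "c": -65.0, "d": 8.0},
                                      {"V": -65.0, "U": -13.0})

    # hypothetical custom update model: set the referenced variable to a parameter value
    reset_model = create_custom_update_model(
        "reset",
        params=["Vreset"],
        var_refs=[("V", "scalar")],
        update_code="V = Vreset;")

    # add an instance to the user-chosen "Reset" update group,
    # referencing the membrane potential of pop
    model.add_custom_update("reset_v", "Reset", reset_model,
                            {"Vreset": -65.0}, {}, {"V": create_var_ref(pop, "V")})

Once the model has been built and loaded, calling model.custom_update("Reset") should launch every custom update added to that group.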
neuron (NeuronModelBase | str) – neuron model either as a string referencing a built-in model (see neuron_models) or an instance of NeuronModelBase (for example returned by create_neuron_model())
params (Dict[str, int | float]) – parameter values for the neuron model (see `Parameters`_)
vars (Dict[str, VarInit | int | float | ndarray | Sequence]) – initial variable values or initialisers for the neuron model (see `Variables`_)
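For example, a population of 100 built-in "LIF" neurons could be added along the following lines (a sketch; the population name and parameter values are arbitrary)::

    from pygenn import GeNNModel

    model = GeNNModel("float", "neuron_example")

    lif_params = {"C": 1.0, "TauM": 20.0, "Vrest": -65.0, "Vreset": -65.0,
                  "Vthresh": -50.0, "Ioffset": 0.0, "TauRefrac": 5.0}
    lif_init = {"V": -65.0, "RefracTime": 0.0}

    # 100 neurons using the built-in "LIF" model
    pop = model.add_neuron_population("exc", 100, "LIF", lif_params, lif_init)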
Bases: pybind11_object
Flags defining different types of synaptic matrix connectivity
Members:
DENSE
BITMASK
SPARSE
PROCEDURAL
TOEPLITZ
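These flags correspond to the matrix type chosen when a synapse population is added. A rough sketch in the style of the bundled userprojects (population names, parameter values and the "SPARSE" matrix-type string are illustrative; check the API reference for the exact add_synapse_population signature)::

    from pygenn import (GeNNModel, init_postsynaptic,
                        init_sparse_connectivity, init_weight_update)

    model = GeNNModel("float", "synapse_example")
    izk_params = {"a": 0.02, "b": 0.2, "c": -65.0, "d": 8.0}
    izk_init = {"V": -65.0, "U": -13.0}
    pre = model.add_neuron_population("pre", 1000, "Izhikevich", izk_params, izk_init)
    post = model.add_neuron_population("post", 1000, "Izhikevich", izk_params, izk_init)

    # SPARSE connectivity with 10% connection probability and static, constant-weight synapses
    model.add_synapse_population(
        "pre_post", "SPARSE", pre, post,
        init_weight_update("StaticPulseConstantWeight", {"g": 0.1}),
        init_postsynaptic("ExpCurr", {"tau": 5.0}),
        init_sparse_connectivity("FixedProbability", {"prob": 0.1}))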
postsynaptic model either as a string referencing a built-in model or an instance of PostsynapticModelBase (for example returned by create_postsynaptic_model())
params (Dict[str, int | float]) – parameter values for the postsynaptic model (see `Parameters`_)
vars (Dict[str, VarInit | int | float | ndarray | Sequence]) – initial synaptic variable values or initialisers for the postsynaptic model (see `Variables`_)
var_refs (Dict[str, VarInit | int | float | ndarray | Sequence]) – references to postsynaptic neuron variables, typically created using create_var_ref() (see `Variables references`_)
sparse connectivity init snippet either as a string referencing a built-in snippet (see init_sparse_connectivity_snippets) or an instance of InitSparseConnectivitySnippetBase (for example returned by create_sparse_connect_init_snippet())
params (Dict[str, int | float]) – parameter values for the sparse connectivity init snippet (see `Parameters`_)
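For example (the probability value is arbitrary), the FixedProbability snippet described further down this page can be instantiated as::

    from pygenn import init_sparse_connectivity

    # connect each pair of pre- and postsynaptic neurons with probability 0.1
    conn_init = init_sparse_connectivity("FixedProbability", {"prob": 0.1})

The resulting initialiser is then passed to add_synapse_population, as in the sketch above.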
Toeplitz connectivity init snippet either as a string referencing a built-in snippet (see init_toeplitz_connectivity_snippets) or an instance of InitToeplitzConnectivitySnippetBase (for example returned by create_toeplitz_connect_init_snippet())
params – parameter values for the Toeplitz connectivity init snippet (see `Parameters`_)
variable init snippet either as a string referencing a built-in snippet (see init_var_snippets) or an instance of InitVarSnippetBase (for example returned by create_var_init_snippet())
params (Dict[str, int | float]) – parameter values for the variable init snippet (see `Parameters`_)
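For example, initial values can be drawn from the built-in Uniform snippet (a sketch; the min/max values are arbitrary)::

    from pygenn import init_var

    # draw each neuron's initial membrane potential uniformly from [-70, -50) mV
    v_init = init_var("Uniform", {"min": -70.0, "max": -50.0})

The resulting initialiser can then be used wherever an initial variable value is accepted, e.g. vars={"V": v_init}.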
weight update model either as a string referencing a built-in model (see weight_update_models) or an instance of WeightUpdateModelBase (for example returned by create_weight_update_model())
params (Dict[str, int | float]) – parameter values (see `Parameters`_)
vars (Dict[str, VarInit | int | float | ndarray | Sequence]) – initial synaptic variable values or initialisers (see `Variables`_)
pre_vars (Dict[str, VarInit | int | float | ndarray | Sequence]) – initial presynaptic variable values or initialisers (see `Variables`_)
post_vars (Dict[str, VarInit | int | float | ndarray | Sequence]) – initial postsynaptic variable values or initialisers (see `Variables`_)
pre_var_refs (Dict[str, VarReference]) – references to presynaptic neuron variables, typically created using create_var_ref() (see `Variables references`_)
post_var_refs (Dict[str, VarReference]) – references to postsynaptic neuron variables, typically created using create_var_ref() (see `Variables references`_)
DC source
It has a single parameter:
amp - amplitude of the current [nA]
Noisy current source with noise drawn from a normal distribution
It has 2 parameters:
mean - mean of the normal distribution [nA]
sd - standard deviation of the normal distribution [nA]
Current source for injecting a current equivalent to a population of Poisson spike sources, one-to-one connected with exponential synapses
It has 3 parameters:
weight - synaptic weight of the Poisson spikes [nA]
tauSyn - decay time constant [ms]
rate - mean firing rate [Hz]
Initialises convolutional connectivity. Row build state variables are used to convert the presynaptic neuron index to rows, columns and channels and, from these, to calculate the range of postsynaptic rows, columns and channels that connections will be made within. This sparse connectivity snippet does not support multiple threads per neuron.
Initialises connectivity with a fixed number of random synapses per row. The postsynaptic targets of the synapses can be initialised in parallel by sampling from the discrete uniform distribution. However, to sample connections in ascending order, we sample from the 1st order statistic of the uniform distribution – Beta[1, Npost] – essentially the next smallest value. In this special case this is equivalent to the exponential distribution which can be sampled in constant time using the inversion method.
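The "next smallest value" idea can be illustrated in plain NumPy: the minimum of the remaining uniform samples on [x, 1) has a closed-form CDF, so the targets of one row come out already sorted. This is only a sketch of the idea, not the snippet's actual GPU code::

    import numpy as np

    def sample_row_ascending(num_post, num_synapses, rng=None):
        """Sample num_synapses postsynaptic targets (with replacement) in ascending order."""
        rng = np.random.default_rng() if rng is None else rng
        targets = np.empty(num_synapses, dtype=np.int64)
        x = 0.0
        for i in range(num_synapses):
            remaining = num_synapses - i
            # minimum of the `remaining` still-to-be-drawn uniforms on [x, 1),
            # sampled in one step by inverting its CDF
            x += (1.0 - x) * (1.0 - rng.uniform() ** (1.0 / remaining))
            targets[i] = min(int(x * num_post), num_post - 1)
        return targets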
Initialises connectivity with a fixed number of random synapses per column. There is no need for ordering here, so it is fine to sample directly from the uniform distribution.
Initialises connectivity with a total number of random synapses. The first stage in using this connectivity is to determine how many of the total synapses end up in each row. This can be determined by sampling from the multinomial distribution. However, this operation cannot be efficiently parallelised so must be performed on the host and the result passed as an extra global parameter array. Once the length of each row is determined, the postsynaptic targets of the synapses can be initialised in parallel by sampling from the discrete uniform distribution. However, to sample connections in ascending order, we sample from the 1st order statistic of the uniform distribution – Beta[1, Npost] – essentially the next smallest value. In this special case this is equivalent to the exponential distribution which can be sampled in constant time using the inversion method.
Initialises connectivity with a fixed probability of a synapse existing between a pair of pre- and postsynaptic neurons. Whether a synapse exists between a pair of pre- and postsynaptic neurons can be modelled using a Bernoulli distribution. While this COULD be sampled directly by repeatedly drawing from the uniform distribution, this is inefficient. Instead we sample from the geometric distribution which describes “the probability distribution of the number of Bernoulli trials needed to get one success” – essentially the distribution of the ‘gaps’ between synapses. We do this using the “inversion method” described by Devroye (1986) – essentially inverting the CDF of the equivalent continuous distribution (in this case the exponential distribution).
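The geometric "gap sampling" can be sketched in NumPy as follows; this illustrates the inversion method described above, not the snippet's actual code::

    import numpy as np

    def sample_row_bernoulli_gaps(num_post, prob, rng=None):
        """Indices of postsynaptic targets for one row, each connected with probability prob."""
        rng = np.random.default_rng() if rng is None else rng
        targets = []
        post = -1
        while True:
            # geometric 'gap' to the next connection, sampled by inverting the CDF
            # of the equivalent continuous (exponential) distribution
            u = 1.0 - rng.uniform()  # uniform on (0, 1]
            post += 1 + int(np.log(u) / np.log(1.0 - prob))
            if post >= num_post:
                return np.array(targets, dtype=np.int64)
            targets.append(post)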
Initialises connectivity with a fixed probability of a synapse existing between a pair of pre- and postsynaptic neurons. This version ensures there are no autapses - connections between neurons with the same id - and so should be used for recurrent connections. Whether a synapse exists between a pair of pre- and postsynaptic neurons can be modelled using a Bernoulli distribution. While this COULD be sampled directly by repeatedly drawing from the uniform distribution, this is inefficient. Instead we sample from the geometric distribution which describes “the probability distribution of the number of Bernoulli trials needed to get one success” – essentially the distribution of the ‘gaps’ between synapses. We do this using the “inversion method” described by Devroye (1986) – essentially inverting the CDF of the equivalent continuous distribution (in this case the exponential distribution).
Initialises connectivity to a ‘one-to-one’ diagonal matrix
Used to mark connectivity as uninitialised - no initialisation code will be run
Izhikevich neuron with fixed parameters (Izhikevich, 2003).
It is usually described as
\[
\frac{dV}{dt} = 0.04 V^2 + 5V + 140 - U + I, \qquad
\frac{dU}{dt} = a(bV - U),
\]
where I is an external input current and the voltage V is reset to parameter c and U incremented by parameter d whenever V >= 30 mV. This is paired with a particular integration procedure of two 0.5 ms Euler time steps for the V equation followed by one 1 ms time step of the U equation (see the sketch after the parameter list below). Because of its popularity we provide this model in this form here even though, due to the details of the usual implementation, it is strictly speaking inconsistent with the displayed equations.
Variables are:
V - Membrane potential
U - Membrane recovery variable
Parameters are:
a - time scale of U
b - sensitivity of U
c - after-spike reset value of V
d - after-spike reset value of U
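A plain NumPy sketch of this update scheme for one simulation step of DT = 1 ms (two 0.5 ms Euler sub-steps for V, one 1 ms step for U, then the reset); it illustrates the scheme described above rather than reproducing GeNN's generated code::

    import numpy as np

    def izhikevich_step(V, U, I, a, b, c, d):
        """Advance the Izhikevich state (V, U) by one 1 ms simulation step."""
        # two 0.5 ms Euler sub-steps for the V equation
        for _ in range(2):
            V = V + 0.5 * (0.04 * V * V + 5.0 * V + 140.0 - U + I)
        # one 1 ms Euler step for the U equation
        U = U + a * (b * V - U)
        # reset whenever V crosses 30 mV
        spiked = V >= 30.0
        V = np.where(spiked, c, V)
        U = np.where(spiked, U + d, U)
        return V, U, spiked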
Izhikevich neuron with variable parameters (Izhikevich, 2003).
This is the same model as NeuronModels::Izhikevich but the parameters are defined as “variables” in order to allow users to provide individual values for each individual neuron instead of fixed values for all neurons across the population.
Accordingly, the model has the variables:
V - Membrane potential
U - Membrane recovery variable
a - time scale of U
b - sensitivity of U
c - after-spike reset value of V
d - after-spike reset value of U
and no parameters.
Poisson neurons
Poisson neurons have a constant membrane potential (Vrest) unless they are activated randomly to the Vspike value if (t - spikeTime) > trefract.
It has 2 variables:
V - Membrane potential (mV)
spikeTime - Time at which the neuron spiked for the last time (ms)
and 4 parameters:
trefract - Refractory period (ms)
tspike - Duration of spike (ms)
Vspike - Membrane potential at spike (mV)
Vrest - Membrane potential at rest (mV)
Note: The initial values array for the Poisson type needs two entries, for V and spikeTime, and the parameter array needs four entries, for trefract, tspike, Vspike and Vrest, in that order.
Note: The refractory period and the spike duration both start at the beginning of the spike. That means that the refractory period should be longer than or equal to the spike duration. If this is not the case, undefined model behaviour occurs.
It has two extra global parameters:
firingProb - an array of firing probabilities / average rates; this can extend to \(n \cdot N\), where \(N\) is the number of neurons, for \(n > 0\) firing patterns
offset - an unsigned integer that points to the start of the currently used input pattern; typically taking values of \(i \cdot N\), \(0 \leq i < n\)
Note: This model uses a linear approximation for the probability of firing a spike in a given time step of size DT, i.e. the probability of firing is \(\lambda\) times DT: \(p = \lambda \Delta t\), where \(\lambda\) corresponds to the value of the relevant entry of firingProb. This approximation is usually very good, especially for typical, quite small time steps and moderate firing rates. However, it is worth noting that the approximation becomes poor for very high firing rates and large time steps.
Poisson neurons
This neuron model emits spikes according to the Poisson distribution with a mean firing rate as determined by its single parameter.
It has 1 state variable:
timeStepToSpike - Number of timesteps to next spike
and 1 parameter:
rate - Mean firing rate (Hz)
Note: Internally this samples from the exponential distribution using the C++11 <random> library on the CPU and by transforming the uniform distribution, generated using cuRAND, with a natural log on the GPU.
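The same inversion method can be sketched in NumPy, assuming the rate is given in Hz and the simulation time step in ms::

    import numpy as np

    def draw_timesteps_to_spike(rate_hz, dt_ms, size, rng=None):
        """Timesteps until each neuron's next spike, from exponential inter-spike intervals."""
        rng = np.random.default_rng() if rng is None else rng
        isi_ms = -np.log(1.0 - rng.uniform(size=size)) * 1000.0 / rate_hz
        return isi_ms / dt_ms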
Rulkov Map neuron
The RulkovMap type is a map-based neuron model based on (Rulkov, 2002) but in the 1-dimensional map form used in (Nowotny et al., 2005):
\[
V(t+\Delta t) = \left\{ \begin{array}{ll}
V_{\rm spike} \Big(\frac{\alpha V_{\rm spike}}{V_{\rm spike}-V(t)-\beta I_{\rm syn}} + y \Big) & V(t) \leq 0 \\
V_{\rm spike} \big(\alpha + y\big) & V(t) \leq V_{\rm spike} \big(\alpha + y\big) \;\&\; V(t-\Delta t) \leq 0 \\
-V_{\rm spike} & {\rm otherwise}
\end{array} \right.
\]
Note: The RulkovMap type only works as intended for the single time step size of DT = 0.5.
The RulkovMap type has 2 variables:
V - the membrane potential
preV - the membrane potential at the previous time step
and it has 4 parameters:
Vspike - determines the amplitude of spikes, typically -60mV
alpha - determines the shape of the iteration function, typically \(\alpha = 3\)
y - “shift / excitation” parameter, also determines the iteration function; originally, y = -2.468
beta - roughly speaking equivalent to the input resistance, i.e. it regulates the scale of the input into the neuron, typically \(\beta = 2.64\, {\rm M}\Omega\)
Note: The initial values array for the RulkovMap type needs two entries for V and preV and the parameter array needs four entries for Vspike, alpha, y and beta, in that order.
Empty neuron which allows setting spikes from external sources. This model does not contain any update code and can be used to implement the equivalent of a SpikeGeneratorGroup in Brian or a SpikeSourceArray in PyNN.
Spike source array
A neuron which reads spike times from a global spikes array.
It has 2 variables:
startSpike - Index of the next spike in the global array
endSpike - Index of the spike next to the last in the global array
and 1 extra global parameter:
spikeTimes - Array with all spike times
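A sketch of how the three pieces fit together, given a Python list of per-neuron spike-time arrays (the variable names here are illustrative, not part of the API)::

    import numpy as np

    # one array of spike times (ms) per neuron
    spike_trains = [np.array([10.0, 50.0]), np.array([20.0]), np.array([])]

    # endSpike[i] is one past the last spike of neuron i in the concatenated array;
    # startSpike[i] is the index of its first (i.e. next) spike
    end_spike = np.cumsum([len(t) for t in spike_trains])
    start_spike = np.concatenate(([0], end_spike[:-1]))
    spike_times = np.concatenate(spike_trains)

These arrays would then provide the initial values of startSpike and endSpike and the spikeTimes extra global parameter of a SpikeSourceArray population.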
Hodgkin-Huxley neurons with Traub & Miles algorithm.
This conductance-based model has been taken from (Traub & Miles, 1991) and can be described by the equations:
\[
\begin{aligned}
C \frac{dV}{dt} &= -I_{\rm Na} - I_{\rm K} - I_{\rm leak} - I_M - I_{i,DC} - I_{i,{\rm syn}} - I_i, \\
I_{\rm Na}(t) &= g_{\rm Na}\, m_i(t)^3 h_i(t)\,(V_i(t)-E_{\rm Na}), \\
I_{\rm K}(t) &= g_{\rm K}\, n_i(t)^4\,(V_i(t)-E_{\rm K}), \\
\frac{dy(t)}{dt} &= \alpha_y(V(t))\,(1-y(t)) - \beta_y(V(t))\, y(t),
\end{aligned}
\]
where \(y_i = m, h, n\), and
\[
\begin{aligned}
\alpha_n &= 0.032(-50-V)/\big(\exp((-50-V)/5)-1\big), &
\beta_n &= 0.5\exp((-55-V)/40), \\
\alpha_m &= 0.32(-52-V)/\big(\exp((-52-V)/4)-1\big), &
\beta_m &= 0.28(25+V)/\big(\exp((25+V)/5)-1\big), \\
\alpha_h &= 0.128\exp((-48-V)/18), &
\beta_h &= 4/\big(\exp((-25-V)/5)+1\big),
\end{aligned}
\]
and typical parameters are \(C = 0.143\) nF, \(g_{\rm leak} = 0.02672\) \(\mu\)S, \(E_{\rm leak} = -63.563\) mV, \(g_{\rm Na} = 7.15\) \(\mu\)S, \(E_{\rm Na} = 50\) mV, \(g_{\rm K} = 1.43\) \(\mu\)S, \(E_{\rm K} = -95\) mV.
It has 4 variables:
V - membrane potential E
m - probability for Na channel activation m
h - probability for not Na channel blocking h
n - probability for K channel activation n
and 7 parameters:
gNa - Na conductance in 1/(mOhms * cm^2)
ENa - Na equilibrium potential in mV
gK - K conductance in 1/(mOhms * cm^2)
EK - K equilibrium potential in mV
gl - Leak conductance in 1/(mOhms * cm^2)
El - Leak equilibrium potential in mV
C - Membrane capacitance density in muF/cm^2
Note: Internally, the ordinary differential equations defining the model are integrated with a linear Euler algorithm and GeNN integrates 25 internal time steps for each neuron for each network time step, i.e. if the network is simulated at DT = 0.1 ms, then the neurons are integrated with a linear Euler algorithm with lDT = 0.004 ms. This variant uses IF statements to check for a value at which a singularity would be hit. If so, the value calculated by L'Hôpital's rule is used.
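An illustrative NumPy sketch of this inner-loop Euler integration (25 sub-steps of lDT = DT/25), using the rate functions and the "typical" parameter values quoted above, with conductances in µS, capacitance in nF, voltages in mV and time in ms. It is not GeNN's generated code; it omits the I_M, DC and additional current terms as well as the singularity handling::

    import numpy as np

    def traub_miles_step(V, m, h, n, i_syn, dt=0.1, n_sub=25,
                         c=0.143, g_na=7.15, e_na=50.0, g_k=1.43, e_k=-95.0,
                         g_l=0.02672, e_l=-63.563):
        """Advance the Traub & Miles HH state by one network time step of dt ms."""
        ldt = dt / n_sub
        for _ in range(n_sub):
            i_na = g_na * m ** 3 * h * (V - e_na)
            i_k = g_k * n ** 4 * (V - e_k)
            i_leak = g_l * (V - e_l)
            dV = (-i_na - i_k - i_leak - i_syn) / c

            # rate functions from the equations above (1/ms, V in mV)
            alpha_m = 0.32 * (-52.0 - V) / (np.exp((-52.0 - V) / 4.0) - 1.0)
            beta_m = 0.28 * (25.0 + V) / (np.exp((25.0 + V) / 5.0) - 1.0)
            alpha_h = 0.128 * np.exp((-48.0 - V) / 18.0)
            beta_h = 4.0 / (np.exp((-25.0 - V) / 5.0) + 1.0)
            alpha_n = 0.032 * (-50.0 - V) / (np.exp((-50.0 - V) / 5.0) - 1.0)
            beta_n = 0.5 * np.exp((-55.0 - V) / 40.0)

            V = V + ldt * dV
            m = m + ldt * (alpha_m * (1.0 - m) - beta_m * m)
            h = h + ldt * (alpha_h * (1.0 - h) - beta_h * h)
            n = n + ldt * (alpha_n * (1.0 - n) - beta_n * n)
        return V, m, h, n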
Hodgkin-Huxley neurons with Traub & Miles algorithm.
Uses a workaround to avoid the singularity: adding the minimum numerical value of the floating point precision used.
Note: See NeuronModels::TraubMiles for variable and parameter names.
Hodgkin-Huxley neurons with Traub & Miles algorithm: the original fast implementation, using 25 inner iterations. There are singularities in this model which can easily be hit in float precision.
Note: See NeuronModels::TraubMiles for variable and parameter names.
Hodgkin-Huxley neurons with Traub & Miles algorithm. Same as the standard TraubMiles model but the number of inner loops can be set using a parameter.
Note: See NeuronModels::TraubMiles for variable and parameter names.
usage: potjans_microcircuit [-h] [--duration DURATION] [--neuron-scale NEURON_SCALE] [--connectivity-scale CONNECTIVITY_SCALE] [--kernel-profiling] [--procedural-connectivity] [--save-data]
diff --git a/documentation/5/userproject/sg_execution_times.html b/documentation/5/userproject/sg_execution_times.html
index e2784a77d..487dbdaa4 100644
--- a/documentation/5/userproject/sg_execution_times.html
+++ b/documentation/5/userproject/sg_execution_times.html
@@ -3,7 +3,7 @@
Computation times — PyGeNN documentation
diff --git a/documentation/5/userproject/superspike_demo.html b/documentation/5/userproject/superspike_demo.html
index 6165b5117..8639c5800 100644
--- a/documentation/5/userproject/superspike_demo.html
+++ b/documentation/5/userproject/superspike_demo.html
@@ -3,7 +3,7 @@
PyGeNN implementation of SuperSpike — PyGeNN documentation
@@ -60,9 +60,7 @@ Navigation
learning rule to learn the transformation between fixed spike trains of
Poisson noise and a target spiking output (by default the Radcliffe Camera at Oxford).
This example can be used as follows:
usage: superspike_demo [-h] --record-trial [RECORD_TRIAL [RECORD_TRIAL ...]] [--target-file TARGET_FILE] [--num-trials NUM_TRIALS] [--kernel-profiling] [--save-data]