diff --git a/Dockerfile b/Dockerfile
index 60a7a5f366154f8b2b2dac580220f129ba4a3ae9..f8cd9105c05d6937222ebe69cbe2376b427b4525 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -70,7 +70,6 @@ RUN /opt/conda/bin/pip install rickpy
 RUN /opt/conda/bin/pip install git+https://github.com/scidash/sciunit@dev
 RUN echo "Redo 1"
 WORKDIR $HOME/work
-RUN echo "redo"
 RUN git clone -b unittest https://github.com/russelljjarvis/NeuronunitOpt
 WORKDIR NeuronunitOpt
 RUN /opt/conda/bin/pip install -e .
@@ -79,16 +78,15 @@ WORKDIR $HOME/work
 ADD . SpikingNeuralNetworks
 WORKDIR SpikingNeuralNetworks/examples
 RUN julia -e "using Pkg;Pkg.clone(\"https://github.com/gsoleilhac/NSGAII.jl\")"
-
-
-# RUN python simple_with_injection.py
 RUN julia install.jl
-#RUN julia lhhneuron.jl
-USER root chown -R jovyan $HOME
+USER root
+RUN chown -R jovyan $HOME
 RUN conda clean --all -f -y && \
     fix-permissions $CONDA_DIR && \
     fix-permissions /home/$NB_USER
-USER $NB_UID
\ No newline at end of file
+
+
+USER $NB_UID
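A quick way to sanity-check the image built from this Dockerfile is to confirm, inside the container, that the Julia and Python halves can see each other. A minimal sketch, assuming install.jl provides PyCall and SpikingNeuralNetworks (neither installation is shown in this diff); neuronunit comes from the pip installs above:

```julia
# Interoperability smoke test (sketch). Assumes PyCall and
# SpikingNeuralNetworks are installed by install.jl, which this diff
# does not show; neuronunit is installed via pip in the Dockerfile.
using PyCall, SpikingNeuralNetworks

neuronunit = pyimport("neuronunit")  # reaches into the conda Python
println("neuronunit imported from: ", neuronunit.__file__)
println("SNN.HH available: ", isdefined(SpikingNeuralNetworks, :HH))
```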
diff --git a/README.md b/README.md
index c872a62c06e3b3f3f0ac7be9df8c565425c9303b..1d18d6241f8b3b297fad8ced7f99884a08bd5e20 100644
--- a/README.md
+++ b/README.md
@@ -1,48 +1,40 @@
 # A Julia port of [NeuronUnitOpt](https://github.com/russelljjarvis/NeuronunitOpt)
-This project glues together three repositories (two Julia, one python) with the result that spiking neural [models expressed in Julia](https://github.com/AStupidBear/SpikingNeuralNetworks.jl) to facilitate python neuronunit/sciunit testing of neuron model fitness and julia genetic algorith optimization.
+A toolchain for optimizing spiking neuronal [models evaluated in Julia](https://github.com/AStupidBear/SpikingNeuralNetworks.jl) against experimental electrophysiology data, using [genetic algorithm optimization](https://github.com/gsoleilhac/NSGAII.jl/).
 [](https://travis-ci.org/russelljjarvis/SpikingNeuralNetworks.jl)
 
+Currently Julia/Python interoperability is fragile; this should improve soon.
+# For now, Docker is used
+The Docker image pins compatible Julia and Python environments.
+
 # Installation:
 ```
-sudo pip install neuronunit-opt==0.1
+git clone https://github.com/russelljjarvis/NeuronUnitOpt.jl
+cd NeuronUnitOpt.jl
+docker build -t nuopt .
+```
+
+# Getting Started:
+```
+docker run -it nuopt /bin/bash
+cd work/SpikingNeuralNetworks/examples/
+julia
+include("lhhneuron.jl")
 ```
 # Description
-* A very Julia+Python model-data optimization toolchain derived from ***neuronunit*** and tightly interfaced with other community supported modules, levarging standard workflows in: feature extraction, data scraping and model simulation.
+* A Julia+Python model-data optimization toolchain derived from ***neuronunit*** and tightly interfaced with other community-supported modules, leveraging standard workflows in feature extraction, data scraping and model simulation.
 * A collection of compact, parsimonious biological neuronal models, implemented in community supported python modules and tightly integrated into a fast data-driven multi-objective optimization routine (deap, numba, dask etc).
-* Neo, elephant, interoperability ships with the most minimal install. A wide range of feature extraction, data source, and interfaces and simulator backend support: NeuroML-DB, NeurML, Allen-SDK, PyNN and NEURON are provided with an easy to use [Docker container]().
+A wide range of interfaces and simulator backend support: Allen-SDK, PyNN and NEURON are provided with an [easy to use Docker container](https://github.com/russelljjarvis/docker-stacks-returned/blob/scidash/efel_dm/Dockerfile).
 
 # Advantages
 * Appeals to interest in AP shape, electrophysiology and Rheobase current injection value.
-*
-* Relatively fast, but with little administrative overhead. Optionally no C/NEURON building required.
-<img src="docs/numba.png" width="200" height="150" /> <img src="docs/dask_logo.png" width="175" height="125" /> <img src="docs/deap.png" width="200" height="150" />
-
+* Relatively fast, but with little administrative overhead.
 * Feature extraction routines: AllenSDK, Druckman, Elephant.
-* Simulator Backends: brian2, Allen-GLIF, NEURON, PyNN
-
-``` BASH
-docker pull russelljarvis/efel_allen_dm
-docker run russelljarvis/efel_allen_dm neuronunit/examples/use_edt.py
-```
-
-# Optimization specific:
- - Assumptions, the environment for running this notebook was arrived at by building a dedicated docker file.
- - https://cloud.docker.com/repository/registry-1.docker.io/russelljarvis/nuo
- or more recently:
- https://cloud.docker.com/u/russelljjarvis/repository/docker/russelljarvis/network_unit_opt
- You can run use dockerhub to get the appropriate file, and launch this notebook using Kitematic.
-
-# Import libraries
-To keep the standard running version of minimal and memory efficient, not all available packages are loaded by default. In the cell below I import a mixture common python modules, and custom developed modules associated with NeuronUnit (NU) development
-#!pip install dask distributed seaborn
-#!bash after_install.sh
+* Simulator Backends: Julia
 
 # Broad Aims:
diff --git a/examples/call_from_python.py b/examples/call_from_python.py
deleted file mode 100644
index 7580283b6cdd142da10b870f1e9cf21164441d93..0000000000000000000000000000000000000000
--- a/examples/call_from_python.py
+++ /dev/null
@@ -1,6 +0,0 @@
-import julia
-j = julia.Julia()
-j.eval('using Pkg; Pkg.add("SNN")')
-x1 = j.include("hh_neuron.jl")
-print(x1)
-x2 = j.include("hh_net.jl")
diff --git a/examples/chain.jl b/examples/chain.jl
deleted file mode 100644
index a5b918ee896f4020c79f93dfe92360866dc74e7f..0000000000000000000000000000000000000000
--- a/examples/chain.jl
+++ /dev/null
@@ -1,13 +0,0 @@
-using Plots, SNN
-
-N = 3
-E = SNN.IF(;N = N)
-EE = SNN.SpikingSynapse(E, E, :ge; σ=0.5, p=0.8)
-for n in 1:(N - 1)
-    SNN.connect!(EE, n, n + 1, 50)
-end
-E.I[1] = 30
-
-SNN.monitor(E, [(:v, [1, N])])
-SNN.sim!([E], [EE]; duration = 100ms)
-SNN.vecplot(E, :v) |> display
diff --git a/examples/rate_net.jl b/examples/rate_net.jl
deleted file mode 100644
index 86165ba9c08f38927efc1e1bec2fbe06266cab6f..0000000000000000000000000000000000000000
--- a/examples/rate_net.jl
+++ /dev/null
@@ -1,8 +0,0 @@
-using Plots, SNN
-
-G = SNN.Rate(;N = 100)
-GG = SNN.RateSynapse(G, G; σ = 1.2, p = 1.0)
-SNN.monitor(G, [(:r, [1, 50, 100])])
-
-SNN.sim!([G], [GG]; duration = 100ms)
-SNN.vecplot(G, :r) |> display
diff --git a/hh.jl b/hh.jl
deleted file mode 100644
index 8043135cd1e53a67aedea6f54b2c7476424d33ce..0000000000000000000000000000000000000000
--- a/hh.jl
+++ /dev/null
@@ -1,78 +0,0 @@
-using PyCall
-neuronunit = pyimport("neuronunit")
-#om = neuronunit#.optimisation.optimization.management
-#neuronunit = pyimport("neuronunit")
-
-#] add https://github.com/AStupidBear/SpikingNeuralNetworks.jl
-
-# Pkg.add("https://github.com/AStupidBear/SpikingNeuralNetworks.jl")
-
-
-using GR
-
-
-import SpikingNeuralNetworks
-
-
-
-using Unitful
-
-using Plots, SpikingNeuralNetworks
-SNN = SpikingNeuralNetworks
-E = SNN.HH(;N = 1)
-E.I = [0.003]
-
-SNN.monitor(E, [:v])
-SNN.sim!([E], []; dt = 0.01, duration = 102)
-
-
-SNN.vecplot(E, :v) |> display
-
-#=
-using Pkg
-try
-    using UnicodePlotsi
-    #using NSGAII
-catch
-    Pkg.add("UnicodePlots")
-    #Pkg.add("NSGAII")
-    using UnicodePlots
-    #using NSGAII
-end
-
-using NSGAII, vOptSpecific, vOptGeneric, GLPK, GLPKMathProgInterface, PyPlot
-m = vModel(solver = GLPKSolverMIP())
-id = load2UKP("2KP500-1A.DAT")
-
-p1, p2, w, c = id.P1, id.P2, id.W, id.C
-
-@variable(m, x[1:length(p1)], Bin)
-@addobjective(m, Max, dot(x, p1))
-@addobjective(m, Max, dot(x, p2))
-@constraint(m, dot(x, w) <= c)
-
-function plot_pop(P, titre)
-    clf()
-    ax = gca()
-    ax[:set_xlim]([15800, 20570])
-    ax[:set_ylim]([15630, 20877])
-    p = plot(map(x -> x.y[1], P), map(x -> x.y[2], P), "bo", markersize=1, label="nsga")
-    title(titre)
-    !isinteractive() && show()
-    sleep(0.1)
-end
-
-nsga(100, 5000, m, fplot = p->plot_pop(p, "without seed"), plotevery=500)
-
-solve(m, method=:dichotomy)
-
-Y_N = getY_N(m)
-seed = [getvalue(x, 1), getvalue(x, length(Y_N)), getvalue(x, length(Y_N)÷2)]
-nsga(100, 5000, m, fplot = p->plot_pop(p, "with seed"), seed=seed, plotevery=500)
-
-f1 = map(y -> y[1], Y_N)
-f2 = map(y -> y[2], Y_N)
-xlabel("z1") ; ylabel("z2")
-p = plot(f1,f2,"kx", markersize = "2", label="exact")
-legend() ; display(p)
-=#
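The deleted hh.jl above mixed a working single-neuron demo with a large commented-out NSGAII experiment. For reference, its live portion distills to the runnable sketch below, assuming the SpikingNeuralNetworks API the file used (SNN.HH, SNN.monitor, SNN.sim!, SNN.vecplot) is unchanged:

```julia
using Plots, SpikingNeuralNetworks
const SNN = SpikingNeuralNetworks

E = SNN.HH(; N = 1)    # a single Hodgkin-Huxley neuron
E.I = [0.003]          # constant injected current
SNN.monitor(E, [:v])   # record the membrane potential

SNN.sim!([E], []; dt = 0.01, duration = 102)
SNN.vecplot(E, :v) |> display
```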
diff --git a/src/main.jl b/src/main.jl
index 63d950907f0766b2d1c2e696f01ec4d3fbe706c6..256ad5c070f91249a4198754ac9bf35b4a4f4005 100644
--- a/src/main.jl
+++ b/src/main.jl
@@ -1,63 +1,28 @@
 function sim!(P, C, dt)
     integrate!(P[1], P[1].param, SNNFloat(dt))
     record!(P[1])
-    #for c in C
-    #    forward!(c, c.param)
-    #    record!(c)
-    #end
 end
 
 function sim!(P, C; dt = 0.25ms, simulation_duration = 1300ms, delay = 300ms, stimulus_duration = 1000ms)
     temp = deepcopy(P[1].I)
-    cnt = 0
-    for t = 0ms:dt:simulation_duration
-        cnt+=1
-    end
     size = simulation_duration/dt
     cnt1 = 0
     for t = 0ms:dt:simulation_duration
         cnt1+=1
-        #@show(cnt1)
         if cnt1 < delay/dt
-            #3*size/4 # if cnt1 > delay
             P[1].I[1] = 0.0
         end
         if cnt1 > (delay/dt + stimulus_duration/dt)
             P[1].I[1] = 0.0
-            #convert(Array{Float32,1},0.0)
         end
-        if (delay/dt) < cnt1 < (stimulus_duration/dt)
+        if (delay/dt) < cnt1 < (delay/dt + stimulus_duration/dt)
             P[1].I[1] = maximum(temp[1])
         end
         sim!(P, C, dt)
-
     end
-    #delta = stimulus_duration-delay
 end
-
-#=
-
-function sim!(P, C, dt)
-    for p in P
-        integrate!(p, p.param, SNNFloat(dt))
-        record!(p)
-    end
-    for c in C
-        forward!(c, c.param)
-        record!(c)
-    end
-end
-
-function sim!(P, C; dt = 0.1ms, duration = 10ms)
-    for t = 0ms:dt:duration
-        sim!(P, C, dt)
-    end
-end
-=#
 function train!(P, C, dt, t = 0)
     for p in P
         integrate!(p, p.param, SNNFloat(dt))
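The keyword form of sim! above implements a step-current protocol: zero current during the initial delay, a constant amplitude for stimulus_duration, and zero afterwards. The same windowing logic in isolation, as a sketch (step_current is a hypothetical helper, not part of the patch):

```julia
# Hypothetical helper mirroring the stimulus window applied by sim!.
# Times are in ms; `amp` plays the role of the amplitude sim! stores in `temp`.
function step_current(t, amp; delay = 300.0, stimulus_duration = 1000.0)
    return (delay <= t < delay + stimulus_duration) ? amp : 0.0
end

@assert step_current(100.0, 0.5)  == 0.0   # before onset: no current
@assert step_current(500.0, 0.5)  == 0.5   # inside the window: full amplitude
@assert step_current(1299.0, 0.5) == 0.5   # still inside, just before offset
@assert step_current(1300.0, 0.5) == 0.0   # after delay + stimulus_duration
```

In sim! itself the same window is expressed in integration steps (cnt1) rather than in time, which is why every bound is divided by dt.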